xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision 65c318630123fcf2b6f491bf4d02a5cad3031d20)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static uint16_t		 pf_qname2qid(const char *);
120 static void		 pf_qid_unref(uint16_t);
121 #endif /* ALTQ */
122 static int		 pf_begin_rules(u_int32_t *, int, const char *);
123 static int		 pf_rollback_rules(u_int32_t, int, char *);
124 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
125 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
126 static void		 pf_hash_rule(struct pf_krule *);
127 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
128 static int		 pf_commit_rules(u_int32_t, int, char *);
129 static int		 pf_addr_setup(struct pf_kruleset *,
130 			    struct pf_addr_wrap *, sa_family_t);
131 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
132 			    struct pf_src_node *);
133 #ifdef ALTQ
134 static int		 pf_export_kaltq(struct pf_altq *,
135 			    struct pfioc_altq_v1 *, size_t);
136 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
137 			    struct pf_altq *, size_t);
138 #endif /* ALTQ */
139 
140 VNET_DEFINE(struct pf_krule,	pf_default_rule);
141 
142 static __inline int             pf_krule_compare(struct pf_krule *,
143 				    struct pf_krule *);
144 
145 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
146 
147 #ifdef ALTQ
148 VNET_DEFINE_STATIC(int,		pf_altq_running);
149 #define	V_pf_altq_running	VNET(pf_altq_running)
150 #endif
151 
152 #define	TAGID_MAX	 50000
153 struct pf_tagname {
154 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
155 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
156 	char			name[PF_TAG_NAME_SIZE];
157 	uint16_t		tag;
158 	int			ref;
159 };
160 
161 struct pf_tagset {
162 	TAILQ_HEAD(, pf_tagname)	*namehash;
163 	TAILQ_HEAD(, pf_tagname)	*taghash;
164 	unsigned int			 mask;
165 	uint32_t			 seed;
166 	BITSET_DEFINE(, TAGID_MAX)	 avail;
167 };
168 
169 VNET_DEFINE(struct pf_tagset, pf_tags);
170 #define	V_pf_tags	VNET(pf_tags)
171 static unsigned int	pf_rule_tag_hashsize;
172 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
173 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
174     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
175     "Size of pf(4) rule tag hashtable");
176 
177 #ifdef ALTQ
178 VNET_DEFINE(struct pf_tagset, pf_qids);
179 #define	V_pf_qids	VNET(pf_qids)
180 static unsigned int	pf_queue_tag_hashsize;
181 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
182 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
183     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
184     "Size of pf(4) queue tag hashtable");
185 #endif
186 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
187 #define	V_pf_tag_z		 VNET(pf_tag_z)
188 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
189 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
190 
191 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
192 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
193 #endif
194 
195 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
196 #define V_pf_filter_local	VNET(pf_filter_local)
197 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
198     &VNET_NAME(pf_filter_local), false,
199     "Enable filtering for packets delivered to local network stack");
200 
201 #ifdef PF_DEFAULT_TO_DROP
202 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
203 #else
204 VNET_DEFINE_STATIC(bool, default_to_drop);
205 #endif
206 #define	V_default_to_drop VNET(default_to_drop)
207 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
208     &VNET_NAME(default_to_drop), false,
209     "Make the default rule drop all packets.");
210 
211 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
212 			    unsigned int);
213 static void		 pf_cleanup_tagset(struct pf_tagset *);
214 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
215 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
216 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
217 static u_int16_t	 pf_tagname2tag(const char *);
218 static void		 tag_unref(struct pf_tagset *, u_int16_t);
219 
220 struct cdev *pf_dev;
221 
222 /*
223  * XXX - These are new and need to be checked when moving to a new version
224  */
225 static void		 pf_clear_all_states(void);
226 static int		 pf_killstates_row(struct pf_kstate_kill *,
227 			    struct pf_idhash *);
228 static int		 pf_killstates_nv(struct pfioc_nv *);
229 static int		 pf_clearstates_nv(struct pfioc_nv *);
230 static int		 pf_getstate(struct pfioc_nv *);
231 static int		 pf_getstatus(struct pfioc_nv *);
232 static int		 pf_clear_tables(void);
233 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
234 static int		 pf_keepcounters(struct pfioc_nv *);
235 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
236 
237 /*
238  * Wrapper functions for pfil(9) hooks
239  */
240 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
241     int flags, void *ruleset __unused, struct inpcb *inp);
242 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
243     int flags, void *ruleset __unused, struct inpcb *inp);
244 #ifdef INET
245 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
246     int flags, void *ruleset __unused, struct inpcb *inp);
247 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
248     int flags, void *ruleset __unused, struct inpcb *inp);
249 #endif
250 #ifdef INET6
251 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
252     int flags, void *ruleset __unused, struct inpcb *inp);
253 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
254     int flags, void *ruleset __unused, struct inpcb *inp);
255 #endif
256 
257 static void		hook_pf_eth(void);
258 static void		hook_pf(void);
259 static void		dehook_pf_eth(void);
260 static void		dehook_pf(void);
261 static int		shutdown_pf(void);
262 static int		pf_load(void);
263 static void		pf_unload(void);
264 
265 static struct cdevsw pf_cdevsw = {
266 	.d_ioctl =	pfioctl,
267 	.d_name =	PF_NAME,
268 	.d_version =	D_VERSION,
269 };
270 
271 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
272 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
273 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
274 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
275 
276 /*
277  * We need a flag that is neither hooked nor running to know when
278  * the VNET is "valid".  We primarily need this to control (global)
279  * external event, e.g., eventhandlers.
280  */
281 VNET_DEFINE(int, pf_vnet_active);
282 #define V_pf_vnet_active	VNET(pf_vnet_active)
283 
284 int pf_end_threads;
285 struct proc *pf_purge_proc;
286 
287 VNET_DEFINE(struct rmlock, pf_rules_lock);
288 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
289 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
290 struct sx			pf_end_lock;
291 
292 /* pfsync */
293 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
294 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
295 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
296 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
297 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
298 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
299 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
300 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
301 
302 /* pflog */
303 pflog_packet_t			*pflog_packet_ptr = NULL;
304 
305 /*
306  * Copy a user-provided string, returning an error if truncation would occur.
307  * Avoid scanning past "sz" bytes in the source string since there's no
308  * guarantee that it's nul-terminated.
309  */
310 static int
311 pf_user_strcpy(char *dst, const char *src, size_t sz)
312 {
313 	if (strnlen(src, sz) == sz)
314 		return (EINVAL);
315 	(void)strlcpy(dst, src, sz);
316 	return (0);
317 }
318 
319 static void
320 pfattach_vnet(void)
321 {
322 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
323 
324 	bzero(&V_pf_status, sizeof(V_pf_status));
325 
326 	pf_initialize();
327 	pfr_initialize();
328 	pfi_initialize_vnet();
329 	pf_normalize_init();
330 	pf_syncookies_init();
331 
332 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
333 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
334 	V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
335 	V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;
336 
337 	RB_INIT(&V_pf_anchors);
338 	pf_init_kruleset(&pf_main_ruleset);
339 
340 	pf_init_keth(V_pf_keth);
341 
342 	/* default rule should never be garbage collected */
343 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
344 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
345 	V_pf_default_rule.nr = (uint32_t)-1;
346 	V_pf_default_rule.rtableid = -1;
347 
348 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
349 	for (int i = 0; i < 2; i++) {
350 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
351 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
352 	}
353 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
354 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
355 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
356 		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
357 
358 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
359 	    M_WAITOK | M_ZERO);
360 
361 #ifdef PF_WANT_32_TO_64_COUNTER
362 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
363 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
364 	PF_RULES_WLOCK();
365 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
366 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
367 	V_pf_allrulecount++;
368 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
369 	PF_RULES_WUNLOCK();
370 #endif
371 
372 	/* initialize default timeouts */
373 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
374 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
375 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
376 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
377 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
378 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
379 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
380 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
381 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
382 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
383 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
384 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
385 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
386 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
387 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
389 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
390 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
391 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
392 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
393 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
394 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
395 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
396 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
397 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
398 
399 	V_pf_status.debug = PF_DEBUG_URGENT;
400 	/*
401 	 * XXX This is different from OpenBSD, where reassembly is enabled by
402 	 * default. In FreeBSD we expect people to still use scrub rules and
403 	 * switch to the new syntax later. Only once they switch must they
404 	 * explicitly enable reassembly. We could change the default once the
405 	 * scrub rule functionality is hopefully removed some day in the future.
406 	 */
407 	V_pf_status.reass = 0;
408 
409 	V_pf_pfil_hooked = false;
410 	V_pf_pfil_eth_hooked = false;
411 
412 	/* XXX do our best to avoid a conflict */
413 	V_pf_status.hostid = arc4random();
414 
415 	for (int i = 0; i < PFRES_MAX; i++)
416 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < KLCNT_MAX; i++)
418 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
419 	for (int i = 0; i < FCNT_MAX; i++)
420 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
421 	for (int i = 0; i < SCNT_MAX; i++)
422 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
423 
424 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
425 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
426 		/* XXXGL: leaked all above. */
427 		return;
428 }
429 
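/*
 * Look up the NAT, redirect or route pool of a single rule.  The rule is
 * selected by anchor, action and number (or the last rule when r_last is
 * set) from either the active or the inactive ruleset, optionally
 * validating the caller's ticket first.
 */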
430 static struct pf_kpool *
431 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
432     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
433     u_int8_t check_ticket, int which)
434 {
435 	struct pf_kruleset	*ruleset;
436 	struct pf_krule		*rule;
437 	int			 rs_num;
438 
439 	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);
440 
441 	ruleset = pf_find_kruleset(anchor);
442 	if (ruleset == NULL)
443 		return (NULL);
444 	rs_num = pf_get_ruleset_number(rule_action);
445 	if (rs_num >= PF_RULESET_MAX)
446 		return (NULL);
447 	if (active) {
448 		if (check_ticket && ticket !=
449 		    ruleset->rules[rs_num].active.ticket)
450 			return (NULL);
451 		if (r_last)
452 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
453 			    pf_krulequeue);
454 		else
455 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
456 	} else {
457 		if (check_ticket && ticket !=
458 		    ruleset->rules[rs_num].inactive.ticket)
459 			return (NULL);
460 		if (r_last)
461 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
462 			    pf_krulequeue);
463 		else
464 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
465 	}
466 	if (!r_last) {
467 		while ((rule != NULL) && (rule->nr != rule_number))
468 			rule = TAILQ_NEXT(rule, entries);
469 	}
470 	if (rule == NULL)
471 		return (NULL);
472 
473 	switch (which) {
474 	case PF_RDR:
475 		return (&rule->rdr);
476 	case PF_NAT:
477 		return (&rule->nat);
478 	case PF_RT:
479 		return (&rule->route);
480 	default:
481 		panic("Unknown pool type %d", which);
482 	}
483 }
484 
485 static void
486 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
487 {
488 	struct pf_kpooladdr	*mv_pool_pa;
489 
490 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
491 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
492 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
493 	}
494 }
495 
496 static void
497 pf_empty_kpool(struct pf_kpalist *poola)
498 {
499 	struct pf_kpooladdr *pa;
500 
501 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
502 		switch (pa->addr.type) {
503 		case PF_ADDR_DYNIFTL:
504 			pfi_dynaddr_remove(pa->addr.p.dyn);
505 			break;
506 		case PF_ADDR_TABLE:
507 			/* XXX: this could be unfinished pooladdr on pabuf */
508 			if (pa->addr.p.tbl != NULL)
509 				pfr_detach_table(pa->addr.p.tbl);
510 			break;
511 		}
512 		if (pa->kif)
513 			pfi_kkif_unref(pa->kif);
514 		TAILQ_REMOVE(poola, pa, entries);
515 		free(pa, M_PFRULE);
516 	}
517 }
518 
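/*
 * Unlink a rule from its queue and park it on V_pf_unlinked_rules, where
 * the purge thread frees it once no states reference it anymore.
 */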
519 static void
520 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
521 {
522 
523 	PF_RULES_WASSERT();
524 	PF_UNLNKDRULES_ASSERT();
525 
526 	TAILQ_REMOVE(rulequeue, rule, entries);
527 
528 	rule->rule_ref |= PFRULE_REFS;
529 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
530 }
531 
532 static void
533 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
534 {
535 
536 	PF_RULES_WASSERT();
537 
538 	PF_UNLNKDRULES_LOCK();
539 	pf_unlink_rule_locked(rulequeue, rule);
540 	PF_UNLNKDRULES_UNLOCK();
541 }
542 
543 static void
544 pf_free_eth_rule(struct pf_keth_rule *rule)
545 {
546 	PF_RULES_WASSERT();
547 
548 	if (rule == NULL)
549 		return;
550 
551 	if (rule->tag)
552 		tag_unref(&V_pf_tags, rule->tag);
553 	if (rule->match_tag)
554 		tag_unref(&V_pf_tags, rule->match_tag);
555 #ifdef ALTQ
556 	pf_qid_unref(rule->qid);
557 #endif
558 
559 	if (rule->bridge_to)
560 		pfi_kkif_unref(rule->bridge_to);
561 	if (rule->kif)
562 		pfi_kkif_unref(rule->kif);
563 
564 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
565 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
566 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
567 		pfr_detach_table(rule->ipdst.addr.p.tbl);
568 
569 	counter_u64_free(rule->evaluations);
570 	for (int i = 0; i < 2; i++) {
571 		counter_u64_free(rule->packets[i]);
572 		counter_u64_free(rule->bytes[i]);
573 	}
574 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
575 	pf_keth_anchor_remove(rule);
576 
577 	free(rule, M_PFRULE);
578 }
579 
580 void
581 pf_free_rule(struct pf_krule *rule)
582 {
583 
584 	PF_RULES_WASSERT();
585 	PF_CONFIG_ASSERT();
586 
587 	if (rule->tag)
588 		tag_unref(&V_pf_tags, rule->tag);
589 	if (rule->match_tag)
590 		tag_unref(&V_pf_tags, rule->match_tag);
591 #ifdef ALTQ
592 	if (rule->pqid != rule->qid)
593 		pf_qid_unref(rule->pqid);
594 	pf_qid_unref(rule->qid);
595 #endif
596 	switch (rule->src.addr.type) {
597 	case PF_ADDR_DYNIFTL:
598 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
599 		break;
600 	case PF_ADDR_TABLE:
601 		pfr_detach_table(rule->src.addr.p.tbl);
602 		break;
603 	}
604 	switch (rule->dst.addr.type) {
605 	case PF_ADDR_DYNIFTL:
606 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
607 		break;
608 	case PF_ADDR_TABLE:
609 		pfr_detach_table(rule->dst.addr.p.tbl);
610 		break;
611 	}
612 	if (rule->overload_tbl)
613 		pfr_detach_table(rule->overload_tbl);
614 	if (rule->kif)
615 		pfi_kkif_unref(rule->kif);
616 	if (rule->rcv_kif)
617 		pfi_kkif_unref(rule->rcv_kif);
618 	pf_remove_kanchor(rule);
619 	pf_empty_kpool(&rule->rdr.list);
620 	pf_empty_kpool(&rule->nat.list);
621 	pf_empty_kpool(&rule->route.list);
622 
623 	pf_krule_free(rule);
624 }
625 
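/*
 * Set up the name and tag hash tables of a tag set.  The table size is
 * taken from the loader tunable if it is a power of two, otherwise the
 * compiled-in default is used.  All tag IDs start out available.
 */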
626 static void
627 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
628     unsigned int default_size)
629 {
630 	unsigned int i;
631 	unsigned int hashsize;
632 
633 	if (*tunable_size == 0 || !powerof2(*tunable_size))
634 		*tunable_size = default_size;
635 
636 	hashsize = *tunable_size;
637 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
638 	    M_WAITOK);
639 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
640 	    M_WAITOK);
641 	ts->mask = hashsize - 1;
642 	ts->seed = arc4random();
643 	for (i = 0; i < hashsize; i++) {
644 		TAILQ_INIT(&ts->namehash[i]);
645 		TAILQ_INIT(&ts->taghash[i]);
646 	}
647 	BIT_FILL(TAGID_MAX, &ts->avail);
648 }
649 
650 static void
651 pf_cleanup_tagset(struct pf_tagset *ts)
652 {
653 	unsigned int i;
654 	unsigned int hashsize;
655 	struct pf_tagname *t, *tmp;
656 
657 	/*
658 	 * Only need to clean up one of the hashes as each tag is hashed
659 	 * into each table.
660 	 */
661 	hashsize = ts->mask + 1;
662 	for (i = 0; i < hashsize; i++)
663 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
664 			uma_zfree(V_pf_tag_z, t);
665 
666 	free(ts->namehash, M_PFHASH);
667 	free(ts->taghash, M_PFHASH);
668 }
669 
670 static uint16_t
671 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
672 {
673 	size_t len;
674 
675 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
676 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
677 }
678 
679 static uint16_t
680 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
681 {
682 
683 	return (tag & ts->mask);
684 }
685 
686 static u_int16_t
687 tagname2tag(struct pf_tagset *ts, const char *tagname)
688 {
689 	struct pf_tagname	*tag;
690 	u_int32_t		 index;
691 	u_int16_t		 new_tagid;
692 
693 	PF_RULES_WASSERT();
694 
695 	index = tagname2hashindex(ts, tagname);
696 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
697 		if (strcmp(tagname, tag->name) == 0) {
698 			tag->ref++;
699 			return (tag->tag);
700 		}
701 
702 	/*
703 	 * new entry
704 	 *
705 	 * to avoid fragmentation, we do a linear search from the beginning
706 	 * and take the first free slot we find.
707 	 */
708 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
709 	/*
710 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
711 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
712 	 * set.  It may also return a bit number greater than TAGID_MAX due
713 	 * to rounding of the number of bits in the vector up to a multiple
714 	 * of the vector word size at declaration/allocation time.
715 	 */
716 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
717 		return (0);
718 
719 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
720 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
721 
722 	/* allocate and fill new struct pf_tagname */
723 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
724 	if (tag == NULL)
725 		return (0);
726 	strlcpy(tag->name, tagname, sizeof(tag->name));
727 	tag->tag = new_tagid;
728 	tag->ref = 1;
729 
730 	/* Insert into namehash */
731 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
732 
733 	/* Insert into taghash */
734 	index = tag2hashindex(ts, new_tagid);
735 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
736 
737 	return (tag->tag);
738 }
739 
740 static void
741 tag_unref(struct pf_tagset *ts, u_int16_t tag)
742 {
743 	struct pf_tagname	*t;
744 	uint16_t		 index;
745 
746 	PF_RULES_WASSERT();
747 
748 	index = tag2hashindex(ts, tag);
749 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
750 		if (tag == t->tag) {
751 			if (--t->ref == 0) {
752 				TAILQ_REMOVE(&ts->taghash[index], t,
753 				    taghash_entries);
754 				index = tagname2hashindex(ts, t->name);
755 				TAILQ_REMOVE(&ts->namehash[index], t,
756 				    namehash_entries);
757 				/* Bits are 0-based for BIT_SET() */
758 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
759 				uma_zfree(V_pf_tag_z, t);
760 			}
761 			break;
762 		}
763 }
764 
765 static uint16_t
766 pf_tagname2tag(const char *tagname)
767 {
768 	return (tagname2tag(&V_pf_tags, tagname));
769 }
770 
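/*
 * Ethernet rulesets are replaced transactionally: pf_begin_eth() flushes
 * the inactive list and hands out a ticket, new rules are then loaded
 * into the inactive list, and pf_commit_eth() swaps it with the active
 * list.  pf_rollback_eth() discards an open transaction instead.
 */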
771 static int
772 pf_begin_eth(uint32_t *ticket, const char *anchor)
773 {
774 	struct pf_keth_rule *rule, *tmp;
775 	struct pf_keth_ruleset *rs;
776 
777 	PF_RULES_WASSERT();
778 
779 	rs = pf_find_or_create_keth_ruleset(anchor);
780 	if (rs == NULL)
781 		return (EINVAL);
782 
783 	/* Purge old inactive rules. */
784 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
785 	    tmp) {
786 		TAILQ_REMOVE(rs->inactive.rules, rule,
787 		    entries);
788 		pf_free_eth_rule(rule);
789 	}
790 
791 	*ticket = ++rs->inactive.ticket;
792 	rs->inactive.open = 1;
793 
794 	return (0);
795 }
796 
797 static int
798 pf_rollback_eth(uint32_t ticket, const char *anchor)
799 {
800 	struct pf_keth_rule *rule, *tmp;
801 	struct pf_keth_ruleset *rs;
802 
803 	PF_RULES_WASSERT();
804 
805 	rs = pf_find_keth_ruleset(anchor);
806 	if (rs == NULL)
807 		return (EINVAL);
808 
809 	if (!rs->inactive.open ||
810 	    ticket != rs->inactive.ticket)
811 		return (0);
812 
813 	/* Purge old inactive rules. */
814 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
815 	    tmp) {
816 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
817 		pf_free_eth_rule(rule);
818 	}
819 
820 	rs->inactive.open = 0;
821 
822 	pf_remove_if_empty_keth_ruleset(rs);
823 
824 	return (0);
825 }
826 
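/*
 * Skip steps let the rule evaluator jump over a run of consecutive rules
 * that share the same value for one criterion: if that criterion does not
 * match, the whole run can be skipped at once.
 */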
827 #define	PF_SET_SKIP_STEPS(i)					\
828 	do {							\
829 		while (head[i] != cur) {			\
830 			head[i]->skip[i].ptr = cur;		\
831 			head[i] = TAILQ_NEXT(head[i], entries);	\
832 		}						\
833 	} while (0)
834 
835 static void
836 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
837 {
838 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
839 	int i;
840 
841 	cur = TAILQ_FIRST(rules);
842 	prev = cur;
843 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
844 		head[i] = cur;
845 	while (cur != NULL) {
846 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
847 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
848 		if (cur->direction != prev->direction)
849 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
850 		if (cur->proto != prev->proto)
851 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
852 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
854 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
855 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
856 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
857 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
858 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
859 		if (cur->ipdst.neg != prev->ipdst.neg ||
860 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
861 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
862 
863 		prev = cur;
864 		cur = TAILQ_NEXT(cur, entries);
865 	}
866 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
867 		PF_SET_SKIP_STEPS(i);
868 }
869 
870 static int
871 pf_commit_eth(uint32_t ticket, const char *anchor)
872 {
873 	struct pf_keth_ruleq *rules;
874 	struct pf_keth_ruleset *rs;
875 
876 	rs = pf_find_keth_ruleset(anchor);
877 	if (rs == NULL) {
878 		return (EINVAL);
879 	}
880 
881 	if (!rs->inactive.open ||
882 	    ticket != rs->inactive.ticket)
883 		return (EBUSY);
884 
885 	PF_RULES_WASSERT();
886 
887 	pf_eth_calc_skip_steps(rs->inactive.rules);
888 
889 	rules = rs->active.rules;
890 	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
891 	rs->inactive.rules = rules;
892 	rs->inactive.ticket = rs->active.ticket;
893 
894 	return (pf_rollback_eth(rs->inactive.ticket,
895 	    rs->anchor ? rs->anchor->path : ""));
896 }
897 
898 #ifdef ALTQ
899 static uint16_t
900 pf_qname2qid(const char *qname)
901 {
902 	return (tagname2tag(&V_pf_qids, qname));
903 }
904 
905 static void
906 pf_qid_unref(uint16_t qid)
907 {
908 	tag_unref(&V_pf_qids, qid);
909 }
910 
911 static int
912 pf_begin_altq(u_int32_t *ticket)
913 {
914 	struct pf_altq	*altq, *tmp;
915 	int		 error = 0;
916 
917 	PF_RULES_WASSERT();
918 
919 	/* Purge the old altq lists */
920 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
921 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
922 			/* detach and destroy the discipline */
923 			error = altq_remove(altq);
924 		}
925 		free(altq, M_PFALTQ);
926 	}
927 	TAILQ_INIT(V_pf_altq_ifs_inactive);
928 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
929 		pf_qid_unref(altq->qid);
930 		free(altq, M_PFALTQ);
931 	}
932 	TAILQ_INIT(V_pf_altqs_inactive);
933 	if (error)
934 		return (error);
935 	*ticket = ++V_ticket_altqs_inactive;
936 	V_altqs_inactive_open = 1;
937 	return (0);
938 }
939 
940 static int
941 pf_rollback_altq(u_int32_t ticket)
942 {
943 	struct pf_altq	*altq, *tmp;
944 	int		 error = 0;
945 
946 	PF_RULES_WASSERT();
947 
948 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
949 		return (0);
950 	/* Purge the old altq lists */
951 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
952 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
953 			/* detach and destroy the discipline */
954 			error = altq_remove(altq);
955 		}
956 		free(altq, M_PFALTQ);
957 	}
958 	TAILQ_INIT(V_pf_altq_ifs_inactive);
959 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
960 		pf_qid_unref(altq->qid);
961 		free(altq, M_PFALTQ);
962 	}
963 	TAILQ_INIT(V_pf_altqs_inactive);
964 	V_altqs_inactive_open = 0;
965 	return (error);
966 }
967 
968 static int
969 pf_commit_altq(u_int32_t ticket)
970 {
971 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
972 	struct pf_altq		*altq, *tmp;
973 	int			 err, error = 0;
974 
975 	PF_RULES_WASSERT();
976 
977 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
978 		return (EBUSY);
979 
980 	/* swap altqs, keep the old. */
981 	old_altqs = V_pf_altqs_active;
982 	old_altq_ifs = V_pf_altq_ifs_active;
983 	V_pf_altqs_active = V_pf_altqs_inactive;
984 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
985 	V_pf_altqs_inactive = old_altqs;
986 	V_pf_altq_ifs_inactive = old_altq_ifs;
987 	V_ticket_altqs_active = V_ticket_altqs_inactive;
988 
989 	/* Attach new disciplines */
990 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
991 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
992 			/* attach the discipline */
993 			error = altq_pfattach(altq);
994 			if (error == 0 && V_pf_altq_running)
995 				error = pf_enable_altq(altq);
996 			if (error != 0)
997 				return (error);
998 		}
999 	}
1000 
1001 	/* Purge the old altq lists */
1002 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1003 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1004 			/* detach and destroy the discipline */
1005 			if (V_pf_altq_running)
1006 				error = pf_disable_altq(altq);
1007 			err = altq_pfdetach(altq);
1008 			if (err != 0 && error == 0)
1009 				error = err;
1010 			err = altq_remove(altq);
1011 			if (err != 0 && error == 0)
1012 				error = err;
1013 		}
1014 		free(altq, M_PFALTQ);
1015 	}
1016 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1017 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1018 		pf_qid_unref(altq->qid);
1019 		free(altq, M_PFALTQ);
1020 	}
1021 	TAILQ_INIT(V_pf_altqs_inactive);
1022 
1023 	V_altqs_inactive_open = 0;
1024 	return (error);
1025 }
1026 
1027 static int
1028 pf_enable_altq(struct pf_altq *altq)
1029 {
1030 	struct ifnet		*ifp;
1031 	struct tb_profile	 tb;
1032 	int			 error = 0;
1033 
1034 	if ((ifp = ifunit(altq->ifname)) == NULL)
1035 		return (EINVAL);
1036 
1037 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1038 		error = altq_enable(&ifp->if_snd);
1039 
1040 	/* set tokenbucket regulator */
1041 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1042 		tb.rate = altq->ifbandwidth;
1043 		tb.depth = altq->tbrsize;
1044 		error = tbr_set(&ifp->if_snd, &tb);
1045 	}
1046 
1047 	return (error);
1048 }
1049 
1050 static int
1051 pf_disable_altq(struct pf_altq *altq)
1052 {
1053 	struct ifnet		*ifp;
1054 	struct tb_profile	 tb;
1055 	int			 error;
1056 
1057 	if ((ifp = ifunit(altq->ifname)) == NULL)
1058 		return (EINVAL);
1059 
1060 	/*
1061 	 * When the discipline is no longer referenced, it has been overridden
1062 	 * by a new one.  If so, just return.
1063 	 */
1064 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1065 		return (0);
1066 
1067 	error = altq_disable(&ifp->if_snd);
1068 
1069 	if (error == 0) {
1070 		/* clear tokenbucket regulator */
1071 		tb.rate = 0;
1072 		error = tbr_set(&ifp->if_snd, &tb);
1073 	}
1074 
1075 	return (error);
1076 }
1077 
1078 static int
1079 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1080     struct pf_altq *altq)
1081 {
1082 	struct ifnet	*ifp1;
1083 	int		 error = 0;
1084 
1085 	/* Deactivate the interface in question */
1086 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1087 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1088 	    (remove && ifp1 == ifp)) {
1089 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1090 	} else {
1091 		error = altq_add(ifp1, altq);
1092 
1093 		if (ticket != V_ticket_altqs_inactive)
1094 			error = EBUSY;
1095 
1096 		if (error)
1097 			free(altq, M_PFALTQ);
1098 	}
1099 
1100 	return (error);
1101 }
1102 
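/*
 * Rebuild the ALTQ configuration when an interface appears or goes away:
 * copy the active queues into a fresh transaction, marking queues whose
 * interface is gone as removed, and commit the result.
 */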
1103 void
1104 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1105 {
1106 	struct pf_altq	*a1, *a2, *a3;
1107 	u_int32_t	 ticket;
1108 	int		 error = 0;
1109 
1110 	/*
1111 	 * No need to re-evaluate the configuration for events on interfaces
1112 	 * that do not support ALTQ, as it's not possible for such
1113 	 * interfaces to be part of the configuration.
1114 	 */
1115 	if (!ALTQ_IS_READY(&ifp->if_snd))
1116 		return;
1117 
1118 	/* Interrupt userland queue modifications */
1119 	if (V_altqs_inactive_open)
1120 		pf_rollback_altq(V_ticket_altqs_inactive);
1121 
1122 	/* Start new altq ruleset */
1123 	if (pf_begin_altq(&ticket))
1124 		return;
1125 
1126 	/* Copy the current active set */
1127 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1128 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1129 		if (a2 == NULL) {
1130 			error = ENOMEM;
1131 			break;
1132 		}
1133 		bcopy(a1, a2, sizeof(struct pf_altq));
1134 
1135 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1136 		if (error)
1137 			break;
1138 
1139 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1140 	}
1141 	if (error)
1142 		goto out;
1143 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1144 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1145 		if (a2 == NULL) {
1146 			error = ENOMEM;
1147 			break;
1148 		}
1149 		bcopy(a1, a2, sizeof(struct pf_altq));
1150 
1151 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1152 			error = EBUSY;
1153 			free(a2, M_PFALTQ);
1154 			break;
1155 		}
1156 		a2->altq_disc = NULL;
1157 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1158 			if (strncmp(a3->ifname, a2->ifname,
1159 				IFNAMSIZ) == 0) {
1160 				a2->altq_disc = a3->altq_disc;
1161 				break;
1162 			}
1163 		}
1164 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1165 		if (error)
1166 			break;
1167 
1168 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1169 	}
1170 
1171 out:
1172 	if (error != 0)
1173 		pf_rollback_altq(ticket);
1174 	else
1175 		pf_commit_altq(ticket);
1176 }
1177 #endif /* ALTQ */
1178 
1179 static struct pf_krule_global *
1180 pf_rule_tree_alloc(int flags)
1181 {
1182 	struct pf_krule_global *tree;
1183 
1184 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1185 	if (tree == NULL)
1186 		return (NULL);
1187 	RB_INIT(tree);
1188 	return (tree);
1189 }
1190 
1191 static void
1192 pf_rule_tree_free(struct pf_krule_global *tree)
1193 {
1194 
1195 	free(tree, M_TEMP);
1196 }
1197 
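/*
 * Open a ruleset transaction: flush the inactive queue, install a fresh
 * lookup tree for it and return a ticket that must accompany every change
 * until the transaction is committed or rolled back.
 */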
1198 static int
1199 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1200 {
1201 	struct pf_krule_global *tree;
1202 	struct pf_kruleset	*rs;
1203 	struct pf_krule		*rule;
1204 
1205 	PF_RULES_WASSERT();
1206 
1207 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1208 		return (EINVAL);
1209 	tree = pf_rule_tree_alloc(M_NOWAIT);
1210 	if (tree == NULL)
1211 		return (ENOMEM);
1212 	rs = pf_find_or_create_kruleset(anchor);
1213 	if (rs == NULL) {
1214 		free(tree, M_TEMP);
1215 		return (EINVAL);
1216 	}
1217 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1218 	rs->rules[rs_num].inactive.tree = tree;
1219 
1220 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1221 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1222 		rs->rules[rs_num].inactive.rcount--;
1223 	}
1224 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1225 	rs->rules[rs_num].inactive.open = 1;
1226 	return (0);
1227 }
1228 
1229 static int
1230 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1231 {
1232 	struct pf_kruleset	*rs;
1233 	struct pf_krule		*rule;
1234 
1235 	PF_RULES_WASSERT();
1236 
1237 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1238 		return (EINVAL);
1239 	rs = pf_find_kruleset(anchor);
1240 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1241 	    rs->rules[rs_num].inactive.ticket != ticket)
1242 		return (0);
1243 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1244 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1245 		rs->rules[rs_num].inactive.rcount--;
1246 	}
1247 	rs->rules[rs_num].inactive.open = 0;
1248 	return (0);
1249 }
1250 
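/*
 * Helpers for hashing rule fields.  Multi-byte integers are converted to
 * network byte order first, so the resulting checksum is identical on
 * little- and big-endian machines and can be compared between pfsync
 * peers.
 */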
1251 #define PF_MD5_UPD(st, elm)						\
1252 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1253 
1254 #define PF_MD5_UPD_STR(st, elm)						\
1255 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1256 
1257 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1258 		(stor) = htonl((st)->elm);				\
1259 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1260 } while (0)
1261 
1262 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1263 		(stor) = htons((st)->elm);				\
1264 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1265 } while (0)
1266 
1267 static void
1268 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1269 {
1270 	PF_MD5_UPD(pfr, addr.type);
1271 	switch (pfr->addr.type) {
1272 		case PF_ADDR_DYNIFTL:
1273 			PF_MD5_UPD(pfr, addr.v.ifname);
1274 			PF_MD5_UPD(pfr, addr.iflags);
1275 			break;
1276 		case PF_ADDR_TABLE:
1277 			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
1278 			    strlen(PF_OPTIMIZER_TABLE_PFX)))
1279 				PF_MD5_UPD(pfr, addr.v.tblname);
1280 			break;
1281 		case PF_ADDR_ADDRMASK:
1282 			/* XXX ignore af? */
1283 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1284 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1285 			break;
1286 	}
1287 
1288 	PF_MD5_UPD(pfr, port[0]);
1289 	PF_MD5_UPD(pfr, port[1]);
1290 	PF_MD5_UPD(pfr, neg);
1291 	PF_MD5_UPD(pfr, port_op);
1292 }
1293 
1294 static void
1295 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1296 {
1297 	u_int16_t x;
1298 	u_int32_t y;
1299 
1300 	pf_hash_rule_addr(ctx, &rule->src);
1301 	pf_hash_rule_addr(ctx, &rule->dst);
1302 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1303 		PF_MD5_UPD_STR(rule, label[i]);
1304 	PF_MD5_UPD_STR(rule, ifname);
1305 	PF_MD5_UPD_STR(rule, rcv_ifname);
1306 	PF_MD5_UPD_STR(rule, match_tagname);
1307 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1308 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1309 	PF_MD5_UPD_HTONL(rule, prob, y);
1310 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1311 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1312 	PF_MD5_UPD(rule, uid.op);
1313 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1314 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1315 	PF_MD5_UPD(rule, gid.op);
1316 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1317 	PF_MD5_UPD(rule, action);
1318 	PF_MD5_UPD(rule, direction);
1319 	PF_MD5_UPD(rule, af);
1320 	PF_MD5_UPD(rule, quick);
1321 	PF_MD5_UPD(rule, ifnot);
1322 	PF_MD5_UPD(rule, rcvifnot);
1323 	PF_MD5_UPD(rule, match_tag_not);
1324 	PF_MD5_UPD(rule, natpass);
1325 	PF_MD5_UPD(rule, keep_state);
1326 	PF_MD5_UPD(rule, proto);
1327 	PF_MD5_UPD(rule, type);
1328 	PF_MD5_UPD(rule, code);
1329 	PF_MD5_UPD(rule, flags);
1330 	PF_MD5_UPD(rule, flagset);
1331 	PF_MD5_UPD(rule, allow_opts);
1332 	PF_MD5_UPD(rule, rt);
1333 	PF_MD5_UPD(rule, tos);
1334 	PF_MD5_UPD(rule, scrub_flags);
1335 	PF_MD5_UPD(rule, min_ttl);
1336 	PF_MD5_UPD(rule, set_tos);
1337 	if (rule->anchor != NULL)
1338 		PF_MD5_UPD_STR(rule, anchor->path);
1339 }
1340 
1341 static void
1342 pf_hash_rule(struct pf_krule *rule)
1343 {
1344 	MD5_CTX		ctx;
1345 
1346 	MD5Init(&ctx);
1347 	pf_hash_rule_rolling(&ctx, rule);
1348 	MD5Final(rule->md5sum, &ctx);
1349 }
1350 
1351 static int
1352 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1353 {
1354 
1355 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1356 }
1357 
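/*
 * Commit a ruleset transaction: swap the inactive and active rule queues,
 * roll counters of matching old rules over into their replacements (if
 * keep_counters is set), recompute skip steps and purge the old rules.
 */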
1358 static int
1359 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1360 {
1361 	struct pf_kruleset	*rs;
1362 	struct pf_krule		*rule, *old_rule;
1363 	struct pf_krulequeue	*old_rules;
1364 	struct pf_krule_global  *old_tree;
1365 	int			 error;
1366 	u_int32_t		 old_rcount;
1367 
1368 	PF_RULES_WASSERT();
1369 
1370 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1371 		return (EINVAL);
1372 	rs = pf_find_kruleset(anchor);
1373 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1374 	    ticket != rs->rules[rs_num].inactive.ticket)
1375 		return (EBUSY);
1376 
1377 	/* Calculate checksum for the main ruleset */
1378 	if (rs == &pf_main_ruleset) {
1379 		error = pf_setup_pfsync_matching(rs);
1380 		if (error != 0)
1381 			return (error);
1382 	}
1383 
1384 	/* Swap rules, keep the old. */
1385 	old_rules = rs->rules[rs_num].active.ptr;
1386 	old_rcount = rs->rules[rs_num].active.rcount;
1387 	old_tree = rs->rules[rs_num].active.tree;
1388 
1389 	rs->rules[rs_num].active.ptr =
1390 	    rs->rules[rs_num].inactive.ptr;
1391 	rs->rules[rs_num].active.tree =
1392 	    rs->rules[rs_num].inactive.tree;
1393 	rs->rules[rs_num].active.rcount =
1394 	    rs->rules[rs_num].inactive.rcount;
1395 
1396 	/* Attempt to preserve counter information. */
1397 	if (V_pf_status.keep_counters && old_tree != NULL) {
1398 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1399 		    entries) {
1400 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1401 			if (old_rule == NULL) {
1402 				continue;
1403 			}
1404 			pf_counter_u64_critical_enter();
1405 			pf_counter_u64_rollup_protected(&rule->evaluations,
1406 			    pf_counter_u64_fetch(&old_rule->evaluations));
1407 			pf_counter_u64_rollup_protected(&rule->packets[0],
1408 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1409 			pf_counter_u64_rollup_protected(&rule->packets[1],
1410 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1411 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1412 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1413 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1414 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1415 			pf_counter_u64_critical_exit();
1416 		}
1417 	}
1418 
1419 	rs->rules[rs_num].inactive.ptr = old_rules;
1420 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1421 	rs->rules[rs_num].inactive.rcount = old_rcount;
1422 
1423 	rs->rules[rs_num].active.ticket =
1424 	    rs->rules[rs_num].inactive.ticket;
1425 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1426 
1427 	/* Purge the old rule list. */
1428 	PF_UNLNKDRULES_LOCK();
1429 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1430 		pf_unlink_rule_locked(old_rules, rule);
1431 	PF_UNLNKDRULES_UNLOCK();
1432 	rs->rules[rs_num].inactive.rcount = 0;
1433 	rs->rules[rs_num].inactive.open = 0;
1434 	pf_remove_if_empty_kruleset(rs);
1435 	free(old_tree, M_TEMP);
1436 
1437 	return (0);
1438 }
1439 
1440 static int
1441 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1442 {
1443 	MD5_CTX			 ctx;
1444 	struct pf_krule		*rule;
1445 	int			 rs_cnt;
1446 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1447 
1448 	MD5Init(&ctx);
1449 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1450 		/* XXX PF_RULESET_SCRUB as well? */
1451 		if (rs_cnt == PF_RULESET_SCRUB)
1452 			continue;
1453 
1454 		if (rs->rules[rs_cnt].inactive.rcount) {
1455 			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1456 			    entries) {
1457 				pf_hash_rule_rolling(&ctx, rule);
1458 			}
1459 		}
1460 	}
1461 
1462 	MD5Final(digest, &ctx);
1463 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1464 	return (0);
1465 }
1466 
1467 static int
1468 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1469 {
1470 	int error = 0;
1471 
1472 	switch (addr->type) {
1473 	case PF_ADDR_TABLE:
1474 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1475 		if (addr->p.tbl == NULL)
1476 			error = ENOMEM;
1477 		break;
1478 	default:
1479 		error = EINVAL;
1480 	}
1481 
1482 	return (error);
1483 }
1484 
1485 static int
1486 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1487     sa_family_t af)
1488 {
1489 	int error = 0;
1490 
1491 	switch (addr->type) {
1492 	case PF_ADDR_TABLE:
1493 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1494 		if (addr->p.tbl == NULL)
1495 			error = ENOMEM;
1496 		break;
1497 	case PF_ADDR_DYNIFTL:
1498 		error = pfi_dynaddr_setup(addr, af);
1499 		break;
1500 	}
1501 
1502 	return (error);
1503 }
1504 
1505 void
1506 pf_addr_copyout(struct pf_addr_wrap *addr)
1507 {
1508 
1509 	switch (addr->type) {
1510 	case PF_ADDR_DYNIFTL:
1511 		pfi_dynaddr_copyout(addr);
1512 		break;
1513 	case PF_ADDR_TABLE:
1514 		pf_tbladdr_copyout(addr);
1515 		break;
1516 	}
1517 }
1518 
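/*
 * Export a kernel source node in the userland format: fetch the per-CPU
 * counters and turn absolute uptime stamps into the relative ages that
 * userland expects.
 */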
1519 static void
1520 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1521 {
1522 	int	secs = time_uptime;
1523 
1524 	bzero(out, sizeof(struct pf_src_node));
1525 
1526 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1527 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1528 
1529 	if (in->rule != NULL)
1530 		out->rule.nr = in->rule->nr;
1531 
1532 	for (int i = 0; i < 2; i++) {
1533 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1534 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1535 	}
1536 
1537 	out->states = in->states;
1538 	out->conn = in->conn;
1539 	out->af = in->af;
1540 	out->ruletype = in->ruletype;
1541 
1542 	out->creation = secs - in->creation;
1543 	if (out->expire > secs)
1544 		out->expire -= secs;
1545 	else
1546 		out->expire = 0;
1547 
1548 	/* Adjust the connection rate estimate. */
1549 	out->conn_rate.limit = in->conn_rate.limit;
1550 	out->conn_rate.seconds = in->conn_rate.seconds;
1551 	/* If there's no limit there's no counter_rate. */
1552 	if (in->conn_rate.cr != NULL)
1553 		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
1554 }
1555 
1556 #ifdef ALTQ
1557 /*
1558  * Handle export of struct pf_kaltq to user binaries that may be using any
1559  * version of struct pf_altq.
1560  */
1561 static int
1562 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1563 {
1564 	u_int32_t version;
1565 
1566 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1567 		version = 0;
1568 	else
1569 		version = pa->version;
1570 
1571 	if (version > PFIOC_ALTQ_VERSION)
1572 		return (EINVAL);
1573 
1574 #define ASSIGN(x) exported_q->x = q->x
1575 #define COPY(x) \
1576 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1577 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1578 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1579 
1580 	switch (version) {
1581 	case 0: {
1582 		struct pf_altq_v0 *exported_q =
1583 		    &((struct pfioc_altq_v0 *)pa)->altq;
1584 
1585 		COPY(ifname);
1586 
1587 		ASSIGN(scheduler);
1588 		ASSIGN(tbrsize);
1589 		exported_q->tbrsize = SATU16(q->tbrsize);
1590 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1591 
1592 		COPY(qname);
1593 		COPY(parent);
1594 		ASSIGN(parent_qid);
1595 		exported_q->bandwidth = SATU32(q->bandwidth);
1596 		ASSIGN(priority);
1597 		ASSIGN(local_flags);
1598 
1599 		ASSIGN(qlimit);
1600 		ASSIGN(flags);
1601 
1602 		if (q->scheduler == ALTQT_HFSC) {
1603 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1604 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1605 			    SATU32(q->pq_u.hfsc_opts.x)
1606 
1607 			ASSIGN_OPT_SATU32(rtsc_m1);
1608 			ASSIGN_OPT(rtsc_d);
1609 			ASSIGN_OPT_SATU32(rtsc_m2);
1610 
1611 			ASSIGN_OPT_SATU32(lssc_m1);
1612 			ASSIGN_OPT(lssc_d);
1613 			ASSIGN_OPT_SATU32(lssc_m2);
1614 
1615 			ASSIGN_OPT_SATU32(ulsc_m1);
1616 			ASSIGN_OPT(ulsc_d);
1617 			ASSIGN_OPT_SATU32(ulsc_m2);
1618 
1619 			ASSIGN_OPT(flags);
1620 
1621 #undef ASSIGN_OPT
1622 #undef ASSIGN_OPT_SATU32
1623 		} else
1624 			COPY(pq_u);
1625 
1626 		ASSIGN(qid);
1627 		break;
1628 	}
1629 	case 1:	{
1630 		struct pf_altq_v1 *exported_q =
1631 		    &((struct pfioc_altq_v1 *)pa)->altq;
1632 
1633 		COPY(ifname);
1634 
1635 		ASSIGN(scheduler);
1636 		ASSIGN(tbrsize);
1637 		ASSIGN(ifbandwidth);
1638 
1639 		COPY(qname);
1640 		COPY(parent);
1641 		ASSIGN(parent_qid);
1642 		ASSIGN(bandwidth);
1643 		ASSIGN(priority);
1644 		ASSIGN(local_flags);
1645 
1646 		ASSIGN(qlimit);
1647 		ASSIGN(flags);
1648 		COPY(pq_u);
1649 
1650 		ASSIGN(qid);
1651 		break;
1652 	}
1653 	default:
1654 		panic("%s: unhandled struct pfioc_altq version", __func__);
1655 		break;
1656 	}
1657 
1658 #undef ASSIGN
1659 #undef COPY
1660 #undef SATU16
1661 #undef SATU32
1662 
1663 	return (0);
1664 }
1665 
1666 /*
1667  * Handle import of struct pf_altq, in any of its versions, from user
1668  * binaries into struct pf_kaltq.
1669  */
1670 static int
1671 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1672 {
1673 	u_int32_t version;
1674 
1675 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1676 		version = 0;
1677 	else
1678 		version = pa->version;
1679 
1680 	if (version > PFIOC_ALTQ_VERSION)
1681 		return (EINVAL);
1682 
1683 #define ASSIGN(x) q->x = imported_q->x
1684 #define COPY(x) \
1685 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1686 
1687 	switch (version) {
1688 	case 0: {
1689 		struct pf_altq_v0 *imported_q =
1690 		    &((struct pfioc_altq_v0 *)pa)->altq;
1691 
1692 		COPY(ifname);
1693 
1694 		ASSIGN(scheduler);
1695 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1696 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1697 
1698 		COPY(qname);
1699 		COPY(parent);
1700 		ASSIGN(parent_qid);
1701 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1702 		ASSIGN(priority);
1703 		ASSIGN(local_flags);
1704 
1705 		ASSIGN(qlimit);
1706 		ASSIGN(flags);
1707 
1708 		if (imported_q->scheduler == ALTQT_HFSC) {
1709 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1710 
1711 			/*
1712 			 * The m1 and m2 parameters are being copied from
1713 			 * 32-bit to 64-bit.
1714 			 */
1715 			ASSIGN_OPT(rtsc_m1);
1716 			ASSIGN_OPT(rtsc_d);
1717 			ASSIGN_OPT(rtsc_m2);
1718 
1719 			ASSIGN_OPT(lssc_m1);
1720 			ASSIGN_OPT(lssc_d);
1721 			ASSIGN_OPT(lssc_m2);
1722 
1723 			ASSIGN_OPT(ulsc_m1);
1724 			ASSIGN_OPT(ulsc_d);
1725 			ASSIGN_OPT(ulsc_m2);
1726 
1727 			ASSIGN_OPT(flags);
1728 
1729 #undef ASSIGN_OPT
1730 		} else
1731 			COPY(pq_u);
1732 
1733 		ASSIGN(qid);
1734 		break;
1735 	}
1736 	case 1: {
1737 		struct pf_altq_v1 *imported_q =
1738 		    &((struct pfioc_altq_v1 *)pa)->altq;
1739 
1740 		COPY(ifname);
1741 
1742 		ASSIGN(scheduler);
1743 		ASSIGN(tbrsize);
1744 		ASSIGN(ifbandwidth);
1745 
1746 		COPY(qname);
1747 		COPY(parent);
1748 		ASSIGN(parent_qid);
1749 		ASSIGN(bandwidth);
1750 		ASSIGN(priority);
1751 		ASSIGN(local_flags);
1752 
1753 		ASSIGN(qlimit);
1754 		ASSIGN(flags);
1755 		COPY(pq_u);
1756 
1757 		ASSIGN(qid);
1758 		break;
1759 	}
1760 	default:
1761 		panic("%s: unhandled struct pfioc_altq version", __func__);
1762 		break;
1763 	}
1764 
1765 #undef ASSIGN
1766 #undef COPY
1767 
1768 	return (0);
1769 }
1770 
1771 static struct pf_altq *
1772 pf_altq_get_nth_active(u_int32_t n)
1773 {
1774 	struct pf_altq		*altq;
1775 	u_int32_t		 nr;
1776 
1777 	nr = 0;
1778 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1779 		if (nr == n)
1780 			return (altq);
1781 		nr++;
1782 	}
1783 
1784 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1785 		if (nr == n)
1786 			return (altq);
1787 		nr++;
1788 	}
1789 
1790 	return (NULL);
1791 }
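/*
 * Note that n indexes one virtual sequence: the interface attachments on
 * V_pf_altq_ifs_active come first, followed by the queues on
 * V_pf_altqs_active. With two entries on the former and three on the
 * latter, for example, n = 3 returns the second entry of
 * V_pf_altqs_active.
 */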
1792 #endif /* ALTQ */
1793 
1794 struct pf_krule *
1795 pf_krule_alloc(void)
1796 {
1797 	struct pf_krule *rule;
1798 
1799 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1800 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1801 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1802 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1803 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1804 	    M_WAITOK | M_ZERO);
1805 	return (rule);
1806 }
1807 
1808 void
1809 pf_krule_free(struct pf_krule *rule)
1810 {
1811 #ifdef PF_WANT_32_TO_64_COUNTER
1812 	bool wowned;
1813 #endif
1814 
1815 	if (rule == NULL)
1816 		return;
1817 
1818 #ifdef PF_WANT_32_TO_64_COUNTER
1819 	if (rule->allrulelinked) {
1820 		wowned = PF_RULES_WOWNED();
1821 		if (!wowned)
1822 			PF_RULES_WLOCK();
1823 		LIST_REMOVE(rule, allrulelist);
1824 		V_pf_allrulecount--;
1825 		if (!wowned)
1826 			PF_RULES_WUNLOCK();
1827 	}
1828 #endif
1829 
1830 	pf_counter_u64_deinit(&rule->evaluations);
1831 	for (int i = 0; i < 2; i++) {
1832 		pf_counter_u64_deinit(&rule->packets[i]);
1833 		pf_counter_u64_deinit(&rule->bytes[i]);
1834 	}
1835 	counter_u64_free(rule->states_cur);
1836 	counter_u64_free(rule->states_tot);
1837 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
1838 		counter_u64_free(rule->src_nodes[sn_type]);
1839 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1840 
1841 	mtx_destroy(&rule->nat.mtx);
1842 	mtx_destroy(&rule->rdr.mtx);
1843 	mtx_destroy(&rule->route.mtx);
1844 	free(rule, M_PFRULE);
1845 }
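/*
 * Lifecycle sketch (illustrative, not compiled): callers pair the two
 * functions above as DIOCADDRULE below does; pf_krule_free() is safe to
 * call on a rule whose counters were never allocated:
 *
 *	struct pf_krule *rule;
 *
 *	rule = pf_krule_alloc();
 *	error = pf_rule_to_krule(&pr->rule, rule);
 *	if (error != 0) {
 *		pf_krule_free(rule);
 *		return (error);
 *	}
 *	(on success, pf_ioctl_addrule() takes ownership and frees the
 *	rule itself on failure)
 */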
1846 
1847 void
1848 pf_krule_clear_counters(struct pf_krule *rule)
1849 {
1850 	pf_counter_u64_zero(&rule->evaluations);
1851 	for (int i = 0; i < 2; i++) {
1852 		pf_counter_u64_zero(&rule->packets[i]);
1853 		pf_counter_u64_zero(&rule->bytes[i]);
1854 	}
1855 	counter_u64_zero(rule->states_tot);
1856 }
1857 
1858 static void
1859 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1860     struct pf_pooladdr *pool)
1861 {
1862 
1863 	bzero(pool, sizeof(*pool));
1864 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1865 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1866 }
1867 
1868 static int
1869 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1870     struct pf_kpooladdr *kpool)
1871 {
1872 	int ret;
1873 
1874 	bzero(kpool, sizeof(*kpool));
1875 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1876 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1877 	    sizeof(kpool->ifname));
1878 	return (ret);
1879 }
1880 
1881 static void
1882 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1883 {
1884 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1885 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1886 
1887 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1888 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1889 
1890 	kpool->tblidx = pool->tblidx;
1891 	kpool->proxy_port[0] = pool->proxy_port[0];
1892 	kpool->proxy_port[1] = pool->proxy_port[1];
1893 	kpool->opts = pool->opts;
1894 }
1895 
1896 static int
1897 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1898 {
1899 	int ret;
1900 
1901 #ifndef INET
1902 	if (rule->af == AF_INET) {
1903 		return (EAFNOSUPPORT);
1904 	}
1905 #endif /* INET */
1906 #ifndef INET6
1907 	if (rule->af == AF_INET6) {
1908 		return (EAFNOSUPPORT);
1909 	}
1910 #endif /* INET6 */
1911 
1912 	ret = pf_check_rule_addr(&rule->src);
1913 	if (ret != 0)
1914 		return (ret);
1915 	ret = pf_check_rule_addr(&rule->dst);
1916 	if (ret != 0)
1917 		return (ret);
1918 
1919 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1920 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1921 
1922 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1923 	if (ret != 0)
1924 		return (ret);
1925 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1926 	if (ret != 0)
1927 		return (ret);
1928 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1929 	if (ret != 0)
1930 		return (ret);
1931 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1932 	if (ret != 0)
1933 		return (ret);
1934 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1935 	    sizeof(rule->tagname));
1936 	if (ret != 0)
1937 		return (ret);
1938 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1939 	    sizeof(rule->match_tagname));
1940 	if (ret != 0)
1941 		return (ret);
1942 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1943 	    sizeof(rule->overload_tblname));
1944 	if (ret != 0)
1945 		return (ret);
1946 
1947 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1948 
1949 	/* Don't allow userspace to set evaluations, packets or bytes. */
1950 	/* kif, anchor, overload_tbl are not copied over. */
1951 
1952 	krule->os_fingerprint = rule->os_fingerprint;
1953 
1954 	krule->rtableid = rule->rtableid;
1955 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1956 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1957 	krule->max_states = rule->max_states;
1958 	krule->max_src_nodes = rule->max_src_nodes;
1959 	krule->max_src_states = rule->max_src_states;
1960 	krule->max_src_conn = rule->max_src_conn;
1961 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1962 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1963 	krule->qid = rule->qid;
1964 	krule->pqid = rule->pqid;
1965 	krule->nr = rule->nr;
1966 	krule->prob = rule->prob;
1967 	krule->cuid = rule->cuid;
1968 	krule->cpid = rule->cpid;
1969 
1970 	krule->return_icmp = rule->return_icmp;
1971 	krule->return_icmp6 = rule->return_icmp6;
1972 	krule->max_mss = rule->max_mss;
1973 	krule->tag = rule->tag;
1974 	krule->match_tag = rule->match_tag;
1975 	krule->scrub_flags = rule->scrub_flags;
1976 
1977 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1978 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1979 
1980 	krule->rule_flag = rule->rule_flag;
1981 	krule->action = rule->action;
1982 	krule->direction = rule->direction;
1983 	krule->log = rule->log;
1984 	krule->logif = rule->logif;
1985 	krule->quick = rule->quick;
1986 	krule->ifnot = rule->ifnot;
1987 	krule->match_tag_not = rule->match_tag_not;
1988 	krule->natpass = rule->natpass;
1989 
1990 	krule->keep_state = rule->keep_state;
1991 	krule->af = rule->af;
1992 	krule->proto = rule->proto;
1993 	krule->type = rule->type;
1994 	krule->code = rule->code;
1995 	krule->flags = rule->flags;
1996 	krule->flagset = rule->flagset;
1997 	krule->min_ttl = rule->min_ttl;
1998 	krule->allow_opts = rule->allow_opts;
1999 	krule->rt = rule->rt;
2000 	krule->return_ttl = rule->return_ttl;
2001 	krule->tos = rule->tos;
2002 	krule->set_tos = rule->set_tos;
2003 
2004 	krule->flush = rule->flush;
2005 	krule->prio = rule->prio;
2006 	krule->set_prio[0] = rule->set_prio[0];
2007 	krule->set_prio[1] = rule->set_prio[1];
2008 
2009 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2010 
2011 	return (0);
2012 }
2013 
2014 int
2015 pf_ioctl_getrules(struct pfioc_rule *pr)
2016 {
2017 	struct pf_kruleset	*ruleset;
2018 	struct pf_krule		*tail;
2019 	int			 rs_num;
2020 
2021 	PF_RULES_WLOCK();
2022 	ruleset = pf_find_kruleset(pr->anchor);
2023 	if (ruleset == NULL) {
2024 		PF_RULES_WUNLOCK();
2025 		return (EINVAL);
2026 	}
2027 	rs_num = pf_get_ruleset_number(pr->rule.action);
2028 	if (rs_num >= PF_RULESET_MAX) {
2029 		PF_RULES_WUNLOCK();
2030 		return (EINVAL);
2031 	}
2032 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2033 	    pf_krulequeue);
2034 	if (tail)
2035 		pr->nr = tail->nr + 1;
2036 	else
2037 		pr->nr = 0;
2038 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2039 	PF_RULES_WUNLOCK();
2040 
2041 	return (0);
2042 }
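/*
 * Userspace sketch (illustrative): a pfctl-style caller obtains the rule
 * count and ticket for the main filter ruleset before iterating with
 * DIOCGETRULE; dev is assumed to be an open /dev/pf descriptor:
 *
 *	struct pfioc_rule pr;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.rule.action = PF_PASS;
 *	if (ioctl(dev, DIOCGETRULES, &pr) == -1)
 *		err(1, "DIOCGETRULES");
 *	(pr.nr now holds the number of active rules and pr.ticket the
 *	ticket to pass to subsequent DIOCGETRULE calls)
 */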
2043 
2044 static int
2045 pf_rule_checkaf(struct pf_krule *r)
2046 {
2047 	switch (r->af) {
2048 	case 0:
2049 		if (r->rule_flag & PFRULE_AFTO)
2050 			return (EPFNOSUPPORT);
2051 		break;
2052 	case AF_INET:
2053 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
2054 			return (EPFNOSUPPORT);
2055 		break;
2056 #ifdef INET6
2057 	case AF_INET6:
2058 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
2059 			return (EPFNOSUPPORT);
2060 		break;
2061 #endif /* INET6 */
2062 	default:
2063 		return (EPFNOSUPPORT);
2064 	}
2065 
2066 	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
2067 		return (EPFNOSUPPORT);
2068 
2069 	return (0);
2070 }
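/*
 * For reference, the combinations accepted above:
 *
 *	af		PFRULE_AFTO	naf		result
 *	0 (any)		unset		0		OK
 *	0 (any)		set		-		EPFNOSUPPORT
 *	AF_INET		set		AF_INET6	OK
 *	AF_INET6	set		AF_INET		OK
 *	any		unset		!= 0		EPFNOSUPPORT
 */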
2071 
2072 static int
2073 pf_validate_range(uint8_t op, uint16_t port[2])
2074 {
2075 	uint16_t a = ntohs(port[0]);
2076 	uint16_t b = ntohs(port[1]);
2077 
2078 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2079 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2080 	    (op == PF_OP_XRG && a > b))	   /* 34<>22, i.e. all */
2081 		return (1);
2082 	return (0);
2083 }
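/*
 * Worked example: with a = 34 and b = 12, "34:12" (PF_OP_RRG) and
 * "34 >< 12" (PF_OP_IRG) describe empty ranges, while "34 <> 12"
 * (PF_OP_XRG) would match every port; all three are rejected as almost
 * certainly configuration mistakes.
 */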
2084 
2085 int
2086 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2087     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2088     uid_t uid, pid_t pid)
2089 {
2090 	struct pf_kruleset	*ruleset;
2091 	struct pf_krule		*tail;
2092 	struct pf_kpooladdr	*pa;
2093 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2094 	int			 rs_num;
2095 	int			 error = 0;
2096 
2097 #define	ERROUT(x)		ERROUT_FUNCTION(errout, x)
2098 #define	ERROUT_UNLOCKED(x)	ERROUT_FUNCTION(errout_unlocked, x)
2099 
2100 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
2101 		ERROUT_UNLOCKED(EINVAL);
2102 
2103 	if ((error = pf_rule_checkaf(rule)))
2104 		ERROUT_UNLOCKED(error);
2105 	if (pf_validate_range(rule->src.port_op, rule->src.port))
2106 		ERROUT_UNLOCKED(EINVAL);
2107 	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
2108 		ERROUT_UNLOCKED(EINVAL);
2109 
2110 	if (rule->ifname[0])
2111 		kif = pf_kkif_create(M_WAITOK);
2112 	if (rule->rcv_ifname[0])
2113 		rcv_kif = pf_kkif_create(M_WAITOK);
2114 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2115 	for (int i = 0; i < 2; i++) {
2116 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2117 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2118 	}
2119 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2120 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2121 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
2122 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
2123 	rule->cuid = uid;
2124 	rule->cpid = pid;
2125 	TAILQ_INIT(&rule->rdr.list);
2126 	TAILQ_INIT(&rule->nat.list);
2127 	TAILQ_INIT(&rule->route.list);
2128 
2129 	PF_CONFIG_LOCK();
2130 	PF_RULES_WLOCK();
2131 #ifdef PF_WANT_32_TO_64_COUNTER
2132 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2133 	MPASS(!rule->allrulelinked);
2134 	rule->allrulelinked = true;
2135 	V_pf_allrulecount++;
2136 #endif
2137 	ruleset = pf_find_kruleset(anchor);
2138 	if (ruleset == NULL)
2139 		ERROUT(EINVAL);
2140 	rs_num = pf_get_ruleset_number(rule->action);
2141 	if (rs_num >= PF_RULESET_MAX)
2142 		ERROUT(EINVAL);
2143 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2144 		DPFPRINTF(PF_DEBUG_MISC,
2145 		    "ticket: %d != [%d]%d", ticket, rs_num,
2146 		    ruleset->rules[rs_num].inactive.ticket);
2147 		ERROUT(EBUSY);
2148 	}
2149 	if (pool_ticket != V_ticket_pabuf) {
2150 		DPFPRINTF(PF_DEBUG_MISC,
2151 		    "pool_ticket: %d != %d", pool_ticket,
2152 		    V_ticket_pabuf);
2153 		ERROUT(EBUSY);
2154 	}
2155 	/*
2156 	 * XXXMJG hack: there is no mechanism to ensure the caller started
2157 	 * the transaction. The ticket checked above may happen to match by
2158 	 * accident, even if nobody called DIOCXBEGIN, let alone this
2159 	 * process. Partially work around this by checking whether the RB
2160 	 * tree got allocated; see pf_begin_rules.
2161 	 */
2162 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2163 		ERROUT(EINVAL);
2164 	}
2165 
2166 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2167 	    pf_krulequeue);
2168 	if (tail)
2169 		rule->nr = tail->nr + 1;
2170 	else
2171 		rule->nr = 0;
2172 	if (rule->ifname[0]) {
2173 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2174 		kif = NULL;
2175 		pfi_kkif_ref(rule->kif);
2176 	} else
2177 		rule->kif = NULL;
2178 
2179 	if (rule->rcv_ifname[0]) {
2180 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2181 		rcv_kif = NULL;
2182 		pfi_kkif_ref(rule->rcv_kif);
2183 	} else
2184 		rule->rcv_kif = NULL;
2185 
2186 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2187 		ERROUT(EBUSY);
2188 #ifdef ALTQ
2189 	/* set queue IDs */
2190 	if (rule->qname[0] != 0) {
2191 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2192 			ERROUT(EBUSY);
2193 		else if (rule->pqname[0] != 0) {
2194 			if ((rule->pqid =
2195 			    pf_qname2qid(rule->pqname)) == 0)
2196 				ERROUT(EBUSY);
2197 		} else
2198 			rule->pqid = rule->qid;
2199 	}
2200 #endif
2201 	if (rule->tagname[0])
2202 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2203 			ERROUT(EBUSY);
2204 	if (rule->match_tagname[0])
2205 		if ((rule->match_tag =
2206 		    pf_tagname2tag(rule->match_tagname)) == 0)
2207 			ERROUT(EBUSY);
2208 	if (rule->rt && !rule->direction)
2209 		ERROUT(EINVAL);
2210 	if (!rule->log)
2211 		rule->logif = 0;
2212 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
2213 	   rule->pktrate.seconds))
2214 		ERROUT(ENOMEM);
2215 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2216 		ERROUT(ENOMEM);
2217 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2218 		ERROUT(ENOMEM);
2219 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2220 		ERROUT(EINVAL);
2221 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2222 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2223 	    rule->set_prio[1] > PF_PRIO_MAX))
2224 		ERROUT(EINVAL);
2225 	for (int i = 0; i < 3; i++) {
2226 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2227 			if (pa->addr.type == PF_ADDR_TABLE) {
2228 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2229 				    pa->addr.v.tblname);
2230 				if (pa->addr.p.tbl == NULL)
2231 					ERROUT(ENOMEM);
2232 			}
2233 	}
2234 
2235 	rule->overload_tbl = NULL;
2236 	if (rule->overload_tblname[0]) {
2237 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2238 		    rule->overload_tblname)) == NULL)
2239 			ERROUT(EINVAL);
2240 		else
2241 			rule->overload_tbl->pfrkt_flags |=
2242 			    PFR_TFLAG_ACTIVE;
2243 	}
2244 
2245 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2246 
2247 	/*
2248 	 * Old versions of pfctl provide route redirection pools in the
2249 	 * single common redirection pool rdr. New versions use rdr only
2250 	 * for rdr-to rules.
2251 	 */
2252 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
2253 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
2254 	} else {
2255 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2256 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2257 	}
2258 
2259 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2260 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
2261 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
2262 		ERROUT(EINVAL);
2263 	}
2264 
2265 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
2266 		ERROUT(EINVAL);
2267 	}
2268 
2269 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
2270 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
2271 		ERROUT(EINVAL);
2272 	}
2273 
2274 	MPASS(error == 0);
2275 
2276 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2277 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2278 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2279 	rule->route.ipv6_nexthop_af = AF_INET6;
2280 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2281 	    rule, entries);
2282 	ruleset->rules[rs_num].inactive.rcount++;
2283 
2284 	PF_RULES_WUNLOCK();
2285 	pf_hash_rule(rule);
2286 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2287 		PF_RULES_WLOCK();
2288 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2289 		ruleset->rules[rs_num].inactive.rcount--;
2290 		pf_free_rule(rule);
2291 		rule = NULL;
2292 		ERROUT(EEXIST);
2293 	}
2294 	PF_CONFIG_UNLOCK();
2295 
2296 	return (0);
2297 
2298 #undef ERROUT
2299 #undef ERROUT_UNLOCKED
2300 errout:
2301 	PF_RULES_WUNLOCK();
2302 	PF_CONFIG_UNLOCK();
2303 errout_unlocked:
2304 	pf_kkif_free(rcv_kif);
2305 	pf_kkif_free(kif);
2306 	pf_krule_free(rule);
2307 	return (error);
2308 }
2309 
2310 static bool
2311 pf_label_match(const struct pf_krule *rule, const char *label)
2312 {
2313 	int i = 0;
2314 
2315 	while (*rule->label[i]) {
2316 		if (strcmp(rule->label[i], label) == 0)
2317 			return (true);
2318 		i++;
2319 	}
2320 
2321 	return (false);
2322 }
2323 
2324 static unsigned int
2325 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2326 {
2327 	struct pf_kstate *s;
2328 	int more = 0;
2329 
2330 	s = pf_find_state_all(key, dir, &more);
2331 	if (s == NULL)
2332 		return (0);
2333 
2334 	if (more) {
2335 		PF_STATE_UNLOCK(s);
2336 		return (0);
2337 	}
2338 
2339 	pf_remove_state(s);
2340 	return (1);
2341 }
2342 
2343 static int
2344 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2345 {
2346 	struct pf_kstate	*s;
2347 	struct pf_state_key	*sk;
2348 	struct pf_addr		*srcaddr, *dstaddr;
2349 	struct pf_state_key_cmp	 match_key;
2350 	int			 idx, killed = 0;
2351 	unsigned int		 dir;
2352 	u_int16_t		 srcport, dstport;
2353 	struct pfi_kkif		*kif;
2354 
2355 relock_DIOCKILLSTATES:
2356 	PF_HASHROW_LOCK(ih);
2357 	LIST_FOREACH(s, &ih->states, entry) {
2358 		/* For floating states look at the original kif. */
2359 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2360 
2361 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2362 		if (s->direction == PF_OUT) {
2363 			srcaddr = &sk->addr[1];
2364 			dstaddr = &sk->addr[0];
2365 			srcport = sk->port[1];
2366 			dstport = sk->port[0];
2367 		} else {
2368 			srcaddr = &sk->addr[0];
2369 			dstaddr = &sk->addr[1];
2370 			srcport = sk->port[0];
2371 			dstport = sk->port[1];
2372 		}
2373 
2374 		if (psk->psk_af && sk->af != psk->psk_af)
2375 			continue;
2376 
2377 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2378 			continue;
2379 
2380 		if (! pf_match_addr(psk->psk_src.neg,
2381 		    &psk->psk_src.addr.v.a.addr,
2382 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2383 			continue;
2384 
2385 		if (! pf_match_addr(psk->psk_dst.neg,
2386 		    &psk->psk_dst.addr.v.a.addr,
2387 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2388 			continue;
2389 
2390 		if (! pf_match_addr(psk->psk_rt_addr.neg,
2391 		    &psk->psk_rt_addr.addr.v.a.addr,
2392 		    &psk->psk_rt_addr.addr.v.a.mask,
2393 		    &s->act.rt_addr, sk->af))
2394 			continue;
2395 
2396 		if (psk->psk_src.port_op != 0 &&
2397 		    ! pf_match_port(psk->psk_src.port_op,
2398 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2399 			continue;
2400 
2401 		if (psk->psk_dst.port_op != 0 &&
2402 		    ! pf_match_port(psk->psk_dst.port_op,
2403 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2404 			continue;
2405 
2406 		if (psk->psk_label[0] &&
2407 		    ! pf_label_match(s->rule, psk->psk_label))
2408 			continue;
2409 
2410 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2411 		    kif->pfik_name))
2412 			continue;
2413 
2414 		if (psk->psk_kill_match) {
2415 			/* Create the key to find matching states, with lock
2416 			 * held. */
2417 
2418 			bzero(&match_key, sizeof(match_key));
2419 
2420 			if (s->direction == PF_OUT) {
2421 				dir = PF_IN;
2422 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2423 			} else {
2424 				dir = PF_OUT;
2425 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2426 			}
2427 
2428 			match_key.af = s->key[idx]->af;
2429 			match_key.proto = s->key[idx]->proto;
2430 			pf_addrcpy(&match_key.addr[0],
2431 			    &s->key[idx]->addr[1], match_key.af);
2432 			match_key.port[0] = s->key[idx]->port[1];
2433 			pf_addrcpy(&match_key.addr[1],
2434 			    &s->key[idx]->addr[0], match_key.af);
2435 			match_key.port[1] = s->key[idx]->port[0];
2436 		}
2437 
2438 		pf_remove_state(s);
2439 		killed++;
2440 
2441 		if (psk->psk_kill_match)
2442 			killed += pf_kill_matching_state(&match_key, dir);
2443 
2444 		goto relock_DIOCKILLSTATES;
2445 	}
2446 	PF_HASHROW_UNLOCK(ih);
2447 
2448 	return (killed);
2449 }
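/*
 * Illustrative note on the psk_kill_match path above: if the state being
 * removed has, say, the key 192.0.2.7:3300 -> 198.51.100.1:80 in the
 * PF_OUT direction, match_key is built with the endpoint pairs swapped
 * (198.51.100.1:80 -> 192.0.2.7:3300) and the lookup runs in PF_IN, so
 * pf_kill_matching_state() also removes the companion state created for
 * the same connection on the other side of the translation. The
 * addresses are examples only.
 */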
2450 
2451 void
2452 unhandled_af(int af)
2453 {
2454 	panic("unhandled af %d", af);
2455 }
2456 
2457 int
2458 pf_start(void)
2459 {
2460 	int error = 0;
2461 
2462 	sx_xlock(&V_pf_ioctl_lock);
2463 	if (V_pf_status.running)
2464 		error = EEXIST;
2465 	else {
2466 		hook_pf();
2467 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2468 			hook_pf_eth();
2469 		V_pf_status.running = 1;
2470 		V_pf_status.since = time_uptime;
2471 		new_unrhdr64(&V_pf_stateid, time_second);
2472 
2473 		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
2474 	}
2475 	sx_xunlock(&V_pf_ioctl_lock);
2476 
2477 	return (error);
2478 }
2479 
2480 int
2481 pf_stop(void)
2482 {
2483 	int error = 0;
2484 
2485 	sx_xlock(&V_pf_ioctl_lock);
2486 	if (!V_pf_status.running)
2487 		error = ENOENT;
2488 	else {
2489 		V_pf_status.running = 0;
2490 		dehook_pf();
2491 		dehook_pf_eth();
2492 		V_pf_status.since = time_uptime;
2493 		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
2494 	}
2495 	sx_xunlock(&V_pf_ioctl_lock);
2496 
2497 	return (error);
2498 }
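/*
 * Userspace sketch (illustrative): enabling and disabling pf maps to the
 * COMPAT_FREEBSD14 DIOCSTART/DIOCSTOP commands handled below; newer
 * pfctl performs the same operation over netlink:
 *
 *	int dev;
 *
 *	if ((dev = open("/dev/pf", O_RDWR)) == -1)
 *		err(1, "open(/dev/pf)");
 *	if (ioctl(dev, DIOCSTART) == -1 && errno != EEXIST)
 *		err(1, "DIOCSTART");
 *	(EEXIST just means pf was already running; DIOCSTOP likewise
 *	returns ENOENT when it was already stopped)
 */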
2499 
2500 void
2501 pf_ioctl_clear_status(void)
2502 {
2503 	PF_RULES_WLOCK();
2504 	for (int i = 0; i < PFRES_MAX; i++)
2505 		counter_u64_zero(V_pf_status.counters[i]);
2506 	for (int i = 0; i < FCNT_MAX; i++)
2507 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2508 	for (int i = 0; i < SCNT_MAX; i++)
2509 		counter_u64_zero(V_pf_status.scounters[i]);
2510 	for (int i = 0; i < KLCNT_MAX; i++)
2511 		counter_u64_zero(V_pf_status.lcounters[i]);
2512 	V_pf_status.since = time_uptime;
2513 	if (*V_pf_status.ifname)
2514 		pfi_update_status(V_pf_status.ifname, NULL);
2515 	PF_RULES_WUNLOCK();
2516 }
2517 
2518 int
2519 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2520 {
2521 	uint32_t old;
2522 
2523 	if (timeout < 0 || timeout >= PFTM_MAX ||
2524 	    seconds < 0)
2525 		return (EINVAL);
2526 
2527 	PF_RULES_WLOCK();
2528 	old = V_pf_default_rule.timeout[timeout];
2529 	if (timeout == PFTM_INTERVAL && seconds == 0)
2530 		seconds = 1;
2531 	V_pf_default_rule.timeout[timeout] = seconds;
2532 	if (timeout == PFTM_INTERVAL && seconds < old)
2533 		wakeup(pf_purge_thread);
2534 
2535 	if (prev_seconds != NULL)
2536 		*prev_seconds = old;
2537 
2538 	PF_RULES_WUNLOCK();
2539 
2540 	return (0);
2541 }
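/*
 * Userspace sketch (illustrative): DIOCSETTIMEOUT passes a struct
 * pfioc_tm, and the previous value is written back through the
 * prev_seconds pointer above:
 *
 *	struct pfioc_tm pt;
 *
 *	pt.timeout = PFTM_TCP_ESTABLISHED;
 *	pt.seconds = 3600;
 *	if (ioctl(dev, DIOCSETTIMEOUT, &pt) == -1)
 *		err(1, "DIOCSETTIMEOUT");
 *	(pt.seconds now holds the old timeout value)
 */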
2542 
2543 int
2544 pf_ioctl_get_timeout(int timeout, int *seconds)
2545 {
2546 	PF_RULES_RLOCK_TRACKER;
2547 
2548 	if (timeout < 0 || timeout >= PFTM_MAX)
2549 		return (EINVAL);
2550 
2551 	PF_RULES_RLOCK();
2552 	*seconds = V_pf_default_rule.timeout[timeout];
2553 	PF_RULES_RUNLOCK();
2554 
2555 	return (0);
2556 }
2557 
2558 int
2559 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2560 {
2561 
2562 	PF_RULES_WLOCK();
2563 	if (index < 0 || index >= PF_LIMIT_MAX ||
2564 	    V_pf_limits[index].zone == NULL) {
2565 		PF_RULES_WUNLOCK();
2566 		return (EINVAL);
2567 	}
2568 	uma_zone_set_max(V_pf_limits[index].zone,
2569 	    limit == 0 ? INT_MAX : limit);
2570 	if (old_limit != NULL)
2571 		*old_limit = V_pf_limits[index].limit;
2572 	V_pf_limits[index].limit = limit;
2573 	PF_RULES_WUNLOCK();
2574 
2575 	return (0);
2576 }
2577 
2578 int
2579 pf_ioctl_get_limit(int index, unsigned int *limit)
2580 {
2581 	PF_RULES_RLOCK_TRACKER;
2582 
2583 	if (index < 0 || index >= PF_LIMIT_MAX)
2584 		return (EINVAL);
2585 
2586 	PF_RULES_RLOCK();
2587 	*limit = V_pf_limits[index].limit;
2588 	PF_RULES_RUNLOCK();
2589 
2590 	return (0);
2591 }
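/*
 * Userspace sketch (illustrative): the limit indices correspond to
 * "set limit" in pf.conf(5); raising the state table ceiling looks like
 * this, with a limit of 0 meaning unlimited (INT_MAX above):
 *
 *	struct pfioc_limit pl;
 *
 *	memset(&pl, 0, sizeof(pl));
 *	pl.index = PF_LIMIT_STATES;
 *	pl.limit = 100000;
 *	if (ioctl(dev, DIOCSETLIMIT, &pl) == -1)
 *		err(1, "DIOCSETLIMIT");
 *	(the previous limit is written back into pl.limit)
 */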
2592 
2593 int
2594 pf_ioctl_begin_addrs(uint32_t *ticket)
2595 {
2596 	PF_RULES_WLOCK();
2597 	pf_empty_kpool(&V_pf_pabuf[0]);
2598 	pf_empty_kpool(&V_pf_pabuf[1]);
2599 	pf_empty_kpool(&V_pf_pabuf[2]);
2600 	*ticket = ++V_ticket_pabuf;
2601 	PF_RULES_WUNLOCK();
2602 
2603 	return (0);
2604 }
2605 
2606 int
2607 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2608 {
2609 	struct pf_kpooladdr	*pa = NULL;
2610 	struct pfi_kkif		*kif = NULL;
2611 	int error;
2612 
2613 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2614 	    pp->which != PF_RT)
2615 		return (EINVAL);
2616 
2617 	switch (pp->af) {
2618 #ifdef INET
2619 	case AF_INET:
2620 		/* FALLTHROUGH */
2621 #endif /* INET */
2622 #ifdef INET6
2623 	case AF_INET6:
2624 		/* FALLTHROUGH */
2625 #endif /* INET6 */
2626 	case AF_UNSPEC:
2627 		break;
2628 	default:
2629 		return (EAFNOSUPPORT);
2630 	}
2631 
2632 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2633 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2634 	    pp->addr.addr.type != PF_ADDR_TABLE)
2635 		return (EINVAL);
2636 
2637 	if (pp->addr.addr.p.dyn != NULL)
2638 		return (EINVAL);
2639 
2640 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2641 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2642 	if (error != 0)
2643 		goto out;
2644 	if (pa->ifname[0])
2645 		kif = pf_kkif_create(M_WAITOK);
2646 	PF_RULES_WLOCK();
2647 	if (pp->ticket != V_ticket_pabuf) {
2648 		PF_RULES_WUNLOCK();
2649 		if (pa->ifname[0])
2650 			pf_kkif_free(kif);
2651 		error = EBUSY;
2652 		goto out;
2653 	}
2654 	if (pa->ifname[0]) {
2655 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2656 		kif = NULL;
2657 		pfi_kkif_ref(pa->kif);
2658 	} else
2659 		pa->kif = NULL;
2660 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2661 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2662 		if (pa->ifname[0])
2663 			pfi_kkif_unref(pa->kif);
2664 		PF_RULES_WUNLOCK();
2665 		goto out;
2666 	}
2667 	pa->af = pp->af;
2668 	switch (pp->which) {
2669 	case PF_NAT:
2670 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2671 		break;
2672 	case PF_RDR:
2673 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2674 		break;
2675 	case PF_RT:
2676 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2677 		break;
2678 	}
2679 	PF_RULES_WUNLOCK();
2680 
2681 	return (0);
2682 
2683 out:
2684 	free(pa, M_PFRULE);
2685 	return (error);
2686 }
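/*
 * Userspace sketch (illustrative): pool addresses are staged against a
 * ticket from DIOCBEGINADDRS and only become part of a rule when the
 * subsequent add-rule call quotes the same ticket as pool_ticket:
 *
 *	struct pfioc_pooladdr pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
 *		err(1, "DIOCBEGINADDRS");
 *	(fill in pp.addr and call DIOCADDADDR once per pool entry,
 *	keeping pp.ticket from the begin call; a later DIOCBEGINADDRS
 *	empties the staging buffers and invalidates the old ticket)
 */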
2687 
2688 int
2689 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2690 {
2691 	struct pf_kpool		*pool;
2692 	struct pf_kpooladdr	*pa;
2693 
2694 	PF_RULES_RLOCK_TRACKER;
2695 
2696 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2697 	    pp->which != PF_RT)
2698 		return (EINVAL);
2699 
2700 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2701 	pp->nr = 0;
2702 
2703 	PF_RULES_RLOCK();
2704 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2705 	    pp->r_num, 0, 1, 0, pp->which);
2706 	if (pool == NULL) {
2707 		PF_RULES_RUNLOCK();
2708 		return (EBUSY);
2709 	}
2710 	TAILQ_FOREACH(pa, &pool->list, entries)
2711 		pp->nr++;
2712 	PF_RULES_RUNLOCK();
2713 
2714 	return (0);
2715 }
2716 
2717 int
2718 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2719 {
2720 	struct pf_kpool		*pool;
2721 	struct pf_kpooladdr	*pa;
2722 	u_int32_t		 nr = 0;
2723 
2724 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2725 	    pp->which != PF_RT)
2726 		return (EINVAL);
2727 
2728 	PF_RULES_RLOCK_TRACKER;
2729 
2730 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2731 
2732 	PF_RULES_RLOCK();
2733 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2734 	    pp->r_num, 0, 1, 1, pp->which);
2735 	if (pool == NULL) {
2736 		PF_RULES_RUNLOCK();
2737 		return (EBUSY);
2738 	}
2739 	pa = TAILQ_FIRST(&pool->list);
2740 	while ((pa != NULL) && (nr < pp->nr)) {
2741 		pa = TAILQ_NEXT(pa, entries);
2742 		nr++;
2743 	}
2744 	if (pa == NULL) {
2745 		PF_RULES_RUNLOCK();
2746 		return (EBUSY);
2747 	}
2748 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2749 	pp->af = pa->af;
2750 	pf_addr_copyout(&pp->addr.addr);
2751 	PF_RULES_RUNLOCK();
2752 
2753 	return (0);
2754 }
2755 
2756 int
2757 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2758 {
2759 	struct pf_kruleset	*ruleset;
2760 	struct pf_kanchor	*anchor;
2761 
2762 	PF_RULES_RLOCK_TRACKER;
2763 
2764 	pr->path[sizeof(pr->path) - 1] = '\0';
2765 
2766 	PF_RULES_RLOCK();
2767 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2768 		PF_RULES_RUNLOCK();
2769 		return (ENOENT);
2770 	}
2771 	pr->nr = 0;
2772 	if (ruleset == &pf_main_ruleset) {
2773 		/* XXX kludge for pf_main_ruleset */
2774 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2775 			if (anchor->parent == NULL)
2776 				pr->nr++;
2777 	} else {
2778 		RB_FOREACH(anchor, pf_kanchor_node,
2779 		    &ruleset->anchor->children)
2780 			pr->nr++;
2781 	}
2782 	PF_RULES_RUNLOCK();
2783 
2784 	return (0);
2785 }
2786 
2787 int
2788 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2789 {
2790 	struct pf_kruleset	*ruleset;
2791 	struct pf_kanchor	*anchor;
2792 	u_int32_t		 nr = 0;
2793 	int			 error = 0;
2794 
2795 	PF_RULES_RLOCK_TRACKER;
2796 
2797 	PF_RULES_RLOCK();
2798 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2799 		PF_RULES_RUNLOCK();
2800 		return (ENOENT);
2801 	}
2802 
2803 	pr->name[0] = '\0';
2804 	if (ruleset == &pf_main_ruleset) {
2805 		/* XXX kludge for pf_main_ruleset */
2806 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2807 			if (anchor->parent == NULL && nr++ == pr->nr) {
2808 				strlcpy(pr->name, anchor->name,
2809 				    sizeof(pr->name));
2810 				break;
2811 			}
2812 	} else {
2813 		RB_FOREACH(anchor, pf_kanchor_node,
2814 		    &ruleset->anchor->children)
2815 			if (nr++ == pr->nr) {
2816 				strlcpy(pr->name, anchor->name,
2817 				    sizeof(pr->name));
2818 				break;
2819 			}
2820 	}
2821 	if (!pr->name[0])
2822 		error = EBUSY;
2823 	PF_RULES_RUNLOCK();
2824 
2825 	return (error);
2826 }
2827 
2828 int
2829 pf_ioctl_natlook(struct pfioc_natlook *pnl)
2830 {
2831 	struct pf_state_key	*sk;
2832 	struct pf_kstate	*state;
2833 	struct pf_state_key_cmp	 key;
2834 	int			 m = 0, direction = pnl->direction;
2835 	int			 sidx, didx;
2836 
2837 	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
2838 	sidx = (direction == PF_IN) ? 1 : 0;
2839 	didx = (direction == PF_IN) ? 0 : 1;
2840 
2841 	if (!pnl->proto ||
2842 	    PF_AZERO(&pnl->saddr, pnl->af) ||
2843 	    PF_AZERO(&pnl->daddr, pnl->af) ||
2844 	    ((pnl->proto == IPPROTO_TCP ||
2845 	    pnl->proto == IPPROTO_UDP) &&
2846 	    (!pnl->dport || !pnl->sport)))
2847 		return (EINVAL);
2848 
2849 	switch (pnl->direction) {
2850 	case PF_IN:
2851 	case PF_OUT:
2852 	case PF_INOUT:
2853 		break;
2854 	default:
2855 		return (EINVAL);
2856 	}
2857 
2858 	switch (pnl->af) {
2859 #ifdef INET
2860 	case AF_INET:
2861 		break;
2862 #endif /* INET */
2863 #ifdef INET6
2864 	case AF_INET6:
2865 		break;
2866 #endif /* INET6 */
2867 	default:
2868 		return (EAFNOSUPPORT);
2869 	}
2870 
2871 	bzero(&key, sizeof(key));
2872 	key.af = pnl->af;
2873 	key.proto = pnl->proto;
2874 	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
2875 	key.port[sidx] = pnl->sport;
2876 	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2877 	key.port[didx] = pnl->dport;
2878 
2879 	state = pf_find_state_all(&key, direction, &m);
2880 	if (state == NULL)
2881 		return (ENOENT);
2882 
2883 	if (m > 1) {
2884 		PF_STATE_UNLOCK(state);
2885 		return (E2BIG);	/* more than one state */
2886 	}
2887 
2888 	sk = state->key[sidx];
2889 	pf_addrcpy(&pnl->rsaddr,
2890 	    &sk->addr[sidx], sk->af);
2891 	pnl->rsport = sk->port[sidx];
2892 	pf_addrcpy(&pnl->rdaddr,
2893 	    &sk->addr[didx], sk->af);
2894 	pnl->rdport = sk->port[didx];
2895 	PF_STATE_UNLOCK(state);
2896 
2897 	return (0);
2898 }
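/*
 * Userspace sketch (illustrative): this is the lookup behind proxies
 * such as ftp-proxy(8); given the endpoints of a connection as seen on
 * one side of the translation, it returns the translated pair:
 *
 *	struct pfioc_natlook nl;
 *
 *	memset(&nl, 0, sizeof(nl));
 *	nl.af = AF_INET;
 *	nl.proto = IPPROTO_TCP;
 *	nl.direction = PF_OUT;
 *	nl.saddr.v4.s_addr = inet_addr("192.0.2.7");
 *	nl.sport = htons(3300);
 *	nl.daddr.v4.s_addr = inet_addr("198.51.100.1");
 *	nl.dport = htons(80);
 *	if (ioctl(dev, DIOCNATLOOK, &nl) == -1)
 *		err(1, "DIOCNATLOOK");
 *	(nl.rsaddr/nl.rsport and nl.rdaddr/nl.rdport now hold the
 *	translated source and destination; the addresses are examples)
 */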
2899 
2900 static int
2901 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2902 {
2903 	int			 error = 0;
2904 	PF_RULES_RLOCK_TRACKER;
2905 
2906 #define	ERROUT_IOCTL(target, x)					\
2907     do {								\
2908 	    error = (x);						\
2909 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2910 	    goto target;						\
2911     } while (0)
2912 
2914 	/* XXX keep in sync with switch() below */
2915 	if (securelevel_gt(td->td_ucred, 2))
2916 		switch (cmd) {
2917 		case DIOCGETRULES:
2918 		case DIOCGETRULENV:
2919 		case DIOCGETADDRS:
2920 		case DIOCGETADDR:
2921 		case DIOCGETSTATE:
2922 		case DIOCGETSTATENV:
2923 		case DIOCSETSTATUSIF:
2924 		case DIOCGETSTATUSNV:
2925 		case DIOCCLRSTATUS:
2926 		case DIOCNATLOOK:
2927 		case DIOCSETDEBUG:
2928 #ifdef COMPAT_FREEBSD14
2929 		case DIOCGETSTATES:
2930 		case DIOCGETSTATESV2:
2931 #endif
2932 		case DIOCGETTIMEOUT:
2933 		case DIOCCLRRULECTRS:
2934 		case DIOCGETLIMIT:
2935 		case DIOCGETALTQSV0:
2936 		case DIOCGETALTQSV1:
2937 		case DIOCGETALTQV0:
2938 		case DIOCGETALTQV1:
2939 		case DIOCGETQSTATSV0:
2940 		case DIOCGETQSTATSV1:
2941 		case DIOCGETRULESETS:
2942 		case DIOCGETRULESET:
2943 		case DIOCRGETTABLES:
2944 		case DIOCRGETTSTATS:
2945 		case DIOCRCLRTSTATS:
2946 		case DIOCRCLRADDRS:
2947 		case DIOCRADDADDRS:
2948 		case DIOCRDELADDRS:
2949 		case DIOCRSETADDRS:
2950 		case DIOCRGETADDRS:
2951 		case DIOCRGETASTATS:
2952 		case DIOCRCLRASTATS:
2953 		case DIOCRTSTADDRS:
2954 		case DIOCOSFPGET:
2955 		case DIOCGETSRCNODES:
2956 		case DIOCCLRSRCNODES:
2957 		case DIOCGETSYNCOOKIES:
2958 		case DIOCIGETIFACES:
2959 		case DIOCGIFSPEEDV0:
2960 		case DIOCGIFSPEEDV1:
2961 		case DIOCSETIFFLAG:
2962 		case DIOCCLRIFFLAG:
2963 		case DIOCGETETHRULES:
2964 		case DIOCGETETHRULE:
2965 		case DIOCGETETHRULESETS:
2966 		case DIOCGETETHRULESET:
2967 			break;
2968 		case DIOCRCLRTABLES:
2969 		case DIOCRADDTABLES:
2970 		case DIOCRDELTABLES:
2971 		case DIOCRSETTFLAGS:
2972 			if (((struct pfioc_table *)addr)->pfrio_flags &
2973 			    PFR_FLAG_DUMMY)
2974 				break; /* dummy operation ok */
2975 			return (EPERM);
2976 		default:
2977 			return (EPERM);
2978 		}
2979 
2980 	if (!(flags & FWRITE))
2981 		switch (cmd) {
2982 		case DIOCGETRULES:
2983 		case DIOCGETADDRS:
2984 		case DIOCGETADDR:
2985 		case DIOCGETSTATE:
2986 		case DIOCGETSTATENV:
2987 		case DIOCGETSTATUSNV:
2988 #ifdef COMPAT_FREEBSD14
2989 		case DIOCGETSTATES:
2990 		case DIOCGETSTATESV2:
2991 #endif
2992 		case DIOCGETTIMEOUT:
2993 		case DIOCGETLIMIT:
2994 		case DIOCGETALTQSV0:
2995 		case DIOCGETALTQSV1:
2996 		case DIOCGETALTQV0:
2997 		case DIOCGETALTQV1:
2998 		case DIOCGETQSTATSV0:
2999 		case DIOCGETQSTATSV1:
3000 		case DIOCGETRULESETS:
3001 		case DIOCGETRULESET:
3002 		case DIOCNATLOOK:
3003 		case DIOCRGETTABLES:
3004 		case DIOCRGETTSTATS:
3005 		case DIOCRGETADDRS:
3006 		case DIOCRGETASTATS:
3007 		case DIOCRTSTADDRS:
3008 		case DIOCOSFPGET:
3009 		case DIOCGETSRCNODES:
3010 		case DIOCGETSYNCOOKIES:
3011 		case DIOCIGETIFACES:
3012 		case DIOCGIFSPEEDV1:
3013 		case DIOCGIFSPEEDV0:
3014 		case DIOCGETRULENV:
3015 		case DIOCGETETHRULES:
3016 		case DIOCGETETHRULE:
3017 		case DIOCGETETHRULESETS:
3018 		case DIOCGETETHRULESET:
3019 			break;
3020 		case DIOCRCLRTABLES:
3021 		case DIOCRADDTABLES:
3022 		case DIOCRDELTABLES:
3023 		case DIOCRCLRTSTATS:
3024 		case DIOCRCLRADDRS:
3025 		case DIOCRADDADDRS:
3026 		case DIOCRDELADDRS:
3027 		case DIOCRSETADDRS:
3028 		case DIOCRSETTFLAGS:
3029 			if (((struct pfioc_table *)addr)->pfrio_flags &
3030 			    PFR_FLAG_DUMMY) {
3031 				flags |= FWRITE; /* need write lock for dummy */
3032 				break; /* dummy operation ok */
3033 			}
3034 			return (EACCES);
3035 		default:
3036 			return (EACCES);
3037 		}
3038 
3039 	CURVNET_SET(TD_TO_VNET(td));
3040 
3041 	switch (cmd) {
3042 #ifdef COMPAT_FREEBSD14
3043 	case DIOCSTART:
3044 		error = pf_start();
3045 		break;
3046 
3047 	case DIOCSTOP:
3048 		error = pf_stop();
3049 		break;
3050 #endif
3051 
3052 	case DIOCGETETHRULES: {
3053 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3054 		nvlist_t		*nvl;
3055 		void			*packed;
3056 		struct pf_keth_rule	*tail;
3057 		struct pf_keth_ruleset	*rs;
3058 		u_int32_t		 ticket, nr;
3059 		const char		*anchor = "";
3060 
3061 		nvl = NULL;
3062 		packed = NULL;
3063 
3064 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
3065 
3066 		if (nv->len > pf_ioctl_maxcount)
3067 			ERROUT(ENOMEM);
3068 
3069 		/* Copy the request in */
3070 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
3071 		error = copyin(nv->data, packed, nv->len);
3072 		if (error)
3073 			ERROUT(error);
3074 
3075 		nvl = nvlist_unpack(packed, nv->len, 0);
3076 		if (nvl == NULL)
3077 			ERROUT(EBADMSG);
3078 
3079 		if (! nvlist_exists_string(nvl, "anchor"))
3080 			ERROUT(EBADMSG);
3081 
3082 		anchor = nvlist_get_string(nvl, "anchor");
3083 
3084 		rs = pf_find_keth_ruleset(anchor);
3085 
3086 		nvlist_destroy(nvl);
3087 		nvl = NULL;
3088 		free(packed, M_NVLIST);
3089 		packed = NULL;
3090 
3091 		if (rs == NULL)
3092 			ERROUT(ENOENT);
3093 
3094 		/* Reply */
3095 		nvl = nvlist_create(0);
3096 		if (nvl == NULL)
3097 			ERROUT(ENOMEM);
3098 
3099 		PF_RULES_RLOCK();
3100 
3101 		ticket = rs->active.ticket;
3102 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
3103 		if (tail)
3104 			nr = tail->nr + 1;
3105 		else
3106 			nr = 0;
3107 
3108 		PF_RULES_RUNLOCK();
3109 
3110 		nvlist_add_number(nvl, "ticket", ticket);
3111 		nvlist_add_number(nvl, "nr", nr);
3112 
3113 		packed = nvlist_pack(nvl, &nv->len);
3114 		if (packed == NULL)
3115 			ERROUT(ENOMEM);
3116 
3117 		if (nv->size == 0)
3118 			ERROUT(0);
3119 		else if (nv->size < nv->len)
3120 			ERROUT(ENOSPC);
3121 
3122 		error = copyout(packed, nv->data, nv->len);
3123 
3124 #undef ERROUT
3125 DIOCGETETHRULES_error:
3126 		free(packed, M_NVLIST);
3127 		nvlist_destroy(nvl);
3128 		break;
3129 	}
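	/*
	 * Userspace sketch (illustrative) of the nvlist request/reply
	 * pattern shared by the DIOCGETETH* commands; libpfctl wraps
	 * this, and -lnv provides the nvlist functions:
	 *
	 *	nvlist_t *nvl;
	 *	struct pfioc_nv nv;
	 *	static char buf[4096];
	 *	void *packed;
	 *
	 *	nvl = nvlist_create(0);
	 *	nvlist_add_string(nvl, "anchor", "");
	 *	packed = nvlist_pack(nvl, &nv.len);
	 *	nvlist_destroy(nvl);
	 *	memcpy(buf, packed, nv.len);
	 *	free(packed);
	 *	nv.data = buf;
	 *	nv.size = sizeof(buf);
	 *	if (ioctl(dev, DIOCGETETHRULES, &nv) == -1)
	 *		err(1, "DIOCGETETHRULES");
	 *	nvl = nvlist_unpack(buf, nv.len, 0);
	 *	("ticket" and "nr" can now be read with
	 *	nvlist_get_number(); nv.len was updated to the reply size)
	 */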
3130 
3131 	case DIOCGETETHRULE: {
3132 		struct epoch_tracker	 et;
3133 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3134 		nvlist_t		*nvl = NULL;
3135 		void			*nvlpacked = NULL;
3136 		struct pf_keth_rule	*rule = NULL;
3137 		struct pf_keth_ruleset	*rs;
3138 		u_int32_t		 ticket, nr;
3139 		bool			 clear = false;
3140 		const char		*anchor;
3141 
3142 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3143 
3144 		if (nv->len > pf_ioctl_maxcount)
3145 			ERROUT(ENOMEM);
3146 
3147 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3148 		error = copyin(nv->data, nvlpacked, nv->len);
3149 		if (error)
3150 			ERROUT(error);
3151 
3152 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3153 		if (nvl == NULL)
3154 			ERROUT(EBADMSG);
3155 		if (! nvlist_exists_number(nvl, "ticket"))
3156 			ERROUT(EBADMSG);
3157 		ticket = nvlist_get_number(nvl, "ticket");
3158 		if (! nvlist_exists_string(nvl, "anchor"))
3159 			ERROUT(EBADMSG);
3160 		anchor = nvlist_get_string(nvl, "anchor");
3161 
3162 		if (nvlist_exists_bool(nvl, "clear"))
3163 			clear = nvlist_get_bool(nvl, "clear");
3164 
3165 		if (clear && !(flags & FWRITE))
3166 			ERROUT(EACCES);
3167 
3168 		if (! nvlist_exists_number(nvl, "nr"))
3169 			ERROUT(EBADMSG);
3170 		nr = nvlist_get_number(nvl, "nr");
3171 
3172 		PF_RULES_RLOCK();
3173 		rs = pf_find_keth_ruleset(anchor);
3174 		if (rs == NULL) {
3175 			PF_RULES_RUNLOCK();
3176 			ERROUT(ENOENT);
3177 		}
3178 		if (ticket != rs->active.ticket) {
3179 			PF_RULES_RUNLOCK();
3180 			ERROUT(EBUSY);
3181 		}
3182 
3183 		nvlist_destroy(nvl);
3184 		nvl = NULL;
3185 		free(nvlpacked, M_NVLIST);
3186 		nvlpacked = NULL;
3187 
3188 		rule = TAILQ_FIRST(rs->active.rules);
3189 		while ((rule != NULL) && (rule->nr != nr))
3190 			rule = TAILQ_NEXT(rule, entries);
3191 		if (rule == NULL) {
3192 			PF_RULES_RUNLOCK();
3193 			ERROUT(ENOENT);
3194 		}
3195 		/* Make sure rule can't go away. */
3196 		NET_EPOCH_ENTER(et);
3197 		PF_RULES_RUNLOCK();
3198 		nvl = pf_keth_rule_to_nveth_rule(rule);
3199 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3200 			NET_EPOCH_EXIT(et);
3201 			ERROUT(EBUSY);
3202 		}
3203 		NET_EPOCH_EXIT(et);
3204 		if (nvl == NULL)
3205 			ERROUT(ENOMEM);
3206 
3207 		nvlpacked = nvlist_pack(nvl, &nv->len);
3208 		if (nvlpacked == NULL)
3209 			ERROUT(ENOMEM);
3210 
3211 		if (nv->size == 0)
3212 			ERROUT(0);
3213 		else if (nv->size < nv->len)
3214 			ERROUT(ENOSPC);
3215 
3216 		error = copyout(nvlpacked, nv->data, nv->len);
3217 		if (error == 0 && clear) {
3218 			counter_u64_zero(rule->evaluations);
3219 			for (int i = 0; i < 2; i++) {
3220 				counter_u64_zero(rule->packets[i]);
3221 				counter_u64_zero(rule->bytes[i]);
3222 			}
3223 		}
3224 
3225 #undef ERROUT
3226 DIOCGETETHRULE_error:
3227 		free(nvlpacked, M_NVLIST);
3228 		nvlist_destroy(nvl);
3229 		break;
3230 	}
3231 
3232 	case DIOCADDETHRULE: {
3233 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3234 		nvlist_t		*nvl = NULL;
3235 		void			*nvlpacked = NULL;
3236 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3237 		struct pf_keth_ruleset	*ruleset = NULL;
3238 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3239 		const char		*anchor = "", *anchor_call = "";
3240 
3241 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3242 
3243 		if (nv->len > pf_ioctl_maxcount)
3244 			ERROUT(ENOMEM);
3245 
3246 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3247 		error = copyin(nv->data, nvlpacked, nv->len);
3248 		if (error)
3249 			ERROUT(error);
3250 
3251 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3252 		if (nvl == NULL)
3253 			ERROUT(EBADMSG);
3254 
3255 		if (! nvlist_exists_number(nvl, "ticket"))
3256 			ERROUT(EBADMSG);
3257 
3258 		if (nvlist_exists_string(nvl, "anchor"))
3259 			anchor = nvlist_get_string(nvl, "anchor");
3260 		if (nvlist_exists_string(nvl, "anchor_call"))
3261 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3262 
3263 		ruleset = pf_find_keth_ruleset(anchor);
3264 		if (ruleset == NULL)
3265 			ERROUT(EINVAL);
3266 
3267 		if (nvlist_get_number(nvl, "ticket") !=
3268 		    ruleset->inactive.ticket) {
3269 			DPFPRINTF(PF_DEBUG_MISC,
3270 			    "ticket: %d != %d",
3271 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3272 			    ruleset->inactive.ticket);
3273 			ERROUT(EBUSY);
3274 		}
3275 
3276 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3277 		rule->timestamp = NULL;
3278 
3279 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3280 		if (error != 0)
3281 			ERROUT(error);
3282 
3283 		if (rule->ifname[0])
3284 			kif = pf_kkif_create(M_WAITOK);
3285 		if (rule->bridge_to_name[0])
3286 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3287 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3288 		for (int i = 0; i < 2; i++) {
3289 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3290 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3291 		}
3292 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3293 		    M_WAITOK | M_ZERO);
3294 
3295 		PF_RULES_WLOCK();
3296 
3297 		if (rule->ifname[0]) {
3298 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3299 			pfi_kkif_ref(rule->kif);
3300 		} else
3301 			rule->kif = NULL;
3302 		if (rule->bridge_to_name[0]) {
3303 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3304 			    rule->bridge_to_name);
3305 			pfi_kkif_ref(rule->bridge_to);
3306 		} else
3307 			rule->bridge_to = NULL;
3308 
3309 #ifdef ALTQ
3310 		/* set queue IDs */
3311 		if (rule->qname[0] != 0) {
3312 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3313 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3314 				error = EBUSY;
3317 #endif
3318 		if (rule->tagname[0])
3319 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3320 				error = EBUSY;
3321 		if (rule->match_tagname[0])
3322 			if ((rule->match_tag = pf_tagname2tag(
3323 			    rule->match_tagname)) == 0)
3324 				error = EBUSY;
3325 
3326 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3327 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3328 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3329 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3330 
3331 		if (error) {
3332 			pf_free_eth_rule(rule);
3333 			PF_RULES_WUNLOCK();
3334 			ERROUT(error);
3335 		}
3336 
3337 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3338 			pf_free_eth_rule(rule);
3339 			PF_RULES_WUNLOCK();
3340 			ERROUT(EINVAL);
3341 		}
3342 
3343 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3344 		if (tail)
3345 			rule->nr = tail->nr + 1;
3346 		else
3347 			rule->nr = 0;
3348 
3349 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3350 
3351 		PF_RULES_WUNLOCK();
3352 
3353 #undef ERROUT
3354 DIOCADDETHRULE_error:
3355 		nvlist_destroy(nvl);
3356 		free(nvlpacked, M_NVLIST);
3357 		break;
3358 	}
3359 
3360 	case DIOCGETETHRULESETS: {
3361 		struct epoch_tracker	 et;
3362 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3363 		nvlist_t		*nvl = NULL;
3364 		void			*nvlpacked = NULL;
3365 		struct pf_keth_ruleset	*ruleset;
3366 		struct pf_keth_anchor	*anchor;
3367 		int			 nr = 0;
3368 
3369 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3370 
3371 		if (nv->len > pf_ioctl_maxcount)
3372 			ERROUT(ENOMEM);
3373 
3374 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3375 		error = copyin(nv->data, nvlpacked, nv->len);
3376 		if (error)
3377 			ERROUT(error);
3378 
3379 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3380 		if (nvl == NULL)
3381 			ERROUT(EBADMSG);
3382 		if (! nvlist_exists_string(nvl, "path"))
3383 			ERROUT(EBADMSG);
3384 
3385 		NET_EPOCH_ENTER(et);
3386 
3387 		if ((ruleset = pf_find_keth_ruleset(
3388 		    nvlist_get_string(nvl, "path"))) == NULL) {
3389 			NET_EPOCH_EXIT(et);
3390 			ERROUT(ENOENT);
3391 		}
3392 
3393 		if (ruleset->anchor == NULL) {
3394 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3395 				if (anchor->parent == NULL)
3396 					nr++;
3397 		} else {
3398 			RB_FOREACH(anchor, pf_keth_anchor_node,
3399 			    &ruleset->anchor->children)
3400 				nr++;
3401 		}
3402 
3403 		NET_EPOCH_EXIT(et);
3404 
3405 		nvlist_destroy(nvl);
3406 		nvl = NULL;
3407 		free(nvlpacked, M_NVLIST);
3408 		nvlpacked = NULL;
3409 
3410 		nvl = nvlist_create(0);
3411 		if (nvl == NULL)
3412 			ERROUT(ENOMEM);
3413 
3414 		nvlist_add_number(nvl, "nr", nr);
3415 
3416 		nvlpacked = nvlist_pack(nvl, &nv->len);
3417 		if (nvlpacked == NULL)
3418 			ERROUT(ENOMEM);
3419 
3420 		if (nv->size == 0)
3421 			ERROUT(0);
3422 		else if (nv->size < nv->len)
3423 			ERROUT(ENOSPC);
3424 
3425 		error = copyout(nvlpacked, nv->data, nv->len);
3426 
3427 #undef ERROUT
3428 DIOCGETETHRULESETS_error:
3429 		free(nvlpacked, M_NVLIST);
3430 		nvlist_destroy(nvl);
3431 		break;
3432 	}
3433 
3434 	case DIOCGETETHRULESET: {
3435 		struct epoch_tracker	 et;
3436 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3437 		nvlist_t		*nvl = NULL;
3438 		void			*nvlpacked = NULL;
3439 		struct pf_keth_ruleset	*ruleset;
3440 		struct pf_keth_anchor	*anchor;
3441 		int			 nr = 0, req_nr = 0;
3442 		bool			 found = false;
3443 
3444 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3445 
3446 		if (nv->len > pf_ioctl_maxcount)
3447 			ERROUT(ENOMEM);
3448 
3449 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3450 		error = copyin(nv->data, nvlpacked, nv->len);
3451 		if (error)
3452 			ERROUT(error);
3453 
3454 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3455 		if (nvl == NULL)
3456 			ERROUT(EBADMSG);
3457 		if (! nvlist_exists_string(nvl, "path"))
3458 			ERROUT(EBADMSG);
3459 		if (! nvlist_exists_number(nvl, "nr"))
3460 			ERROUT(EBADMSG);
3461 
3462 		req_nr = nvlist_get_number(nvl, "nr");
3463 
3464 		NET_EPOCH_ENTER(et);
3465 
3466 		if ((ruleset = pf_find_keth_ruleset(
3467 		    nvlist_get_string(nvl, "path"))) == NULL) {
3468 			NET_EPOCH_EXIT(et);
3469 			ERROUT(ENOENT);
3470 		}
3471 
3472 		nvlist_destroy(nvl);
3473 		nvl = NULL;
3474 		free(nvlpacked, M_NVLIST);
3475 		nvlpacked = NULL;
3476 
3477 		nvl = nvlist_create(0);
3478 		if (nvl == NULL) {
3479 			NET_EPOCH_EXIT(et);
3480 			ERROUT(ENOMEM);
3481 		}
3482 
3483 		if (ruleset->anchor == NULL) {
3484 			RB_FOREACH(anchor, pf_keth_anchor_global,
3485 			    &V_pf_keth_anchors) {
3486 				if (anchor->parent == NULL && nr++ == req_nr) {
3487 					found = true;
3488 					break;
3489 				}
3490 			}
3491 		} else {
3492 			RB_FOREACH(anchor, pf_keth_anchor_node,
3493 			     &ruleset->anchor->children) {
3494 				if (nr++ == req_nr) {
3495 					found = true;
3496 					break;
3497 				}
3498 			}
3499 		}
3500 
3501 		NET_EPOCH_EXIT(et);
3502 		if (found) {
3503 			nvlist_add_number(nvl, "nr", nr);
3504 			nvlist_add_string(nvl, "name", anchor->name);
3505 			if (ruleset->anchor)
3506 				nvlist_add_string(nvl, "path",
3507 				    ruleset->anchor->path);
3508 			else
3509 				nvlist_add_string(nvl, "path", "");
3510 		} else {
3511 			ERROUT(EBUSY);
3512 		}
3513 
3514 		nvlpacked = nvlist_pack(nvl, &nv->len);
3515 		if (nvlpacked == NULL)
3516 			ERROUT(ENOMEM);
3517 
3518 		if (nv->size == 0)
3519 			ERROUT(0);
3520 		else if (nv->size < nv->len)
3521 			ERROUT(ENOSPC);
3522 
3523 		error = copyout(nvlpacked, nv->data, nv->len);
3524 
3525 #undef ERROUT
3526 DIOCGETETHRULESET_error:
3527 		free(nvlpacked, M_NVLIST);
3528 		nvlist_destroy(nvl);
3529 		break;
3530 	}
3531 
3532 	case DIOCADDRULENV: {
3533 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3534 		nvlist_t	*nvl = NULL;
3535 		void		*nvlpacked = NULL;
3536 		struct pf_krule	*rule = NULL;
3537 		const char	*anchor = "", *anchor_call = "";
3538 		uint32_t	 ticket = 0, pool_ticket = 0;
3539 
3540 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3541 
3542 		if (nv->len > pf_ioctl_maxcount)
3543 			ERROUT(ENOMEM);
3544 
3545 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3546 		error = copyin(nv->data, nvlpacked, nv->len);
3547 		if (error)
3548 			ERROUT(error);
3549 
3550 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3551 		if (nvl == NULL)
3552 			ERROUT(EBADMSG);
3553 
3554 		if (! nvlist_exists_number(nvl, "ticket"))
3555 			ERROUT(EINVAL);
3556 		ticket = nvlist_get_number(nvl, "ticket");
3557 
3558 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3559 			ERROUT(EINVAL);
3560 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3561 
3562 		if (! nvlist_exists_nvlist(nvl, "rule"))
3563 			ERROUT(EINVAL);
3564 
3565 		rule = pf_krule_alloc();
3566 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3567 		    rule);
3568 		if (error)
3569 			ERROUT(error);
3570 
3571 		if (nvlist_exists_string(nvl, "anchor"))
3572 			anchor = nvlist_get_string(nvl, "anchor");
3573 		if (nvlist_exists_string(nvl, "anchor_call"))
3574 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3575 
3576 		if ((error = nvlist_error(nvl)))
3577 			ERROUT(error);
3578 
3579 		/* Frees rule on error */
3580 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3581 		    anchor_call, td->td_ucred->cr_ruid,
3582 		    td->td_proc ? td->td_proc->p_pid : 0);
3583 
3584 		nvlist_destroy(nvl);
3585 		free(nvlpacked, M_NVLIST);
3586 		break;
3587 #undef ERROUT
3588 DIOCADDRULENV_error:
3589 		pf_krule_free(rule);
3590 		nvlist_destroy(nvl);
3591 		free(nvlpacked, M_NVLIST);
3592 
3593 		break;
3594 	}
3595 	case DIOCADDRULE: {
3596 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3597 		struct pf_krule		*rule;
3598 
3599 		rule = pf_krule_alloc();
3600 		error = pf_rule_to_krule(&pr->rule, rule);
3601 		if (error != 0) {
3602 			pf_krule_free(rule);
3603 			goto fail;
3604 		}
3605 
3606 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3607 
3608 		/* Frees rule on error */
3609 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3610 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3611 		    td->td_proc ? td->td_proc->p_pid : 0);
3612 		break;
3613 	}
3614 
3615 	case DIOCGETRULES: {
3616 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3617 
3618 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3619 
3620 		error = pf_ioctl_getrules(pr);
3621 
3622 		break;
3623 	}
3624 
3625 	case DIOCGETRULENV: {
3626 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3627 		nvlist_t		*nvrule = NULL;
3628 		nvlist_t		*nvl = NULL;
3629 		struct pf_kruleset	*ruleset;
3630 		struct pf_krule		*rule;
3631 		void			*nvlpacked = NULL;
3632 		int			 rs_num, nr;
3633 		bool			 clear_counter = false;
3634 
3635 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3636 
3637 		if (nv->len > pf_ioctl_maxcount)
3638 			ERROUT(ENOMEM);
3639 
3640 		/* Copy the request in */
3641 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3642 		error = copyin(nv->data, nvlpacked, nv->len);
3643 		if (error)
3644 			ERROUT(error);
3645 
3646 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3647 		if (nvl == NULL)
3648 			ERROUT(EBADMSG);
3649 
3650 		if (! nvlist_exists_string(nvl, "anchor"))
3651 			ERROUT(EBADMSG);
3652 		if (! nvlist_exists_number(nvl, "ruleset"))
3653 			ERROUT(EBADMSG);
3654 		if (! nvlist_exists_number(nvl, "ticket"))
3655 			ERROUT(EBADMSG);
3656 		if (! nvlist_exists_number(nvl, "nr"))
3657 			ERROUT(EBADMSG);
3658 
3659 		if (nvlist_exists_bool(nvl, "clear_counter"))
3660 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3661 
3662 		if (clear_counter && !(flags & FWRITE))
3663 			ERROUT(EACCES);
3664 
3665 		nr = nvlist_get_number(nvl, "nr");
3666 
3667 		PF_RULES_WLOCK();
3668 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3669 		if (ruleset == NULL) {
3670 			PF_RULES_WUNLOCK();
3671 			ERROUT(ENOENT);
3672 		}
3673 
3674 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3675 		if (rs_num >= PF_RULESET_MAX) {
3676 			PF_RULES_WUNLOCK();
3677 			ERROUT(EINVAL);
3678 		}
3679 
3680 		if (nvlist_get_number(nvl, "ticket") !=
3681 		    ruleset->rules[rs_num].active.ticket) {
3682 			PF_RULES_WUNLOCK();
3683 			ERROUT(EBUSY);
3684 		}
3685 
3686 		if ((error = nvlist_error(nvl))) {
3687 			PF_RULES_WUNLOCK();
3688 			ERROUT(error);
3689 		}
3690 
3691 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3692 		while ((rule != NULL) && (rule->nr != nr))
3693 			rule = TAILQ_NEXT(rule, entries);
3694 		if (rule == NULL) {
3695 			PF_RULES_WUNLOCK();
3696 			ERROUT(EBUSY);
3697 		}
3698 
3699 		nvrule = pf_krule_to_nvrule(rule);
3700 
3701 		nvlist_destroy(nvl);
3702 		nvl = nvlist_create(0);
3703 		if (nvl == NULL) {
3704 			PF_RULES_WUNLOCK();
3705 			ERROUT(ENOMEM);
3706 		}
3707 		nvlist_add_number(nvl, "nr", nr);
3708 		nvlist_add_nvlist(nvl, "rule", nvrule);
3709 		nvlist_destroy(nvrule);
3710 		nvrule = NULL;
3711 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3712 			PF_RULES_WUNLOCK();
3713 			ERROUT(EBUSY);
3714 		}
3715 
3716 		free(nvlpacked, M_NVLIST);
3717 		nvlpacked = nvlist_pack(nvl, &nv->len);
3718 		if (nvlpacked == NULL) {
3719 			PF_RULES_WUNLOCK();
3720 			ERROUT(ENOMEM);
3721 		}
3722 
3723 		if (nv->size == 0) {
3724 			PF_RULES_WUNLOCK();
3725 			ERROUT(0);
3726 		} else if (nv->size < nv->len) {
3728 			PF_RULES_WUNLOCK();
3729 			ERROUT(ENOSPC);
3730 		}
3731 
3732 		if (clear_counter)
3733 			pf_krule_clear_counters(rule);
3734 
3735 		PF_RULES_WUNLOCK();
3736 
3737 		error = copyout(nvlpacked, nv->data, nv->len);
3738 
3739 #undef ERROUT
3740 DIOCGETRULENV_error:
3741 		free(nvlpacked, M_NVLIST);
3742 		nvlist_destroy(nvrule);
3743 		nvlist_destroy(nvl);
3744 
3745 		break;
3746 	}
3747 
3748 	case DIOCCHANGERULE: {
3749 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3750 		struct pf_kruleset	*ruleset;
3751 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3752 		struct pfi_kkif		*kif = NULL;
3753 		struct pf_kpooladdr	*pa;
3754 		u_int32_t		 nr = 0;
3755 		int			 rs_num;
3756 
3757 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3758 
3759 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3760 		    pcr->action > PF_CHANGE_GET_TICKET) {
3761 			error = EINVAL;
3762 			goto fail;
3763 		}
3764 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3765 			error = EINVAL;
3766 			goto fail;
3767 		}
3768 
3769 		if (pcr->action != PF_CHANGE_REMOVE) {
3770 			newrule = pf_krule_alloc();
3771 			error = pf_rule_to_krule(&pcr->rule, newrule);
3772 			if (error != 0) {
3773 				pf_krule_free(newrule);
3774 				goto fail;
3775 			}
3776 
3777 			if ((error = pf_rule_checkaf(newrule))) {
3778 				pf_krule_free(newrule);
3779 				goto fail;
3780 			}
3781 			if (newrule->ifname[0])
3782 				kif = pf_kkif_create(M_WAITOK);
3783 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3784 			for (int i = 0; i < 2; i++) {
3785 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3786 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3787 			}
3788 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3789 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3790 			for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++)
3791 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3792 			newrule->cuid = td->td_ucred->cr_ruid;
3793 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3794 			TAILQ_INIT(&newrule->nat.list);
3795 			TAILQ_INIT(&newrule->rdr.list);
3796 			TAILQ_INIT(&newrule->route.list);
3797 		}
3798 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3799 
3800 		PF_CONFIG_LOCK();
3801 		PF_RULES_WLOCK();
3802 #ifdef PF_WANT_32_TO_64_COUNTER
3803 		if (newrule != NULL) {
3804 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3805 			newrule->allrulelinked = true;
3806 			V_pf_allrulecount++;
3807 		}
3808 #endif
3809 
3810 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3811 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3812 		    pcr->pool_ticket != V_ticket_pabuf)
3813 			ERROUT(EBUSY);
3814 
3815 		ruleset = pf_find_kruleset(pcr->anchor);
3816 		if (ruleset == NULL)
3817 			ERROUT(EINVAL);
3818 
3819 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3820 		if (rs_num >= PF_RULESET_MAX)
3821 			ERROUT(EINVAL);
3822 
3823 		/*
3824 		 * XXXMJG: there is no guarantee that the ruleset was
3825 		 * created by the usual route of calling DIOCXBEGIN.
3826 		 * As a result it is possible the rule tree will not
3827 		 * be allocated yet. Hack around it by doing it here.
3828 		 * Note it is fine to let the tree persist in case of
3829 		 * error as it will be freed down the road on future
3830 		 * updates (if need be).
3831 		 */
3832 		if (ruleset->rules[rs_num].active.tree == NULL) {
3833 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3834 			if (ruleset->rules[rs_num].active.tree == NULL) {
3835 				ERROUT(ENOMEM);
3836 			}
3837 		}
3838 
3839 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3840 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3841 			ERROUT(0);
3842 		} else if (pcr->ticket !=
3843 			    ruleset->rules[rs_num].active.ticket)
3844 				ERROUT(EINVAL);
3845 
3846 		if (pcr->action != PF_CHANGE_REMOVE) {
3847 			if (newrule->ifname[0]) {
3848 				newrule->kif = pfi_kkif_attach(kif,
3849 				    newrule->ifname);
3850 				kif = NULL;
3851 				pfi_kkif_ref(newrule->kif);
3852 			} else
3853 				newrule->kif = NULL;
3854 
3855 			if (newrule->rtableid > 0 &&
3856 			    newrule->rtableid >= rt_numfibs)
3857 				error = EBUSY;
3858 
3859 #ifdef ALTQ
3860 			/* set queue IDs */
3861 			if (newrule->qname[0] != 0) {
3862 				if ((newrule->qid =
3863 				    pf_qname2qid(newrule->qname)) == 0)
3864 					error = EBUSY;
3865 				else if (newrule->pqname[0] != 0) {
3866 					if ((newrule->pqid =
3867 					    pf_qname2qid(newrule->pqname)) == 0)
3868 						error = EBUSY;
3869 				} else
3870 					newrule->pqid = newrule->qid;
3871 			}
3872 #endif /* ALTQ */
3873 			if (newrule->tagname[0])
3874 				if ((newrule->tag =
3875 				    pf_tagname2tag(newrule->tagname)) == 0)
3876 					error = EBUSY;
3877 			if (newrule->match_tagname[0])
3878 				if ((newrule->match_tag = pf_tagname2tag(
3879 				    newrule->match_tagname)) == 0)
3880 					error = EBUSY;
3881 			if (newrule->rt && !newrule->direction)
3882 				error = EINVAL;
3883 			if (!newrule->log)
3884 				newrule->logif = 0;
3885 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3886 				error = ENOMEM;
3887 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3888 				error = ENOMEM;
3889 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3890 				error = EINVAL;
3891 			for (int i = 0; i < 3; i++) {
3892 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3893 					if (pa->addr.type == PF_ADDR_TABLE) {
3894 						pa->addr.p.tbl =
3895 						    pfr_attach_table(ruleset,
3896 						    pa->addr.v.tblname);
3897 						if (pa->addr.p.tbl == NULL)
3898 							error = ENOMEM;
3899 					}
3900 			}
3901 
3902 			newrule->overload_tbl = NULL;
3903 			if (newrule->overload_tblname[0]) {
3904 				if ((newrule->overload_tbl = pfr_attach_table(
3905 				    ruleset, newrule->overload_tblname)) ==
3906 				    NULL)
3907 					error = EINVAL;
3908 				else
3909 					newrule->overload_tbl->pfrkt_flags |=
3910 					    PFR_TFLAG_ACTIVE;
3911 			}
3912 
3913 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3914 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3915 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
3916 			if (((((newrule->action == PF_NAT) ||
3917 			    (newrule->action == PF_RDR) ||
3918 			    (newrule->action == PF_BINAT) ||
3919 			    (newrule->rt > PF_NOPFROUTE)) &&
3920 			    !newrule->anchor)) &&
3921 			    (TAILQ_FIRST(&newrule->rdr.list) == NULL))
3922 				error = EINVAL;
3923 
3924 			if (error) {
3925 				pf_free_rule(newrule);
3926 				PF_RULES_WUNLOCK();
3927 				PF_CONFIG_UNLOCK();
3928 				goto fail;
3929 			}
3930 
3931 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3932 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3933 		}
3934 		pf_empty_kpool(&V_pf_pabuf[0]);
3935 		pf_empty_kpool(&V_pf_pabuf[1]);
3936 		pf_empty_kpool(&V_pf_pabuf[2]);
3937 
3938 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3939 			oldrule = TAILQ_FIRST(
3940 			    ruleset->rules[rs_num].active.ptr);
3941 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3942 			oldrule = TAILQ_LAST(
3943 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3944 		else {
3945 			oldrule = TAILQ_FIRST(
3946 			    ruleset->rules[rs_num].active.ptr);
3947 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3948 				oldrule = TAILQ_NEXT(oldrule, entries);
3949 			if (oldrule == NULL) {
3950 				if (newrule != NULL)
3951 					pf_free_rule(newrule);
3952 				PF_RULES_WUNLOCK();
3953 				PF_CONFIG_UNLOCK();
3954 				error = EINVAL;
3955 				goto fail;
3956 			}
3957 		}
3958 
3959 		if (pcr->action == PF_CHANGE_REMOVE) {
3960 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3961 			    oldrule);
3962 			RB_REMOVE(pf_krule_global,
3963 			    ruleset->rules[rs_num].active.tree, oldrule);
3964 			ruleset->rules[rs_num].active.rcount--;
3965 		} else {
3966 			pf_hash_rule(newrule);
3967 			if (RB_INSERT(pf_krule_global,
3968 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3969 				pf_free_rule(newrule);
3970 				PF_RULES_WUNLOCK();
3971 				PF_CONFIG_UNLOCK();
3972 				error = EEXIST;
3973 				goto fail;
3974 			}
3975 
3976 			if (oldrule == NULL)
3977 				TAILQ_INSERT_TAIL(
3978 				    ruleset->rules[rs_num].active.ptr,
3979 				    newrule, entries);
3980 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3981 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3982 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3983 			else
3984 				TAILQ_INSERT_AFTER(
3985 				    ruleset->rules[rs_num].active.ptr,
3986 				    oldrule, newrule, entries);
3987 			ruleset->rules[rs_num].active.rcount++;
3988 		}
3989 
3990 		nr = 0;
3991 		TAILQ_FOREACH(oldrule,
3992 		    ruleset->rules[rs_num].active.ptr, entries)
3993 			oldrule->nr = nr++;
3994 
3995 		ruleset->rules[rs_num].active.ticket++;
3996 
3997 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3998 		pf_remove_if_empty_kruleset(ruleset);
3999 
4000 		PF_RULES_WUNLOCK();
4001 		PF_CONFIG_UNLOCK();
4002 		break;
4003 
4004 #undef ERROUT
4005 DIOCCHANGERULE_error:
4006 		PF_RULES_WUNLOCK();
4007 		PF_CONFIG_UNLOCK();
4008 		pf_krule_free(newrule);
4009 		pf_kkif_free(kif);
4010 		break;
4011 	}
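
	/*
	 * DIOCCHANGERULE is a two-step handshake: the caller first asks for
	 * a ticket with PF_CHANGE_GET_TICKET, then reissues the ioctl with
	 * that ticket plus a pool ticket from DIOCBEGINADDRS.  An
	 * illustrative sketch (not the full pfctl flow; the rule body is
	 * elided):
	 *
	 *	struct pfioc_rule pcr = { 0 };
	 *	struct pfioc_pooladdr pp = { 0 };
	 *
	 *	pcr.rule = ...;				// rule to insert
	 *	pcr.action = PF_CHANGE_GET_TICKET;
	 *	ioctl(dev, DIOCCHANGERULE, &pcr);	// fills pcr.ticket
	 *	ioctl(dev, DIOCBEGINADDRS, &pp);	// fills pp.ticket
	 *	pcr.pool_ticket = pp.ticket;
	 *	pcr.action = PF_CHANGE_ADD_TAIL;
	 *	ioctl(dev, DIOCCHANGERULE, &pcr);
	 */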
4012 
4013 	case DIOCCLRSTATESNV: {
4014 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
4015 		break;
4016 	}
4017 
4018 	case DIOCKILLSTATESNV: {
4019 		error = pf_killstates_nv((struct pfioc_nv *)addr);
4020 		break;
4021 	}
4022 
4023 	case DIOCADDSTATE: {
4024 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
4025 		struct pfsync_state_1301	*sp = &ps->state;
4026 
4027 		if (sp->timeout >= PFTM_MAX) {
4028 			error = EINVAL;
4029 			goto fail;
4030 		}
4031 		if (V_pfsync_state_import_ptr != NULL) {
4032 			PF_RULES_RLOCK();
4033 			error = V_pfsync_state_import_ptr(
4034 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
4035 			    PFSYNC_MSG_VERSION_1301);
4036 			PF_RULES_RUNLOCK();
4037 		} else
4038 			error = EOPNOTSUPP;
4039 		break;
4040 	}
4041 
4042 	case DIOCGETSTATE: {
4043 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
4044 		struct pf_kstate	*s;
4045 
4046 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
4047 		if (s == NULL) {
4048 			error = ENOENT;
4049 			goto fail;
4050 		}
4051 
4052 		pfsync_state_export((union pfsync_state_union*)&ps->state,
4053 		    s, PFSYNC_MSG_VERSION_1301);
4054 		PF_STATE_UNLOCK(s);
4055 		break;
4056 	}
4057 
4058 	case DIOCGETSTATENV: {
4059 		error = pf_getstate((struct pfioc_nv *)addr);
4060 		break;
4061 	}
4062 
4063 #ifdef COMPAT_FREEBSD14
4064 	case DIOCGETSTATES: {
4065 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
4066 		struct pf_kstate	*s;
4067 		struct pfsync_state_1301	*pstore, *p;
4068 		int			 i, nr;
4069 		size_t			 slice_count = 16, count;
4070 		void			*out;
4071 
4072 		if (ps->ps_len <= 0) {
4073 			nr = uma_zone_get_cur(V_pf_state_z);
4074 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4075 			break;
4076 		}
4077 
4078 		out = ps->ps_states;
4079 		pstore = mallocarray(slice_count,
4080 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
4081 		nr = 0;
4082 
4083 		for (i = 0; i <= V_pf_hashmask; i++) {
4084 			struct pf_idhash *ih = &V_pf_idhash[i];
4085 
4086 DIOCGETSTATES_retry:
4087 			p = pstore;
4088 
4089 			if (LIST_EMPTY(&ih->states))
4090 				continue;
4091 
4092 			PF_HASHROW_LOCK(ih);
4093 			count = 0;
4094 			LIST_FOREACH(s, &ih->states, entry) {
4095 				if (s->timeout == PFTM_UNLINKED)
4096 					continue;
4097 				count++;
4098 			}
4099 
4100 			if (count > slice_count) {
4101 				PF_HASHROW_UNLOCK(ih);
4102 				free(pstore, M_TEMP);
4103 				slice_count = count * 2;
4104 				pstore = mallocarray(slice_count,
4105 				    sizeof(struct pfsync_state_1301), M_TEMP,
4106 				    M_WAITOK | M_ZERO);
4107 				goto DIOCGETSTATES_retry;
4108 			}
4109 
4110 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4111 				PF_HASHROW_UNLOCK(ih);
4112 				goto DIOCGETSTATES_full;
4113 			}
4114 
4115 			LIST_FOREACH(s, &ih->states, entry) {
4116 				if (s->timeout == PFTM_UNLINKED)
4117 					continue;
4118 
4119 				pfsync_state_export((union pfsync_state_union*)p,
4120 				    s, PFSYNC_MSG_VERSION_1301);
4121 				p++;
4122 				nr++;
4123 			}
4124 			PF_HASHROW_UNLOCK(ih);
4125 			error = copyout(pstore, out,
4126 			    sizeof(struct pfsync_state_1301) * count);
4127 			if (error)
4128 				goto fail;
4129 			out = ps->ps_states + nr;
4130 		}
4131 DIOCGETSTATES_full:
4132 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4133 		free(pstore, M_TEMP);
4134 
4135 		break;
4136 	}
4137 
4138 	case DIOCGETSTATESV2: {
4139 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4140 		struct pf_kstate	*s;
4141 		struct pf_state_export	*pstore, *p;
4142 		int i, nr;
4143 		size_t slice_count = 16, count;
4144 		void *out;
4145 
4146 		if (ps->ps_req_version > PF_STATE_VERSION) {
4147 			error = ENOTSUP;
4148 			goto fail;
4149 		}
4150 
4151 		if (ps->ps_len <= 0) {
4152 			nr = uma_zone_get_cur(V_pf_state_z);
4153 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4154 			break;
4155 		}
4156 
4157 		out = ps->ps_states;
4158 		pstore = mallocarray(slice_count,
4159 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
4160 		nr = 0;
4161 
4162 		for (i = 0; i <= V_pf_hashmask; i++) {
4163 			struct pf_idhash *ih = &V_pf_idhash[i];
4164 
4165 DIOCGETSTATESV2_retry:
4166 			p = pstore;
4167 
4168 			if (LIST_EMPTY(&ih->states))
4169 				continue;
4170 
4171 			PF_HASHROW_LOCK(ih);
4172 			count = 0;
4173 			LIST_FOREACH(s, &ih->states, entry) {
4174 				if (s->timeout == PFTM_UNLINKED)
4175 					continue;
4176 				count++;
4177 			}
4178 
4179 			if (count > slice_count) {
4180 				PF_HASHROW_UNLOCK(ih);
4181 				free(pstore, M_TEMP);
4182 				slice_count = count * 2;
4183 				pstore = mallocarray(slice_count,
4184 				    sizeof(struct pf_state_export), M_TEMP,
4185 				    M_WAITOK | M_ZERO);
4186 				goto DIOCGETSTATESV2_retry;
4187 			}
4188 
4189 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4190 				PF_HASHROW_UNLOCK(ih);
4191 				goto DIOCGETSTATESV2_full;
4192 			}
4193 
4194 			LIST_FOREACH(s, &ih->states, entry) {
4195 				if (s->timeout == PFTM_UNLINKED)
4196 					continue;
4197 
4198 				pf_state_export(p, s);
4199 				p++;
4200 				nr++;
4201 			}
4202 			PF_HASHROW_UNLOCK(ih);
4203 			error = copyout(pstore, out,
4204 			    sizeof(struct pf_state_export) * count);
4205 			if (error)
4206 				goto fail;
4207 			out = ps->ps_states + nr;
4208 		}
4209 DIOCGETSTATESV2_full:
4210 		ps->ps_len = nr * sizeof(struct pf_state_export);
4211 		free(pstore, M_TEMP);
4212 
4213 		break;
4214 	}
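
	/*
	 * Both compat state dumps use the same two-pass protocol: a call
	 * with ps_len == 0 only reports the space required, a second call
	 * copies the states out.  A minimal sketch for DIOCGETSTATESV2;
	 * since states may be created between the two calls, real consumers
	 * over-allocate:
	 *
	 *	struct pfioc_states_v2 ps = { 0 };
	 *
	 *	ps.ps_req_version = PF_STATE_VERSION;
	 *	ioctl(dev, DIOCGETSTATESV2, &ps);	// probe: sets ps.ps_len
	 *	ps.ps_states = malloc(ps.ps_len);
	 *	ioctl(dev, DIOCGETSTATESV2, &ps);	// ps_len = bytes copied
	 */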
4215 #endif
4216 	case DIOCGETSTATUSNV: {
4217 		error = pf_getstatus((struct pfioc_nv *)addr);
4218 		break;
4219 	}
4220 
4221 	case DIOCSETSTATUSIF: {
4222 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4223 
4224 		if (pi->ifname[0] == 0) {
4225 			bzero(V_pf_status.ifname, IFNAMSIZ);
4226 			break;
4227 		}
4228 		PF_RULES_WLOCK();
4229 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4230 		PF_RULES_WUNLOCK();
4231 		break;
4232 	}
4233 
4234 	case DIOCCLRSTATUS: {
4235 		pf_ioctl_clear_status();
4236 		break;
4237 	}
4238 
4239 	case DIOCNATLOOK: {
4240 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4241 
4242 		error = pf_ioctl_natlook(pnl);
4243 		break;
4244 	}
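
	/*
	 * DIOCNATLOOK lets transparent proxies map a translated connection
	 * back to its original endpoints.  An illustrative sketch for an
	 * IPv4 TCP connection; addresses and ports are in network byte
	 * order, and handle_original_dst() stands in for the caller's own
	 * logic:
	 *
	 *	struct pfioc_natlook pnl = { 0 };
	 *
	 *	pnl.af = AF_INET;
	 *	pnl.proto = IPPROTO_TCP;
	 *	pnl.direction = PF_OUT;
	 *	pnl.saddr.v4 = client.sin_addr;
	 *	pnl.sport = client.sin_port;
	 *	pnl.daddr.v4 = local.sin_addr;
	 *	pnl.dport = local.sin_port;
	 *	if (ioctl(dev, DIOCNATLOOK, &pnl) == 0)
	 *		handle_original_dst(pnl.rdaddr.v4, pnl.rdport);
	 */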
4245 
4246 	case DIOCSETTIMEOUT: {
4247 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4248 
4249 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4250 		    &pt->seconds);
4251 		break;
4252 	}
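
	/*
	 * pf_ioctl_set_timeout() reports the previous value through its
	 * last argument, so pt->seconds is both input (new value) and
	 * output (old value).  A sketch, using the PFTM_TCP_FIRST_PACKET
	 * index:
	 *
	 *	struct pfioc_tm pt = { 0 };
	 *
	 *	pt.timeout = PFTM_TCP_FIRST_PACKET;
	 *	pt.seconds = 30;
	 *	ioctl(dev, DIOCSETTIMEOUT, &pt);	// pt.seconds = old value
	 */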
4253 
4254 	case DIOCGETTIMEOUT: {
4255 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4256 
4257 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4258 		break;
4259 	}
4260 
4261 	case DIOCGETLIMIT: {
4262 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4263 
4264 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4265 		break;
4266 	}
4267 
4268 	case DIOCSETLIMIT: {
4269 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4270 		unsigned int old_limit;
4271 
4272 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4273 		pl->limit = old_limit;
4274 		break;
4275 	}
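
	/*
	 * DIOCSETLIMIT mirrors DIOCSETTIMEOUT: the new limit goes in and
	 * the previous one comes back in the same field.  A sketch, using
	 * the PF_LIMIT_STATES index:
	 *
	 *	struct pfioc_limit pl = { 0 };
	 *
	 *	pl.index = PF_LIMIT_STATES;
	 *	pl.limit = 100000;
	 *	ioctl(dev, DIOCSETLIMIT, &pl);		// pl.limit = old limit
	 */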
4276 
4277 	case DIOCSETDEBUG: {
4278 		u_int32_t	*level = (u_int32_t *)addr;
4279 
4280 		PF_RULES_WLOCK();
4281 		V_pf_status.debug = *level;
4282 		PF_RULES_WUNLOCK();
4283 		break;
4284 	}
4285 
4286 	case DIOCCLRRULECTRS: {
4287 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4288 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4289 		struct pf_krule		*rule;
4290 
4291 		PF_RULES_WLOCK();
4292 		TAILQ_FOREACH(rule,
4293 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4294 			pf_counter_u64_zero(&rule->evaluations);
4295 			for (int i = 0; i < 2; i++) {
4296 				pf_counter_u64_zero(&rule->packets[i]);
4297 				pf_counter_u64_zero(&rule->bytes[i]);
4298 			}
4299 		}
4300 		PF_RULES_WUNLOCK();
4301 		break;
4302 	}
4303 
4304 	case DIOCGIFSPEEDV0:
4305 	case DIOCGIFSPEEDV1: {
4306 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4307 		struct pf_ifspeed_v1	ps;
4308 		struct ifnet		*ifp;
4309 
4310 		if (psp->ifname[0] == '\0') {
4311 			error = EINVAL;
4312 			goto fail;
4313 		}
4314 
4315 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4316 		if (error != 0)
4317 			goto fail;
4318 		ifp = ifunit(ps.ifname);
4319 		if (ifp != NULL) {
4320 			psp->baudrate32 =
4321 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4322 			if (cmd == DIOCGIFSPEEDV1)
4323 				psp->baudrate = ifp->if_baudrate;
4324 		} else {
4325 			error = EINVAL;
4326 		}
4327 		break;
4328 	}
4329 
4330 #ifdef ALTQ
4331 	case DIOCSTARTALTQ: {
4332 		struct pf_altq		*altq;
4333 
4334 		PF_RULES_WLOCK();
4335 		/* enable all altq interfaces on active list */
4336 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4337 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4338 				error = pf_enable_altq(altq);
4339 				if (error != 0)
4340 					break;
4341 			}
4342 		}
4343 		if (error == 0)
4344 			V_pf_altq_running = 1;
4345 		PF_RULES_WUNLOCK();
4346 		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
4347 		break;
4348 	}
4349 
4350 	case DIOCSTOPALTQ: {
4351 		struct pf_altq		*altq;
4352 
4353 		PF_RULES_WLOCK();
4354 		/* disable all altq interfaces on active list */
4355 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4356 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4357 				error = pf_disable_altq(altq);
4358 				if (error != 0)
4359 					break;
4360 			}
4361 		}
4362 		if (error == 0)
4363 			V_pf_altq_running = 0;
4364 		PF_RULES_WUNLOCK();
4365 		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
4366 		break;
4367 	}
4368 
4369 	case DIOCADDALTQV0:
4370 	case DIOCADDALTQV1: {
4371 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4372 		struct pf_altq		*altq, *a;
4373 		struct ifnet		*ifp;
4374 
4375 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4376 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4377 		if (error)
4378 			goto fail;
4379 		altq->local_flags = 0;
4380 
4381 		PF_RULES_WLOCK();
4382 		if (pa->ticket != V_ticket_altqs_inactive) {
4383 			PF_RULES_WUNLOCK();
4384 			free(altq, M_PFALTQ);
4385 			error = EBUSY;
4386 			goto fail;
4387 		}
4388 
4389 		/*
4390 		 * if this is for a queue, find the discipline and
4391 		 * copy the necessary fields
4392 		 */
4393 		if (altq->qname[0] != 0) {
4394 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4395 				PF_RULES_WUNLOCK();
4396 				error = EBUSY;
4397 				free(altq, M_PFALTQ);
4398 				goto fail;
4399 			}
4400 			altq->altq_disc = NULL;
4401 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4402 				if (strncmp(a->ifname, altq->ifname,
4403 				    IFNAMSIZ) == 0) {
4404 					altq->altq_disc = a->altq_disc;
4405 					break;
4406 				}
4407 			}
4408 		}
4409 
4410 		if ((ifp = ifunit(altq->ifname)) == NULL)
4411 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4412 		else
4413 			error = altq_add(ifp, altq);
4414 
4415 		if (error) {
4416 			PF_RULES_WUNLOCK();
4417 			free(altq, M_PFALTQ);
4418 			goto fail;
4419 		}
4420 
4421 		if (altq->qname[0] != 0)
4422 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4423 		else
4424 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4425 		/* version error check done on import above */
4426 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4427 		PF_RULES_WUNLOCK();
4428 		break;
4429 	}
4430 
4431 	case DIOCGETALTQSV0:
4432 	case DIOCGETALTQSV1: {
4433 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4434 		struct pf_altq		*altq;
4435 
4436 		PF_RULES_RLOCK();
4437 		pa->nr = 0;
4438 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4439 			pa->nr++;
4440 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4441 			pa->nr++;
4442 		pa->ticket = V_ticket_altqs_active;
4443 		PF_RULES_RUNLOCK();
4444 		break;
4445 	}
4446 
4447 	case DIOCGETALTQV0:
4448 	case DIOCGETALTQV1: {
4449 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4450 		struct pf_altq		*altq;
4451 
4452 		PF_RULES_RLOCK();
4453 		if (pa->ticket != V_ticket_altqs_active) {
4454 			PF_RULES_RUNLOCK();
4455 			error = EBUSY;
4456 			goto fail;
4457 		}
4458 		altq = pf_altq_get_nth_active(pa->nr);
4459 		if (altq == NULL) {
4460 			PF_RULES_RUNLOCK();
4461 			error = EBUSY;
4462 			goto fail;
4463 		}
4464 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4465 		PF_RULES_RUNLOCK();
4466 		break;
4467 	}
4468 
4469 	case DIOCCHANGEALTQV0:
4470 	case DIOCCHANGEALTQV1:
4471 		/* CHANGEALTQ not supported yet! */
4472 		error = ENODEV;
4473 		break;
4474 
4475 	case DIOCGETQSTATSV0:
4476 	case DIOCGETQSTATSV1: {
4477 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4478 		struct pf_altq		*altq;
4479 		int			 nbytes;
4480 		u_int32_t		 version;
4481 
4482 		PF_RULES_RLOCK();
4483 		if (pq->ticket != V_ticket_altqs_active) {
4484 			PF_RULES_RUNLOCK();
4485 			error = EBUSY;
4486 			goto fail;
4487 		}
4488 		nbytes = pq->nbytes;
4489 		altq = pf_altq_get_nth_active(pq->nr);
4490 		if (altq == NULL) {
4491 			PF_RULES_RUNLOCK();
4492 			error = EBUSY;
4493 			goto fail;
4494 		}
4495 
4496 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4497 			PF_RULES_RUNLOCK();
4498 			error = ENXIO;
4499 			goto fail;
4500 		}
4501 		PF_RULES_RUNLOCK();
4502 		if (cmd == DIOCGETQSTATSV0)
4503 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4504 		else
4505 			version = pq->version;
4506 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4507 		if (error == 0) {
4508 			pq->scheduler = altq->scheduler;
4509 			pq->nbytes = nbytes;
4510 		}
4511 		break;
4512 	}
4513 #endif /* ALTQ */
4514 
4515 	case DIOCBEGINADDRS: {
4516 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4517 
4518 		error = pf_ioctl_begin_addrs(&pp->ticket);
4519 		break;
4520 	}
4521 
4522 	case DIOCADDADDR: {
4523 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4524 		struct pf_nl_pooladdr npp = {};
4525 
4526 		npp.which = PF_RDR;
4527 		memcpy(&npp, pp, sizeof(*pp));
4528 		error = pf_ioctl_add_addr(&npp);
4529 		break;
4530 	}
4531 
4532 	case DIOCGETADDRS: {
4533 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4534 		struct pf_nl_pooladdr npp = {};
4535 
4536 		npp.which = PF_RDR;
4537 		memcpy(&npp, pp, sizeof(*pp));
4538 		error = pf_ioctl_get_addrs(&npp);
4539 		memcpy(pp, &npp, sizeof(*pp));
4540 
4541 		break;
4542 	}
4543 
4544 	case DIOCGETADDR: {
4545 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4546 		struct pf_nl_pooladdr npp = {};
4547 
4548 		npp.which = PF_RDR;
4549 		memcpy(&npp, pp, sizeof(*pp));
4550 		error = pf_ioctl_get_addr(&npp);
4551 		memcpy(pp, &npp, sizeof(*pp));
4552 
4553 		break;
4554 	}
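
	/*
	 * The pool ioctls above are consumed in two steps: DIOCGETADDRS
	 * reports the pool size in pp.nr, then DIOCGETADDR fetches one
	 * entry per call.  A sketch, assuming ticket, rule_nr and
	 * rule_action came from a prior DIOCGETRULES/DIOCGETRULE pass:
	 *
	 *	struct pfioc_pooladdr pp = { 0 };
	 *	u_int32_t i, naddrs;
	 *
	 *	pp.ticket = ticket;
	 *	pp.r_num = rule_nr;
	 *	pp.r_action = rule_action;
	 *	ioctl(dev, DIOCGETADDRS, &pp);		// fills pp.nr
	 *	naddrs = pp.nr;
	 *	for (i = 0; i < naddrs; i++) {
	 *		pp.nr = i;
	 *		ioctl(dev, DIOCGETADDR, &pp);	// entry in pp.addr
	 *	}
	 */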
4555 
4556 	case DIOCCHANGEADDR: {
4557 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4558 		struct pf_kpool		*pool;
4559 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4560 		struct pf_kruleset	*ruleset;
4561 		struct pfi_kkif		*kif = NULL;
4562 
4563 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
4564 
4565 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4566 		    pca->action > PF_CHANGE_REMOVE) {
4567 			error = EINVAL;
4568 			goto fail;
4569 		}
4570 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4571 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4572 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4573 			error = EINVAL;
4574 			goto fail;
4575 		}
4576 		if (pca->addr.addr.p.dyn != NULL) {
4577 			error = EINVAL;
4578 			goto fail;
4579 		}
4580 
4581 		if (pca->action != PF_CHANGE_REMOVE) {
4582 #ifndef INET
4583 			if (pca->af == AF_INET) {
4584 				error = EAFNOSUPPORT;
4585 				goto fail;
4586 			}
4587 #endif /* INET */
4588 #ifndef INET6
4589 			if (pca->af == AF_INET6) {
4590 				error = EAFNOSUPPORT;
4591 				goto fail;
4592 			}
4593 #endif /* INET6 */
4594 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4595 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4596 			if (newpa->ifname[0])
4597 				kif = pf_kkif_create(M_WAITOK);
4598 			newpa->kif = NULL;
4599 		}
4600 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4601 		PF_RULES_WLOCK();
4602 		ruleset = pf_find_kruleset(pca->anchor);
4603 		if (ruleset == NULL)
4604 			ERROUT(EBUSY);
4605 
4606 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4607 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4608 		if (pool == NULL)
4609 			ERROUT(EBUSY);
4610 
4611 		if (pca->action != PF_CHANGE_REMOVE) {
4612 			if (newpa->ifname[0]) {
4613 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4614 				pfi_kkif_ref(newpa->kif);
4615 				kif = NULL;
4616 			}
4617 
4618 			switch (newpa->addr.type) {
4619 			case PF_ADDR_DYNIFTL:
4620 				error = pfi_dynaddr_setup(&newpa->addr,
4621 				    pca->af);
4622 				break;
4623 			case PF_ADDR_TABLE:
4624 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4625 				    newpa->addr.v.tblname);
4626 				if (newpa->addr.p.tbl == NULL)
4627 					error = ENOMEM;
4628 				break;
4629 			}
4630 			if (error)
4631 				goto DIOCCHANGEADDR_error;
4632 		}
4633 
4634 		switch (pca->action) {
4635 		case PF_CHANGE_ADD_HEAD:
4636 			oldpa = TAILQ_FIRST(&pool->list);
4637 			break;
4638 		case PF_CHANGE_ADD_TAIL:
4639 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4640 			break;
4641 		default:
4642 			oldpa = TAILQ_FIRST(&pool->list);
4643 			for (int i = 0; oldpa && i < pca->nr; i++)
4644 				oldpa = TAILQ_NEXT(oldpa, entries);
4645 
4646 			if (oldpa == NULL)
4647 				ERROUT(EINVAL);
4648 		}
4649 
4650 		if (pca->action == PF_CHANGE_REMOVE) {
4651 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4652 			switch (oldpa->addr.type) {
4653 			case PF_ADDR_DYNIFTL:
4654 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4655 				break;
4656 			case PF_ADDR_TABLE:
4657 				pfr_detach_table(oldpa->addr.p.tbl);
4658 				break;
4659 			}
4660 			if (oldpa->kif)
4661 				pfi_kkif_unref(oldpa->kif);
4662 			free(oldpa, M_PFRULE);
4663 		} else {
4664 			if (oldpa == NULL)
4665 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4666 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4667 			    pca->action == PF_CHANGE_ADD_BEFORE)
4668 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4669 			else
4670 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4671 				    newpa, entries);
4672 		}
4673 
4674 		pool->cur = TAILQ_FIRST(&pool->list);
4675 		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4676 		PF_RULES_WUNLOCK();
4677 		break;
4678 
4679 #undef ERROUT
4680 DIOCCHANGEADDR_error:
4681 		if (newpa != NULL) {
4682 			if (newpa->kif)
4683 				pfi_kkif_unref(newpa->kif);
4684 			free(newpa, M_PFRULE);
4685 		}
4686 		PF_RULES_WUNLOCK();
4687 		pf_kkif_free(kif);
4688 		break;
4689 	}
4690 
4691 	case DIOCGETRULESETS: {
4692 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4693 
4694 		pr->path[sizeof(pr->path) - 1] = '\0';
4695 
4696 		error = pf_ioctl_get_rulesets(pr);
4697 		break;
4698 	}
4699 
4700 	case DIOCGETRULESET: {
4701 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4702 
4703 		pr->path[sizeof(pr->path) - 1] = '\0';
4704 
4705 		error = pf_ioctl_get_ruleset(pr);
4706 		break;
4707 	}
4708 
4709 	case DIOCRCLRTABLES: {
4710 		struct pfioc_table *io = (struct pfioc_table *)addr;
4711 
4712 		if (io->pfrio_esize != 0) {
4713 			error = ENODEV;
4714 			goto fail;
4715 		}
4716 		PF_RULES_WLOCK();
4717 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4718 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4719 		PF_RULES_WUNLOCK();
4720 		break;
4721 	}
4722 
4723 	case DIOCRADDTABLES: {
4724 		struct pfioc_table *io = (struct pfioc_table *)addr;
4725 		struct pfr_table *pfrts;
4726 		size_t totlen;
4727 
4728 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4729 			error = ENODEV;
4730 			goto fail;
4731 		}
4732 
4733 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4734 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4735 			error = ENOMEM;
4736 			goto fail;
4737 		}
4738 
4739 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4740 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4741 		    M_TEMP, M_WAITOK);
4742 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4743 		if (error) {
4744 			free(pfrts, M_TEMP);
4745 			goto fail;
4746 		}
4747 		PF_RULES_WLOCK();
4748 		error = pfr_add_tables(pfrts, io->pfrio_size,
4749 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4750 		PF_RULES_WUNLOCK();
4751 		free(pfrts, M_TEMP);
4752 		break;
4753 	}
4754 
4755 	case DIOCRDELTABLES: {
4756 		struct pfioc_table *io = (struct pfioc_table *)addr;
4757 		struct pfr_table *pfrts;
4758 		size_t totlen;
4759 
4760 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4761 			error = ENODEV;
4762 			goto fail;
4763 		}
4764 
4765 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4766 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4767 			error = ENOMEM;
4768 			goto fail;
4769 		}
4770 
4771 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4772 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4773 		    M_TEMP, M_WAITOK);
4774 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4775 		if (error) {
4776 			free(pfrts, M_TEMP);
4777 			goto fail;
4778 		}
4779 		PF_RULES_WLOCK();
4780 		error = pfr_del_tables(pfrts, io->pfrio_size,
4781 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4782 		PF_RULES_WUNLOCK();
4783 		free(pfrts, M_TEMP);
4784 		break;
4785 	}
4786 
4787 	case DIOCRGETTABLES: {
4788 		struct pfioc_table *io = (struct pfioc_table *)addr;
4789 		struct pfr_table *pfrts;
4790 		size_t totlen;
4791 		int n;
4792 
4793 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4794 			error = ENODEV;
4795 			goto fail;
4796 		}
4797 		PF_RULES_RLOCK();
4798 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4799 		if (n < 0) {
4800 			PF_RULES_RUNLOCK();
4801 			error = EINVAL;
4802 			goto fail;
4803 		}
4804 		io->pfrio_size = min(io->pfrio_size, n);
4805 
4806 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4807 
4808 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4809 		    M_TEMP, M_NOWAIT | M_ZERO);
4810 		if (pfrts == NULL) {
4811 			error = ENOMEM;
4812 			PF_RULES_RUNLOCK();
4813 			goto fail;
4814 		}
4815 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4816 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4817 		PF_RULES_RUNLOCK();
4818 		if (error == 0)
4819 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4820 		free(pfrts, M_TEMP);
4821 		break;
4822 	}
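
	/*
	 * Table listing follows the usual sizing convention: pfrio_size is
	 * the caller's element capacity on the way in and the kernel's
	 * table count on the way out, so a zero-capacity probe learns the
	 * count first.  A minimal sketch:
	 *
	 *	struct pfioc_table io = { 0 };
	 *
	 *	io.pfrio_esize = sizeof(struct pfr_table);
	 *	ioctl(dev, DIOCRGETTABLES, &io);	// probe: sets pfrio_size
	 *	io.pfrio_buffer = calloc(io.pfrio_size, io.pfrio_esize);
	 *	ioctl(dev, DIOCRGETTABLES, &io);	// copies tables out
	 */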
4823 
4824 	case DIOCRGETTSTATS: {
4825 		struct pfioc_table *io = (struct pfioc_table *)addr;
4826 		struct pfr_tstats *pfrtstats;
4827 		size_t totlen;
4828 		int n;
4829 
4830 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4831 			error = ENODEV;
4832 			goto fail;
4833 		}
4834 		PF_TABLE_STATS_LOCK();
4835 		PF_RULES_RLOCK();
4836 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4837 		if (n < 0) {
4838 			PF_RULES_RUNLOCK();
4839 			PF_TABLE_STATS_UNLOCK();
4840 			error = EINVAL;
4841 			goto fail;
4842 		}
4843 		io->pfrio_size = min(io->pfrio_size, n);
4844 
4845 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4846 		pfrtstats = mallocarray(io->pfrio_size,
4847 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4848 		if (pfrtstats == NULL) {
4849 			error = ENOMEM;
4850 			PF_RULES_RUNLOCK();
4851 			PF_TABLE_STATS_UNLOCK();
4852 			goto fail;
4853 		}
4854 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4855 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4856 		PF_RULES_RUNLOCK();
4857 		PF_TABLE_STATS_UNLOCK();
4858 		if (error == 0)
4859 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4860 		free(pfrtstats, M_TEMP);
4861 		break;
4862 	}
4863 
4864 	case DIOCRCLRTSTATS: {
4865 		struct pfioc_table *io = (struct pfioc_table *)addr;
4866 		struct pfr_table *pfrts;
4867 		size_t totlen;
4868 
4869 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4870 			error = ENODEV;
4871 			goto fail;
4872 		}
4873 
4874 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4875 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4876 			/* We used to count tables and use the minimum required
4877 			 * size, so we didn't fail on overly large requests.
4878 			 * Keep doing so. */
4879 			io->pfrio_size = pf_ioctl_maxcount;
4880 			goto fail;
4881 		}
4882 
4883 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4884 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4885 		    M_TEMP, M_WAITOK);
4886 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4887 		if (error) {
4888 			free(pfrts, M_TEMP);
4889 			goto fail;
4890 		}
4891 
4892 		PF_TABLE_STATS_LOCK();
4893 		PF_RULES_RLOCK();
4894 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4895 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4896 		PF_RULES_RUNLOCK();
4897 		PF_TABLE_STATS_UNLOCK();
4898 		free(pfrts, M_TEMP);
4899 		break;
4900 	}
4901 
4902 	case DIOCRSETTFLAGS: {
4903 		struct pfioc_table *io = (struct pfioc_table *)addr;
4904 		struct pfr_table *pfrts;
4905 		size_t totlen;
4906 		int n;
4907 
4908 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4909 			error = ENODEV;
4910 			goto fail;
4911 		}
4912 
4913 		PF_RULES_RLOCK();
4914 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4915 		if (n < 0) {
4916 			PF_RULES_RUNLOCK();
4917 			error = EINVAL;
4918 			goto fail;
4919 		}
4920 
4921 		io->pfrio_size = min(io->pfrio_size, n);
4922 		PF_RULES_RUNLOCK();
4923 
4924 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4925 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4926 		    M_TEMP, M_WAITOK);
4927 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4928 		if (error) {
4929 			free(pfrts, M_TEMP);
4930 			goto fail;
4931 		}
4932 		PF_RULES_WLOCK();
4933 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4934 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4935 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4936 		PF_RULES_WUNLOCK();
4937 		free(pfrts, M_TEMP);
4938 		break;
4939 	}
4940 
4941 	case DIOCRCLRADDRS: {
4942 		struct pfioc_table *io = (struct pfioc_table *)addr;
4943 
4944 		if (io->pfrio_esize != 0) {
4945 			error = ENODEV;
4946 			goto fail;
4947 		}
4948 		PF_RULES_WLOCK();
4949 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4950 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4951 		PF_RULES_WUNLOCK();
4952 		break;
4953 	}
4954 
4955 	case DIOCRADDADDRS: {
4956 		struct pfioc_table *io = (struct pfioc_table *)addr;
4957 		struct pfr_addr *pfras;
4958 		size_t totlen;
4959 
4960 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4961 			error = ENODEV;
4962 			goto fail;
4963 		}
4964 		if (io->pfrio_size < 0 ||
4965 		    io->pfrio_size > pf_ioctl_maxcount ||
4966 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4967 			error = EINVAL;
4968 			goto fail;
4969 		}
4970 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4971 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4972 		    M_TEMP, M_WAITOK);
4973 		error = copyin(io->pfrio_buffer, pfras, totlen);
4974 		if (error) {
4975 			free(pfras, M_TEMP);
4976 			goto fail;
4977 		}
4978 		PF_RULES_WLOCK();
4979 		io->pfrio_nadd = 0;
4980 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4981 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4982 		    PFR_FLAG_USERIOCTL);
4983 		PF_RULES_WUNLOCK();
4984 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4985 			error = copyout(pfras, io->pfrio_buffer, totlen);
4986 		free(pfras, M_TEMP);
4987 		break;
4988 	}
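
	/*
	 * A sketch of populating an existing table via DIOCRADDADDRS, for a
	 * single IPv4 host entry; the table name is illustrative:
	 *
	 *	struct pfioc_table io = { 0 };
	 *	struct pfr_addr pa = { 0 };
	 *
	 *	strlcpy(io.pfrio_table.pfrt_name, "badhosts",
	 *	    sizeof(io.pfrio_table.pfrt_name));
	 *	pa.pfra_af = AF_INET;
	 *	pa.pfra_net = 32;
	 *	inet_pton(AF_INET, "192.0.2.1", &pa.pfra_ip4addr);
	 *	io.pfrio_buffer = &pa;
	 *	io.pfrio_size = 1;
	 *	io.pfrio_esize = sizeof(pa);
	 *	ioctl(dev, DIOCRADDADDRS, &io);		// pfrio_nadd = # added
	 */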
4989 
4990 	case DIOCRDELADDRS: {
4991 		struct pfioc_table *io = (struct pfioc_table *)addr;
4992 		struct pfr_addr *pfras;
4993 		size_t totlen;
4994 
4995 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4996 			error = ENODEV;
4997 			goto fail;
4998 		}
4999 		if (io->pfrio_size < 0 ||
5000 		    io->pfrio_size > pf_ioctl_maxcount ||
5001 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5002 			error = EINVAL;
5003 			goto fail;
5004 		}
5005 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5006 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5007 		    M_TEMP, M_WAITOK);
5008 		error = copyin(io->pfrio_buffer, pfras, totlen);
5009 		if (error) {
5010 			free(pfras, M_TEMP);
5011 			goto fail;
5012 		}
5013 		PF_RULES_WLOCK();
5014 		error = pfr_del_addrs(&io->pfrio_table, pfras,
5015 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
5016 		    PFR_FLAG_USERIOCTL);
5017 		PF_RULES_WUNLOCK();
5018 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5019 			error = copyout(pfras, io->pfrio_buffer, totlen);
5020 		free(pfras, M_TEMP);
5021 		break;
5022 	}
5023 
5024 	case DIOCRSETADDRS: {
5025 		struct pfioc_table *io = (struct pfioc_table *)addr;
5026 		struct pfr_addr *pfras;
5027 		size_t totlen, count;
5028 
5029 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5030 			error = ENODEV;
5031 			goto fail;
5032 		}
5033 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
5034 			error = EINVAL;
5035 			goto fail;
5036 		}
5037 		count = max(io->pfrio_size, io->pfrio_size2);
5038 		if (count > pf_ioctl_maxcount ||
5039 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
5040 			error = EINVAL;
5041 			goto fail;
5042 		}
5043 		totlen = count * sizeof(struct pfr_addr);
5044 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
5045 		    M_WAITOK);
5046 		error = copyin(io->pfrio_buffer, pfras, totlen);
5047 		if (error) {
5048 			free(pfras, M_TEMP);
5049 			goto fail;
5050 		}
5051 		PF_RULES_WLOCK();
5052 		error = pfr_set_addrs(&io->pfrio_table, pfras,
5053 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
5054 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5055 		    PFR_FLAG_USERIOCTL, 0);
5056 		PF_RULES_WUNLOCK();
5057 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5058 			error = copyout(pfras, io->pfrio_buffer, totlen);
5059 		free(pfras, M_TEMP);
5060 		break;
5061 	}
5062 
5063 	case DIOCRGETADDRS: {
5064 		struct pfioc_table *io = (struct pfioc_table *)addr;
5065 		struct pfr_addr *pfras;
5066 		size_t totlen;
5067 
5068 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5069 			error = ENODEV;
5070 			goto fail;
5071 		}
5072 		if (io->pfrio_size < 0 ||
5073 		    io->pfrio_size > pf_ioctl_maxcount ||
5074 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5075 			error = EINVAL;
5076 			goto fail;
5077 		}
5078 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5079 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5080 		    M_TEMP, M_WAITOK | M_ZERO);
5081 		PF_RULES_RLOCK();
5082 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5083 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5084 		PF_RULES_RUNLOCK();
5085 		if (error == 0)
5086 			error = copyout(pfras, io->pfrio_buffer, totlen);
5087 		free(pfras, M_TEMP);
5088 		break;
5089 	}
5090 
5091 	case DIOCRGETASTATS: {
5092 		struct pfioc_table *io = (struct pfioc_table *)addr;
5093 		struct pfr_astats *pfrastats;
5094 		size_t totlen;
5095 
5096 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5097 			error = ENODEV;
5098 			goto fail;
5099 		}
5100 		if (io->pfrio_size < 0 ||
5101 		    io->pfrio_size > pf_ioctl_maxcount ||
5102 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5103 			error = EINVAL;
5104 			goto fail;
5105 		}
5106 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5107 		pfrastats = mallocarray(io->pfrio_size,
5108 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5109 		PF_RULES_RLOCK();
5110 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5111 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5112 		PF_RULES_RUNLOCK();
5113 		if (error == 0)
5114 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5115 		free(pfrastats, M_TEMP);
5116 		break;
5117 	}
5118 
5119 	case DIOCRCLRASTATS: {
5120 		struct pfioc_table *io = (struct pfioc_table *)addr;
5121 		struct pfr_addr *pfras;
5122 		size_t totlen;
5123 
5124 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5125 			error = ENODEV;
5126 			goto fail;
5127 		}
5128 		if (io->pfrio_size < 0 ||
5129 		    io->pfrio_size > pf_ioctl_maxcount ||
5130 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5131 			error = EINVAL;
5132 			goto fail;
5133 		}
5134 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5135 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5136 		    M_TEMP, M_WAITOK);
5137 		error = copyin(io->pfrio_buffer, pfras, totlen);
5138 		if (error) {
5139 			free(pfras, M_TEMP);
5140 			goto fail;
5141 		}
5142 		PF_RULES_WLOCK();
5143 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5144 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5145 		    PFR_FLAG_USERIOCTL);
5146 		PF_RULES_WUNLOCK();
5147 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5148 			error = copyout(pfras, io->pfrio_buffer, totlen);
5149 		free(pfras, M_TEMP);
5150 		break;
5151 	}
5152 
5153 	case DIOCRTSTADDRS: {
5154 		struct pfioc_table *io = (struct pfioc_table *)addr;
5155 		struct pfr_addr *pfras;
5156 		size_t totlen;
5157 
5158 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5159 			error = ENODEV;
5160 			goto fail;
5161 		}
5162 		if (io->pfrio_size < 0 ||
5163 		    io->pfrio_size > pf_ioctl_maxcount ||
5164 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5165 			error = EINVAL;
5166 			goto fail;
5167 		}
5168 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5169 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5170 		    M_TEMP, M_WAITOK);
5171 		error = copyin(io->pfrio_buffer, pfras, totlen);
5172 		if (error) {
5173 			free(pfras, M_TEMP);
5174 			goto fail;
5175 		}
5176 		PF_RULES_RLOCK();
5177 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5178 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5179 		    PFR_FLAG_USERIOCTL);
5180 		PF_RULES_RUNLOCK();
5181 		if (error == 0)
5182 			error = copyout(pfras, io->pfrio_buffer, totlen);
5183 		free(pfras, M_TEMP);
5184 		break;
5185 	}
5186 
5187 	case DIOCRINADEFINE: {
5188 		struct pfioc_table *io = (struct pfioc_table *)addr;
5189 		struct pfr_addr *pfras;
5190 		size_t totlen;
5191 
5192 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5193 			error = ENODEV;
5194 			goto fail;
5195 		}
5196 		if (io->pfrio_size < 0 ||
5197 		    io->pfrio_size > pf_ioctl_maxcount ||
5198 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5199 			error = EINVAL;
5200 			goto fail;
5201 		}
5202 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5203 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5204 		    M_TEMP, M_WAITOK);
5205 		error = copyin(io->pfrio_buffer, pfras, totlen);
5206 		if (error) {
5207 			free(pfras, M_TEMP);
5208 			goto fail;
5209 		}
5210 		PF_RULES_WLOCK();
5211 		error = pfr_ina_define(&io->pfrio_table, pfras,
5212 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5213 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5214 		PF_RULES_WUNLOCK();
5215 		free(pfras, M_TEMP);
5216 		break;
5217 	}
5218 
5219 	case DIOCOSFPADD: {
5220 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5221 		PF_RULES_WLOCK();
5222 		error = pf_osfp_add(io);
5223 		PF_RULES_WUNLOCK();
5224 		break;
5225 	}
5226 
5227 	case DIOCOSFPGET: {
5228 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5229 		PF_RULES_RLOCK();
5230 		error = pf_osfp_get(io);
5231 		PF_RULES_RUNLOCK();
5232 		break;
5233 	}
5234 
5235 	case DIOCXBEGIN: {
5236 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5237 		struct pfioc_trans_e	*ioes, *ioe;
5238 		size_t			 totlen;
5239 		int			 i;
5240 
5241 		if (io->esize != sizeof(*ioe)) {
5242 			error = ENODEV;
5243 			goto fail;
5244 		}
5245 		if (io->size < 0 ||
5246 		    io->size > pf_ioctl_maxcount ||
5247 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5248 			error = EINVAL;
5249 			goto fail;
5250 		}
5251 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5252 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5253 		    M_TEMP, M_WAITOK);
5254 		error = copyin(io->array, ioes, totlen);
5255 		if (error) {
5256 			free(ioes, M_TEMP);
5257 			goto fail;
5258 		}
5259 		PF_RULES_WLOCK();
5260 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5261 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5262 			switch (ioe->rs_num) {
5263 			case PF_RULESET_ETH:
5264 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5265 					PF_RULES_WUNLOCK();
5266 					free(ioes, M_TEMP);
5267 					goto fail;
5268 				}
5269 				break;
5270 #ifdef ALTQ
5271 			case PF_RULESET_ALTQ:
5272 				if (ioe->anchor[0]) {
5273 					PF_RULES_WUNLOCK();
5274 					free(ioes, M_TEMP);
5275 					error = EINVAL;
5276 					goto fail;
5277 				}
5278 				if ((error = pf_begin_altq(&ioe->ticket))) {
5279 					PF_RULES_WUNLOCK();
5280 					free(ioes, M_TEMP);
5281 					goto fail;
5282 				}
5283 				break;
5284 #endif /* ALTQ */
5285 			case PF_RULESET_TABLE:
5286 			    {
5287 				struct pfr_table table;
5288 
5289 				bzero(&table, sizeof(table));
5290 				strlcpy(table.pfrt_anchor, ioe->anchor,
5291 				    sizeof(table.pfrt_anchor));
5292 				if ((error = pfr_ina_begin(&table,
5293 				    &ioe->ticket, NULL, 0))) {
5294 					PF_RULES_WUNLOCK();
5295 					free(ioes, M_TEMP);
5296 					goto fail;
5297 				}
5298 				break;
5299 			    }
5300 			default:
5301 				if ((error = pf_begin_rules(&ioe->ticket,
5302 				    ioe->rs_num, ioe->anchor))) {
5303 					PF_RULES_WUNLOCK();
5304 					free(ioes, M_TEMP);
5305 					goto fail;
5306 				}
5307 				break;
5308 			}
5309 		}
5310 		PF_RULES_WUNLOCK();
5311 		error = copyout(ioes, io->array, totlen);
5312 		free(ioes, M_TEMP);
5313 		break;
5314 	}
5315 
5316 	case DIOCXROLLBACK: {
5317 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5318 		struct pfioc_trans_e	*ioe, *ioes;
5319 		size_t			 totlen;
5320 		int			 i;
5321 
5322 		if (io->esize != sizeof(*ioe)) {
5323 			error = ENODEV;
5324 			goto fail;
5325 		}
5326 		if (io->size < 0 ||
5327 		    io->size > pf_ioctl_maxcount ||
5328 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5329 			error = EINVAL;
5330 			goto fail;
5331 		}
5332 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5333 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5334 		    M_TEMP, M_WAITOK);
5335 		error = copyin(io->array, ioes, totlen);
5336 		if (error) {
5337 			free(ioes, M_TEMP);
5338 			goto fail;
5339 		}
5340 		PF_RULES_WLOCK();
5341 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5342 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5343 			switch (ioe->rs_num) {
5344 			case PF_RULESET_ETH:
5345 				if ((error = pf_rollback_eth(ioe->ticket,
5346 				    ioe->anchor))) {
5347 					PF_RULES_WUNLOCK();
5348 					free(ioes, M_TEMP);
5349 					goto fail; /* really bad */
5350 				}
5351 				break;
5352 #ifdef ALTQ
5353 			case PF_RULESET_ALTQ:
5354 				if (ioe->anchor[0]) {
5355 					PF_RULES_WUNLOCK();
5356 					free(ioes, M_TEMP);
5357 					error = EINVAL;
5358 					goto fail;
5359 				}
5360 				if ((error = pf_rollback_altq(ioe->ticket))) {
5361 					PF_RULES_WUNLOCK();
5362 					free(ioes, M_TEMP);
5363 					goto fail; /* really bad */
5364 				}
5365 				break;
5366 #endif /* ALTQ */
5367 			case PF_RULESET_TABLE:
5368 			    {
5369 				struct pfr_table table;
5370 
5371 				bzero(&table, sizeof(table));
5372 				strlcpy(table.pfrt_anchor, ioe->anchor,
5373 				    sizeof(table.pfrt_anchor));
5374 				if ((error = pfr_ina_rollback(&table,
5375 				    ioe->ticket, NULL, 0))) {
5376 					PF_RULES_WUNLOCK();
5377 					free(ioes, M_TEMP);
5378 					goto fail; /* really bad */
5379 				}
5380 				break;
5381 			    }
5382 			default:
5383 				if ((error = pf_rollback_rules(ioe->ticket,
5384 				    ioe->rs_num, ioe->anchor))) {
5385 					PF_RULES_WUNLOCK();
5386 					free(ioes, M_TEMP);
5387 					goto fail; /* really bad */
5388 				}
5389 				break;
5390 			}
5391 		}
5392 		PF_RULES_WUNLOCK();
5393 		free(ioes, M_TEMP);
5394 		break;
5395 	}
5396 
5397 	case DIOCXCOMMIT: {
5398 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5399 		struct pfioc_trans_e	*ioe, *ioes;
5400 		struct pf_kruleset	*rs;
5401 		struct pf_keth_ruleset	*ers;
5402 		size_t			 totlen;
5403 		int			 i;
5404 
5405 		if (io->esize != sizeof(*ioe)) {
5406 			error = ENODEV;
5407 			goto fail;
5408 		}
5409 
5410 		if (io->size < 0 ||
5411 		    io->size > pf_ioctl_maxcount ||
5412 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5413 			error = EINVAL;
5414 			goto fail;
5415 		}
5416 
5417 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5418 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5419 		    M_TEMP, M_WAITOK);
5420 		error = copyin(io->array, ioes, totlen);
5421 		if (error) {
5422 			free(ioes, M_TEMP);
5423 			goto fail;
5424 		}
5425 		PF_RULES_WLOCK();
5426 		/* First make sure everything will succeed. */
5427 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5428 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5429 			switch (ioe->rs_num) {
5430 			case PF_RULESET_ETH:
5431 				ers = pf_find_keth_ruleset(ioe->anchor);
5432 				if (ers == NULL || ioe->ticket == 0 ||
5433 				    ioe->ticket != ers->inactive.ticket) {
5434 					PF_RULES_WUNLOCK();
5435 					free(ioes, M_TEMP);
5436 					error = EINVAL;
5437 					goto fail;
5438 				}
5439 				break;
5440 #ifdef ALTQ
5441 			case PF_RULESET_ALTQ:
5442 				if (ioe->anchor[0]) {
5443 					PF_RULES_WUNLOCK();
5444 					free(ioes, M_TEMP);
5445 					error = EINVAL;
5446 					goto fail;
5447 				}
5448 				if (!V_altqs_inactive_open || ioe->ticket !=
5449 				    V_ticket_altqs_inactive) {
5450 					PF_RULES_WUNLOCK();
5451 					free(ioes, M_TEMP);
5452 					error = EBUSY;
5453 					goto fail;
5454 				}
5455 				break;
5456 #endif /* ALTQ */
5457 			case PF_RULESET_TABLE:
5458 				rs = pf_find_kruleset(ioe->anchor);
5459 				if (rs == NULL || !rs->topen || ioe->ticket !=
5460 				    rs->tticket) {
5461 					PF_RULES_WUNLOCK();
5462 					free(ioes, M_TEMP);
5463 					error = EBUSY;
5464 					goto fail;
5465 				}
5466 				break;
5467 			default:
5468 				if (ioe->rs_num < 0 || ioe->rs_num >=
5469 				    PF_RULESET_MAX) {
5470 					PF_RULES_WUNLOCK();
5471 					free(ioes, M_TEMP);
5472 					error = EINVAL;
5473 					goto fail;
5474 				}
5475 				rs = pf_find_kruleset(ioe->anchor);
5476 				if (rs == NULL ||
5477 				    !rs->rules[ioe->rs_num].inactive.open ||
5478 				    rs->rules[ioe->rs_num].inactive.ticket !=
5479 				    ioe->ticket) {
5480 					PF_RULES_WUNLOCK();
5481 					free(ioes, M_TEMP);
5482 					error = EBUSY;
5483 					goto fail;
5484 				}
5485 				break;
5486 			}
5487 		}
5488 		/* Now do the commit - no errors should happen here. */
5489 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5490 			switch (ioe->rs_num) {
5491 			case PF_RULESET_ETH:
5492 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5493 					PF_RULES_WUNLOCK();
5494 					free(ioes, M_TEMP);
5495 					goto fail; /* really bad */
5496 				}
5497 				break;
5498 #ifdef ALTQ
5499 			case PF_RULESET_ALTQ:
5500 				if ((error = pf_commit_altq(ioe->ticket))) {
5501 					PF_RULES_WUNLOCK();
5502 					free(ioes, M_TEMP);
5503 					goto fail; /* really bad */
5504 				}
5505 				break;
5506 #endif /* ALTQ */
5507 			case PF_RULESET_TABLE:
5508 			    {
5509 				struct pfr_table table;
5510 
5511 				bzero(&table, sizeof(table));
5512 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5513 				    sizeof(table.pfrt_anchor));
5514 				if ((error = pfr_ina_commit(&table,
5515 				    ioe->ticket, NULL, NULL, 0))) {
5516 					PF_RULES_WUNLOCK();
5517 					free(ioes, M_TEMP);
5518 					goto fail; /* really bad */
5519 				}
5520 				break;
5521 			    }
5522 			default:
5523 				if ((error = pf_commit_rules(ioe->ticket,
5524 				    ioe->rs_num, ioe->anchor))) {
5525 					PF_RULES_WUNLOCK();
5526 					free(ioes, M_TEMP);
5527 					goto fail; /* really bad */
5528 				}
5529 				break;
5530 			}
5531 		}
5532 		PF_RULES_WUNLOCK();
5533 
5534 		/* Only hook into Ethernet traffic if we've got rules for it. */
5535 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5536 			hook_pf_eth();
5537 		else
5538 			dehook_pf_eth();
5539 
5540 		free(ioes, M_TEMP);
5541 		break;
5542 	}
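
	/*
	 * DIOCXBEGIN/DIOCXCOMMIT bracket every atomic ruleset reload: begin
	 * hands out one ticket per ruleset touched, the loader replaces the
	 * inactive rulesets under those tickets, and commit flips them all
	 * active at once (or DIOCXROLLBACK discards them).  A sketch of the
	 * bracketing calls for a single ruleset:
	 *
	 *	struct pfioc_trans_e ioe = { 0 };
	 *	struct pfioc_trans io = { 0 };
	 *
	 *	ioe.rs_num = PF_RULESET_FILTER;
	 *	io.size = 1;
	 *	io.esize = sizeof(ioe);
	 *	io.array = &ioe;
	 *	ioctl(dev, DIOCXBEGIN, &io);	// fills ioe.ticket
	 *	// ... load rules against ioe.ticket, e.g. via DIOCADDRULE ...
	 *	ioctl(dev, DIOCXCOMMIT, &io);	// or DIOCXROLLBACK on error
	 */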
5543 
5544 	case DIOCGETSRCNODES: {
5545 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5546 		struct pf_srchash	*sh;
5547 		struct pf_ksrc_node	*n;
5548 		struct pf_src_node	*p, *pstore;
5549 		uint32_t		 i, nr = 0;
5550 
5551 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5552 				i++, sh++) {
5553 			PF_HASHROW_LOCK(sh);
5554 			LIST_FOREACH(n, &sh->nodes, entry)
5555 				nr++;
5556 			PF_HASHROW_UNLOCK(sh);
5557 		}
5558 
5559 		psn->psn_len = min(psn->psn_len,
5560 		    sizeof(struct pf_src_node) * nr);
5561 
5562 		if (psn->psn_len == 0) {
5563 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5564 			goto fail;
5565 		}
5566 
5567 		nr = 0;
5568 
5569 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5570 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5571 		    i++, sh++) {
5572 		    PF_HASHROW_LOCK(sh);
5573 		    LIST_FOREACH(n, &sh->nodes, entry) {
5574 
5575 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5576 				break;
5577 
5578 			pf_src_node_copy(n, p);
5579 
5580 			p++;
5581 			nr++;
5582 		    }
5583 		    PF_HASHROW_UNLOCK(sh);
5584 		}
5585 		error = copyout(pstore, psn->psn_src_nodes,
5586 		    sizeof(struct pf_src_node) * nr);
5587 		if (error) {
5588 			free(pstore, M_TEMP);
5589 			goto fail;
5590 		}
5591 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5592 		free(pstore, M_TEMP);
5593 		break;
5594 	}
5595 
5596 	case DIOCCLRSRCNODES: {
5597 		pf_kill_srcnodes(NULL);
5598 		break;
5599 	}
5600 
5601 	case DIOCKILLSRCNODES:
5602 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5603 		break;
5604 
5605 #ifdef COMPAT_FREEBSD13
5606 	case DIOCKEEPCOUNTERS_FREEBSD13:
5607 #endif
5608 	case DIOCKEEPCOUNTERS:
5609 		error = pf_keepcounters((struct pfioc_nv *)addr);
5610 		break;
5611 
5612 	case DIOCGETSYNCOOKIES:
5613 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5614 		break;
5615 
5616 	case DIOCSETSYNCOOKIES:
5617 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5618 		break;
5619 
5620 	case DIOCSETHOSTID: {
5621 		u_int32_t	*hostid = (u_int32_t *)addr;
5622 
5623 		PF_RULES_WLOCK();
5624 		if (*hostid == 0)
5625 			V_pf_status.hostid = arc4random();
5626 		else
5627 			V_pf_status.hostid = *hostid;
5628 		PF_RULES_WUNLOCK();
5629 		break;
5630 	}
5631 
5632 	case DIOCOSFPFLUSH:
5633 		PF_RULES_WLOCK();
5634 		pf_osfp_flush();
5635 		PF_RULES_WUNLOCK();
5636 		break;
5637 
5638 	case DIOCIGETIFACES: {
5639 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5640 		struct pfi_kif *ifstore;
5641 		size_t bufsiz;
5642 
5643 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5644 			error = ENODEV;
5645 			goto fail;
5646 		}
5647 
5648 		if (io->pfiio_size < 0 ||
5649 		    io->pfiio_size > pf_ioctl_maxcount ||
5650 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5651 			error = EINVAL;
5652 			goto fail;
5653 		}
5654 
5655 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5656 
5657 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5658 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5659 		    M_TEMP, M_WAITOK | M_ZERO);
5660 
5661 		PF_RULES_RLOCK();
5662 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5663 		PF_RULES_RUNLOCK();
5664 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5665 		free(ifstore, M_TEMP);
5666 		break;
5667 	}
5668 
5669 	case DIOCSETIFFLAG: {
5670 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5671 
5672 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5673 
5674 		PF_RULES_WLOCK();
5675 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5676 		PF_RULES_WUNLOCK();
5677 		break;
5678 	}
5679 
5680 	case DIOCCLRIFFLAG: {
5681 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5682 
5683 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5684 
5685 		PF_RULES_WLOCK();
5686 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5687 		PF_RULES_WUNLOCK();
5688 		break;
5689 	}
5690 
5691 	case DIOCSETREASS: {
5692 		u_int32_t	*reass = (u_int32_t *)addr;
5693 
5694 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5695 		/* Clearing the DF flag without reassembly enabled is not a
5696 		 * valid combination; disable reassembly in that case. */
5697 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5698 			V_pf_status.reass = 0;
5699 		break;
5700 	}
5701 
5702 	default:
5703 		error = ENODEV;
5704 		break;
5705 	}
5706 fail:
5707 	CURVNET_RESTORE();
5708 
5709 #undef ERROUT_IOCTL
5710 
5711 	return (error);
5712 }
5713 
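/*
 * Serialize a kernel state into the pfsync wire format.  Fields shared by
 * all message versions are written through the pfs_1301 view; the switch
 * below fills in the version-specific tail (13.01 keeps 8-bit state flags,
 * 14.00 widens them and adds the queueing and route-to attributes).  A
 * caller picks the layout explicitly, e.g.:
 *
 *	pfsync_state_export(&sp, st, PFSYNC_MSG_VERSION_1400);
 */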
5714 void
5715 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5716 {
5717 	bzero(sp, sizeof(union pfsync_state_union));
5718 
5719 	/* copy from state key */
5720 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5721 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5722 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5723 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5724 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5725 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5726 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5727 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5728 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5729 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5730 
5731 	/* copy from state */
5732 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5733 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5734 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5735 	sp->pfs_1301.expire = pf_state_expires(st);
5736 	if (sp->pfs_1301.expire <= time_uptime)
5737 		sp->pfs_1301.expire = htonl(0);
5738 	else
5739 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5740 
5741 	sp->pfs_1301.direction = st->direction;
5742 	sp->pfs_1301.log = st->act.log;
5743 	sp->pfs_1301.timeout = st->timeout;
5744 
5745 	switch (msg_version) {
5746 		case PFSYNC_MSG_VERSION_1301:
5747 			sp->pfs_1301.state_flags = st->state_flags;
5748 			break;
5749 		case PFSYNC_MSG_VERSION_1400:
5750 			sp->pfs_1400.state_flags = htons(st->state_flags);
5751 			sp->pfs_1400.qid = htons(st->act.qid);
5752 			sp->pfs_1400.pqid = htons(st->act.pqid);
5753 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5754 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5755 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5756 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5757 			sp->pfs_1400.set_tos = st->act.set_tos;
5758 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5759 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5760 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5761 			sp->pfs_1400.rt = st->act.rt;
5762 			if (st->act.rt_kif)
5763 				strlcpy(sp->pfs_1400.rt_ifname,
5764 				    st->act.rt_kif->pfik_name,
5765 				    sizeof(sp->pfs_1400.rt_ifname));
5766 			break;
5767 		default:
5768 			panic("%s: Unsupported pfsync_msg_version %d",
5769 			    __func__, msg_version);
5770 	}
5771 
5772 	/*
5773 	 * XXX Why do we bother pfsyncing source node information if source
5774 	 * nodes are not synced? Showing users that there is source tracking
5775 	 * when there is none seems useless.
5776 	 */
5777 	if (st->sns[PF_SN_LIMIT] != NULL)
5778 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5779 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5780 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5781 
5782 	sp->pfs_1301.id = st->id;
5783 	sp->pfs_1301.creatorid = st->creatorid;
5784 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5785 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5786 
5787 	if (st->rule == NULL)
5788 		sp->pfs_1301.rule = htonl(-1);
5789 	else
5790 		sp->pfs_1301.rule = htonl(st->rule->nr);
5791 	if (st->anchor == NULL)
5792 		sp->pfs_1301.anchor = htonl(-1);
5793 	else
5794 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5795 	if (st->nat_rule == NULL)
5796 		sp->pfs_1301.nat_rule = htonl(-1);
5797 	else
5798 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5799 
5800 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5801 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5802 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5803 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5804 }
5805 
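/*
 * Export a state in the stable PF_STATE_VERSION layout used by userland
 * state listings (libpfctl).  Unlike pfsync_state_export() this always
 * carries the full 16-bit state flags; state_flags_compat duplicates the
 * low 8 bits for old libpfctl binaries.
 */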
5806 void
5807 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5808 {
5809 	bzero(sp, sizeof(*sp));
5810 
5811 	sp->version = PF_STATE_VERSION;
5812 
5813 	/* copy from state key */
5814 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5815 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5816 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5817 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5818 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5819 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5820 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5821 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5822 	sp->proto = st->key[PF_SK_WIRE]->proto;
5823 	sp->af = st->key[PF_SK_WIRE]->af;
5824 
5825 	/* copy from state */
5826 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5827 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5828 	    sizeof(sp->orig_ifname));
5829 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
5830 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5831 	sp->expire = pf_state_expires(st);
5832 	if (sp->expire <= time_uptime)
5833 		sp->expire = htonl(0);
5834 	else
5835 		sp->expire = htonl(sp->expire - time_uptime);
5836 
5837 	sp->direction = st->direction;
5838 	sp->log = st->act.log;
5839 	sp->timeout = st->timeout;
5840 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5841 	sp->state_flags_compat = st->state_flags;
5842 	sp->state_flags = htons(st->state_flags);
5843 	if (st->sns[PF_SN_LIMIT] != NULL)
5844 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5845 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5846 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5847 	sp->id = st->id;
5848 	sp->creatorid = st->creatorid;
5849 	pf_state_peer_hton(&st->src, &sp->src);
5850 	pf_state_peer_hton(&st->dst, &sp->dst);
5851 
5852 	if (st->rule == NULL)
5853 		sp->rule = htonl(-1);
5854 	else
5855 		sp->rule = htonl(st->rule->nr);
5856 	if (st->anchor == NULL)
5857 		sp->anchor = htonl(-1);
5858 	else
5859 		sp->anchor = htonl(st->anchor->nr);
5860 	if (st->nat_rule == NULL)
5861 		sp->nat_rule = htonl(-1);
5862 	else
5863 		sp->nat_rule = htonl(st->nat_rule->nr);
5864 
5865 	sp->packets[0] = st->packets[0];
5866 	sp->packets[1] = st->packets[1];
5867 	sp->bytes[0] = st->bytes[0];
5868 	sp->bytes[1] = st->bytes[1];
5869 
5870 	sp->qid = htons(st->act.qid);
5871 	sp->pqid = htons(st->act.pqid);
5872 	sp->dnpipe = htons(st->act.dnpipe);
5873 	sp->dnrpipe = htons(st->act.dnrpipe);
5874 	sp->rtableid = htonl(st->act.rtableid);
5875 	sp->min_ttl = st->act.min_ttl;
5876 	sp->set_tos = st->act.set_tos;
5877 	sp->max_mss = htons(st->act.max_mss);
5878 	sp->rt = st->act.rt;
5879 	if (st->act.rt_kif)
5880 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5881 		    sizeof(sp->rt_ifname));
5882 	sp->set_prio[0] = st->act.set_prio[0];
5883 	sp->set_prio[1] = st->act.set_prio[1];
5884 
5885 }
5886 
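/*
 * When a rule is copied out to userland, replace the kernel-only table
 * pointer with the table's address count so that no kernel pointer leaks;
 * a count of -1 signals that the table is not active.
 */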
5887 static void
5888 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5889 {
5890 	struct pfr_ktable *kt;
5891 
5892 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5893 
5894 	kt = aw->p.tbl;
5895 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5896 		kt = kt->pfrkt_root;
5897 	aw->p.tbl = NULL;
5898 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5899 		kt->pfrkt_cnt : -1;
5900 }
5901 
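/*
 * Pack an array of counters into a nested nvlist made of three parallel
 * arrays: "counters" (values), "names" and "ids" (indices).  A userland
 * consumer holding the nested nvlist (nvc here) might read it back roughly
 * like this (a sketch; error handling omitted):
 *
 *	size_t n;
 *	const uint64_t *vals;
 *
 *	vals = nvlist_get_number_array(nvc, "counters", &n);
 */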
5902 static int
5903 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5904     size_t number, char **names)
5905 {
5906 	nvlist_t        *nvc;
5907 
5908 	nvc = nvlist_create(0);
5909 	if (nvc == NULL)
5910 		return (ENOMEM);
5911 
5912 	for (size_t i = 0; i < number; i++) {
5913 		nvlist_append_number_array(nvc, "counters",
5914 		    counter_u64_fetch(counters[i]));
5915 		nvlist_append_string_array(nvc, "names",
5916 		    names[i]);
5917 		nvlist_append_number_array(nvc, "ids",
5918 		    i);
5919 	}
5920 	nvlist_add_nvlist(nvl, name, nvc);
5921 	nvlist_destroy(nvc);
5922 
5923 	return (0);
5924 }
5925 
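/*
 * Snapshot V_pf_status and the per-interface counters into a single
 * packed nvlist.  The usual two-pass convention applies: a caller passing
 * size == 0 gets only the required length back in nv->len, and a buffer
 * that is too small yields ENOSPC.
 */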
5926 static int
5927 pf_getstatus(struct pfioc_nv *nv)
5928 {
5929 	nvlist_t        *nvl = NULL, *nvc = NULL;
5930 	void            *nvlpacked = NULL;
5931 	int              error;
5932 	struct pf_status s;
5933 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5934 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5935 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5936 	time_t since;
5937 
5938 	PF_RULES_RLOCK_TRACKER;
5939 
5940 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5941 
5942 	PF_RULES_RLOCK();
5943 
5944 	nvl = nvlist_create(0);
5945 	if (nvl == NULL)
5946 		ERROUT(ENOMEM);
5947 
5948 	since = time_second - (time_uptime - V_pf_status.since);
5949 
5950 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5951 	nvlist_add_number(nvl, "since", since);
5952 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5953 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5954 	nvlist_add_number(nvl, "states", V_pf_status.states);
5955 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5956 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5957 	nvlist_add_bool(nvl, "syncookies_active",
5958 	    V_pf_status.syncookies_active);
5959 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5960 
5961 	/* counters */
5962 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5963 	    PFRES_MAX, pf_reasons);
5964 	if (error != 0)
5965 		ERROUT(error);
5966 
5967 	/* lcounters */
5968 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5969 	    KLCNT_MAX, pf_lcounter);
5970 	if (error != 0)
5971 		ERROUT(error);
5972 
5973 	/* fcounters */
5974 	nvc = nvlist_create(0);
5975 	if (nvc == NULL)
5976 		ERROUT(ENOMEM);
5977 
5978 	for (int i = 0; i < FCNT_MAX; i++) {
5979 		nvlist_append_number_array(nvc, "counters",
5980 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5981 		nvlist_append_string_array(nvc, "names",
5982 		    pf_fcounter[i]);
5983 		nvlist_append_number_array(nvc, "ids",
5984 		    i);
5985 	}
5986 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5987 	nvlist_destroy(nvc);
5988 	nvc = NULL;
5989 
5990 	/* scounters */
5991 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5992 	    SCNT_MAX, pf_fcounter);
5993 	if (error != 0)
5994 		ERROUT(error);
5995 
5996 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5997 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5998 	    PF_MD5_DIGEST_LENGTH);
5999 
6000 	pfi_update_status(V_pf_status.ifname, &s);
6001 
6002 	/* pcounters / bcounters */
6003 	for (int i = 0; i < 2; i++) {
6004 		for (int j = 0; j < 2; j++) {
6005 			for (int k = 0; k < 2; k++) {
6006 				nvlist_append_number_array(nvl, "pcounters",
6007 				    s.pcounters[i][j][k]);
6008 			}
6009 			nvlist_append_number_array(nvl, "bcounters",
6010 			    s.bcounters[i][j]);
6011 		}
6012 	}
6013 
6014 	nvlpacked = nvlist_pack(nvl, &nv->len);
6015 	if (nvlpacked == NULL)
6016 		ERROUT(ENOMEM);
6017 
6018 	if (nv->size == 0)
6019 		ERROUT(0);
6020 	else if (nv->size < nv->len)
6021 		ERROUT(ENOSPC);
6022 
6023 	PF_RULES_RUNLOCK();
6024 	error = copyout(nvlpacked, nv->data, nv->len);
6025 	goto done;
6026 
6027 #undef ERROUT
6028 errout:
6029 	PF_RULES_RUNLOCK();
6030 done:
6031 	free(nvlpacked, M_NVLIST);
6032 	nvlist_destroy(nvc);
6033 	nvlist_destroy(nvl);
6034 
6035 	return (error);
6036 }
6037 
6038 /*
6039  * XXX - Check for version mismatch!!!
6040  */
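/*
 * Expire every state.  pf_remove_state() drops the hash-row lock, so the
 * row scan restarts from the top after each removal; PFSTATE_NOSYNC
 * suppresses per-state pfsync delete messages during the flush.
 */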
6041 static void
6042 pf_clear_all_states(void)
6043 {
6044 	struct epoch_tracker	 et;
6045 	struct pf_kstate	*s;
6046 	u_int i;
6047 
6048 	NET_EPOCH_ENTER(et);
6049 	for (i = 0; i <= V_pf_hashmask; i++) {
6050 		struct pf_idhash *ih = &V_pf_idhash[i];
6051 relock:
6052 		PF_HASHROW_LOCK(ih);
6053 		LIST_FOREACH(s, &ih->states, entry) {
6054 			s->timeout = PFTM_PURGE;
6055 			/* Don't send out individual delete messages. */
6056 			s->state_flags |= PFSTATE_NOSYNC;
6057 			pf_remove_state(s);
6058 			goto relock;
6059 		}
6060 		PF_HASHROW_UNLOCK(ih);
6061 	}
6062 	NET_EPOCH_EXIT(et);
6063 }
6064 
6065 static int
6066 pf_clear_tables(void)
6067 {
6068 	struct pfioc_table io;
6069 	int error;
6070 
6071 	bzero(&io, sizeof(io));
6072 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
6073 
6074 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
6075 	    io.pfrio_flags);
6076 
6077 	return (error);
6078 }
6079 
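/*
 * Remove source nodes: all of them when psnk is NULL (DIOCCLRSRCNODES and
 * shutdown_pf()), otherwise those matching the src/dst filter in *psnk.
 * Matching nodes are unlinked under their hash-row lock and collected on
 * a local kill list; states still referencing them are detached before
 * the nodes are finally freed.
 */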
6080 static void
6081 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6082 {
6083 	struct pf_ksrc_node_list	 kill;
6084 	u_int 				 killed;
6085 
6086 	LIST_INIT(&kill);
6087 	for (int i = 0; i <= V_pf_srchashmask; i++) {
6088 		struct pf_srchash *sh = &V_pf_srchash[i];
6089 		struct pf_ksrc_node *sn, *tmp;
6090 
6091 		PF_HASHROW_LOCK(sh);
6092 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6093 			if (psnk == NULL ||
6094 			    (pf_match_addr(psnk->psnk_src.neg,
6095 			      &psnk->psnk_src.addr.v.a.addr,
6096 			      &psnk->psnk_src.addr.v.a.mask,
6097 			      &sn->addr, sn->af) &&
6098 			    pf_match_addr(psnk->psnk_dst.neg,
6099 			      &psnk->psnk_dst.addr.v.a.addr,
6100 			      &psnk->psnk_dst.addr.v.a.mask,
6101 			      &sn->raddr, sn->af))) {
6102 				pf_unlink_src_node(sn);
6103 				LIST_INSERT_HEAD(&kill, sn, entry);
6104 				sn->expire = 1;
6105 			}
6106 		PF_HASHROW_UNLOCK(sh);
6107 	}
6108 
6109 	for (int i = 0; i <= V_pf_hashmask; i++) {
6110 		struct pf_idhash *ih = &V_pf_idhash[i];
6111 		struct pf_kstate *s;
6112 
6113 		PF_HASHROW_LOCK(ih);
6114 		LIST_FOREACH(s, &ih->states, entry) {
6115 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
6116 			    sn_type++) {
6117 				if (s->sns[sn_type] &&
6118 				    s->sns[sn_type]->expire == 1) {
6119 					s->sns[sn_type] = NULL;
6120 				}
6121 			}
6122 		}
6123 		PF_HASHROW_UNLOCK(ih);
6124 	}
6125 
6126 	killed = pf_free_src_nodes(&kill);
6127 
6128 	if (psnk != NULL)
6129 		psnk->psnk_killed = killed;
6130 }
6131 
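/*
 * DIOCKEEPCOUNTERS carries a single boolean in a packed nvlist.  A minimal
 * userland sketch, assuming libnv and an open /dev/pf descriptor "dev"
 * (error handling omitted):
 *
 *	struct pfioc_nv nv;
 *	nvlist_t *nvl = nvlist_create(0);
 *
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */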
6132 static int
6133 pf_keepcounters(struct pfioc_nv *nv)
6134 {
6135 	nvlist_t	*nvl = NULL;
6136 	void		*nvlpacked = NULL;
6137 	int		 error = 0;
6138 
6139 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6140 
6141 	if (nv->len > pf_ioctl_maxcount)
6142 		ERROUT(ENOMEM);
6143 
6144 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6145 	error = copyin(nv->data, nvlpacked, nv->len);
6146 	if (error)
6147 		ERROUT(error);
6148 
6149 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6150 	if (nvl == NULL)
6151 		ERROUT(EBADMSG);
6152 
6153 	if (!nvlist_exists_bool(nvl, "keep_counters"))
6154 		ERROUT(EBADMSG);
6155 
6156 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6157 
6158 on_error:
6159 	nvlist_destroy(nvl);
6160 	free(nvlpacked, M_NVLIST);
6161 	return (error);
6162 }
6163 
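/*
 * Remove states, optionally restricted to one interface name.  With
 * psk_kill_match set, a reversed state key is built from each removed
 * state so that the matching counterpart (e.g. the NAT-translated side)
 * is killed as well.
 */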
6164 unsigned int
6165 pf_clear_states(const struct pf_kstate_kill *kill)
6166 {
6167 	struct pf_state_key_cmp	 match_key;
6168 	struct pf_kstate	*s;
6169 	struct pfi_kkif	*kif;
6170 	int		 idx;
6171 	unsigned int	 killed = 0, dir;
6172 
6173 	NET_EPOCH_ASSERT();
6174 
6175 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6176 		struct pf_idhash *ih = &V_pf_idhash[i];
6177 
6178 relock_DIOCCLRSTATES:
6179 		PF_HASHROW_LOCK(ih);
6180 		LIST_FOREACH(s, &ih->states, entry) {
6181 			/* For floating states look at the original kif. */
6182 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6183 
6184 			if (kill->psk_ifname[0] &&
6185 			    strcmp(kill->psk_ifname,
6186 			    kif->pfik_name))
6187 				continue;
6188 
6189 			if (kill->psk_kill_match) {
6190 				bzero(&match_key, sizeof(match_key));
6191 
6192 				if (s->direction == PF_OUT) {
6193 					dir = PF_IN;
6194 					idx = PF_SK_STACK;
6195 				} else {
6196 					dir = PF_OUT;
6197 					idx = PF_SK_WIRE;
6198 				}
6199 
6200 				match_key.af = s->key[idx]->af;
6201 				match_key.proto = s->key[idx]->proto;
6202 				pf_addrcpy(&match_key.addr[0],
6203 				    &s->key[idx]->addr[1], match_key.af);
6204 				match_key.port[0] = s->key[idx]->port[1];
6205 				pf_addrcpy(&match_key.addr[1],
6206 				    &s->key[idx]->addr[0], match_key.af);
6207 				match_key.port[1] = s->key[idx]->port[0];
6208 			}
6209 
6210 			/*
6211 			 * Don't send out individual
6212 			 * delete messages.
6213 			 */
6214 			s->state_flags |= PFSTATE_NOSYNC;
6215 			pf_remove_state(s);
6216 			killed++;
6217 
6218 			if (kill->psk_kill_match)
6219 				killed += pf_kill_matching_state(&match_key,
6220 				    dir);
6221 
6222 			goto relock_DIOCCLRSTATES;
6223 		}
6224 		PF_HASHROW_UNLOCK(ih);
6225 	}
6226 
6227 	if (V_pfsync_clear_states_ptr != NULL)
6228 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6229 
6230 	return (killed);
6231 }
6232 
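/*
 * Kill the states selected by *kill.  When an id is given, exactly one
 * state is looked up by (id, creatorid), with creatorid defaulting to our
 * own hostid; otherwise every hash row is swept via pf_killstates_row().
 */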
6233 void
6234 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6235 {
6236 	struct pf_kstate	*s;
6237 
6238 	NET_EPOCH_ASSERT();
6239 	if (kill->psk_pfcmp.id) {
6240 		if (kill->psk_pfcmp.creatorid == 0)
6241 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6242 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6243 		    kill->psk_pfcmp.creatorid))) {
6244 			pf_remove_state(s);
6245 			*killed = 1;
6246 		}
6247 		return;
6248 	}
6249 
6250 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6251 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6252 }
6253 
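/*
 * nvlist transport wrapper, identical in shape to pf_clearstates_nv()
 * below: copy in and unpack the request, perform the kill, then hand back
 * a single "killed" number.  As elsewhere, nv->size == 0 lets the caller
 * size the reply buffer first and ENOSPC reports one that is too small.
 */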
6254 static int
6255 pf_killstates_nv(struct pfioc_nv *nv)
6256 {
6257 	struct pf_kstate_kill	 kill;
6258 	struct epoch_tracker	 et;
6259 	nvlist_t		*nvl = NULL;
6260 	void			*nvlpacked = NULL;
6261 	int			 error = 0;
6262 	unsigned int		 killed = 0;
6263 
6264 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6265 
6266 	if (nv->len > pf_ioctl_maxcount)
6267 		ERROUT(ENOMEM);
6268 
6269 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6270 	error = copyin(nv->data, nvlpacked, nv->len);
6271 	if (error)
6272 		ERROUT(error);
6273 
6274 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6275 	if (nvl == NULL)
6276 		ERROUT(EBADMSG);
6277 
6278 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6279 	if (error)
6280 		ERROUT(error);
6281 
6282 	NET_EPOCH_ENTER(et);
6283 	pf_killstates(&kill, &killed);
6284 	NET_EPOCH_EXIT(et);
6285 
6286 	free(nvlpacked, M_NVLIST);
6287 	nvlpacked = NULL;
6288 	nvlist_destroy(nvl);
6289 	nvl = nvlist_create(0);
6290 	if (nvl == NULL)
6291 		ERROUT(ENOMEM);
6292 
6293 	nvlist_add_number(nvl, "killed", killed);
6294 
6295 	nvlpacked = nvlist_pack(nvl, &nv->len);
6296 	if (nvlpacked == NULL)
6297 		ERROUT(ENOMEM);
6298 
6299 	if (nv->size == 0)
6300 		ERROUT(0);
6301 	else if (nv->size < nv->len)
6302 		ERROUT(ENOSPC);
6303 
6304 	error = copyout(nvlpacked, nv->data, nv->len);
6305 
6306 on_error:
6307 	nvlist_destroy(nvl);
6308 	free(nvlpacked, M_NVLIST);
6309 	return (error);
6310 }
6311 
6312 static int
6313 pf_clearstates_nv(struct pfioc_nv *nv)
6314 {
6315 	struct pf_kstate_kill	 kill;
6316 	struct epoch_tracker	 et;
6317 	nvlist_t		*nvl = NULL;
6318 	void			*nvlpacked = NULL;
6319 	int			 error = 0;
6320 	unsigned int		 killed;
6321 
6322 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6323 
6324 	if (nv->len > pf_ioctl_maxcount)
6325 		ERROUT(ENOMEM);
6326 
6327 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6328 	error = copyin(nv->data, nvlpacked, nv->len);
6329 	if (error)
6330 		ERROUT(error);
6331 
6332 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6333 	if (nvl == NULL)
6334 		ERROUT(EBADMSG);
6335 
6336 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6337 	if (error)
6338 		ERROUT(error);
6339 
6340 	NET_EPOCH_ENTER(et);
6341 	killed = pf_clear_states(&kill);
6342 	NET_EPOCH_EXIT(et);
6343 
6344 	free(nvlpacked, M_NVLIST);
6345 	nvlpacked = NULL;
6346 	nvlist_destroy(nvl);
6347 	nvl = nvlist_create(0);
6348 	if (nvl == NULL)
6349 		ERROUT(ENOMEM);
6350 
6351 	nvlist_add_number(nvl, "killed", killed);
6352 
6353 	nvlpacked = nvlist_pack(nvl, &nv->len);
6354 	if (nvlpacked == NULL)
6355 		ERROUT(ENOMEM);
6356 
6357 	if (nv->size == 0)
6358 		ERROUT(0);
6359 	else if (nv->size < nv->len)
6360 		ERROUT(ENOSPC);
6361 
6362 	error = copyout(nvlpacked, nv->data, nv->len);
6363 
6364 #undef ERROUT
6365 on_error:
6366 	nvlist_destroy(nvl);
6367 	free(nvlpacked, M_NVLIST);
6368 	return (error);
6369 }
6370 
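/*
 * Look up a single state by the "id"/"creatorid" pair of the request
 * nvlist and return it as a nested "state" nvlist.  The state lock is
 * held until the reply has been packed, so the state cannot be freed
 * under us.
 */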
6371 static int
6372 pf_getstate(struct pfioc_nv *nv)
6373 {
6374 	nvlist_t		*nvl = NULL, *nvls;
6375 	void			*nvlpacked = NULL;
6376 	struct pf_kstate	*s = NULL;
6377 	int			 error = 0;
6378 	uint64_t		 id, creatorid;
6379 
6380 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6381 
6382 	if (nv->len > pf_ioctl_maxcount)
6383 		ERROUT(ENOMEM);
6384 
6385 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6386 	error = copyin(nv->data, nvlpacked, nv->len);
6387 	if (error)
6388 		ERROUT(error);
6389 
6390 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6391 	if (nvl == NULL)
6392 		ERROUT(EBADMSG);
6393 
6394 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6395 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6396 
6397 	s = pf_find_state_byid(id, creatorid);
6398 	if (s == NULL)
6399 		ERROUT(ENOENT);
6400 
6401 	free(nvlpacked, M_NVLIST);
6402 	nvlpacked = NULL;
6403 	nvlist_destroy(nvl);
6404 	nvl = nvlist_create(0);
6405 	if (nvl == NULL)
6406 		ERROUT(ENOMEM);
6407 
6408 	nvls = pf_state_to_nvstate(s);
6409 	if (nvls == NULL)
6410 		ERROUT(ENOMEM);
6411 
6412 	nvlist_add_nvlist(nvl, "state", nvls);
6413 	nvlist_destroy(nvls);
6414 
6415 	nvlpacked = nvlist_pack(nvl, &nv->len);
6416 	if (nvlpacked == NULL)
6417 		ERROUT(ENOMEM);
6418 
6419 	if (nv->size == 0)
6420 		ERROUT(0);
6421 	else if (nv->size < nv->len)
6422 		ERROUT(ENOSPC);
6423 
6424 	error = copyout(nvlpacked, nv->data, nv->len);
6425 
6426 #undef ERROUT
6427 errout:
6428 	if (s != NULL)
6429 		PF_STATE_UNLOCK(s);
6430 	free(nvlpacked, M_NVLIST);
6431 	nvlist_destroy(nvl);
6432 	return (error);
6433 }
6434 
6435 /*
6436  * XXX - Check for version mismatch!!!
6437  */
6438 
6439 /*
6440  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6441  */
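/*
 * The empty begin/commit transaction pairs below are the flush primitive:
 * pf_begin_rules() opens a fresh inactive ruleset and pf_commit_rules()
 * swaps it in, so committing with nothing added in between replaces the
 * active ruleset with an empty one.
 */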
6442 static int
6443 shutdown_pf(void)
6444 {
6445 	int error = 0;
6446 	u_int32_t t[5];
6447 	char nn = '\0';
6448 	struct pf_kanchor *anchor, *tmp_anchor;
6449 	struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
6450 	int rs_num;
6451 
6452 	do {
6453 		/* Unlink rules of all user defined anchors */
6454 		RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
6455 		    tmp_anchor) {
6456 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6457 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6458 				    anchor->path)) != 0) {
6459 					DPFPRINTF(PF_DEBUG_MISC, "%s: "
6460 					    "anchor.path=%s rs_num=%d",
6461 					    __func__, anchor->path, rs_num);
6462 					goto error;	/* XXX: rollback? */
6463 				}
6464 			}
6465 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6466 				error = pf_commit_rules(t[rs_num], rs_num,
6467 				    anchor->path);
6468 				MPASS(error == 0);
6469 			}
6470 		}
6471 
6472 		/* Unlink rules of all user defined ether anchors */
6473 		RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
6474 		    &V_pf_keth_anchors, tmp_eth_anchor) {
6475 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6476 			    != 0) {
6477 				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
6478 				    "anchor.path=%s", __func__,
6479 				    eth_anchor->path);
6480 				goto error;
6481 			}
6482 			error = pf_commit_eth(t[0], eth_anchor->path);
6483 			MPASS(error == 0);
6484 		}
6485 
6486 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6487 		    != 0) {
6488 			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
6489 			break;
6490 		}
6491 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6492 		    != 0) {
6493 			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
6494 			break;		/* XXX: rollback? */
6495 		}
6496 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6497 		    != 0) {
6498 			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
6499 			break;		/* XXX: rollback? */
6500 		}
6501 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6502 		    != 0) {
6503 			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
6504 			break;		/* XXX: rollback? */
6505 		}
6506 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6507 		    != 0) {
6508 			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
6509 			break;		/* XXX: rollback? */
6510 		}
6511 
6512 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6513 		MPASS(error == 0);
6514 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6515 		MPASS(error == 0);
6516 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6517 		MPASS(error == 0);
6518 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6519 		MPASS(error == 0);
6520 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6521 		MPASS(error == 0);
6522 
6523 		if ((error = pf_clear_tables()) != 0)
6524 			break;
6525 
6526 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6527 			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
6528 			break;
6529 		}
6530 		error = pf_commit_eth(t[0], &nn);
6531 		MPASS(error == 0);
6532 
6533 #ifdef ALTQ
6534 		if ((error = pf_begin_altq(&t[0])) != 0) {
6535 			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
6536 			break;
6537 		}
6538 		pf_commit_altq(t[0]);
6539 #endif
6540 
6541 		pf_clear_all_states();
6542 
6543 		pf_kill_srcnodes(NULL);
6544 
6545 		/* Status uses no malloc'ed memory, so nothing to clean up. */
6546 		/* Fingerprints and interfaces have their own cleanup code. */
6547 	} while (0);
6548 
6549 error:
6550 	return (error);
6551 }
6552 
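/*
 * Translate a pf_test*() verdict into a pfil return code.  PF_PASS with
 * the mbuf pointer cleared means pf took ownership of the packet (e.g.
 * via route-to), which pfil must treat as PFIL_CONSUMED; any other
 * verdict frees the packet and reports it dropped.
 */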
6553 static pfil_return_t
6554 pf_check_return(int chk, struct mbuf **m)
6555 {
6556 
6557 	switch (chk) {
6558 	case PF_PASS:
6559 		if (*m == NULL)
6560 			return (PFIL_CONSUMED);
6561 		else
6562 			return (PFIL_PASS);
6563 		break;
6564 	default:
6565 		if (*m != NULL) {
6566 			m_freem(*m);
6567 			*m = NULL;
6568 		}
6569 		return (PFIL_DROPPED);
6570 	}
6571 }
6572 
6573 static pfil_return_t
6574 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6575     void *ruleset __unused, struct inpcb *inp)
6576 {
6577 	int chk;
6578 
6579 	CURVNET_ASSERT_SET();
6580 
6581 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6582 
6583 	return (pf_check_return(chk, m));
6584 }
6585 
6586 static pfil_return_t
6587 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6588     void *ruleset __unused, struct inpcb *inp)
6589 {
6590 	int chk;
6591 
6592 	CURVNET_ASSERT_SET();
6593 
6594 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6595 
6596 	return (pf_check_return(chk, m));
6597 }
6598 
6599 #ifdef INET
6600 static pfil_return_t
6601 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6602     void *ruleset __unused, struct inpcb *inp)
6603 {
6604 	int chk;
6605 
6606 	CURVNET_ASSERT_SET();
6607 
6608 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6609 
6610 	return (pf_check_return(chk, m));
6611 }
6612 
6613 static pfil_return_t
6614 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6615     void *ruleset __unused, struct inpcb *inp)
6616 {
6617 	int chk;
6618 
6619 	CURVNET_ASSERT_SET();
6620 
6621 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6622 
6623 	return (pf_check_return(chk, m));
6624 }
6625 #endif
6626 
6627 #ifdef INET6
6628 static pfil_return_t
6629 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6630     void *ruleset __unused, struct inpcb *inp)
6631 {
6632 	int chk;
6633 
6634 	CURVNET_ASSERT_SET();
6635 
6636 	/*
6637 	 * For loopback traffic IPv6 uses the real interface in order to
6638 	 * support scoped addresses.  To support stateful filtering we have
6639 	 * to change this to lo0, as is done for IPv4.
6640 	 */
6641 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6642 	    m, inp, NULL);
6643 
6644 	return (pf_check_return(chk, m));
6645 }
6646 
6647 static pfil_return_t
6648 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6649     void *ruleset __unused, struct inpcb *inp)
6650 {
6651 	int chk;
6652 
6653 	CURVNET_ASSERT_SET();
6654 
6655 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6656 
6657 	return (pf_check_return(chk, m));
6658 }
6659 #endif /* INET6 */
6660 
6661 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6662 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6663 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6664 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6665 
6666 #ifdef INET
6667 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6668 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6669 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6670 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6671 #endif
6672 #ifdef INET6
6673 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6674 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6675 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6676 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6677 #endif
6678 
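/*
 * Registering with pfil is a two-step operation: pfil_add_hook() creates
 * the hook and pfil_link() attaches it to a packet head.  The Ethernet
 * hooks attach to the link-layer head here; hook_pf() below does the same
 * for the inet/inet6 heads and, when V_pf_filter_local is set, also links
 * the output hooks to the local-delivery heads.
 */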
6679 static void
6680 hook_pf_eth(void)
6681 {
6682 	struct pfil_hook_args pha = {
6683 		.pa_version = PFIL_VERSION,
6684 		.pa_modname = "pf",
6685 		.pa_type = PFIL_TYPE_ETHERNET,
6686 	};
6687 	struct pfil_link_args pla = {
6688 		.pa_version = PFIL_VERSION,
6689 	};
6690 	int ret __diagused;
6691 
6692 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6693 		return;
6694 
6695 	pha.pa_mbuf_chk = pf_eth_check_in;
6696 	pha.pa_flags = PFIL_IN;
6697 	pha.pa_rulname = "eth-in";
6698 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6699 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6700 	pla.pa_head = V_link_pfil_head;
6701 	pla.pa_hook = V_pf_eth_in_hook;
6702 	ret = pfil_link(&pla);
6703 	MPASS(ret == 0);
6704 	pha.pa_mbuf_chk = pf_eth_check_out;
6705 	pha.pa_flags = PFIL_OUT;
6706 	pha.pa_rulname = "eth-out";
6707 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6708 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6709 	pla.pa_head = V_link_pfil_head;
6710 	pla.pa_hook = V_pf_eth_out_hook;
6711 	ret = pfil_link(&pla);
6712 	MPASS(ret == 0);
6713 
6714 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6715 }
6716 
6717 static void
6718 hook_pf(void)
6719 {
6720 	struct pfil_hook_args pha = {
6721 		.pa_version = PFIL_VERSION,
6722 		.pa_modname = "pf",
6723 	};
6724 	struct pfil_link_args pla = {
6725 		.pa_version = PFIL_VERSION,
6726 	};
6727 	int ret __diagused;
6728 
6729 	if (atomic_load_bool(&V_pf_pfil_hooked))
6730 		return;
6731 
6732 #ifdef INET
6733 	pha.pa_type = PFIL_TYPE_IP4;
6734 	pha.pa_mbuf_chk = pf_check_in;
6735 	pha.pa_flags = PFIL_IN;
6736 	pha.pa_rulname = "default-in";
6737 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6738 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6739 	pla.pa_head = V_inet_pfil_head;
6740 	pla.pa_hook = V_pf_ip4_in_hook;
6741 	ret = pfil_link(&pla);
6742 	MPASS(ret == 0);
6743 	pha.pa_mbuf_chk = pf_check_out;
6744 	pha.pa_flags = PFIL_OUT;
6745 	pha.pa_rulname = "default-out";
6746 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6747 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6748 	pla.pa_head = V_inet_pfil_head;
6749 	pla.pa_hook = V_pf_ip4_out_hook;
6750 	ret = pfil_link(&pla);
6751 	MPASS(ret == 0);
6752 	if (V_pf_filter_local) {
6753 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6754 		pla.pa_head = V_inet_local_pfil_head;
6755 		pla.pa_hook = V_pf_ip4_out_hook;
6756 		ret = pfil_link(&pla);
6757 		MPASS(ret == 0);
6758 	}
6759 #endif
6760 #ifdef INET6
6761 	pha.pa_type = PFIL_TYPE_IP6;
6762 	pha.pa_mbuf_chk = pf_check6_in;
6763 	pha.pa_flags = PFIL_IN;
6764 	pha.pa_rulname = "default-in6";
6765 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6766 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6767 	pla.pa_head = V_inet6_pfil_head;
6768 	pla.pa_hook = V_pf_ip6_in_hook;
6769 	ret = pfil_link(&pla);
6770 	MPASS(ret == 0);
6771 	pha.pa_mbuf_chk = pf_check6_out;
6772 	pha.pa_rulname = "default-out6";
6773 	pha.pa_flags = PFIL_OUT;
6774 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6775 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6776 	pla.pa_head = V_inet6_pfil_head;
6777 	pla.pa_hook = V_pf_ip6_out_hook;
6778 	ret = pfil_link(&pla);
6779 	MPASS(ret == 0);
6780 	if (V_pf_filter_local) {
6781 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6782 		pla.pa_head = V_inet6_local_pfil_head;
6783 		pla.pa_hook = V_pf_ip6_out_hook;
6784 		ret = pfil_link(&pla);
6785 		MPASS(ret == 0);
6786 	}
6787 #endif
6788 
6789 	atomic_store_bool(&V_pf_pfil_hooked, true);
6790 }
6791 
6792 static void
6793 dehook_pf_eth(void)
6794 {
6795 
6796 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6797 		return;
6798 
6799 	pfil_remove_hook(V_pf_eth_in_hook);
6800 	pfil_remove_hook(V_pf_eth_out_hook);
6801 
6802 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6803 }
6804 
6805 static void
6806 dehook_pf(void)
6807 {
6808 
6809 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6810 		return;
6811 
6812 #ifdef INET
6813 	pfil_remove_hook(V_pf_ip4_in_hook);
6814 	pfil_remove_hook(V_pf_ip4_out_hook);
6815 #endif
6816 #ifdef INET6
6817 	pfil_remove_hook(V_pf_ip6_in_hook);
6818 	pfil_remove_hook(V_pf_ip6_out_hook);
6819 #endif
6820 
6821 	atomic_store_bool(&V_pf_pfil_hooked, false);
6822 }
6823 
6824 static void
6825 pf_load_vnet(void)
6826 {
6827 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6828 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6829 
6830 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6831 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6832 
6833 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6834 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6835 #ifdef ALTQ
6836 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6837 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6838 #endif
6839 
6840 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6841 
6842 	pfattach_vnet();
6843 	V_pf_vnet_active = 1;
6844 }
6845 
6846 static int
6847 pf_load(void)
6848 {
6849 	int error;
6850 
6851 	sx_init(&pf_end_lock, "pf end thread");
6852 
6853 	pf_mtag_initialize();
6854 
6855 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6856 	if (pf_dev == NULL)
6857 		return (ENOMEM);
6858 
6859 	pf_end_threads = 0;
6860 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6861 	if (error != 0)
6862 		return (error);
6863 
6864 	pfi_initialize();
6865 
6866 	return (0);
6867 }
6868 
6869 static void
6870 pf_unload_vnet(void)
6871 {
6872 	int ret __diagused;
6873 
6874 	V_pf_vnet_active = 0;
6875 	V_pf_status.running = 0;
6876 	dehook_pf();
6877 	dehook_pf_eth();
6878 
6879 	PF_RULES_WLOCK();
6880 	pf_syncookies_cleanup();
6881 	shutdown_pf();
6882 	PF_RULES_WUNLOCK();
6883 
6884 	ret = swi_remove(V_pf_swi_cookie);
6885 	MPASS(ret == 0);
6886 	ret = intr_event_destroy(V_pf_swi_ie);
6887 	MPASS(ret == 0);
6888 
6889 	pf_unload_vnet_purge();
6890 
6891 	pf_normalize_cleanup();
6892 	PF_RULES_WLOCK();
6893 	pfi_cleanup_vnet();
6894 	PF_RULES_WUNLOCK();
6895 	pfr_cleanup();
6896 	pf_osfp_flush();
6897 	pf_cleanup();
6898 	if (IS_DEFAULT_VNET(curvnet))
6899 		pf_mtag_cleanup();
6900 
6901 	pf_cleanup_tagset(&V_pf_tags);
6902 #ifdef ALTQ
6903 	pf_cleanup_tagset(&V_pf_qids);
6904 #endif
6905 	uma_zdestroy(V_pf_tag_z);
6906 
6907 #ifdef PF_WANT_32_TO_64_COUNTER
6908 	PF_RULES_WLOCK();
6909 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6910 
6911 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6912 	MPASS(V_pf_allkifcount == 0);
6913 
6914 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6915 	V_pf_allrulecount--;
6916 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6917 
6918 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6919 	MPASS(V_pf_allrulecount == 0);
6920 
6921 	PF_RULES_WUNLOCK();
6922 
6923 	free(V_pf_kifmarker, PFI_MTYPE);
6924 	free(V_pf_rulemarker, M_PFRULE);
6925 #endif
6926 
6927 	/* Free counters last as we updated them during shutdown. */
6928 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6929 	for (int i = 0; i < 2; i++) {
6930 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6931 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6932 	}
6933 	counter_u64_free(V_pf_default_rule.states_cur);
6934 	counter_u64_free(V_pf_default_rule.states_tot);
6935 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
6936 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
6937 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6938 
6939 	for (int i = 0; i < PFRES_MAX; i++)
6940 		counter_u64_free(V_pf_status.counters[i]);
6941 	for (int i = 0; i < KLCNT_MAX; i++)
6942 		counter_u64_free(V_pf_status.lcounters[i]);
6943 	for (int i = 0; i < FCNT_MAX; i++)
6944 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6945 	for (int i = 0; i < SCNT_MAX; i++)
6946 		counter_u64_free(V_pf_status.scounters[i]);
6947 
6948 	rm_destroy(&V_pf_rules_lock);
6949 	sx_destroy(&V_pf_ioctl_lock);
6950 }
6951 
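/*
 * Stop the purge kthread with a simple handshake: setting pf_end_threads
 * to 1 asks it to exit and the thread bumps the counter to 2 right before
 * terminating, waking us up via pf_purge_proc.
 */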
6952 static void
6953 pf_unload(void)
6954 {
6955 
6956 	sx_xlock(&pf_end_lock);
6957 	pf_end_threads = 1;
6958 	while (pf_end_threads < 2) {
6959 		wakeup_one(pf_purge_thread);
6960 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6961 	}
6962 	sx_xunlock(&pf_end_lock);
6963 
6964 	pf_nl_unregister();
6965 
6966 	if (pf_dev != NULL)
6967 		destroy_dev(pf_dev);
6968 
6969 	pfi_cleanup();
6970 
6971 	sx_destroy(&pf_end_lock);
6972 }
6973 
6974 static void
6975 vnet_pf_init(void *unused __unused)
6976 {
6977 
6978 	pf_load_vnet();
6979 }
6980 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6981     vnet_pf_init, NULL);
6982 
6983 static void
vnet_pf_uninit(const void * unused __unused)6984 vnet_pf_uninit(const void *unused __unused)
6985 {
6986 
6987 	pf_unload_vnet();
6988 }
6989 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6990 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6991     vnet_pf_uninit, NULL);
6992 
6993 static int
6994 pf_modevent(module_t mod, int type, void *data)
6995 {
6996 	int error = 0;
6997 
6998 	switch (type) {
6999 	case MOD_LOAD:
7000 		error = pf_load();
7001 		pf_nl_register();
7002 		break;
7003 	case MOD_UNLOAD:
7004 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
7005 		 * the vnet_pf_uninit()s */
7006 		break;
7007 	default:
7008 		error = EINVAL;
7009 		break;
7010 	}
7011 
7012 	return (error);
7013 }
7014 
7015 static moduledata_t pf_mod = {
7016 	"pf",
7017 	pf_modevent,
7018 	0
7019 };
7020 
7021 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
7022 MODULE_DEPEND(pf, netlink, 1, 1, 1);
7023 MODULE_DEPEND(pf, crypto, 1, 1, 1);
7024 MODULE_VERSION(pf, PF_MODVER);
7025