xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision c00aca9a714ee3cdb867d4014898ec4e345465a5)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static uint16_t		 pf_qname2qid(const char *);
120 static void		 pf_qid_unref(uint16_t);
121 #endif /* ALTQ */
122 static int		 pf_begin_rules(u_int32_t *, int, const char *);
123 static int		 pf_rollback_rules(u_int32_t, int, char *);
124 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
125 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
126 static void		 pf_hash_rule(struct pf_krule *);
127 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
128 static int		 pf_commit_rules(u_int32_t, int, char *);
129 static int		 pf_addr_setup(struct pf_kruleset *,
130 			    struct pf_addr_wrap *, sa_family_t);
131 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
132 			    struct pf_src_node *);
133 #ifdef ALTQ
134 static int		 pf_export_kaltq(struct pf_altq *,
135 			    struct pfioc_altq_v1 *, size_t);
136 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
137 			    struct pf_altq *, size_t);
138 #endif /* ALTQ */
139 
140 VNET_DEFINE(struct pf_krule,	pf_default_rule);
141 
142 static __inline int             pf_krule_compare(struct pf_krule *,
143 				    struct pf_krule *);
144 
145 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
146 
147 #ifdef ALTQ
148 VNET_DEFINE_STATIC(int,		pf_altq_running);
149 #define	V_pf_altq_running	VNET(pf_altq_running)
150 #endif
151 
152 #define	TAGID_MAX	 50000
153 struct pf_tagname {
154 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
155 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
156 	char			name[PF_TAG_NAME_SIZE];
157 	uint16_t		tag;
158 	int			ref;
159 };
160 
161 struct pf_tagset {
162 	TAILQ_HEAD(, pf_tagname)	*namehash;
163 	TAILQ_HEAD(, pf_tagname)	*taghash;
164 	unsigned int			 mask;
165 	uint32_t			 seed;
166 	BITSET_DEFINE(, TAGID_MAX)	 avail;
167 };
168 
169 VNET_DEFINE(struct pf_tagset, pf_tags);
170 #define	V_pf_tags	VNET(pf_tags)
171 static unsigned int	pf_rule_tag_hashsize;
172 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
173 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
174     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
175     "Size of pf(4) rule tag hashtable");
176 
177 #ifdef ALTQ
178 VNET_DEFINE(struct pf_tagset, pf_qids);
179 #define	V_pf_qids	VNET(pf_qids)
180 static unsigned int	pf_queue_tag_hashsize;
181 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
182 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
183     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
184     "Size of pf(4) queue tag hashtable");
185 #endif
186 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
187 #define	V_pf_tag_z		 VNET(pf_tag_z)
188 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
189 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
190 MALLOC_DEFINE(M_PF, "pf", "pf(4)");
191 
192 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
193 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
194 #endif
195 
196 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
197 #define V_pf_filter_local	VNET(pf_filter_local)
198 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
199     &VNET_NAME(pf_filter_local), false,
200     "Enable filtering for packets delivered to local network stack");
201 
202 #ifdef PF_DEFAULT_TO_DROP
203 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
204 #else
205 VNET_DEFINE_STATIC(bool, default_to_drop);
206 #endif
207 #define	V_default_to_drop VNET(default_to_drop)
208 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
209     &VNET_NAME(default_to_drop), false,
210     "Make the default rule drop all packets.");
211 
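/*
 * Illustrative note: since the OID above is declared CTLFLAG_RDTUN, the
 * default-deny policy is chosen at boot time via loader.conf(5), e.g.:
 *
 *	net.pf.default_to_drop="1"
 */
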
212 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
213 			    unsigned int);
214 static void		 pf_cleanup_tagset(struct pf_tagset *);
215 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
216 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
217 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
218 static u_int16_t	 pf_tagname2tag(const char *);
219 static void		 tag_unref(struct pf_tagset *, u_int16_t);
220 
221 struct cdev *pf_dev;
222 
223 /*
224  * XXX - These are new and need to be checked when moving to a new version
225  */
226 static void		 pf_clear_all_states(void);
227 static int		 pf_killstates_row(struct pf_kstate_kill *,
228 			    struct pf_idhash *);
229 static int		 pf_killstates_nv(struct pfioc_nv *);
230 static int		 pf_clearstates_nv(struct pfioc_nv *);
231 static int		 pf_getstate(struct pfioc_nv *);
232 static int		 pf_getstatus(struct pfioc_nv *);
233 static int		 pf_clear_tables(void);
234 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
235 static int		 pf_keepcounters(struct pfioc_nv *);
236 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
237 
238 /*
239  * Wrapper functions for pfil(9) hooks
240  */
241 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
242     int flags, void *ruleset __unused, struct inpcb *inp);
243 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
244     int flags, void *ruleset __unused, struct inpcb *inp);
245 #ifdef INET
246 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
247     int flags, void *ruleset __unused, struct inpcb *inp);
248 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
249     int flags, void *ruleset __unused, struct inpcb *inp);
250 #endif
251 #ifdef INET6
252 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
253     int flags, void *ruleset __unused, struct inpcb *inp);
254 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
255     int flags, void *ruleset __unused, struct inpcb *inp);
256 #endif
257 
258 static void		hook_pf_eth(void);
259 static void		hook_pf(void);
260 static void		dehook_pf_eth(void);
261 static void		dehook_pf(void);
262 static int		shutdown_pf(void);
263 static int		pf_load(void);
264 static void		pf_unload(void);
265 
266 static struct cdevsw pf_cdevsw = {
267 	.d_ioctl =	pfioctl,
268 	.d_name =	PF_NAME,
269 	.d_version =	D_VERSION,
270 };
271 
272 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
273 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
274 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
275 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
276 
277 /*
278  * We need a flag that is neither hooked nor running to know when
279  * the VNET is "valid".  We primarily need this to control (global)
280  * external events, e.g., eventhandlers.
281  */
282 VNET_DEFINE(int, pf_vnet_active);
283 #define V_pf_vnet_active	VNET(pf_vnet_active)
284 
285 int pf_end_threads;
286 struct proc *pf_purge_proc;
287 
288 VNET_DEFINE(struct rmlock, pf_rules_lock);
289 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
290 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
291 struct sx			pf_end_lock;
292 
293 /* pfsync */
294 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
295 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
296 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
297 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
298 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
299 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
300 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
301 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
302 
303 /* pflog */
304 pflog_packet_t			*pflog_packet_ptr = NULL;
305 
306 /*
307  * Copy a user-provided string, returning an error if truncation would occur.
308  * Avoid scanning past "sz" bytes in the source string since there's no
309  * guarantee that it's nul-terminated.
310  */
311 static int
312 pf_user_strcpy(char *dst, const char *src, size_t sz)
313 {
314 	if (strnlen(src, sz) == sz)
315 		return (EINVAL);
316 	(void)strlcpy(dst, src, sz);
317 	return (0);
318 }
319 
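/*
 * Illustrative use of pf_user_strcpy() (a sketch; "user_ifname" is a
 * hypothetical user-supplied string): callers reject over-long input
 * with EINVAL instead of silently truncating it, e.g.:
 *
 *	char ifname[IFNAMSIZ];
 *	if (pf_user_strcpy(ifname, user_ifname, sizeof(ifname)) != 0)
 *		return (EINVAL);
 */
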
320 static void
321 pfattach_vnet(void)
322 {
323 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
324 
325 	bzero(&V_pf_status, sizeof(V_pf_status));
326 
327 	pf_initialize();
328 	pfr_initialize();
329 	pfi_initialize_vnet();
330 	pf_normalize_init();
331 	pf_syncookies_init();
332 
333 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
334 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
335 	V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
336 	V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;
337 
338 	RB_INIT(&V_pf_anchors);
339 	pf_init_kruleset(&pf_main_ruleset);
340 
341 	pf_init_keth(V_pf_keth);
342 
343 	/* default rule should never be garbage collected */
344 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
345 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
346 	V_pf_default_rule.nr = (uint32_t)-1;
347 	V_pf_default_rule.rtableid = -1;
348 
349 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
350 	for (int i = 0; i < 2; i++) {
351 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
352 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
353 	}
354 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
355 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
356 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
357 		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
358 
359 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
360 	    M_WAITOK | M_ZERO);
361 
362 #ifdef PF_WANT_32_TO_64_COUNTER
363 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
364 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
365 	PF_RULES_WLOCK();
366 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
367 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
368 	V_pf_allrulecount++;
369 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
370 	PF_RULES_WUNLOCK();
371 #endif
372 
373 	/* initialize default timeouts */
374 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
375 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
376 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
377 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
378 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
379 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
380 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
381 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
382 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
383 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
384 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
385 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
386 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
387 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
388 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
389 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
390 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
391 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
392 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
393 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
394 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
395 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
396 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
397 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
398 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
399 
400 	V_pf_status.debug = PF_DEBUG_URGENT;
401 	/*
402 	 * XXX This is different from OpenBSD, where reassembly is enabled by
403 	 * default. In FreeBSD we expect people to still use scrub rules and
404 	 * switch to the new syntax later. Only when they switch must they
405 	 * explicitly enable reassembly. We could change the default once the
406 	 * scrub rule functionality is, hopefully, removed some day in the future.
407 	 */
408 	V_pf_status.reass = 0;
409 
410 	V_pf_pfil_hooked = false;
411 	V_pf_pfil_eth_hooked = false;
412 
413 	/* XXX do our best to avoid a conflict */
414 	V_pf_status.hostid = arc4random();
415 
416 	for (int i = 0; i < PFRES_MAX; i++)
417 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
418 	for (int i = 0; i < KLCNT_MAX; i++)
419 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
420 	for (int i = 0; i < FCNT_MAX; i++)
421 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
422 	for (int i = 0; i < SCNT_MAX; i++)
423 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
424 	for (int i = 0; i < NCNT_MAX; i++)
425 		V_pf_status.ncounters[i] = counter_u64_alloc(M_WAITOK);
426 
427 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
428 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
429 		/* XXXGL: leaked all above. */
430 		return;
431 }
432 
433 static struct pf_kpool *
434 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
435     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
436     u_int8_t check_ticket, int which)
437 {
438 	struct pf_kruleset	*ruleset;
439 	struct pf_krule		*rule;
440 	int			 rs_num;
441 
442 	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);
443 
444 	ruleset = pf_find_kruleset(anchor);
445 	if (ruleset == NULL)
446 		return (NULL);
447 	rs_num = pf_get_ruleset_number(rule_action);
448 	if (rs_num >= PF_RULESET_MAX)
449 		return (NULL);
450 	if (active) {
451 		if (check_ticket && ticket !=
452 		    ruleset->rules[rs_num].active.ticket)
453 			return (NULL);
454 		if (r_last)
455 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
456 			    pf_krulequeue);
457 		else
458 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
459 	} else {
460 		if (check_ticket && ticket !=
461 		    ruleset->rules[rs_num].inactive.ticket)
462 			return (NULL);
463 		if (r_last)
464 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
465 			    pf_krulequeue);
466 		else
467 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
468 	}
469 	if (!r_last) {
470 		while ((rule != NULL) && (rule->nr != rule_number))
471 			rule = TAILQ_NEXT(rule, entries);
472 	}
473 	if (rule == NULL)
474 		return (NULL);
475 
476 	switch (which) {
477 	case PF_RDR:
478 		return (&rule->rdr);
479 	case PF_NAT:
480 		return (&rule->nat);
481 	case PF_RT:
482 		return (&rule->route);
483 	default:
484 		panic("Unknown pool type %d", which);
485 	}
486 }
487 
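/*
 * Illustrative call (a sketch, not taken from an ioctl handler): fetch
 * the redirect pool of rule 0 in the active main ruleset, without
 * ticket validation:
 *
 *	pool = pf_get_kpool("", 0, PF_RDR, 0, 0, 1, 0, PF_RDR);
 */
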
488 static void
489 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
490 {
491 	struct pf_kpooladdr	*mv_pool_pa;
492 
493 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
494 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
495 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
496 	}
497 }
498 
499 static void
500 pf_empty_kpool(struct pf_kpalist *poola)
501 {
502 	struct pf_kpooladdr *pa;
503 
504 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
505 		switch (pa->addr.type) {
506 		case PF_ADDR_DYNIFTL:
507 			pfi_dynaddr_remove(pa->addr.p.dyn);
508 			break;
509 		case PF_ADDR_TABLE:
510 			/* XXX: this could be an unfinished pooladdr on pabuf */
511 			if (pa->addr.p.tbl != NULL)
512 				pfr_detach_table(pa->addr.p.tbl);
513 			break;
514 		}
515 		if (pa->kif)
516 			pfi_kkif_unref(pa->kif);
517 		TAILQ_REMOVE(poola, pa, entries);
518 		free(pa, M_PFRULE);
519 	}
520 }
521 
522 static void
523 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
524 {
525 
526 	PF_RULES_WASSERT();
527 	PF_UNLNKDRULES_ASSERT();
528 
529 	TAILQ_REMOVE(rulequeue, rule, entries);
530 
531 	rule->rule_ref |= PFRULE_REFS;
532 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
533 }
534 
535 static void
536 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
537 {
538 
539 	PF_RULES_WASSERT();
540 
541 	PF_UNLNKDRULES_LOCK();
542 	pf_unlink_rule_locked(rulequeue, rule);
543 	PF_UNLNKDRULES_UNLOCK();
544 }
545 
546 static void
547 pf_free_eth_rule(struct pf_keth_rule *rule)
548 {
549 	PF_RULES_WASSERT();
550 
551 	if (rule == NULL)
552 		return;
553 
554 	if (rule->tag)
555 		tag_unref(&V_pf_tags, rule->tag);
556 	if (rule->match_tag)
557 		tag_unref(&V_pf_tags, rule->match_tag);
558 #ifdef ALTQ
559 	pf_qid_unref(rule->qid);
560 #endif
561 
562 	if (rule->bridge_to)
563 		pfi_kkif_unref(rule->bridge_to);
564 	if (rule->kif)
565 		pfi_kkif_unref(rule->kif);
566 
567 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
568 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
569 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
570 		pfr_detach_table(rule->ipdst.addr.p.tbl);
571 
572 	counter_u64_free(rule->evaluations);
573 	for (int i = 0; i < 2; i++) {
574 		counter_u64_free(rule->packets[i]);
575 		counter_u64_free(rule->bytes[i]);
576 	}
577 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
578 	pf_keth_anchor_remove(rule);
579 
580 	free(rule, M_PFRULE);
581 }
582 
583 void
584 pf_free_rule(struct pf_krule *rule)
585 {
586 
587 	PF_RULES_WASSERT();
588 	PF_CONFIG_ASSERT();
589 
590 	if (rule->tag)
591 		tag_unref(&V_pf_tags, rule->tag);
592 	if (rule->match_tag)
593 		tag_unref(&V_pf_tags, rule->match_tag);
594 #ifdef ALTQ
595 	if (rule->pqid != rule->qid)
596 		pf_qid_unref(rule->pqid);
597 	pf_qid_unref(rule->qid);
598 #endif
599 	switch (rule->src.addr.type) {
600 	case PF_ADDR_DYNIFTL:
601 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
602 		break;
603 	case PF_ADDR_TABLE:
604 		pfr_detach_table(rule->src.addr.p.tbl);
605 		break;
606 	}
607 	switch (rule->dst.addr.type) {
608 	case PF_ADDR_DYNIFTL:
609 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
610 		break;
611 	case PF_ADDR_TABLE:
612 		pfr_detach_table(rule->dst.addr.p.tbl);
613 		break;
614 	}
615 	if (rule->overload_tbl)
616 		pfr_detach_table(rule->overload_tbl);
617 	if (rule->kif)
618 		pfi_kkif_unref(rule->kif);
619 	if (rule->rcv_kif)
620 		pfi_kkif_unref(rule->rcv_kif);
621 	pf_remove_kanchor(rule);
622 	pf_empty_kpool(&rule->rdr.list);
623 	pf_empty_kpool(&rule->nat.list);
624 	pf_empty_kpool(&rule->route.list);
625 
626 	pf_krule_free(rule);
627 }
628 
629 static void
630 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
631     unsigned int default_size)
632 {
633 	unsigned int i;
634 	unsigned int hashsize;
635 
636 	if (*tunable_size == 0 || !powerof2(*tunable_size))
637 		*tunable_size = default_size;
638 
639 	hashsize = *tunable_size;
640 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
641 	    M_WAITOK);
642 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
643 	    M_WAITOK);
644 	ts->mask = hashsize - 1;
645 	ts->seed = arc4random();
646 	for (i = 0; i < hashsize; i++) {
647 		TAILQ_INIT(&ts->namehash[i]);
648 		TAILQ_INIT(&ts->taghash[i]);
649 	}
650 	BIT_FILL(TAGID_MAX, &ts->avail);
651 }
652 
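/*
 * Note: because the hash size is forced to a power of two above,
 * masking with ts->mask is equivalent to taking the hash modulo the
 * table size; e.g. with the default size of 128, the mask is 0x7f.
 */
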
653 static void
654 pf_cleanup_tagset(struct pf_tagset *ts)
655 {
656 	unsigned int i;
657 	unsigned int hashsize;
658 	struct pf_tagname *t, *tmp;
659 
660 	/*
661 	 * Only need to clean up one of the hashes as each tag is hashed
662 	 * into each table.
663 	 */
664 	hashsize = ts->mask + 1;
665 	for (i = 0; i < hashsize; i++)
666 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
667 			uma_zfree(V_pf_tag_z, t);
668 
669 	free(ts->namehash, M_PFHASH);
670 	free(ts->taghash, M_PFHASH);
671 }
672 
673 static uint16_t
674 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
675 {
676 	size_t len;
677 
678 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
679 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
680 }
681 
682 static uint16_t
683 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
684 {
685 
686 	return (tag & ts->mask);
687 }
688 
689 static u_int16_t
690 tagname2tag(struct pf_tagset *ts, const char *tagname)
691 {
692 	struct pf_tagname	*tag;
693 	u_int32_t		 index;
694 	u_int16_t		 new_tagid;
695 
696 	PF_RULES_WASSERT();
697 
698 	index = tagname2hashindex(ts, tagname);
699 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
700 		if (strcmp(tagname, tag->name) == 0) {
701 			tag->ref++;
702 			return (tag->tag);
703 		}
704 
705 	/*
706 	 * new entry
707 	 *
708 	 * to avoid fragmentation, we do a linear search from the beginning
709 	 * and take the first free slot we find.
710 	 */
711 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
712 	/*
713 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
714 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
715 	 * set.  It may also return a bit number greater than TAGID_MAX due
716 	 * to rounding of the number of bits in the vector up to a multiple
717 	 * of the vector word size at declaration/allocation time.
718 	 */
719 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
720 		return (0);
721 
722 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
723 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
724 
725 	/* allocate and fill new struct pf_tagname */
726 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
727 	if (tag == NULL)
728 		return (0);
729 	strlcpy(tag->name, tagname, sizeof(tag->name));
730 	tag->tag = new_tagid;
731 	tag->ref = 1;
732 
733 	/* Insert into namehash */
734 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
735 
736 	/* Insert into taghash */
737 	index = tag2hashindex(ts, new_tagid);
738 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
739 
740 	return (tag->tag);
741 }
742 
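/*
 * Illustration of the 1-based tag to 0-based bit mapping used above,
 * assuming the bitset(9) macros:
 *
 *	BIT_FILL(TAGID_MAX, &ts->avail);	   all tags free
 *	n = BIT_FFS(TAGID_MAX, &ts->avail);	   n == 1: first free tag
 *	BIT_CLR(TAGID_MAX, n - 1, &ts->avail);	   mark tag n in use
 *	BIT_SET(TAGID_MAX, n - 1, &ts->avail);	   release tag n again
 */
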
743 static void
744 tag_unref(struct pf_tagset *ts, u_int16_t tag)
745 {
746 	struct pf_tagname	*t;
747 	uint16_t		 index;
748 
749 	PF_RULES_WASSERT();
750 
751 	index = tag2hashindex(ts, tag);
752 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
753 		if (tag == t->tag) {
754 			if (--t->ref == 0) {
755 				TAILQ_REMOVE(&ts->taghash[index], t,
756 				    taghash_entries);
757 				index = tagname2hashindex(ts, t->name);
758 				TAILQ_REMOVE(&ts->namehash[index], t,
759 				    namehash_entries);
760 				/* Bits are 0-based for BIT_SET() */
761 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
762 				uma_zfree(V_pf_tag_z, t);
763 			}
764 			break;
765 		}
766 }
767 
768 static uint16_t
769 pf_tagname2tag(const char *tagname)
770 {
771 	return (tagname2tag(&V_pf_tags, tagname));
772 }
773 
774 static int
775 pf_begin_eth(uint32_t *ticket, const char *anchor)
776 {
777 	struct pf_keth_rule *rule, *tmp;
778 	struct pf_keth_ruleset *rs;
779 
780 	PF_RULES_WASSERT();
781 
782 	rs = pf_find_or_create_keth_ruleset(anchor);
783 	if (rs == NULL)
784 		return (EINVAL);
785 
786 	/* Purge old inactive rules. */
787 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
788 	    tmp) {
789 		TAILQ_REMOVE(rs->inactive.rules, rule,
790 		    entries);
791 		pf_free_eth_rule(rule);
792 	}
793 
794 	*ticket = ++rs->inactive.ticket;
795 	rs->inactive.open = 1;
796 
797 	return (0);
798 }
799 
800 static int
801 pf_rollback_eth(uint32_t ticket, const char *anchor)
802 {
803 	struct pf_keth_rule *rule, *tmp;
804 	struct pf_keth_ruleset *rs;
805 
806 	PF_RULES_WASSERT();
807 
808 	rs = pf_find_keth_ruleset(anchor);
809 	if (rs == NULL)
810 		return (EINVAL);
811 
812 	if (!rs->inactive.open ||
813 	    ticket != rs->inactive.ticket)
814 		return (0);
815 
816 	/* Purge old inactive rules. */
817 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
818 	    tmp) {
819 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
820 		pf_free_eth_rule(rule);
821 	}
822 
823 	rs->inactive.open = 0;
824 
825 	pf_remove_if_empty_keth_ruleset(rs);
826 
827 	return (0);
828 }
829 
830 #define	PF_SET_SKIP_STEPS(i)					\
831 	do {							\
832 		while (head[i] != cur) {			\
833 			head[i]->skip[i].ptr = cur;		\
834 			head[i] = TAILQ_NEXT(head[i], entries);	\
835 		}						\
836 	} while (0)
837 
838 static void
839 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
840 {
841 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
842 	int i;
843 
844 	cur = TAILQ_FIRST(rules);
845 	prev = cur;
846 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
847 		head[i] = cur;
848 	while (cur != NULL) {
849 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
850 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
851 		if (cur->direction != prev->direction)
852 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
853 		if (cur->proto != prev->proto)
854 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
855 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
856 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
857 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
858 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
859 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
860 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
861 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
862 		if (cur->ipdst.neg != prev->ipdst.neg ||
863 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
864 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
865 
866 		prev = cur;
867 		cur = TAILQ_NEXT(cur, entries);
868 	}
869 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
870 		PF_SET_SKIP_STEPS(i);
871 }
872 
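/*
 * Illustrative example of the skip steps computed above: for a run of
 * consecutive rules sharing a field, each rule's skip pointer for that
 * field jumps past the run, so a single failed comparison skips the
 * whole run at evaluation time:
 *
 *	r1 on em0 --skip[PFE_SKIP_IFP]--> r4
 *	r2 on em0 --skip[PFE_SKIP_IFP]--> r4
 *	r3 on em0 --skip[PFE_SKIP_IFP]--> r4
 *	r4 on em1
 */
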
873 static int
874 pf_commit_eth(uint32_t ticket, const char *anchor)
875 {
876 	struct pf_keth_ruleq *rules;
877 	struct pf_keth_ruleset *rs;
878 
879 	rs = pf_find_keth_ruleset(anchor);
880 	if (rs == NULL) {
881 		return (EINVAL);
882 	}
883 
884 	if (!rs->inactive.open ||
885 	    ticket != rs->inactive.ticket)
886 		return (EBUSY);
887 
888 	PF_RULES_WASSERT();
889 
890 	pf_eth_calc_skip_steps(rs->inactive.rules);
891 
892 	rules = rs->active.rules;
893 	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
894 	rs->inactive.rules = rules;
895 	rs->inactive.ticket = rs->active.ticket;
896 
897 	return (pf_rollback_eth(rs->inactive.ticket,
898 	    rs->anchor ? rs->anchor->path : ""));
899 }
900 
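/*
 * Illustrative transaction lifecycle for the three functions above
 * (a sketch; assumes the caller holds the rules write lock):
 *
 *	uint32_t ticket;
 *	error = pf_begin_eth(&ticket, "");	   open the inactive set
 *	... stage rules on rs->inactive.rules ...
 *	if (error != 0)
 *		pf_rollback_eth(ticket, "");	   discard staged rules
 *	else
 *		error = pf_commit_eth(ticket, ""); swap into active set
 */
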
901 #ifdef ALTQ
902 static uint16_t
903 pf_qname2qid(const char *qname)
904 {
905 	return (tagname2tag(&V_pf_qids, qname));
906 }
907 
908 static void
909 pf_qid_unref(uint16_t qid)
910 {
911 	tag_unref(&V_pf_qids, qid);
912 }
913 
914 static int
915 pf_begin_altq(u_int32_t *ticket)
916 {
917 	struct pf_altq	*altq, *tmp;
918 	int		 error = 0;
919 
920 	PF_RULES_WASSERT();
921 
922 	/* Purge the old altq lists */
923 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
924 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
925 			/* detach and destroy the discipline */
926 			error = altq_remove(altq);
927 		}
928 		free(altq, M_PFALTQ);
929 	}
930 	TAILQ_INIT(V_pf_altq_ifs_inactive);
931 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
932 		pf_qid_unref(altq->qid);
933 		free(altq, M_PFALTQ);
934 	}
935 	TAILQ_INIT(V_pf_altqs_inactive);
936 	if (error)
937 		return (error);
938 	*ticket = ++V_ticket_altqs_inactive;
939 	V_altqs_inactive_open = 1;
940 	return (0);
941 }
942 
943 static int
944 pf_rollback_altq(u_int32_t ticket)
945 {
946 	struct pf_altq	*altq, *tmp;
947 	int		 error = 0;
948 
949 	PF_RULES_WASSERT();
950 
951 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
952 		return (0);
953 	/* Purge the old altq lists */
954 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
955 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
956 			/* detach and destroy the discipline */
957 			error = altq_remove(altq);
958 		}
959 		free(altq, M_PFALTQ);
960 	}
961 	TAILQ_INIT(V_pf_altq_ifs_inactive);
962 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
963 		pf_qid_unref(altq->qid);
964 		free(altq, M_PFALTQ);
965 	}
966 	TAILQ_INIT(V_pf_altqs_inactive);
967 	V_altqs_inactive_open = 0;
968 	return (error);
969 }
970 
971 static int
972 pf_commit_altq(u_int32_t ticket)
973 {
974 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
975 	struct pf_altq		*altq, *tmp;
976 	int			 err, error = 0;
977 
978 	PF_RULES_WASSERT();
979 
980 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
981 		return (EBUSY);
982 
983 	/* swap altqs, keep the old. */
984 	old_altqs = V_pf_altqs_active;
985 	old_altq_ifs = V_pf_altq_ifs_active;
986 	V_pf_altqs_active = V_pf_altqs_inactive;
987 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
988 	V_pf_altqs_inactive = old_altqs;
989 	V_pf_altq_ifs_inactive = old_altq_ifs;
990 	V_ticket_altqs_active = V_ticket_altqs_inactive;
991 
992 	/* Attach new disciplines */
993 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
994 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
995 			/* attach the discipline */
996 			error = altq_pfattach(altq);
997 			if (error == 0 && V_pf_altq_running)
998 				error = pf_enable_altq(altq);
999 			if (error != 0)
1000 				return (error);
1001 		}
1002 	}
1003 
1004 	/* Purge the old altq lists */
1005 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1006 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1007 			/* detach and destroy the discipline */
1008 			if (V_pf_altq_running)
1009 				error = pf_disable_altq(altq);
1010 			err = altq_pfdetach(altq);
1011 			if (err != 0 && error == 0)
1012 				error = err;
1013 			err = altq_remove(altq);
1014 			if (err != 0 && error == 0)
1015 				error = err;
1016 		}
1017 		free(altq, M_PFALTQ);
1018 	}
1019 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1020 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1021 		pf_qid_unref(altq->qid);
1022 		free(altq, M_PFALTQ);
1023 	}
1024 	TAILQ_INIT(V_pf_altqs_inactive);
1025 
1026 	V_altqs_inactive_open = 0;
1027 	return (error);
1028 }
1029 
1030 static int
1031 pf_enable_altq(struct pf_altq *altq)
1032 {
1033 	struct ifnet		*ifp;
1034 	struct tb_profile	 tb;
1035 	int			 error = 0;
1036 
1037 	if ((ifp = ifunit(altq->ifname)) == NULL)
1038 		return (EINVAL);
1039 
1040 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1041 		error = altq_enable(&ifp->if_snd);
1042 
1043 	/* set tokenbucket regulator */
1044 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1045 		tb.rate = altq->ifbandwidth;
1046 		tb.depth = altq->tbrsize;
1047 		error = tbr_set(&ifp->if_snd, &tb);
1048 	}
1049 
1050 	return (error);
1051 }
1052 
1053 static int
1054 pf_disable_altq(struct pf_altq *altq)
1055 {
1056 	struct ifnet		*ifp;
1057 	struct tb_profile	 tb;
1058 	int			 error;
1059 
1060 	if ((ifp = ifunit(altq->ifname)) == NULL)
1061 		return (EINVAL);
1062 
1063 	/*
1064 	 * If the discipline is no longer referenced, it has been overridden
1065 	 * by a new one; if so, just return.
1066 	 */
1067 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1068 		return (0);
1069 
1070 	error = altq_disable(&ifp->if_snd);
1071 
1072 	if (error == 0) {
1073 		/* clear tokenbucket regulator */
1074 		tb.rate = 0;
1075 		error = tbr_set(&ifp->if_snd, &tb);
1076 	}
1077 
1078 	return (error);
1079 }
1080 
1081 static int
1082 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1083     struct pf_altq *altq)
1084 {
1085 	struct ifnet	*ifp1;
1086 	int		 error = 0;
1087 
1088 	/* Deactivate the interface in question */
1089 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1090 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1091 	    (remove && ifp1 == ifp)) {
1092 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1093 	} else {
1094 		error = altq_add(ifp1, altq);
1095 
1096 		if (ticket != V_ticket_altqs_inactive)
1097 			error = EBUSY;
1098 
1099 		if (error)
1100 			free(altq, M_PFALTQ);
1101 	}
1102 
1103 	return (error);
1104 }
1105 
1106 void
1107 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1108 {
1109 	struct pf_altq	*a1, *a2, *a3;
1110 	u_int32_t	 ticket;
1111 	int		 error = 0;
1112 
1113 	/*
1114 	 * No need to re-evaluate the configuration for events on interfaces
1115 	 * that do not support ALTQ, as it's not possible for such
1116 	 * interfaces to be part of the configuration.
1117 	 */
1118 	if (!ALTQ_IS_READY(&ifp->if_snd))
1119 		return;
1120 
1121 	/* Interrupt userland queue modifications */
1122 	if (V_altqs_inactive_open)
1123 		pf_rollback_altq(V_ticket_altqs_inactive);
1124 
1125 	/* Start new altq ruleset */
1126 	if (pf_begin_altq(&ticket))
1127 		return;
1128 
1129 	/* Copy the current active set */
1130 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1131 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1132 		if (a2 == NULL) {
1133 			error = ENOMEM;
1134 			break;
1135 		}
1136 		bcopy(a1, a2, sizeof(struct pf_altq));
1137 
1138 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1139 		if (error)
1140 			break;
1141 
1142 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1143 	}
1144 	if (error)
1145 		goto out;
1146 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1147 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1148 		if (a2 == NULL) {
1149 			error = ENOMEM;
1150 			break;
1151 		}
1152 		bcopy(a1, a2, sizeof(struct pf_altq));
1153 
1154 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1155 			error = EBUSY;
1156 			free(a2, M_PFALTQ);
1157 			break;
1158 		}
1159 		a2->altq_disc = NULL;
1160 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1161 			if (strncmp(a3->ifname, a2->ifname,
1162 				IFNAMSIZ) == 0) {
1163 				a2->altq_disc = a3->altq_disc;
1164 				break;
1165 			}
1166 		}
1167 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1168 		if (error)
1169 			break;
1170 
1171 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1172 	}
1173 
1174 out:
1175 	if (error != 0)
1176 		pf_rollback_altq(ticket);
1177 	else
1178 		pf_commit_altq(ticket);
1179 }
1180 #endif /* ALTQ */
1181 
1182 static struct pf_krule_global *
1183 pf_rule_tree_alloc(int flags)
1184 {
1185 	struct pf_krule_global *tree;
1186 
1187 	tree = malloc(sizeof(struct pf_krule_global), M_PF, flags);
1188 	if (tree == NULL)
1189 		return (NULL);
1190 	RB_INIT(tree);
1191 	return (tree);
1192 }
1193 
1194 void
1195 pf_rule_tree_free(struct pf_krule_global *tree)
1196 {
1197 
1198 	free(tree, M_PF);
1199 }
1200 
1201 static int
1202 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1203 {
1204 	struct pf_krule_global *tree;
1205 	struct pf_kruleset	*rs;
1206 	struct pf_krule		*rule;
1207 
1208 	PF_RULES_WASSERT();
1209 
1210 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1211 		return (EINVAL);
1212 	tree = pf_rule_tree_alloc(M_NOWAIT);
1213 	if (tree == NULL)
1214 		return (ENOMEM);
1215 	rs = pf_find_or_create_kruleset(anchor);
1216 	if (rs == NULL) {
1217 		pf_rule_tree_free(tree);
1218 		return (EINVAL);
1219 	}
1220 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1221 	rs->rules[rs_num].inactive.tree = tree;
1222 
1223 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1224 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1225 		rs->rules[rs_num].inactive.rcount--;
1226 	}
1227 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1228 	rs->rules[rs_num].inactive.open = 1;
1229 	return (0);
1230 }
1231 
1232 static int
1233 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1234 {
1235 	struct pf_kruleset	*rs;
1236 	struct pf_krule		*rule;
1237 
1238 	PF_RULES_WASSERT();
1239 
1240 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1241 		return (EINVAL);
1242 	rs = pf_find_kruleset(anchor);
1243 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1244 	    rs->rules[rs_num].inactive.ticket != ticket)
1245 		return (0);
1246 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1247 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1248 		rs->rules[rs_num].inactive.rcount--;
1249 	}
1250 	rs->rules[rs_num].inactive.open = 0;
1251 	return (0);
1252 }
1253 
1254 #define PF_MD5_UPD(st, elm)						\
1255 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1256 
1257 #define PF_MD5_UPD_STR(st, elm)						\
1258 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1259 
1260 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1261 		(stor) = htonl((st)->elm);				\
1262 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1263 } while (0)
1264 
1265 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1266 		(stor) = htons((st)->elm);				\
1267 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1268 } while (0)
1269 
1270 static void
1271 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1272 {
1273 	PF_MD5_UPD(pfr, addr.type);
1274 	switch (pfr->addr.type) {
1275 		case PF_ADDR_DYNIFTL:
1276 			PF_MD5_UPD(pfr, addr.v.ifname);
1277 			PF_MD5_UPD(pfr, addr.iflags);
1278 			break;
1279 		case PF_ADDR_TABLE:
1280 			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
1281 			    strlen(PF_OPTIMIZER_TABLE_PFX)))
1282 				PF_MD5_UPD(pfr, addr.v.tblname);
1283 			break;
1284 		case PF_ADDR_ADDRMASK:
1285 			/* XXX ignore af? */
1286 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1287 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1288 			break;
1289 	}
1290 
1291 	PF_MD5_UPD(pfr, port[0]);
1292 	PF_MD5_UPD(pfr, port[1]);
1293 	PF_MD5_UPD(pfr, neg);
1294 	PF_MD5_UPD(pfr, port_op);
1295 }
1296 
1297 static void
1298 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1299 {
1300 	u_int16_t x;
1301 	u_int32_t y;
1302 
1303 	pf_hash_rule_addr(ctx, &rule->src);
1304 	pf_hash_rule_addr(ctx, &rule->dst);
1305 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1306 		PF_MD5_UPD_STR(rule, label[i]);
1307 	PF_MD5_UPD_STR(rule, ifname);
1308 	PF_MD5_UPD_STR(rule, rcv_ifname);
1309 	PF_MD5_UPD_STR(rule, match_tagname);
1310 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1311 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1312 	PF_MD5_UPD_HTONL(rule, prob, y);
1313 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1314 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1315 	PF_MD5_UPD(rule, uid.op);
1316 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1317 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1318 	PF_MD5_UPD(rule, gid.op);
1319 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1320 	PF_MD5_UPD(rule, action);
1321 	PF_MD5_UPD(rule, direction);
1322 	PF_MD5_UPD(rule, af);
1323 	PF_MD5_UPD(rule, quick);
1324 	PF_MD5_UPD(rule, ifnot);
1325 	PF_MD5_UPD(rule, rcvifnot);
1326 	PF_MD5_UPD(rule, match_tag_not);
1327 	PF_MD5_UPD(rule, natpass);
1328 	PF_MD5_UPD(rule, keep_state);
1329 	PF_MD5_UPD(rule, proto);
1330 	PF_MD5_UPD(rule, type);
1331 	PF_MD5_UPD(rule, code);
1332 	PF_MD5_UPD(rule, flags);
1333 	PF_MD5_UPD(rule, flagset);
1334 	PF_MD5_UPD(rule, allow_opts);
1335 	PF_MD5_UPD(rule, rt);
1336 	PF_MD5_UPD(rule, tos);
1337 	PF_MD5_UPD(rule, scrub_flags);
1338 	PF_MD5_UPD(rule, min_ttl);
1339 	PF_MD5_UPD(rule, set_tos);
1340 	if (rule->anchor != NULL)
1341 		PF_MD5_UPD_STR(rule, anchor->path);
1342 }
1343 
1344 static void
1345 pf_hash_rule(struct pf_krule *rule)
1346 {
1347 	MD5_CTX		ctx;
1348 
1349 	MD5Init(&ctx);
1350 	pf_hash_rule_rolling(&ctx, rule);
1351 	MD5Final(rule->md5sum, &ctx);
1352 }
1353 
1354 static int
1355 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1356 {
1357 
1358 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1359 }
1360 
1361 static int
1362 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1363 {
1364 	struct pf_kruleset	*rs;
1365 	struct pf_krule		*rule, *old_rule;
1366 	struct pf_krulequeue	*old_rules;
1367 	struct pf_krule_global  *old_tree;
1368 	int			 error;
1369 	u_int32_t		 old_rcount;
1370 
1371 	PF_RULES_WASSERT();
1372 
1373 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1374 		return (EINVAL);
1375 	rs = pf_find_kruleset(anchor);
1376 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1377 	    ticket != rs->rules[rs_num].inactive.ticket)
1378 		return (EBUSY);
1379 
1380 	/* Calculate checksum for the main ruleset */
1381 	if (rs == &pf_main_ruleset) {
1382 		error = pf_setup_pfsync_matching(rs);
1383 		if (error != 0)
1384 			return (error);
1385 	}
1386 
1387 	/* Swap rules, keep the old. */
1388 	old_rules = rs->rules[rs_num].active.ptr;
1389 	old_rcount = rs->rules[rs_num].active.rcount;
1390 	old_tree = rs->rules[rs_num].active.tree;
1391 
1392 	rs->rules[rs_num].active.ptr =
1393 	    rs->rules[rs_num].inactive.ptr;
1394 	rs->rules[rs_num].active.tree =
1395 	    rs->rules[rs_num].inactive.tree;
1396 	rs->rules[rs_num].active.rcount =
1397 	    rs->rules[rs_num].inactive.rcount;
1398 
1399 	/* Attempt to preserve counter information. */
1400 	if (V_pf_status.keep_counters && old_tree != NULL) {
1401 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1402 		    entries) {
1403 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1404 			if (old_rule == NULL) {
1405 				continue;
1406 			}
1407 			pf_counter_u64_critical_enter();
1408 			pf_counter_u64_rollup_protected(&rule->evaluations,
1409 			    pf_counter_u64_fetch(&old_rule->evaluations));
1410 			pf_counter_u64_rollup_protected(&rule->packets[0],
1411 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1412 			pf_counter_u64_rollup_protected(&rule->packets[1],
1413 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1414 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1415 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1416 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1417 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1418 			pf_counter_u64_critical_exit();
1419 		}
1420 	}
1421 
1422 	rs->rules[rs_num].inactive.ptr = old_rules;
1423 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1424 	rs->rules[rs_num].inactive.rcount = old_rcount;
1425 
1426 	rs->rules[rs_num].active.ticket =
1427 	    rs->rules[rs_num].inactive.ticket;
1428 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1429 
1430 	/* Purge the old rule list. */
1431 	PF_UNLNKDRULES_LOCK();
1432 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1433 		pf_unlink_rule_locked(old_rules, rule);
1434 	PF_UNLNKDRULES_UNLOCK();
1435 	rs->rules[rs_num].inactive.rcount = 0;
1436 	rs->rules[rs_num].inactive.open = 0;
1437 	pf_remove_if_empty_kruleset(rs);
1438 	pf_rule_tree_free(old_tree);
1439 
1440 	return (0);
1441 }
1442 
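/*
 * Note on the counter preservation above: a rule in the committed
 * ruleset inherits the counters of an old rule only when both hash to
 * the same MD5 digest (see pf_hash_rule()), i.e. when every hashed
 * field of the two rules is identical.
 */
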
1443 static int
1444 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1445 {
1446 	MD5_CTX			 ctx;
1447 	struct pf_krule		*rule;
1448 	int			 rs_cnt;
1449 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1450 
1451 	MD5Init(&ctx);
1452 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1453 		/* XXX PF_RULESET_SCRUB as well? */
1454 		if (rs_cnt == PF_RULESET_SCRUB)
1455 			continue;
1456 
1457 		if (rs->rules[rs_cnt].inactive.rcount) {
1458 			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1459 			    entries) {
1460 				pf_hash_rule_rolling(&ctx, rule);
1461 			}
1462 		}
1463 	}
1464 
1465 	MD5Final(digest, &ctx);
1466 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1467 	return (0);
1468 }
1469 
1470 static int
1471 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1472 {
1473 	int error = 0;
1474 
1475 	switch (addr->type) {
1476 	case PF_ADDR_TABLE:
1477 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1478 		if (addr->p.tbl == NULL)
1479 			error = ENOMEM;
1480 		break;
1481 	default:
1482 		error = EINVAL;
1483 	}
1484 
1485 	return (error);
1486 }
1487 
1488 static int
1489 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1490     sa_family_t af)
1491 {
1492 	int error = 0;
1493 
1494 	switch (addr->type) {
1495 	case PF_ADDR_TABLE:
1496 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1497 		if (addr->p.tbl == NULL)
1498 			error = ENOMEM;
1499 		break;
1500 	case PF_ADDR_DYNIFTL:
1501 		error = pfi_dynaddr_setup(addr, af);
1502 		break;
1503 	}
1504 
1505 	return (error);
1506 }
1507 
1508 void
1509 pf_addr_copyout(struct pf_addr_wrap *addr)
1510 {
1511 
1512 	switch (addr->type) {
1513 	case PF_ADDR_DYNIFTL:
1514 		pfi_dynaddr_copyout(addr);
1515 		break;
1516 	case PF_ADDR_TABLE:
1517 		pf_tbladdr_copyout(addr);
1518 		break;
1519 	}
1520 }
1521 
1522 static void
1523 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1524 {
1525 	int	secs = time_uptime;
1526 
1527 	bzero(out, sizeof(struct pf_src_node));
1528 
1529 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1530 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1531 
1532 	if (in->rule != NULL)
1533 		out->rule.nr = in->rule->nr;
1534 
1535 	for (int i = 0; i < 2; i++) {
1536 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1537 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1538 	}
1539 
1540 	out->states = in->states;
1541 	out->conn = in->conn;
1542 	out->af = in->af;
1543 	out->ruletype = in->ruletype;
1544 
1545 	out->creation = secs - in->creation;
1546 	if (in->expire > secs)
1547 		out->expire = in->expire - secs;
1548 	else
1549 		out->expire = 0;
1550 
1551 	/* Adjust the connection rate estimate. */
1552 	out->conn_rate.limit = in->conn_rate.limit;
1553 	out->conn_rate.seconds = in->conn_rate.seconds;
1554 	/* If there's no limit, there's no counter_rate. */
1555 	if (in->conn_rate.cr != NULL)
1556 		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
1557 }
1558 
1559 #ifdef ALTQ
1560 /*
1561  * Handle export of struct pf_kaltq to user binaries that may be using any
1562  * version of struct pf_altq.
1563  */
1564 static int
1565 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1566 {
1567 	u_int32_t version;
1568 
1569 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1570 		version = 0;
1571 	else
1572 		version = pa->version;
1573 
1574 	if (version > PFIOC_ALTQ_VERSION)
1575 		return (EINVAL);
1576 
1577 #define ASSIGN(x) exported_q->x = q->x
1578 #define COPY(x) \
1579 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1580 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1581 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1582 
1583 	switch (version) {
1584 	case 0: {
1585 		struct pf_altq_v0 *exported_q =
1586 		    &((struct pfioc_altq_v0 *)pa)->altq;
1587 
1588 		COPY(ifname);
1589 
1590 		ASSIGN(scheduler);
1591 		ASSIGN(tbrsize);
1592 		exported_q->tbrsize = SATU16(q->tbrsize);
1593 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1594 
1595 		COPY(qname);
1596 		COPY(parent);
1597 		ASSIGN(parent_qid);
1598 		exported_q->bandwidth = SATU32(q->bandwidth);
1599 		ASSIGN(priority);
1600 		ASSIGN(local_flags);
1601 
1602 		ASSIGN(qlimit);
1603 		ASSIGN(flags);
1604 
1605 		if (q->scheduler == ALTQT_HFSC) {
1606 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1607 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1608 			    SATU32(q->pq_u.hfsc_opts.x)
1609 
1610 			ASSIGN_OPT_SATU32(rtsc_m1);
1611 			ASSIGN_OPT(rtsc_d);
1612 			ASSIGN_OPT_SATU32(rtsc_m2);
1613 
1614 			ASSIGN_OPT_SATU32(lssc_m1);
1615 			ASSIGN_OPT(lssc_d);
1616 			ASSIGN_OPT_SATU32(lssc_m2);
1617 
1618 			ASSIGN_OPT_SATU32(ulsc_m1);
1619 			ASSIGN_OPT(ulsc_d);
1620 			ASSIGN_OPT_SATU32(ulsc_m2);
1621 
1622 			ASSIGN_OPT(flags);
1623 
1624 #undef ASSIGN_OPT
1625 #undef ASSIGN_OPT_SATU32
1626 		} else
1627 			COPY(pq_u);
1628 
1629 		ASSIGN(qid);
1630 		break;
1631 	}
1632 	case 1:	{
1633 		struct pf_altq_v1 *exported_q =
1634 		    &((struct pfioc_altq_v1 *)pa)->altq;
1635 
1636 		COPY(ifname);
1637 
1638 		ASSIGN(scheduler);
1639 		ASSIGN(tbrsize);
1640 		ASSIGN(ifbandwidth);
1641 
1642 		COPY(qname);
1643 		COPY(parent);
1644 		ASSIGN(parent_qid);
1645 		ASSIGN(bandwidth);
1646 		ASSIGN(priority);
1647 		ASSIGN(local_flags);
1648 
1649 		ASSIGN(qlimit);
1650 		ASSIGN(flags);
1651 		COPY(pq_u);
1652 
1653 		ASSIGN(qid);
1654 		break;
1655 	}
1656 	default:
1657 		panic("%s: unhandled struct pfioc_altq version", __func__);
1658 		break;
1659 	}
1660 
1661 #undef ASSIGN
1662 #undef COPY
1663 #undef SATU16
1664 #undef SATU32
1665 
1666 	return (0);
1667 }
1668 
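/*
 * Illustrative example for the version-0 export above: SATU16() and
 * SATU32() saturate instead of truncating, so a 64-bit ifbandwidth of
 * 5000000000 (5 Gbit/s) is exported to the v0 ABI as UINT_MAX rather
 * than wrapping around.
 */
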
1669 /*
1670  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1671  * that may be using any version of it.
1672  */
1673 static int
1674 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1675 {
1676 	u_int32_t version;
1677 
1678 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1679 		version = 0;
1680 	else
1681 		version = pa->version;
1682 
1683 	if (version > PFIOC_ALTQ_VERSION)
1684 		return (EINVAL);
1685 
1686 #define ASSIGN(x) q->x = imported_q->x
1687 #define COPY(x) \
1688 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1689 
1690 	switch (version) {
1691 	case 0: {
1692 		struct pf_altq_v0 *imported_q =
1693 		    &((struct pfioc_altq_v0 *)pa)->altq;
1694 
1695 		COPY(ifname);
1696 
1697 		ASSIGN(scheduler);
1698 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1699 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1700 
1701 		COPY(qname);
1702 		COPY(parent);
1703 		ASSIGN(parent_qid);
1704 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1705 		ASSIGN(priority);
1706 		ASSIGN(local_flags);
1707 
1708 		ASSIGN(qlimit);
1709 		ASSIGN(flags);
1710 
1711 		if (imported_q->scheduler == ALTQT_HFSC) {
1712 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1713 
1714 			/*
1715 			 * The m1 and m2 parameters are being copied from
1716 			 * 32-bit to 64-bit.
1717 			 */
1718 			ASSIGN_OPT(rtsc_m1);
1719 			ASSIGN_OPT(rtsc_d);
1720 			ASSIGN_OPT(rtsc_m2);
1721 
1722 			ASSIGN_OPT(lssc_m1);
1723 			ASSIGN_OPT(lssc_d);
1724 			ASSIGN_OPT(lssc_m2);
1725 
1726 			ASSIGN_OPT(ulsc_m1);
1727 			ASSIGN_OPT(ulsc_d);
1728 			ASSIGN_OPT(ulsc_m2);
1729 
1730 			ASSIGN_OPT(flags);
1731 
1732 #undef ASSIGN_OPT
1733 		} else
1734 			COPY(pq_u);
1735 
1736 		ASSIGN(qid);
1737 		break;
1738 	}
1739 	case 1: {
1740 		struct pf_altq_v1 *imported_q =
1741 		    &((struct pfioc_altq_v1 *)pa)->altq;
1742 
1743 		COPY(ifname);
1744 
1745 		ASSIGN(scheduler);
1746 		ASSIGN(tbrsize);
1747 		ASSIGN(ifbandwidth);
1748 
1749 		COPY(qname);
1750 		COPY(parent);
1751 		ASSIGN(parent_qid);
1752 		ASSIGN(bandwidth);
1753 		ASSIGN(priority);
1754 		ASSIGN(local_flags);
1755 
1756 		ASSIGN(qlimit);
1757 		ASSIGN(flags);
1758 		COPY(pq_u);
1759 
1760 		ASSIGN(qid);
1761 		break;
1762 	}
1763 	default:
1764 		panic("%s: unhandled struct pfioc_altq version", __func__);
1765 		break;
1766 	}
1767 
1768 #undef ASSIGN
1769 #undef COPY
1770 
1771 	return (0);
1772 }
1773 
1774 static struct pf_altq *
1775 pf_altq_get_nth_active(u_int32_t n)
1776 {
1777 	struct pf_altq		*altq;
1778 	u_int32_t		 nr;
1779 
1780 	nr = 0;
1781 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1782 		if (nr == n)
1783 			return (altq);
1784 		nr++;
1785 	}
1786 
1787 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1788 		if (nr == n)
1789 			return (altq);
1790 		nr++;
1791 	}
1792 
1793 	return (NULL);
1794 }
1795 #endif /* ALTQ */
1796 
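/*
 * Allocate a zeroed rule and set up its pool mutexes and per-CPU
 * timestamp storage; with M_WAITOK these allocations cannot fail.
 */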
1797 struct pf_krule *
1798 pf_krule_alloc(void)
1799 {
1800 	struct pf_krule *rule;
1801 
1802 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1803 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1804 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1805 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1806 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1807 	    M_WAITOK | M_ZERO);
1808 	return (rule);
1809 }
1810 
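/*
 * Release a rule and every counter attached to it.  Safe to call with a
 * NULL rule.
 */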
1811 void
1812 pf_krule_free(struct pf_krule *rule)
1813 {
1814 #ifdef PF_WANT_32_TO_64_COUNTER
1815 	bool wowned;
1816 #endif
1817 
1818 	if (rule == NULL)
1819 		return;
1820 
1821 #ifdef PF_WANT_32_TO_64_COUNTER
1822 	if (rule->allrulelinked) {
1823 		wowned = PF_RULES_WOWNED();
1824 		if (!wowned)
1825 			PF_RULES_WLOCK();
1826 		LIST_REMOVE(rule, allrulelist);
1827 		V_pf_allrulecount--;
1828 		if (!wowned)
1829 			PF_RULES_WUNLOCK();
1830 	}
1831 #endif
1832 
1833 	pf_counter_u64_deinit(&rule->evaluations);
1834 	for (int i = 0; i < 2; i++) {
1835 		pf_counter_u64_deinit(&rule->packets[i]);
1836 		pf_counter_u64_deinit(&rule->bytes[i]);
1837 	}
1838 	counter_u64_free(rule->states_cur);
1839 	counter_u64_free(rule->states_tot);
1840 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
1841 		counter_u64_free(rule->src_nodes[sn_type]);
1842 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1843 
1844 	mtx_destroy(&rule->nat.mtx);
1845 	mtx_destroy(&rule->rdr.mtx);
1846 	mtx_destroy(&rule->route.mtx);
1847 	free(rule, M_PFRULE);
1848 }
1849 
1850 void
1851 pf_krule_clear_counters(struct pf_krule *rule)
1852 {
1853 	pf_counter_u64_zero(&rule->evaluations);
1854 	for (int i = 0; i < 2; i++) {
1855 		pf_counter_u64_zero(&rule->packets[i]);
1856 		pf_counter_u64_zero(&rule->bytes[i]);
1857 	}
1858 	counter_u64_zero(rule->states_tot);
1859 }
1860 
1861 static void
1862 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1863     struct pf_pooladdr *pool)
1864 {
1865 
1866 	bzero(pool, sizeof(*pool));
1867 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1868 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1869 }
1870 
1871 static int
1872 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1873     struct pf_kpooladdr *kpool)
1874 {
1875 	int ret;
1876 
1877 	bzero(kpool, sizeof(*kpool));
1878 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1879 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1880 	    sizeof(kpool->ifname));
1881 	return (ret);
1882 }
1883 
1884 static void
1885 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1886 {
1887 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1888 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1889 
1890 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1891 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1892 
1893 	kpool->tblidx = pool->tblidx;
1894 	kpool->proxy_port[0] = pool->proxy_port[0];
1895 	kpool->proxy_port[1] = pool->proxy_port[1];
1896 	kpool->opts = pool->opts;
1897 }
1898 
1899 static int
1900 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1901 {
1902 	int ret;
1903 
1904 #ifndef INET
1905 	if (rule->af == AF_INET) {
1906 		return (EAFNOSUPPORT);
1907 	}
1908 #endif /* INET */
1909 #ifndef INET6
1910 	if (rule->af == AF_INET6) {
1911 		return (EAFNOSUPPORT);
1912 	}
1913 #endif /* INET6 */
1914 
1915 	ret = pf_check_rule_addr(&rule->src);
1916 	if (ret != 0)
1917 		return (ret);
1918 	ret = pf_check_rule_addr(&rule->dst);
1919 	if (ret != 0)
1920 		return (ret);
1921 
1922 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1923 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1924 
1925 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1926 	if (ret != 0)
1927 		return (ret);
1928 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1929 	if (ret != 0)
1930 		return (ret);
1931 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1932 	if (ret != 0)
1933 		return (ret);
1934 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1935 	if (ret != 0)
1936 		return (ret);
1937 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1938 	    sizeof(rule->tagname));
1939 	if (ret != 0)
1940 		return (ret);
1941 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1942 	    sizeof(rule->match_tagname));
1943 	if (ret != 0)
1944 		return (ret);
1945 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1946 	    sizeof(rule->overload_tblname));
1947 	if (ret != 0)
1948 		return (ret);
1949 
1950 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1951 
1952 	/* Don't allow userspace to set evaluations, packets or bytes. */
1953 	/* kif, anchor, overload_tbl are not copied over. */
1954 
1955 	krule->os_fingerprint = rule->os_fingerprint;
1956 
1957 	krule->rtableid = rule->rtableid;
1958 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1959 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1960 	krule->max_states = rule->max_states;
1961 	krule->max_src_nodes = rule->max_src_nodes;
1962 	krule->max_src_states = rule->max_src_states;
1963 	krule->max_src_conn = rule->max_src_conn;
1964 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1965 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1966 	krule->qid = rule->qid;
1967 	krule->pqid = rule->pqid;
1968 	krule->nr = rule->nr;
1969 	krule->prob = rule->prob;
1970 	krule->cuid = rule->cuid;
1971 	krule->cpid = rule->cpid;
1972 
1973 	krule->return_icmp = rule->return_icmp;
1974 	krule->return_icmp6 = rule->return_icmp6;
1975 	krule->max_mss = rule->max_mss;
1976 	krule->tag = rule->tag;
1977 	krule->match_tag = rule->match_tag;
1978 	krule->scrub_flags = rule->scrub_flags;
1979 
1980 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1981 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1982 
1983 	krule->rule_flag = rule->rule_flag;
1984 	krule->action = rule->action;
1985 	krule->direction = rule->direction;
1986 	krule->log = rule->log;
1987 	krule->logif = rule->logif;
1988 	krule->quick = rule->quick;
1989 	krule->ifnot = rule->ifnot;
1990 	krule->match_tag_not = rule->match_tag_not;
1991 	krule->natpass = rule->natpass;
1992 
1993 	krule->keep_state = rule->keep_state;
1994 	krule->af = rule->af;
1995 	krule->proto = rule->proto;
1996 	krule->type = rule->type;
1997 	krule->code = rule->code;
1998 	krule->flags = rule->flags;
1999 	krule->flagset = rule->flagset;
2000 	krule->min_ttl = rule->min_ttl;
2001 	krule->allow_opts = rule->allow_opts;
2002 	krule->rt = rule->rt;
2003 	krule->return_ttl = rule->return_ttl;
2004 	krule->tos = rule->tos;
2005 	krule->set_tos = rule->set_tos;
2006 
2007 	krule->flush = rule->flush;
2008 	krule->prio = rule->prio;
2009 	krule->set_prio[0] = rule->set_prio[0];
2010 	krule->set_prio[1] = rule->set_prio[1];
2011 
2012 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2013 
2014 	return (0);
2015 }
2016 
2017 int
2018 pf_ioctl_getrules(struct pfioc_rule *pr)
2019 {
2020 	struct pf_kruleset	*ruleset;
2021 	struct pf_krule		*tail;
2022 	int			 rs_num;
2023 
2024 	PF_RULES_WLOCK();
2025 	ruleset = pf_find_kruleset(pr->anchor);
2026 	if (ruleset == NULL) {
2027 		PF_RULES_WUNLOCK();
2028 		return (EINVAL);
2029 	}
2030 	rs_num = pf_get_ruleset_number(pr->rule.action);
2031 	if (rs_num >= PF_RULESET_MAX) {
2032 		PF_RULES_WUNLOCK();
2033 		return (EINVAL);
2034 	}
2035 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2036 	    pf_krulequeue);
2037 	if (tail)
2038 		pr->nr = tail->nr + 1;
2039 	else
2040 		pr->nr = 0;
2041 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2042 	PF_RULES_WUNLOCK();
2043 
2044 	return (0);
2045 }
2046 
2047 static int
2048 pf_rule_checkaf(struct pf_krule *r)
2049 {
2050 	switch (r->af) {
2051 	case 0:
2052 		if (r->rule_flag & PFRULE_AFTO)
2053 			return (EPFNOSUPPORT);
2054 		break;
2055 	case AF_INET:
2056 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
2057 			return (EPFNOSUPPORT);
2058 		break;
2059 #ifdef INET6
2060 	case AF_INET6:
2061 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
2062 			return (EPFNOSUPPORT);
2063 		break;
2064 #endif /* INET6 */
2065 	default:
2066 		return (EPFNOSUPPORT);
2067 	}
2068 
2069 	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
2070 		return (EPFNOSUPPORT);
2071 
2072 	return (0);
2073 }
2074 
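/*
 * Reject port range operators that cannot select a sensible set of
 * ports: e.g. the inclusive range 34:12 (PF_OP_RRG with a > b) matches
 * nothing, while 34<>22 (PF_OP_XRG with a > b) matches everything.
 */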
2075 static int
2076 pf_validate_range(uint8_t op, uint16_t port[2])
2077 {
2078 	uint16_t a = ntohs(port[0]);
2079 	uint16_t b = ntohs(port[1]);
2080 
2081 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2082 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2083 	    (op == PF_OP_XRG && a > b))	   /* 34<>22, i.e. all */
2084 		return (1);
2085 	return (0);
2086 }
2087 
2088 int
2089 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2090     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2091     uid_t uid, pid_t pid)
2092 {
2093 	struct pf_kruleset	*ruleset;
2094 	struct pf_krule		*tail;
2095 	struct pf_kpooladdr	*pa;
2096 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2097 	int			 rs_num;
2098 	int			 error = 0;
2099 
2100 #define	ERROUT(x)		ERROUT_FUNCTION(errout, x)
2101 #define	ERROUT_UNLOCKED(x)	ERROUT_FUNCTION(errout_unlocked, x)
2102 
2103 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
2104 		ERROUT_UNLOCKED(EINVAL);
2105 
2106 	if ((error = pf_rule_checkaf(rule)))
2107 		ERROUT_UNLOCKED(error);
2108 	if (pf_validate_range(rule->src.port_op, rule->src.port))
2109 		ERROUT_UNLOCKED(EINVAL);
2110 	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
2111 		ERROUT_UNLOCKED(EINVAL);
2112 
2113 	if (rule->ifname[0])
2114 		kif = pf_kkif_create(M_WAITOK);
2115 	if (rule->rcv_ifname[0])
2116 		rcv_kif = pf_kkif_create(M_WAITOK);
2117 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2118 	for (int i = 0; i < 2; i++) {
2119 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2120 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2121 	}
2122 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2123 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2124 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
2125 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
2126 	rule->cuid = uid;
2127 	rule->cpid = pid;
2128 	TAILQ_INIT(&rule->rdr.list);
2129 	TAILQ_INIT(&rule->nat.list);
2130 	TAILQ_INIT(&rule->route.list);
2131 
2132 	PF_CONFIG_LOCK();
2133 	PF_RULES_WLOCK();
2134 #ifdef PF_WANT_32_TO_64_COUNTER
2135 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2136 	MPASS(!rule->allrulelinked);
2137 	rule->allrulelinked = true;
2138 	V_pf_allrulecount++;
2139 #endif
2140 	ruleset = pf_find_kruleset(anchor);
2141 	if (ruleset == NULL)
2142 		ERROUT(EINVAL);
2143 	rs_num = pf_get_ruleset_number(rule->action);
2144 	if (rs_num >= PF_RULESET_MAX)
2145 		ERROUT(EINVAL);
2146 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2147 		DPFPRINTF(PF_DEBUG_MISC,
2148 		    "ticket: %d != [%d]%d", ticket, rs_num,
2149 		    ruleset->rules[rs_num].inactive.ticket);
2150 		ERROUT(EBUSY);
2151 	}
2152 	if (pool_ticket != V_ticket_pabuf) {
2153 		DPFPRINTF(PF_DEBUG_MISC,
2154 		    "pool_ticket: %d != %d", pool_ticket,
2155 		    V_ticket_pabuf);
2156 		ERROUT(EBUSY);
2157 	}
2158 	/*
2159 	 * XXXMJG hack: there is no mechanism to ensure the caller started
2160 	 * the transaction. The ticket checked above may happen to match by
2161 	 * accident, even if nobody called DIOCXBEGIN, let alone this
2162 	 * process. Partially work around it by checking whether the RB tree
2163 	 * got allocated; see pf_begin_rules.
2164 	 */
2165 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2166 		ERROUT(EINVAL);
2167 	}
2168 
2169 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2170 	    pf_krulequeue);
2171 	if (tail)
2172 		rule->nr = tail->nr + 1;
2173 	else
2174 		rule->nr = 0;
2175 	if (rule->ifname[0]) {
2176 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2177 		kif = NULL;
2178 		pfi_kkif_ref(rule->kif);
2179 	} else
2180 		rule->kif = NULL;
2181 
2182 	if (rule->rcv_ifname[0]) {
2183 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2184 		rcv_kif = NULL;
2185 		pfi_kkif_ref(rule->rcv_kif);
2186 	} else
2187 		rule->rcv_kif = NULL;
2188 
2189 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2190 		ERROUT(EBUSY);
2191 #ifdef ALTQ
2192 	/* set queue IDs */
2193 	if (rule->qname[0] != 0) {
2194 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2195 			ERROUT(EBUSY);
2196 		else if (rule->pqname[0] != 0) {
2197 			if ((rule->pqid =
2198 			    pf_qname2qid(rule->pqname)) == 0)
2199 				ERROUT(EBUSY);
2200 		} else
2201 			rule->pqid = rule->qid;
2202 	}
2203 #endif
2204 	if (rule->tagname[0])
2205 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2206 			ERROUT(EBUSY);
2207 	if (rule->match_tagname[0])
2208 		if ((rule->match_tag =
2209 		    pf_tagname2tag(rule->match_tagname)) == 0)
2210 			ERROUT(EBUSY);
2211 	if (rule->rt && !rule->direction)
2212 		ERROUT(EINVAL);
2213 	if (!rule->log)
2214 		rule->logif = 0;
2215 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
2216 	   rule->pktrate.seconds))
2217 		ERROUT(ENOMEM);
2218 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2219 		ERROUT(ENOMEM);
2220 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2221 		ERROUT(ENOMEM);
2222 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2223 		ERROUT(EINVAL);
2224 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2225 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2226 	    rule->set_prio[1] > PF_PRIO_MAX))
2227 		ERROUT(EINVAL);
2228 	for (int i = 0; i < 3; i++) {
2229 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2230 			if (pa->addr.type == PF_ADDR_TABLE) {
2231 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2232 				    pa->addr.v.tblname);
2233 				if (pa->addr.p.tbl == NULL)
2234 					ERROUT(ENOMEM);
2235 			}
2236 	}
2237 
2238 	rule->overload_tbl = NULL;
2239 	if (rule->overload_tblname[0]) {
2240 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2241 		    rule->overload_tblname)) == NULL)
2242 			ERROUT(EINVAL);
2243 		else
2244 			rule->overload_tbl->pfrkt_flags |=
2245 			    PFR_TFLAG_ACTIVE;
2246 	}
2247 
2248 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2249 
2250 	/*
2251 	 * Old versions of pfctl provide route and redirection pools in a
2252 	 * single common redirection pool, rdr. New versions use rdr only
2253 	 * for rdr-to rules.
2254 	 */
2255 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
2256 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
2257 	} else {
2258 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2259 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2260 	}
2261 
2262 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2263 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
2264 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
2265 		ERROUT(EINVAL);
2266 	}
2267 
2268 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
2269 		ERROUT(EINVAL);
2270 	}
2271 
2272 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
2273 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
2274 		ERROUT(EINVAL);
2275 	}
2276 
2277 	MPASS(error == 0);
2278 
2279 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2280 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2281 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2282 	rule->route.ipv6_nexthop_af = AF_INET6;
2283 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2284 	    rule, entries);
2285 	ruleset->rules[rs_num].inactive.rcount++;
2286 
2287 	PF_RULES_WUNLOCK();
2288 	pf_hash_rule(rule);
2289 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2290 		PF_RULES_WLOCK();
2291 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2292 		ruleset->rules[rs_num].inactive.rcount--;
2293 		pf_free_rule(rule);
2294 		rule = NULL;
2295 		ERROUT(EEXIST);
2296 	}
2297 	PF_CONFIG_UNLOCK();
2298 
2299 	return (0);
2300 
2301 #undef ERROUT
2302 #undef ERROUT_UNLOCKED
2303 errout:
2304 	PF_RULES_WUNLOCK();
2305 	PF_CONFIG_UNLOCK();
2306 errout_unlocked:
2307 	pf_kkif_free(rcv_kif);
2308 	pf_kkif_free(kif);
2309 	pf_krule_free(rule);
2310 	return (error);
2311 }
2312 
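/*
 * Check whether any of the rule's labels matches the given label
 * exactly; the label array is terminated by the first empty string.
 */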
2313 static bool
2314 pf_label_match(const struct pf_krule *rule, const char *label)
2315 {
2316 	int i = 0;
2317 
2318 	while (*rule->label[i]) {
2319 		if (strcmp(rule->label[i], label) == 0)
2320 			return (true);
2321 		i++;
2322 	}
2323 
2324 	return (false);
2325 }
2326 
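/*
 * Remove the state matching the given key, but only if it is the sole
 * match; when several states match, kill none and report zero.
 */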
2327 static unsigned int
2328 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2329 {
2330 	struct pf_kstate *s;
2331 	int more = 0;
2332 
2333 	s = pf_find_state_all(key, dir, &more);
2334 	if (s == NULL)
2335 		return (0);
2336 
2337 	if (more) {
2338 		PF_STATE_UNLOCK(s);
2339 		return (0);
2340 	}
2341 
2342 	pf_remove_state(s);
2343 	return (1);
2344 }
2345 
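/*
 * Walk one ID-hash row and remove every state that matches the kill
 * criteria.  Removing a state drops the row lock, so the scan restarts
 * from the head of the row after each kill.
 */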
2346 static int
2347 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2348 {
2349 	struct pf_kstate	*s;
2350 	struct pf_state_key	*sk;
2351 	struct pf_addr		*srcaddr, *dstaddr;
2352 	struct pf_state_key_cmp	 match_key;
2353 	int			 idx, killed = 0;
2354 	unsigned int		 dir;
2355 	u_int16_t		 srcport, dstport;
2356 	struct pfi_kkif		*kif;
2357 
2358 relock_DIOCKILLSTATES:
2359 	PF_HASHROW_LOCK(ih);
2360 	LIST_FOREACH(s, &ih->states, entry) {
2361 		/* For floating states look at the original kif. */
2362 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2363 
2364 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2365 		if (s->direction == PF_OUT) {
2366 			srcaddr = &sk->addr[1];
2367 			dstaddr = &sk->addr[0];
2368 			srcport = sk->port[1];
2369 			dstport = sk->port[0];
2370 		} else {
2371 			srcaddr = &sk->addr[0];
2372 			dstaddr = &sk->addr[1];
2373 			srcport = sk->port[0];
2374 			dstport = sk->port[1];
2375 		}
2376 
2377 		if (psk->psk_af && sk->af != psk->psk_af)
2378 			continue;
2379 
2380 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2381 			continue;
2382 
2383 		if (! pf_match_addr(psk->psk_src.neg,
2384 		    &psk->psk_src.addr.v.a.addr,
2385 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2386 			continue;
2387 
2388 		if (! pf_match_addr(psk->psk_dst.neg,
2389 		    &psk->psk_dst.addr.v.a.addr,
2390 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2391 			continue;
2392 
2393 		if (! pf_match_addr(psk->psk_rt_addr.neg,
2394 		    &psk->psk_rt_addr.addr.v.a.addr,
2395 		    &psk->psk_rt_addr.addr.v.a.mask,
2396 		    &s->act.rt_addr, sk->af))
2397 			continue;
2398 
2399 		if (psk->psk_src.port_op != 0 &&
2400 		    ! pf_match_port(psk->psk_src.port_op,
2401 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2402 			continue;
2403 
2404 		if (psk->psk_dst.port_op != 0 &&
2405 		    ! pf_match_port(psk->psk_dst.port_op,
2406 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2407 			continue;
2408 
2409 		if (psk->psk_label[0] &&
2410 		    ! pf_label_match(s->rule, psk->psk_label))
2411 			continue;
2412 
2413 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2414 		    kif->pfik_name))
2415 			continue;
2416 
2417 		if (psk->psk_kill_match) {
2418 			/* Create the key to find matching states, with lock
2419 			 * held. */
2420 
2421 			bzero(&match_key, sizeof(match_key));
2422 
2423 			if (s->direction == PF_OUT) {
2424 				dir = PF_IN;
2425 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2426 			} else {
2427 				dir = PF_OUT;
2428 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2429 			}
2430 
2431 			match_key.af = s->key[idx]->af;
2432 			match_key.proto = s->key[idx]->proto;
2433 			pf_addrcpy(&match_key.addr[0],
2434 			    &s->key[idx]->addr[1], match_key.af);
2435 			match_key.port[0] = s->key[idx]->port[1];
2436 			pf_addrcpy(&match_key.addr[1],
2437 			    &s->key[idx]->addr[0], match_key.af);
2438 			match_key.port[1] = s->key[idx]->port[0];
2439 		}
2440 
2441 		pf_remove_state(s);
2442 		killed++;
2443 
2444 		if (psk->psk_kill_match)
2445 			killed += pf_kill_matching_state(&match_key, dir);
2446 
2447 		goto relock_DIOCKILLSTATES;
2448 	}
2449 	PF_HASHROW_UNLOCK(ih);
2450 
2451 	return (killed);
2452 }
2453 
2454 void
2455 unhandled_af(int af)
2456 {
2457 	panic("unhandled af %d", af);
2458 }
2459 
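/*
 * Enable pf: hook the packet filter (and the Ethernet filter, if any
 * Ethernet rules are active) into pfil(9) and mark it running.
 * Returns EEXIST if pf was already enabled.
 */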
2460 int
2461 pf_start(void)
2462 {
2463 	int error = 0;
2464 
2465 	sx_xlock(&V_pf_ioctl_lock);
2466 	if (V_pf_status.running)
2467 		error = EEXIST;
2468 	else {
2469 		hook_pf();
2470 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2471 			hook_pf_eth();
2472 		V_pf_status.running = 1;
2473 		V_pf_status.since = time_uptime;
2474 		new_unrhdr64(&V_pf_stateid, time_second);
2475 
2476 		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
2477 	}
2478 	sx_xunlock(&V_pf_ioctl_lock);
2479 
2480 	return (error);
2481 }
2482 
2483 int
2484 pf_stop(void)
2485 {
2486 	int error = 0;
2487 
2488 	sx_xlock(&V_pf_ioctl_lock);
2489 	if (!V_pf_status.running)
2490 		error = ENOENT;
2491 	else {
2492 		V_pf_status.running = 0;
2493 		dehook_pf();
2494 		dehook_pf_eth();
2495 		V_pf_status.since = time_uptime;
2496 		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
2497 	}
2498 	sx_xunlock(&V_pf_ioctl_lock);
2499 
2500 	return (error);
2501 }
2502 
2503 void
2504 pf_ioctl_clear_status(void)
2505 {
2506 	PF_RULES_WLOCK();
2507 	for (int i = 0; i < PFRES_MAX; i++)
2508 		counter_u64_zero(V_pf_status.counters[i]);
2509 	for (int i = 0; i < FCNT_MAX; i++)
2510 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2511 	for (int i = 0; i < SCNT_MAX; i++)
2512 		counter_u64_zero(V_pf_status.scounters[i]);
2513 	for (int i = 0; i < NCNT_MAX; i++)
2514 		counter_u64_zero(V_pf_status.ncounters[i]);
2515 	for (int i = 0; i < KLCNT_MAX; i++)
2516 		counter_u64_zero(V_pf_status.lcounters[i]);
2517 	V_pf_status.since = time_uptime;
2518 	if (*V_pf_status.ifname)
2519 		pfi_update_status(V_pf_status.ifname, NULL);
2520 	PF_RULES_WUNLOCK();
2521 }
2522 
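/*
 * Set a state timeout and optionally report the previous value.  A
 * PFTM_INTERVAL of zero is bumped to one second, and shortening the
 * interval wakes the purge thread so the change takes effect promptly.
 */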
2523 int
2524 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2525 {
2526 	uint32_t old;
2527 
2528 	if (timeout < 0 || timeout >= PFTM_MAX ||
2529 	    seconds < 0)
2530 		return (EINVAL);
2531 
2532 	PF_RULES_WLOCK();
2533 	old = V_pf_default_rule.timeout[timeout];
2534 	if (timeout == PFTM_INTERVAL && seconds == 0)
2535 		seconds = 1;
2536 	V_pf_default_rule.timeout[timeout] = seconds;
2537 	if (timeout == PFTM_INTERVAL && seconds < old)
2538 		wakeup(pf_purge_thread);
2539 
2540 	if (prev_seconds != NULL)
2541 		*prev_seconds = old;
2542 
2543 	PF_RULES_WUNLOCK();
2544 
2545 	return (0);
2546 }
2547 
2548 int
2549 pf_ioctl_get_timeout(int timeout, int *seconds)
2550 {
2551 	PF_RULES_RLOCK_TRACKER;
2552 
2553 	if (timeout < 0 || timeout >= PFTM_MAX)
2554 		return (EINVAL);
2555 
2556 	PF_RULES_RLOCK();
2557 	*seconds = V_pf_default_rule.timeout[timeout];
2558 	PF_RULES_RUNLOCK();
2559 
2560 	return (0);
2561 }
2562 
2563 int
2564 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2565 {
2566 
2567 	PF_RULES_WLOCK();
2568 	if (index < 0 || index >= PF_LIMIT_MAX ||
2569 	    V_pf_limits[index].zone == NULL) {
2570 		PF_RULES_WUNLOCK();
2571 		return (EINVAL);
2572 	}
2573 	uma_zone_set_max(V_pf_limits[index].zone,
2574 	    limit == 0 ? INT_MAX : limit);
2575 	if (old_limit != NULL)
2576 		*old_limit = V_pf_limits[index].limit;
2577 	V_pf_limits[index].limit = limit;
2578 	PF_RULES_WUNLOCK();
2579 
2580 	return (0);
2581 }
2582 
2583 int
2584 pf_ioctl_get_limit(int index, unsigned int *limit)
2585 {
2586 	PF_RULES_RLOCK_TRACKER;
2587 
2588 	if (index < 0 || index >= PF_LIMIT_MAX)
2589 		return (EINVAL);
2590 
2591 	PF_RULES_RLOCK();
2592 	*limit = V_pf_limits[index].limit;
2593 	PF_RULES_RUNLOCK();
2594 
2595 	return (0);
2596 }
2597 
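/*
 * Start a pool-address transaction: flush the NAT, redirection, and
 * route staging buffers and hand out a fresh ticket.
 */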
2598 int
2599 pf_ioctl_begin_addrs(uint32_t *ticket)
2600 {
2601 	PF_RULES_WLOCK();
2602 	pf_empty_kpool(&V_pf_pabuf[0]);
2603 	pf_empty_kpool(&V_pf_pabuf[1]);
2604 	pf_empty_kpool(&V_pf_pabuf[2]);
2605 	*ticket = ++V_ticket_pabuf;
2606 	PF_RULES_WUNLOCK();
2607 
2608 	return (0);
2609 }
2610 
2611 int
2612 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2613 {
2614 	struct pf_kpooladdr	*pa = NULL;
2615 	struct pfi_kkif		*kif = NULL;
2616 	int error;
2617 
2618 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2619 	    pp->which != PF_RT)
2620 		return (EINVAL);
2621 
2622 	switch (pp->af) {
2623 #ifdef INET
2624 	case AF_INET:
2625 		/* FALLTHROUGH */
2626 #endif /* INET */
2627 #ifdef INET6
2628 	case AF_INET6:
2629 		/* FALLTHROUGH */
2630 #endif /* INET6 */
2631 	case AF_UNSPEC:
2632 		break;
2633 	default:
2634 		return (EAFNOSUPPORT);
2635 	}
2636 
2637 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2638 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2639 	    pp->addr.addr.type != PF_ADDR_TABLE)
2640 		return (EINVAL);
2641 
2642 	if (pp->addr.addr.p.dyn != NULL)
2643 		return (EINVAL);
2644 
2645 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2646 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2647 	if (error != 0)
2648 		goto out;
2649 	if (pa->ifname[0])
2650 		kif = pf_kkif_create(M_WAITOK);
2651 	PF_RULES_WLOCK();
2652 	if (pp->ticket != V_ticket_pabuf) {
2653 		PF_RULES_WUNLOCK();
2654 		if (pa->ifname[0])
2655 			pf_kkif_free(kif);
2656 		error = EBUSY;
2657 		goto out;
2658 	}
2659 	if (pa->ifname[0]) {
2660 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2661 		kif = NULL;
2662 		pfi_kkif_ref(pa->kif);
2663 	} else
2664 		pa->kif = NULL;
2665 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2666 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2667 		if (pa->ifname[0])
2668 			pfi_kkif_unref(pa->kif);
2669 		PF_RULES_WUNLOCK();
2670 		goto out;
2671 	}
2672 	pa->af = pp->af;
2673 	switch (pp->which) {
2674 	case PF_NAT:
2675 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2676 		break;
2677 	case PF_RDR:
2678 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2679 		break;
2680 	case PF_RT:
2681 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2682 		break;
2683 	}
2684 	PF_RULES_WUNLOCK();
2685 
2686 	return (0);
2687 
2688 out:
2689 	free(pa, M_PFRULE);
2690 	return (error);
2691 }
2692 
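/*
 * Report the number of addresses in the selected pool via pp->nr.
 */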
2693 int
2694 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2695 {
2696 	struct pf_kpool		*pool;
2697 	struct pf_kpooladdr	*pa;
2698 
2699 	PF_RULES_RLOCK_TRACKER;
2700 
2701 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2702 	    pp->which != PF_RT)
2703 		return (EINVAL);
2704 
2705 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2706 	pp->nr = 0;
2707 
2708 	PF_RULES_RLOCK();
2709 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2710 	    pp->r_num, 0, 1, 0, pp->which);
2711 	if (pool == NULL) {
2712 		PF_RULES_RUNLOCK();
2713 		return (EBUSY);
2714 	}
2715 	TAILQ_FOREACH(pa, &pool->list, entries)
2716 		pp->nr++;
2717 	PF_RULES_RUNLOCK();
2718 
2719 	return (0);
2720 }
2721 
2722 int
2723 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2724 {
2725 	struct pf_kpool		*pool;
2726 	struct pf_kpooladdr	*pa;
2727 	u_int32_t		 nr = 0;
2728 
2729 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2730 	    pp->which != PF_RT)
2731 		return (EINVAL);
2732 
2733 	PF_RULES_RLOCK_TRACKER;
2734 
2735 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2736 
2737 	PF_RULES_RLOCK();
2738 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2739 	    pp->r_num, 0, 1, 1, pp->which);
2740 	if (pool == NULL) {
2741 		PF_RULES_RUNLOCK();
2742 		return (EBUSY);
2743 	}
2744 	pa = TAILQ_FIRST(&pool->list);
2745 	while ((pa != NULL) && (nr < pp->nr)) {
2746 		pa = TAILQ_NEXT(pa, entries);
2747 		nr++;
2748 	}
2749 	if (pa == NULL) {
2750 		PF_RULES_RUNLOCK();
2751 		return (EBUSY);
2752 	}
2753 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2754 	pp->af = pa->af;
2755 	pf_addr_copyout(&pp->addr.addr);
2756 	PF_RULES_RUNLOCK();
2757 
2758 	return (0);
2759 }
2760 
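/*
 * Count the child anchors of the given ruleset path; the main ruleset
 * is special-cased because its top-level anchors live in the global
 * tree.
 */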
2761 int
2762 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2763 {
2764 	struct pf_kruleset	*ruleset;
2765 	struct pf_kanchor	*anchor;
2766 
2767 	PF_RULES_RLOCK_TRACKER;
2768 
2769 	pr->path[sizeof(pr->path) - 1] = '\0';
2770 
2771 	PF_RULES_RLOCK();
2772 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2773 		PF_RULES_RUNLOCK();
2774 		return (ENOENT);
2775 	}
2776 	pr->nr = 0;
2777 	if (ruleset == &pf_main_ruleset) {
2778 		/* XXX kludge for pf_main_ruleset */
2779 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2780 			if (anchor->parent == NULL)
2781 				pr->nr++;
2782 	} else {
2783 		RB_FOREACH(anchor, pf_kanchor_node,
2784 		    &ruleset->anchor->children)
2785 			pr->nr++;
2786 	}
2787 	PF_RULES_RUNLOCK();
2788 
2789 	return (0);
2790 }
2791 
2792 int
2793 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2794 {
2795 	struct pf_kruleset	*ruleset;
2796 	struct pf_kanchor	*anchor;
2797 	u_int32_t		 nr = 0;
2798 	int			 error = 0;
2799 
2800 	PF_RULES_RLOCK_TRACKER;
2801 
2802 	PF_RULES_RLOCK();
2803 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2804 		PF_RULES_RUNLOCK();
2805 		return (ENOENT);
2806 	}
2807 
2808 	pr->name[0] = '\0';
2809 	if (ruleset == &pf_main_ruleset) {
2810 		/* XXX kludge for pf_main_ruleset */
2811 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2812 			if (anchor->parent == NULL && nr++ == pr->nr) {
2813 				strlcpy(pr->name, anchor->name,
2814 				    sizeof(pr->name));
2815 				break;
2816 			}
2817 	} else {
2818 		RB_FOREACH(anchor, pf_kanchor_node,
2819 		    &ruleset->anchor->children)
2820 			if (nr++ == pr->nr) {
2821 				strlcpy(pr->name, anchor->name,
2822 				    sizeof(pr->name));
2823 				break;
2824 			}
2825 	}
2826 	if (!pr->name[0])
2827 		error = EBUSY;
2828 	PF_RULES_RUNLOCK();
2829 
2830 	return (error);
2831 }
2832 
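/*
 * DIOCNATLOOK: given one side of an active connection, report the
 * translated addresses and ports recorded in the matching state.
 */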
2833 int
2834 pf_ioctl_natlook(struct pfioc_natlook *pnl)
2835 {
2836 	struct pf_state_key	*sk;
2837 	struct pf_kstate	*state;
2838 	struct pf_state_key_cmp	 key;
2839 	int			 m = 0, direction = pnl->direction;
2840 	int			 sidx, didx;
2841 
2842 	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
2843 	sidx = (direction == PF_IN) ? 1 : 0;
2844 	didx = (direction == PF_IN) ? 0 : 1;
2845 
2846 	if (!pnl->proto ||
2847 	    PF_AZERO(&pnl->saddr, pnl->af) ||
2848 	    PF_AZERO(&pnl->daddr, pnl->af) ||
2849 	    ((pnl->proto == IPPROTO_TCP ||
2850 	    pnl->proto == IPPROTO_UDP) &&
2851 	    (!pnl->dport || !pnl->sport)))
2852 		return (EINVAL);
2853 
2854 	switch (pnl->direction) {
2855 	case PF_IN:
2856 	case PF_OUT:
2857 	case PF_INOUT:
2858 		break;
2859 	default:
2860 		return (EINVAL);
2861 	}
2862 
2863 	switch (pnl->af) {
2864 #ifdef INET
2865 	case AF_INET:
2866 		break;
2867 #endif /* INET */
2868 #ifdef INET6
2869 	case AF_INET6:
2870 		break;
2871 #endif /* INET6 */
2872 	default:
2873 		return (EAFNOSUPPORT);
2874 	}
2875 
2876 	bzero(&key, sizeof(key));
2877 	key.af = pnl->af;
2878 	key.proto = pnl->proto;
2879 	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
2880 	key.port[sidx] = pnl->sport;
2881 	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2882 	key.port[didx] = pnl->dport;
2883 
2884 	state = pf_find_state_all(&key, direction, &m);
2885 	if (state == NULL)
2886 		return (ENOENT);
2887 
2888 	if (m > 1) {
2889 		PF_STATE_UNLOCK(state);
2890 		return (E2BIG);	/* more than one state */
2891 	}
2892 
2893 	sk = state->key[sidx];
2894 	pf_addrcpy(&pnl->rsaddr,
2895 	    &sk->addr[sidx], sk->af);
2896 	pnl->rsport = sk->port[sidx];
2897 	pf_addrcpy(&pnl->rdaddr,
2898 	    &sk->addr[didx], sk->af);
2899 	pnl->rdport = sk->port[didx];
2900 	PF_STATE_UNLOCK(state);
2901 
2902 	return (0);
2903 }
2904 
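/*
 * Character-device ioctl entry point: filter commands against the
 * securelevel and FWRITE restrictions below, then dispatch on cmd.
 */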
2905 static int
2906 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2907 {
2908 	int			 error = 0;
2909 	PF_RULES_RLOCK_TRACKER;
2910 
2911 #define	ERROUT_IOCTL(target, x)					\
2912     do {								\
2913 	    error = (x);						\
2914 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2915 	    goto target;						\
2916     } while (0)
2917 
2919 	/* XXX keep in sync with switch() below */
2920 	if (securelevel_gt(td->td_ucred, 2))
2921 		switch (cmd) {
2922 		case DIOCGETRULES:
2923 		case DIOCGETRULENV:
2924 		case DIOCGETADDRS:
2925 		case DIOCGETADDR:
2926 		case DIOCGETSTATE:
2927 		case DIOCGETSTATENV:
2928 		case DIOCSETSTATUSIF:
2929 		case DIOCGETSTATUSNV:
2930 		case DIOCCLRSTATUS:
2931 		case DIOCNATLOOK:
2932 		case DIOCSETDEBUG:
2933 #ifdef COMPAT_FREEBSD14
2934 		case DIOCGETSTATES:
2935 		case DIOCGETSTATESV2:
2936 #endif
2937 		case DIOCGETTIMEOUT:
2938 		case DIOCCLRRULECTRS:
2939 		case DIOCGETLIMIT:
2940 		case DIOCGETALTQSV0:
2941 		case DIOCGETALTQSV1:
2942 		case DIOCGETALTQV0:
2943 		case DIOCGETALTQV1:
2944 		case DIOCGETQSTATSV0:
2945 		case DIOCGETQSTATSV1:
2946 		case DIOCGETRULESETS:
2947 		case DIOCGETRULESET:
2948 		case DIOCRGETTABLES:
2949 		case DIOCRGETTSTATS:
2950 		case DIOCRCLRTSTATS:
2951 		case DIOCRCLRADDRS:
2952 		case DIOCRADDADDRS:
2953 		case DIOCRDELADDRS:
2954 		case DIOCRSETADDRS:
2955 		case DIOCRGETADDRS:
2956 		case DIOCRGETASTATS:
2957 		case DIOCRCLRASTATS:
2958 		case DIOCRTSTADDRS:
2959 		case DIOCOSFPGET:
2960 		case DIOCGETSRCNODES:
2961 		case DIOCCLRSRCNODES:
2962 		case DIOCGETSYNCOOKIES:
2963 		case DIOCIGETIFACES:
2964 		case DIOCGIFSPEEDV0:
2965 		case DIOCGIFSPEEDV1:
2966 		case DIOCSETIFFLAG:
2967 		case DIOCCLRIFFLAG:
2968 		case DIOCGETETHRULES:
2969 		case DIOCGETETHRULE:
2970 		case DIOCGETETHRULESETS:
2971 		case DIOCGETETHRULESET:
2972 			break;
2973 		case DIOCRCLRTABLES:
2974 		case DIOCRADDTABLES:
2975 		case DIOCRDELTABLES:
2976 		case DIOCRSETTFLAGS:
2977 			if (((struct pfioc_table *)addr)->pfrio_flags &
2978 			    PFR_FLAG_DUMMY)
2979 				break; /* dummy operation ok */
2980 			return (EPERM);
2981 		default:
2982 			return (EPERM);
2983 		}
2984 
2985 	if (!(flags & FWRITE))
2986 		switch (cmd) {
2987 		case DIOCGETRULES:
2988 		case DIOCGETADDRS:
2989 		case DIOCGETADDR:
2990 		case DIOCGETSTATE:
2991 		case DIOCGETSTATENV:
2992 		case DIOCGETSTATUSNV:
2993 #ifdef COMPAT_FREEBSD14
2994 		case DIOCGETSTATES:
2995 		case DIOCGETSTATESV2:
2996 #endif
2997 		case DIOCGETTIMEOUT:
2998 		case DIOCGETLIMIT:
2999 		case DIOCGETALTQSV0:
3000 		case DIOCGETALTQSV1:
3001 		case DIOCGETALTQV0:
3002 		case DIOCGETALTQV1:
3003 		case DIOCGETQSTATSV0:
3004 		case DIOCGETQSTATSV1:
3005 		case DIOCGETRULESETS:
3006 		case DIOCGETRULESET:
3007 		case DIOCNATLOOK:
3008 		case DIOCRGETTABLES:
3009 		case DIOCRGETTSTATS:
3010 		case DIOCRGETADDRS:
3011 		case DIOCRGETASTATS:
3012 		case DIOCRTSTADDRS:
3013 		case DIOCOSFPGET:
3014 		case DIOCGETSRCNODES:
3015 		case DIOCGETSYNCOOKIES:
3016 		case DIOCIGETIFACES:
3017 		case DIOCGIFSPEEDV1:
3018 		case DIOCGIFSPEEDV0:
3019 		case DIOCGETRULENV:
3020 		case DIOCGETETHRULES:
3021 		case DIOCGETETHRULE:
3022 		case DIOCGETETHRULESETS:
3023 		case DIOCGETETHRULESET:
3024 			break;
3025 		case DIOCRCLRTABLES:
3026 		case DIOCRADDTABLES:
3027 		case DIOCRDELTABLES:
3028 		case DIOCRCLRTSTATS:
3029 		case DIOCRCLRADDRS:
3030 		case DIOCRADDADDRS:
3031 		case DIOCRDELADDRS:
3032 		case DIOCRSETADDRS:
3033 		case DIOCRSETTFLAGS:
3034 			if (((struct pfioc_table *)addr)->pfrio_flags &
3035 			    PFR_FLAG_DUMMY) {
3036 				flags |= FWRITE; /* need write lock for dummy */
3037 				break; /* dummy operation ok */
3038 			}
3039 			return (EACCES);
3040 		default:
3041 			return (EACCES);
3042 		}
3043 
3044 	CURVNET_SET(TD_TO_VNET(td));
3045 
3046 	switch (cmd) {
3047 #ifdef COMPAT_FREEBSD14
3048 	case DIOCSTART:
3049 		error = pf_start();
3050 		break;
3051 
3052 	case DIOCSTOP:
3053 		error = pf_stop();
3054 		break;
3055 #endif
3056 
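	/*
	 * The Ethernet-rule ioctls below exchange packed nvlists through
	 * struct pfioc_nv: nv.len is the packed request length and
	 * nv.size the buffer capacity for the reply; nv.size == 0 turns
	 * the call into a size probe that only reports the reply length.
	 * A hypothetical userspace sketch (error handling elided; on
	 * ENOSPC the buffer would be grown and the call retried):
	 *
	 *	nvlist_t *req = nvlist_create(0);
	 *	nvlist_add_string(req, "anchor", "");
	 *	nv.data = nvlist_pack(req, &nv.len);
	 *	nv.size = nv.len;
	 *	if (ioctl(dev, DIOCGETETHRULES, &nv) == 0)
	 *		reply = nvlist_unpack(nv.data, nv.len, 0);
	 */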
3057 	case DIOCGETETHRULES: {
3058 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3059 		nvlist_t		*nvl;
3060 		void			*packed;
3061 		struct pf_keth_rule	*tail;
3062 		struct pf_keth_ruleset	*rs;
3063 		u_int32_t		 ticket, nr;
3064 		const char		*anchor = "";
3065 
3066 		nvl = NULL;
3067 		packed = NULL;
3068 
3069 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
3070 
3071 		if (nv->len > pf_ioctl_maxcount)
3072 			ERROUT(ENOMEM);
3073 
3074 		/* Copy the request in */
3075 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
3076 		error = copyin(nv->data, packed, nv->len);
3077 		if (error)
3078 			ERROUT(error);
3079 
3080 		nvl = nvlist_unpack(packed, nv->len, 0);
3081 		if (nvl == NULL)
3082 			ERROUT(EBADMSG);
3083 
3084 		if (! nvlist_exists_string(nvl, "anchor"))
3085 			ERROUT(EBADMSG);
3086 
3087 		anchor = nvlist_get_string(nvl, "anchor");
3088 
3089 		rs = pf_find_keth_ruleset(anchor);
3090 
3091 		nvlist_destroy(nvl);
3092 		nvl = NULL;
3093 		free(packed, M_NVLIST);
3094 		packed = NULL;
3095 
3096 		if (rs == NULL)
3097 			ERROUT(ENOENT);
3098 
3099 		/* Reply */
3100 		nvl = nvlist_create(0);
3101 		if (nvl == NULL)
3102 			ERROUT(ENOMEM);
3103 
3104 		PF_RULES_RLOCK();
3105 
3106 		ticket = rs->active.ticket;
3107 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
3108 		if (tail)
3109 			nr = tail->nr + 1;
3110 		else
3111 			nr = 0;
3112 
3113 		PF_RULES_RUNLOCK();
3114 
3115 		nvlist_add_number(nvl, "ticket", ticket);
3116 		nvlist_add_number(nvl, "nr", nr);
3117 
3118 		packed = nvlist_pack(nvl, &nv->len);
3119 		if (packed == NULL)
3120 			ERROUT(ENOMEM);
3121 
3122 		if (nv->size == 0)
3123 			ERROUT(0);
3124 		else if (nv->size < nv->len)
3125 			ERROUT(ENOSPC);
3126 
3127 		error = copyout(packed, nv->data, nv->len);
3128 
3129 #undef ERROUT
3130 DIOCGETETHRULES_error:
3131 		free(packed, M_NVLIST);
3132 		nvlist_destroy(nvl);
3133 		break;
3134 	}
3135 
3136 	case DIOCGETETHRULE: {
3137 		struct epoch_tracker	 et;
3138 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3139 		nvlist_t		*nvl = NULL;
3140 		void			*nvlpacked = NULL;
3141 		struct pf_keth_rule	*rule = NULL;
3142 		struct pf_keth_ruleset	*rs;
3143 		u_int32_t		 ticket, nr;
3144 		bool			 clear = false;
3145 		const char		*anchor;
3146 
3147 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3148 
3149 		if (nv->len > pf_ioctl_maxcount)
3150 			ERROUT(ENOMEM);
3151 
3152 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3153 		error = copyin(nv->data, nvlpacked, nv->len);
3154 		if (error)
3155 			ERROUT(error);
3156 
3157 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3158 		if (nvl == NULL)
3159 			ERROUT(EBADMSG);
3160 		if (! nvlist_exists_number(nvl, "ticket"))
3161 			ERROUT(EBADMSG);
3162 		ticket = nvlist_get_number(nvl, "ticket");
3163 		if (! nvlist_exists_string(nvl, "anchor"))
3164 			ERROUT(EBADMSG);
3165 		anchor = nvlist_get_string(nvl, "anchor");
3166 
3167 		if (nvlist_exists_bool(nvl, "clear"))
3168 			clear = nvlist_get_bool(nvl, "clear");
3169 
3170 		if (clear && !(flags & FWRITE))
3171 			ERROUT(EACCES);
3172 
3173 		if (! nvlist_exists_number(nvl, "nr"))
3174 			ERROUT(EBADMSG);
3175 		nr = nvlist_get_number(nvl, "nr");
3176 
3177 		PF_RULES_RLOCK();
3178 		rs = pf_find_keth_ruleset(anchor);
3179 		if (rs == NULL) {
3180 			PF_RULES_RUNLOCK();
3181 			ERROUT(ENOENT);
3182 		}
3183 		if (ticket != rs->active.ticket) {
3184 			PF_RULES_RUNLOCK();
3185 			ERROUT(EBUSY);
3186 		}
3187 
3188 		nvlist_destroy(nvl);
3189 		nvl = NULL;
3190 		free(nvlpacked, M_NVLIST);
3191 		nvlpacked = NULL;
3192 
3193 		rule = TAILQ_FIRST(rs->active.rules);
3194 		while ((rule != NULL) && (rule->nr != nr))
3195 			rule = TAILQ_NEXT(rule, entries);
3196 		if (rule == NULL) {
3197 			PF_RULES_RUNLOCK();
3198 			ERROUT(ENOENT);
3199 		}
3200 		/* Make sure rule can't go away. */
3201 		NET_EPOCH_ENTER(et);
3202 		PF_RULES_RUNLOCK();
3203 		nvl = pf_keth_rule_to_nveth_rule(rule);
3204 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3205 			NET_EPOCH_EXIT(et);
3206 			ERROUT(EBUSY);
3207 		}
3208 		NET_EPOCH_EXIT(et);
3209 		if (nvl == NULL)
3210 			ERROUT(ENOMEM);
3211 
3212 		nvlpacked = nvlist_pack(nvl, &nv->len);
3213 		if (nvlpacked == NULL)
3214 			ERROUT(ENOMEM);
3215 
3216 		if (nv->size == 0)
3217 			ERROUT(0);
3218 		else if (nv->size < nv->len)
3219 			ERROUT(ENOSPC);
3220 
3221 		error = copyout(nvlpacked, nv->data, nv->len);
3222 		if (error == 0 && clear) {
3223 			counter_u64_zero(rule->evaluations);
3224 			for (int i = 0; i < 2; i++) {
3225 				counter_u64_zero(rule->packets[i]);
3226 				counter_u64_zero(rule->bytes[i]);
3227 			}
3228 		}
3229 
3230 #undef ERROUT
3231 DIOCGETETHRULE_error:
3232 		free(nvlpacked, M_NVLIST);
3233 		nvlist_destroy(nvl);
3234 		break;
3235 	}
3236 
3237 	case DIOCADDETHRULE: {
3238 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3239 		nvlist_t		*nvl = NULL;
3240 		void			*nvlpacked = NULL;
3241 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3242 		struct pf_keth_ruleset	*ruleset = NULL;
3243 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3244 		const char		*anchor = "", *anchor_call = "";
3245 
3246 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3247 
3248 		if (nv->len > pf_ioctl_maxcount)
3249 			ERROUT(ENOMEM);
3250 
3251 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3252 		error = copyin(nv->data, nvlpacked, nv->len);
3253 		if (error)
3254 			ERROUT(error);
3255 
3256 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3257 		if (nvl == NULL)
3258 			ERROUT(EBADMSG);
3259 
3260 		if (! nvlist_exists_number(nvl, "ticket"))
3261 			ERROUT(EBADMSG);
3262 
3263 		if (nvlist_exists_string(nvl, "anchor"))
3264 			anchor = nvlist_get_string(nvl, "anchor");
3265 		if (nvlist_exists_string(nvl, "anchor_call"))
3266 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3267 
3268 		ruleset = pf_find_keth_ruleset(anchor);
3269 		if (ruleset == NULL)
3270 			ERROUT(EINVAL);
3271 
3272 		if (nvlist_get_number(nvl, "ticket") !=
3273 		    ruleset->inactive.ticket) {
3274 			DPFPRINTF(PF_DEBUG_MISC,
3275 			    "ticket: %d != %d",
3276 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3277 			    ruleset->inactive.ticket);
3278 			ERROUT(EBUSY);
3279 		}
3280 
3281 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3282 		rule->timestamp = NULL;
3283 
3284 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3285 		if (error != 0)
3286 			ERROUT(error);
3287 
3288 		if (rule->ifname[0])
3289 			kif = pf_kkif_create(M_WAITOK);
3290 		if (rule->bridge_to_name[0])
3291 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3292 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3293 		for (int i = 0; i < 2; i++) {
3294 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3295 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3296 		}
3297 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3298 		    M_WAITOK | M_ZERO);
3299 
3300 		PF_RULES_WLOCK();
3301 
3302 		if (rule->ifname[0]) {
3303 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3304 			pfi_kkif_ref(rule->kif);
3305 		} else
3306 			rule->kif = NULL;
3307 		if (rule->bridge_to_name[0]) {
3308 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3309 			    rule->bridge_to_name);
3310 			pfi_kkif_ref(rule->bridge_to);
3311 		} else
3312 			rule->bridge_to = NULL;
3313 
3314 #ifdef ALTQ
3315 		/* set queue IDs */
3316 		if (rule->qname[0] != 0) {
3317 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3318 				error = EBUSY;
3321 		}
3322 #endif
3323 		if (rule->tagname[0])
3324 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3325 				error = EBUSY;
3326 		if (rule->match_tagname[0])
3327 			if ((rule->match_tag = pf_tagname2tag(
3328 			    rule->match_tagname)) == 0)
3329 				error = EBUSY;
3330 
3331 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3332 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3333 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3334 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3335 
3336 		if (error) {
3337 			pf_free_eth_rule(rule);
3338 			PF_RULES_WUNLOCK();
3339 			ERROUT(error);
3340 		}
3341 
3342 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3343 			pf_free_eth_rule(rule);
3344 			PF_RULES_WUNLOCK();
3345 			ERROUT(EINVAL);
3346 		}
3347 
3348 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3349 		if (tail)
3350 			rule->nr = tail->nr + 1;
3351 		else
3352 			rule->nr = 0;
3353 
3354 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3355 
3356 		PF_RULES_WUNLOCK();
3357 
3358 #undef ERROUT
3359 DIOCADDETHRULE_error:
3360 		nvlist_destroy(nvl);
3361 		free(nvlpacked, M_NVLIST);
3362 		break;
3363 	}
3364 
3365 	case DIOCGETETHRULESETS: {
3366 		struct epoch_tracker	 et;
3367 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3368 		nvlist_t		*nvl = NULL;
3369 		void			*nvlpacked = NULL;
3370 		struct pf_keth_ruleset	*ruleset;
3371 		struct pf_keth_anchor	*anchor;
3372 		int			 nr = 0;
3373 
3374 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3375 
3376 		if (nv->len > pf_ioctl_maxcount)
3377 			ERROUT(ENOMEM);
3378 
3379 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3380 		error = copyin(nv->data, nvlpacked, nv->len);
3381 		if (error)
3382 			ERROUT(error);
3383 
3384 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3385 		if (nvl == NULL)
3386 			ERROUT(EBADMSG);
3387 		if (! nvlist_exists_string(nvl, "path"))
3388 			ERROUT(EBADMSG);
3389 
3390 		NET_EPOCH_ENTER(et);
3391 
3392 		if ((ruleset = pf_find_keth_ruleset(
3393 		    nvlist_get_string(nvl, "path"))) == NULL) {
3394 			NET_EPOCH_EXIT(et);
3395 			ERROUT(ENOENT);
3396 		}
3397 
3398 		if (ruleset->anchor == NULL) {
3399 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3400 				if (anchor->parent == NULL)
3401 					nr++;
3402 		} else {
3403 			RB_FOREACH(anchor, pf_keth_anchor_node,
3404 			    &ruleset->anchor->children)
3405 				nr++;
3406 		}
3407 
3408 		NET_EPOCH_EXIT(et);
3409 
3410 		nvlist_destroy(nvl);
3411 		nvl = NULL;
3412 		free(nvlpacked, M_NVLIST);
3413 		nvlpacked = NULL;
3414 
3415 		nvl = nvlist_create(0);
3416 		if (nvl == NULL)
3417 			ERROUT(ENOMEM);
3418 
3419 		nvlist_add_number(nvl, "nr", nr);
3420 
3421 		nvlpacked = nvlist_pack(nvl, &nv->len);
3422 		if (nvlpacked == NULL)
3423 			ERROUT(ENOMEM);
3424 
3425 		if (nv->size == 0)
3426 			ERROUT(0);
3427 		else if (nv->size < nv->len)
3428 			ERROUT(ENOSPC);
3429 
3430 		error = copyout(nvlpacked, nv->data, nv->len);
3431 
3432 #undef ERROUT
3433 DIOCGETETHRULESETS_error:
3434 		free(nvlpacked, M_NVLIST);
3435 		nvlist_destroy(nvl);
3436 		break;
3437 	}
3438 
3439 	case DIOCGETETHRULESET: {
3440 		struct epoch_tracker	 et;
3441 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3442 		nvlist_t		*nvl = NULL;
3443 		void			*nvlpacked = NULL;
3444 		struct pf_keth_ruleset	*ruleset;
3445 		struct pf_keth_anchor	*anchor;
3446 		int			 nr = 0, req_nr = 0;
3447 		bool			 found = false;
3448 
3449 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3450 
3451 		if (nv->len > pf_ioctl_maxcount)
3452 			ERROUT(ENOMEM);
3453 
3454 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3455 		error = copyin(nv->data, nvlpacked, nv->len);
3456 		if (error)
3457 			ERROUT(error);
3458 
3459 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3460 		if (nvl == NULL)
3461 			ERROUT(EBADMSG);
3462 		if (! nvlist_exists_string(nvl, "path"))
3463 			ERROUT(EBADMSG);
3464 		if (! nvlist_exists_number(nvl, "nr"))
3465 			ERROUT(EBADMSG);
3466 
3467 		req_nr = nvlist_get_number(nvl, "nr");
3468 
3469 		NET_EPOCH_ENTER(et);
3470 
3471 		if ((ruleset = pf_find_keth_ruleset(
3472 		    nvlist_get_string(nvl, "path"))) == NULL) {
3473 			NET_EPOCH_EXIT(et);
3474 			ERROUT(ENOENT);
3475 		}
3476 
3477 		nvlist_destroy(nvl);
3478 		nvl = NULL;
3479 		free(nvlpacked, M_NVLIST);
3480 		nvlpacked = NULL;
3481 
3482 		nvl = nvlist_create(0);
3483 		if (nvl == NULL) {
3484 			NET_EPOCH_EXIT(et);
3485 			ERROUT(ENOMEM);
3486 		}
3487 
3488 		if (ruleset->anchor == NULL) {
3489 			RB_FOREACH(anchor, pf_keth_anchor_global,
3490 			    &V_pf_keth_anchors) {
3491 				if (anchor->parent == NULL && nr++ == req_nr) {
3492 					found = true;
3493 					break;
3494 				}
3495 			}
3496 		} else {
3497 			RB_FOREACH(anchor, pf_keth_anchor_node,
3498 			     &ruleset->anchor->children) {
3499 				if (nr++ == req_nr) {
3500 					found = true;
3501 					break;
3502 				}
3503 			}
3504 		}
3505 
3506 		NET_EPOCH_EXIT(et);
3507 		if (found) {
3508 			nvlist_add_number(nvl, "nr", nr);
3509 			nvlist_add_string(nvl, "name", anchor->name);
3510 			if (ruleset->anchor)
3511 				nvlist_add_string(nvl, "path",
3512 				    ruleset->anchor->path);
3513 			else
3514 				nvlist_add_string(nvl, "path", "");
3515 		} else {
3516 			ERROUT(EBUSY);
3517 		}
3518 
3519 		nvlpacked = nvlist_pack(nvl, &nv->len);
3520 		if (nvlpacked == NULL)
3521 			ERROUT(ENOMEM);
3522 
3523 		if (nv->size == 0)
3524 			ERROUT(0);
3525 		else if (nv->size < nv->len)
3526 			ERROUT(ENOSPC);
3527 
3528 		error = copyout(nvlpacked, nv->data, nv->len);
3529 
3530 #undef ERROUT
3531 DIOCGETETHRULESET_error:
3532 		free(nvlpacked, M_NVLIST);
3533 		nvlist_destroy(nvl);
3534 		break;
3535 	}
3536 
3537 	case DIOCADDRULENV: {
3538 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3539 		nvlist_t	*nvl = NULL;
3540 		void		*nvlpacked = NULL;
3541 		struct pf_krule	*rule = NULL;
3542 		const char	*anchor = "", *anchor_call = "";
3543 		uint32_t	 ticket = 0, pool_ticket = 0;
3544 
3545 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3546 
3547 		if (nv->len > pf_ioctl_maxcount)
3548 			ERROUT(ENOMEM);
3549 
3550 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3551 		error = copyin(nv->data, nvlpacked, nv->len);
3552 		if (error)
3553 			ERROUT(error);
3554 
3555 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3556 		if (nvl == NULL)
3557 			ERROUT(EBADMSG);
3558 
3559 		if (! nvlist_exists_number(nvl, "ticket"))
3560 			ERROUT(EINVAL);
3561 		ticket = nvlist_get_number(nvl, "ticket");
3562 
3563 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3564 			ERROUT(EINVAL);
3565 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3566 
3567 		if (! nvlist_exists_nvlist(nvl, "rule"))
3568 			ERROUT(EINVAL);
3569 
3570 		rule = pf_krule_alloc();
3571 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3572 		    rule);
3573 		if (error)
3574 			ERROUT(error);
3575 
3576 		if (nvlist_exists_string(nvl, "anchor"))
3577 			anchor = nvlist_get_string(nvl, "anchor");
3578 		if (nvlist_exists_string(nvl, "anchor_call"))
3579 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3580 
3581 		if ((error = nvlist_error(nvl)))
3582 			ERROUT(error);
3583 
3584 		/* Frees rule on error */
3585 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3586 		    anchor_call, td->td_ucred->cr_ruid,
3587 		    td->td_proc ? td->td_proc->p_pid : 0);
3588 
3589 		nvlist_destroy(nvl);
3590 		free(nvlpacked, M_NVLIST);
3591 		break;
3592 #undef ERROUT
3593 DIOCADDRULENV_error:
3594 		pf_krule_free(rule);
3595 		nvlist_destroy(nvl);
3596 		free(nvlpacked, M_NVLIST);
3597 
3598 		break;
3599 	}
3600 	case DIOCADDRULE: {
3601 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3602 		struct pf_krule		*rule;
3603 
3604 		rule = pf_krule_alloc();
3605 		error = pf_rule_to_krule(&pr->rule, rule);
3606 		if (error != 0) {
3607 			pf_krule_free(rule);
3608 			goto fail;
3609 		}
3610 
3611 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3612 
3613 		/* Frees rule on error */
3614 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3615 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3616 		    td->td_proc ? td->td_proc->p_pid : 0);
3617 		break;
3618 	}
3619 
3620 	case DIOCGETRULES: {
3621 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3622 
3623 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3624 
3625 		error = pf_ioctl_getrules(pr);
3626 
3627 		break;
3628 	}
3629 
3630 	case DIOCGETRULENV: {
3631 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3632 		nvlist_t		*nvrule = NULL;
3633 		nvlist_t		*nvl = NULL;
3634 		struct pf_kruleset	*ruleset;
3635 		struct pf_krule		*rule;
3636 		void			*nvlpacked = NULL;
3637 		int			 rs_num, nr;
3638 		bool			 clear_counter = false;
3639 
3640 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3641 
3642 		if (nv->len > pf_ioctl_maxcount)
3643 			ERROUT(ENOMEM);
3644 
3645 		/* Copy the request in */
3646 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3647 		error = copyin(nv->data, nvlpacked, nv->len);
3648 		if (error)
3649 			ERROUT(error);
3650 
3651 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3652 		if (nvl == NULL)
3653 			ERROUT(EBADMSG);
3654 
3655 		if (! nvlist_exists_string(nvl, "anchor"))
3656 			ERROUT(EBADMSG);
3657 		if (! nvlist_exists_number(nvl, "ruleset"))
3658 			ERROUT(EBADMSG);
3659 		if (! nvlist_exists_number(nvl, "ticket"))
3660 			ERROUT(EBADMSG);
3661 		if (! nvlist_exists_number(nvl, "nr"))
3662 			ERROUT(EBADMSG);
3663 
3664 		if (nvlist_exists_bool(nvl, "clear_counter"))
3665 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3666 
3667 		if (clear_counter && !(flags & FWRITE))
3668 			ERROUT(EACCES);
3669 
3670 		nr = nvlist_get_number(nvl, "nr");
3671 
3672 		PF_RULES_WLOCK();
3673 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3674 		if (ruleset == NULL) {
3675 			PF_RULES_WUNLOCK();
3676 			ERROUT(ENOENT);
3677 		}
3678 
3679 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3680 		if (rs_num >= PF_RULESET_MAX) {
3681 			PF_RULES_WUNLOCK();
3682 			ERROUT(EINVAL);
3683 		}
3684 
3685 		if (nvlist_get_number(nvl, "ticket") !=
3686 		    ruleset->rules[rs_num].active.ticket) {
3687 			PF_RULES_WUNLOCK();
3688 			ERROUT(EBUSY);
3689 		}
3690 
3691 		if ((error = nvlist_error(nvl))) {
3692 			PF_RULES_WUNLOCK();
3693 			ERROUT(error);
3694 		}
3695 
3696 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3697 		while ((rule != NULL) && (rule->nr != nr))
3698 			rule = TAILQ_NEXT(rule, entries);
3699 		if (rule == NULL) {
3700 			PF_RULES_WUNLOCK();
3701 			ERROUT(EBUSY);
3702 		}
3703 
3704 		nvrule = pf_krule_to_nvrule(rule);
3705 
3706 		nvlist_destroy(nvl);
3707 		nvl = nvlist_create(0);
3708 		if (nvl == NULL) {
3709 			PF_RULES_WUNLOCK();
3710 			ERROUT(ENOMEM);
3711 		}
3712 		nvlist_add_number(nvl, "nr", nr);
3713 		nvlist_add_nvlist(nvl, "rule", nvrule);
3714 		nvlist_destroy(nvrule);
3715 		nvrule = NULL;
3716 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3717 			PF_RULES_WUNLOCK();
3718 			ERROUT(EBUSY);
3719 		}
3720 
3721 		free(nvlpacked, M_NVLIST);
3722 		nvlpacked = nvlist_pack(nvl, &nv->len);
3723 		if (nvlpacked == NULL) {
3724 			PF_RULES_WUNLOCK();
3725 			ERROUT(ENOMEM);
3726 		}
3727 
3728 		if (nv->size == 0) {
3729 			PF_RULES_WUNLOCK();
3730 			ERROUT(0);
3731 		} else if (nv->size < nv->len) {
3733 			PF_RULES_WUNLOCK();
3734 			ERROUT(ENOSPC);
3735 		}
3736 
3737 		if (clear_counter)
3738 			pf_krule_clear_counters(rule);
3739 
3740 		PF_RULES_WUNLOCK();
3741 
3742 		error = copyout(nvlpacked, nv->data, nv->len);
3743 
3744 #undef ERROUT
3745 DIOCGETRULENV_error:
3746 		free(nvlpacked, M_NVLIST);
3747 		nvlist_destroy(nvrule);
3748 		nvlist_destroy(nvl);
3749 
3750 		break;
3751 	}
3752 
3753 	case DIOCCHANGERULE: {
3754 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3755 		struct pf_kruleset	*ruleset;
3756 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3757 		struct pfi_kkif		*kif = NULL;
3758 		struct pf_kpooladdr	*pa;
3759 		u_int32_t		 nr = 0;
3760 		int			 rs_num;
3761 
3762 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3763 
3764 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3765 		    pcr->action > PF_CHANGE_GET_TICKET) {
3766 			error = EINVAL;
3767 			goto fail;
3768 		}
3769 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3770 			error = EINVAL;
3771 			goto fail;
3772 		}
3773 
3774 		if (pcr->action != PF_CHANGE_REMOVE) {
3775 			newrule = pf_krule_alloc();
3776 			error = pf_rule_to_krule(&pcr->rule, newrule);
3777 			if (error != 0) {
3778 				pf_krule_free(newrule);
3779 				goto fail;
3780 			}
3781 
3782 			if ((error = pf_rule_checkaf(newrule))) {
3783 				pf_krule_free(newrule);
3784 				goto fail;
3785 			}
3786 			if (newrule->ifname[0])
3787 				kif = pf_kkif_create(M_WAITOK);
3788 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3789 			for (int i = 0; i < 2; i++) {
3790 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3791 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3792 			}
3793 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3794 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3795 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
3796 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3797 			newrule->cuid = td->td_ucred->cr_ruid;
3798 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3799 			TAILQ_INIT(&newrule->nat.list);
3800 			TAILQ_INIT(&newrule->rdr.list);
3801 			TAILQ_INIT(&newrule->route.list);
3802 		}
3803 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3804 
3805 		PF_CONFIG_LOCK();
3806 		PF_RULES_WLOCK();
3807 #ifdef PF_WANT_32_TO_64_COUNTER
3808 		if (newrule != NULL) {
3809 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3810 			newrule->allrulelinked = true;
3811 			V_pf_allrulecount++;
3812 		}
3813 #endif
3814 
3815 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3816 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3817 		    pcr->pool_ticket != V_ticket_pabuf)
3818 			ERROUT(EBUSY);
3819 
3820 		ruleset = pf_find_kruleset(pcr->anchor);
3821 		if (ruleset == NULL)
3822 			ERROUT(EINVAL);
3823 
3824 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3825 		if (rs_num >= PF_RULESET_MAX)
3826 			ERROUT(EINVAL);
3827 
3828 		/*
3829 		 * XXXMJG: there is no guarantee that the ruleset was
3830 		 * created by the usual route of calling DIOCXBEGIN.
3831 		 * As a result it is possible the rule tree will not
3832 		 * be allocated yet. Hack around it by doing it here.
3833 		 * Note it is fine to let the tree persist in case of
3834 		 * error as it will be freed down the road on future
3835 		 * updates (if need be).
3836 		 */
3837 		if (ruleset->rules[rs_num].active.tree == NULL) {
3838 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3839 			if (ruleset->rules[rs_num].active.tree == NULL) {
3840 				ERROUT(ENOMEM);
3841 			}
3842 		}
3843 
3844 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3845 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3846 			ERROUT(0);
3847 		} else if (pcr->ticket !=
3848 			    ruleset->rules[rs_num].active.ticket)
3849 				ERROUT(EINVAL);
3850 
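		/*
		 * Ticket handshake: PF_CHANGE_GET_TICKET bumps and returns the
		 * active ticket; the follow-up add/remove must present that
		 * ticket, which serializes concurrent rule changers.
		 */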
3851 		if (pcr->action != PF_CHANGE_REMOVE) {
3852 			if (newrule->ifname[0]) {
3853 				newrule->kif = pfi_kkif_attach(kif,
3854 				    newrule->ifname);
3855 				kif = NULL;
3856 				pfi_kkif_ref(newrule->kif);
3857 			} else
3858 				newrule->kif = NULL;
3859 
3860 			if (newrule->rtableid > 0 &&
3861 			    newrule->rtableid >= rt_numfibs)
3862 				error = EBUSY;
3863 
3864 #ifdef ALTQ
3865 			/* set queue IDs */
3866 			if (newrule->qname[0] != 0) {
3867 				if ((newrule->qid =
3868 				    pf_qname2qid(newrule->qname)) == 0)
3869 					error = EBUSY;
3870 				else if (newrule->pqname[0] != 0) {
3871 					if ((newrule->pqid =
3872 					    pf_qname2qid(newrule->pqname)) == 0)
3873 						error = EBUSY;
3874 				} else
3875 					newrule->pqid = newrule->qid;
3876 			}
3877 #endif /* ALTQ */
3878 			if (newrule->tagname[0])
3879 				if ((newrule->tag =
3880 				    pf_tagname2tag(newrule->tagname)) == 0)
3881 					error = EBUSY;
3882 			if (newrule->match_tagname[0])
3883 				if ((newrule->match_tag = pf_tagname2tag(
3884 				    newrule->match_tagname)) == 0)
3885 					error = EBUSY;
3886 			if (newrule->rt && !newrule->direction)
3887 				error = EINVAL;
3888 			if (!newrule->log)
3889 				newrule->logif = 0;
3890 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3891 				error = ENOMEM;
3892 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3893 				error = ENOMEM;
3894 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3895 				error = EINVAL;
3896 			for (int i = 0; i < 3; i++) {
3897 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3898 					if (pa->addr.type == PF_ADDR_TABLE) {
3899 						pa->addr.p.tbl =
3900 						    pfr_attach_table(ruleset,
3901 						    pa->addr.v.tblname);
3902 						if (pa->addr.p.tbl == NULL)
3903 							error = ENOMEM;
3904 					}
3905 			}
3906 
3907 			newrule->overload_tbl = NULL;
3908 			if (newrule->overload_tblname[0]) {
3909 				if ((newrule->overload_tbl = pfr_attach_table(
3910 				    ruleset, newrule->overload_tblname)) ==
3911 				    NULL)
3912 					error = EINVAL;
3913 				else
3914 					newrule->overload_tbl->pfrkt_flags |=
3915 					    PFR_TFLAG_ACTIVE;
3916 			}
3917 
3918 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3919 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3920 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
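			/*
			 * Translation (nat/rdr/binat) and route-to rules need
			 * at least one rdr pool address unless the work is
			 * delegated to an anchor.
			 */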
3921 			if (((((newrule->action == PF_NAT) ||
3922 			    (newrule->action == PF_RDR) ||
3923 			    (newrule->action == PF_BINAT) ||
3924 			    (newrule->rt > PF_NOPFROUTE)) &&
3925 			    !newrule->anchor)) &&
3926 			    (TAILQ_FIRST(&newrule->rdr.list) == NULL))
3927 				error = EINVAL;
3928 
3929 			if (error) {
3930 				pf_free_rule(newrule);
3931 				PF_RULES_WUNLOCK();
3932 				PF_CONFIG_UNLOCK();
3933 				goto fail;
3934 			}
3935 
3936 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3937 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3938 		}
3939 		pf_empty_kpool(&V_pf_pabuf[0]);
3940 		pf_empty_kpool(&V_pf_pabuf[1]);
3941 		pf_empty_kpool(&V_pf_pabuf[2]);
3942 
3943 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3944 			oldrule = TAILQ_FIRST(
3945 			    ruleset->rules[rs_num].active.ptr);
3946 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3947 			oldrule = TAILQ_LAST(
3948 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3949 		else {
3950 			oldrule = TAILQ_FIRST(
3951 			    ruleset->rules[rs_num].active.ptr);
3952 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3953 				oldrule = TAILQ_NEXT(oldrule, entries);
3954 			if (oldrule == NULL) {
3955 				if (newrule != NULL)
3956 					pf_free_rule(newrule);
3957 				PF_RULES_WUNLOCK();
3958 				PF_CONFIG_UNLOCK();
3959 				error = EINVAL;
3960 				goto fail;
3961 			}
3962 		}
3963 
3964 		if (pcr->action == PF_CHANGE_REMOVE) {
3965 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3966 			    oldrule);
3967 			RB_REMOVE(pf_krule_global,
3968 			    ruleset->rules[rs_num].active.tree, oldrule);
3969 			ruleset->rules[rs_num].active.rcount--;
3970 		} else {
3971 			pf_hash_rule(newrule);
3972 			if (RB_INSERT(pf_krule_global,
3973 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3974 				pf_free_rule(newrule);
3975 				PF_RULES_WUNLOCK();
3976 				PF_CONFIG_UNLOCK();
3977 				error = EEXIST;
3978 				goto fail;
3979 			}
3980 
3981 			if (oldrule == NULL)
3982 				TAILQ_INSERT_TAIL(
3983 				    ruleset->rules[rs_num].active.ptr,
3984 				    newrule, entries);
3985 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3986 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3987 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3988 			else
3989 				TAILQ_INSERT_AFTER(
3990 				    ruleset->rules[rs_num].active.ptr,
3991 				    oldrule, newrule, entries);
3992 			ruleset->rules[rs_num].active.rcount++;
3993 		}
3994 
3995 		nr = 0;
3996 		TAILQ_FOREACH(oldrule,
3997 		    ruleset->rules[rs_num].active.ptr, entries)
3998 			oldrule->nr = nr++;
3999 
4000 		ruleset->rules[rs_num].active.ticket++;
4001 
4002 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
4003 		pf_remove_if_empty_kruleset(ruleset);
4004 
4005 		PF_RULES_WUNLOCK();
4006 		PF_CONFIG_UNLOCK();
4007 		break;
4008 
4009 #undef ERROUT
4010 DIOCCHANGERULE_error:
4011 		PF_RULES_WUNLOCK();
4012 		PF_CONFIG_UNLOCK();
4013 		pf_krule_free(newrule);
4014 		pf_kkif_free(kif);
4015 		break;
4016 	}
4017 
4018 	case DIOCCLRSTATESNV: {
4019 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
4020 		break;
4021 	}
4022 
4023 	case DIOCKILLSTATESNV: {
4024 		error = pf_killstates_nv((struct pfioc_nv *)addr);
4025 		break;
4026 	}
4027 
4028 	case DIOCADDSTATE: {
4029 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
4030 		struct pfsync_state_1301	*sp = &ps->state;
4031 
4032 		if (sp->timeout >= PFTM_MAX) {
4033 			error = EINVAL;
4034 			goto fail;
4035 		}
4036 		if (V_pfsync_state_import_ptr != NULL) {
4037 			PF_RULES_RLOCK();
4038 			error = V_pfsync_state_import_ptr(
4039 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
4040 			    PFSYNC_MSG_VERSION_1301);
4041 			PF_RULES_RUNLOCK();
4042 		} else
4043 			error = EOPNOTSUPP;
4044 		break;
4045 	}
4046 
4047 	case DIOCGETSTATE: {
4048 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
4049 		struct pf_kstate	*s;
4050 
4051 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
4052 		if (s == NULL) {
4053 			error = ENOENT;
4054 			goto fail;
4055 		}
4056 
4057 		pfsync_state_export((union pfsync_state_union*)&ps->state,
4058 		    s, PFSYNC_MSG_VERSION_1301);
4059 		PF_STATE_UNLOCK(s);
4060 		break;
4061 	}
4062 
4063 	case DIOCGETSTATENV: {
4064 		error = pf_getstate((struct pfioc_nv *)addr);
4065 		break;
4066 	}
4067 
4068 #ifdef COMPAT_FREEBSD14
4069 	case DIOCGETSTATES: {
4070 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
4071 		struct pf_kstate	*s;
4072 		struct pfsync_state_1301	*pstore, *p;
4073 		int			 i, nr;
4074 		size_t			 slice_count = 16, count;
4075 		void			*out;
4076 
4077 		if (ps->ps_len <= 0) {
4078 			nr = uma_zone_get_cur(V_pf_state_z);
4079 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4080 			break;
4081 		}
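		/*
		 * Hypothetical userland sketch of the two-call convention
		 * (illustration only, not part of this file):
		 *
		 *	struct pfioc_states ps = { 0 };
		 *	ioctl(dev, DIOCGETSTATES, &ps);	  // ps.ps_len = bytes needed
		 *	ps.ps_states = malloc(ps.ps_len);
		 *	ioctl(dev, DIOCGETSTATES, &ps);	  // fills states, trims ps_len
		 */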
4082 
4083 		out = ps->ps_states;
4084 		pstore = mallocarray(slice_count,
4085 		    sizeof(struct pfsync_state_1301), M_PF, M_WAITOK | M_ZERO);
4086 		nr = 0;
4087 
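		/*
		 * Walk every hash row: count live states under the row lock,
		 * and if the preallocated slice is too small, unlock, double
		 * it and retry the row; stop once the caller's buffer is
		 * exhausted (DIOCGETSTATES_full).
		 */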
4088 		for (i = 0; i <= V_pf_hashmask; i++) {
4089 			struct pf_idhash *ih = &V_pf_idhash[i];
4090 
4091 DIOCGETSTATES_retry:
4092 			p = pstore;
4093 
4094 			if (LIST_EMPTY(&ih->states))
4095 				continue;
4096 
4097 			PF_HASHROW_LOCK(ih);
4098 			count = 0;
4099 			LIST_FOREACH(s, &ih->states, entry) {
4100 				if (s->timeout == PFTM_UNLINKED)
4101 					continue;
4102 				count++;
4103 			}
4104 
4105 			if (count > slice_count) {
4106 				PF_HASHROW_UNLOCK(ih);
4107 				free(pstore, M_PF);
4108 				slice_count = count * 2;
4109 				pstore = mallocarray(slice_count,
4110 				    sizeof(struct pfsync_state_1301), M_PF,
4111 				    M_WAITOK | M_ZERO);
4112 				goto DIOCGETSTATES_retry;
4113 			}
4114 
4115 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4116 				PF_HASHROW_UNLOCK(ih);
4117 				goto DIOCGETSTATES_full;
4118 			}
4119 
4120 			LIST_FOREACH(s, &ih->states, entry) {
4121 				if (s->timeout == PFTM_UNLINKED)
4122 					continue;
4123 
4124 				pfsync_state_export((union pfsync_state_union*)p,
4125 				    s, PFSYNC_MSG_VERSION_1301);
4126 				p++;
4127 				nr++;
4128 			}
4129 			PF_HASHROW_UNLOCK(ih);
4130 			error = copyout(pstore, out,
4131 			    sizeof(struct pfsync_state_1301) * count);
4132 			if (error) {
4133 				free(pstore, M_PF);
4134 				goto fail;
4135 			}
4136 			out = ps->ps_states + nr;
4137 		}
4138 DIOCGETSTATES_full:
4139 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4140 		free(pstore, M_PF);
4141 
4142 		break;
4143 	}
4144 
4145 	case DIOCGETSTATESV2: {
4146 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4147 		struct pf_kstate	*s;
4148 		struct pf_state_export	*pstore, *p;
4149 		int i, nr;
4150 		size_t slice_count = 16, count;
4151 		void *out;
4152 
4153 		if (ps->ps_req_version > PF_STATE_VERSION) {
4154 			error = ENOTSUP;
4155 			goto fail;
4156 		}
4157 
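		/*
		 * Same slice-and-retry walk as DIOCGETSTATES above, but
		 * exporting the newer struct pf_state_export.
		 */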
4158 		if (ps->ps_len <= 0) {
4159 			nr = uma_zone_get_cur(V_pf_state_z);
4160 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4161 			break;
4162 		}
4163 
4164 		out = ps->ps_states;
4165 		pstore = mallocarray(slice_count,
4166 		    sizeof(struct pf_state_export), M_PF, M_WAITOK | M_ZERO);
4167 		nr = 0;
4168 
4169 		for (i = 0; i <= V_pf_hashmask; i++) {
4170 			struct pf_idhash *ih = &V_pf_idhash[i];
4171 
4172 DIOCGETSTATESV2_retry:
4173 			p = pstore;
4174 
4175 			if (LIST_EMPTY(&ih->states))
4176 				continue;
4177 
4178 			PF_HASHROW_LOCK(ih);
4179 			count = 0;
4180 			LIST_FOREACH(s, &ih->states, entry) {
4181 				if (s->timeout == PFTM_UNLINKED)
4182 					continue;
4183 				count++;
4184 			}
4185 
4186 			if (count > slice_count) {
4187 				PF_HASHROW_UNLOCK(ih);
4188 				free(pstore, M_PF);
4189 				slice_count = count * 2;
4190 				pstore = mallocarray(slice_count,
4191 				    sizeof(struct pf_state_export), M_PF,
4192 				    M_WAITOK | M_ZERO);
4193 				goto DIOCGETSTATESV2_retry;
4194 			}
4195 
4196 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4197 				PF_HASHROW_UNLOCK(ih);
4198 				goto DIOCGETSTATESV2_full;
4199 			}
4200 
4201 			LIST_FOREACH(s, &ih->states, entry) {
4202 				if (s->timeout == PFTM_UNLINKED)
4203 					continue;
4204 
4205 				pf_state_export(p, s);
4206 				p++;
4207 				nr++;
4208 			}
4209 			PF_HASHROW_UNLOCK(ih);
4210 			error = copyout(pstore, out,
4211 			    sizeof(struct pf_state_export) * count);
4212 			if (error) {
4213 				free(pstore, M_PF);
4214 				goto fail;
4215 			}
4216 			out = ps->ps_states + nr;
4217 		}
4218 DIOCGETSTATESV2_full:
4219 		ps->ps_len = nr * sizeof(struct pf_state_export);
4220 		free(pstore, M_PF);
4221 
4222 		break;
4223 	}
4224 #endif
4225 	case DIOCGETSTATUSNV: {
4226 		error = pf_getstatus((struct pfioc_nv *)addr);
4227 		break;
4228 	}
4229 
4230 	case DIOCSETSTATUSIF: {
4231 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4232 
4233 		if (pi->ifname[0] == 0) {
4234 			bzero(V_pf_status.ifname, IFNAMSIZ);
4235 			break;
4236 		}
4237 		PF_RULES_WLOCK();
4238 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4239 		PF_RULES_WUNLOCK();
4240 		break;
4241 	}
4242 
4243 	case DIOCCLRSTATUS: {
4244 		pf_ioctl_clear_status();
4245 		break;
4246 	}
4247 
4248 	case DIOCNATLOOK: {
4249 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4250 
4251 		error = pf_ioctl_natlook(pnl);
4252 		break;
4253 	}
4254 
4255 	case DIOCSETTIMEOUT: {
4256 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4257 
4258 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4259 		    &pt->seconds);
4260 		break;
4261 	}
4262 
4263 	case DIOCGETTIMEOUT: {
4264 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4265 
4266 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4267 		break;
4268 	}
4269 
4270 	case DIOCGETLIMIT: {
4271 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4272 
4273 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4274 		break;
4275 	}
4276 
4277 	case DIOCSETLIMIT: {
4278 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4279 		unsigned int old_limit;
4280 
4281 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4282 		pl->limit = old_limit;
4283 		break;
4284 	}
4285 
4286 	case DIOCSETDEBUG: {
4287 		u_int32_t	*level = (u_int32_t *)addr;
4288 
4289 		PF_RULES_WLOCK();
4290 		V_pf_status.debug = *level;
4291 		PF_RULES_WUNLOCK();
4292 		break;
4293 	}
4294 
4295 	case DIOCCLRRULECTRS: {
4296 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4297 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4298 		struct pf_krule		*rule;
4299 
4300 		PF_RULES_WLOCK();
4301 		TAILQ_FOREACH(rule,
4302 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4303 			pf_counter_u64_zero(&rule->evaluations);
4304 			for (int i = 0; i < 2; i++) {
4305 				pf_counter_u64_zero(&rule->packets[i]);
4306 				pf_counter_u64_zero(&rule->bytes[i]);
4307 			}
4308 		}
4309 		PF_RULES_WUNLOCK();
4310 		break;
4311 	}
4312 
4313 	case DIOCGIFSPEEDV0:
4314 	case DIOCGIFSPEEDV1: {
4315 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4316 		struct pf_ifspeed_v1	ps;
4317 		struct ifnet		*ifp;
4318 
4319 		if (psp->ifname[0] == '\0') {
4320 			error = EINVAL;
4321 			goto fail;
4322 		}
4323 
4324 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4325 		if (error != 0)
4326 			goto fail;
4327 		ifp = ifunit(ps.ifname);
4328 		if (ifp != NULL) {
4329 			psp->baudrate32 =
4330 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4331 			if (cmd == DIOCGIFSPEEDV1)
4332 				psp->baudrate = ifp->if_baudrate;
4333 		} else {
4334 			error = EINVAL;
4335 		}
4336 		break;
4337 	}
4338 
4339 #ifdef ALTQ
4340 	case DIOCSTARTALTQ: {
4341 		struct pf_altq		*altq;
4342 
4343 		PF_RULES_WLOCK();
4344 		/* enable all altq interfaces on active list */
4345 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4346 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4347 				error = pf_enable_altq(altq);
4348 				if (error != 0)
4349 					break;
4350 			}
4351 		}
4352 		if (error == 0)
4353 			V_pf_altq_running = 1;
4354 		PF_RULES_WUNLOCK();
4355 		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
4356 		break;
4357 	}
4358 
4359 	case DIOCSTOPALTQ: {
4360 		struct pf_altq		*altq;
4361 
4362 		PF_RULES_WLOCK();
4363 		/* disable all altq interfaces on active list */
4364 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4365 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4366 				error = pf_disable_altq(altq);
4367 				if (error != 0)
4368 					break;
4369 			}
4370 		}
4371 		if (error == 0)
4372 			V_pf_altq_running = 0;
4373 		PF_RULES_WUNLOCK();
4374 		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
4375 		break;
4376 	}
4377 
4378 	case DIOCADDALTQV0:
4379 	case DIOCADDALTQV1: {
4380 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4381 		struct pf_altq		*altq, *a;
4382 		struct ifnet		*ifp;
4383 
4384 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4385 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4386 		if (error)
4387 			goto fail;
4388 		altq->local_flags = 0;
4389 
4390 		PF_RULES_WLOCK();
4391 		if (pa->ticket != V_ticket_altqs_inactive) {
4392 			PF_RULES_WUNLOCK();
4393 			free(altq, M_PFALTQ);
4394 			error = EBUSY;
4395 			goto fail;
4396 		}
4397 
4398 		/*
4399 		 * if this is for a queue, find the discipline and
4400 		 * copy the necessary fields
4401 		 */
4402 		if (altq->qname[0] != 0) {
4403 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4404 				PF_RULES_WUNLOCK();
4405 				error = EBUSY;
4406 				free(altq, M_PFALTQ);
4407 				goto fail;
4408 			}
4409 			altq->altq_disc = NULL;
4410 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4411 				if (strncmp(a->ifname, altq->ifname,
4412 				    IFNAMSIZ) == 0) {
4413 					altq->altq_disc = a->altq_disc;
4414 					break;
4415 				}
4416 			}
4417 		}
4418 
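		/*
		 * A missing interface is not fatal: the entry is kept but
		 * flagged removed, so it is skipped until the interface
		 * (re)appears.
		 */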
4419 		if ((ifp = ifunit(altq->ifname)) == NULL)
4420 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4421 		else
4422 			error = altq_add(ifp, altq);
4423 
4424 		if (error) {
4425 			PF_RULES_WUNLOCK();
4426 			free(altq, M_PFALTQ);
4427 			goto fail;
4428 		}
4429 
4430 		if (altq->qname[0] != 0)
4431 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4432 		else
4433 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4434 		/* version error check done on import above */
4435 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4436 		PF_RULES_WUNLOCK();
4437 		break;
4438 	}
4439 
4440 	case DIOCGETALTQSV0:
4441 	case DIOCGETALTQSV1: {
4442 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4443 		struct pf_altq		*altq;
4444 
4445 		PF_RULES_RLOCK();
4446 		pa->nr = 0;
4447 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4448 			pa->nr++;
4449 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4450 			pa->nr++;
4451 		pa->ticket = V_ticket_altqs_active;
4452 		PF_RULES_RUNLOCK();
4453 		break;
4454 	}
4455 
4456 	case DIOCGETALTQV0:
4457 	case DIOCGETALTQV1: {
4458 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4459 		struct pf_altq		*altq;
4460 
4461 		PF_RULES_RLOCK();
4462 		if (pa->ticket != V_ticket_altqs_active) {
4463 			PF_RULES_RUNLOCK();
4464 			error = EBUSY;
4465 			goto fail;
4466 		}
4467 		altq = pf_altq_get_nth_active(pa->nr);
4468 		if (altq == NULL) {
4469 			PF_RULES_RUNLOCK();
4470 			error = EBUSY;
4471 			goto fail;
4472 		}
4473 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4474 		PF_RULES_RUNLOCK();
4475 		break;
4476 	}
4477 
4478 	case DIOCCHANGEALTQV0:
4479 	case DIOCCHANGEALTQV1:
4480 		/* CHANGEALTQ not supported yet! */
4481 		error = ENODEV;
4482 		break;
4483 
4484 	case DIOCGETQSTATSV0:
4485 	case DIOCGETQSTATSV1: {
4486 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4487 		struct pf_altq		*altq;
4488 		int			 nbytes;
4489 		u_int32_t		 version;
4490 
4491 		PF_RULES_RLOCK();
4492 		if (pq->ticket != V_ticket_altqs_active) {
4493 			PF_RULES_RUNLOCK();
4494 			error = EBUSY;
4495 			goto fail;
4496 		}
4497 		nbytes = pq->nbytes;
4498 		altq = pf_altq_get_nth_active(pq->nr);
4499 		if (altq == NULL) {
4500 			PF_RULES_RUNLOCK();
4501 			error = EBUSY;
4502 			goto fail;
4503 		}
4504 
4505 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4506 			PF_RULES_RUNLOCK();
4507 			error = ENXIO;
4508 			goto fail;
4509 		}
4510 		PF_RULES_RUNLOCK();
4511 		if (cmd == DIOCGETQSTATSV0)
4512 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4513 		else
4514 			version = pq->version;
4515 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4516 		if (error == 0) {
4517 			pq->scheduler = altq->scheduler;
4518 			pq->nbytes = nbytes;
4519 		}
4520 		break;
4521 	}
4522 #endif /* ALTQ */
4523 
4524 	case DIOCBEGINADDRS: {
4525 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4526 
4527 		error = pf_ioctl_begin_addrs(&pp->ticket);
4528 		break;
4529 	}
4530 
4531 	case DIOCADDADDR: {
4532 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4533 		struct pf_nl_pooladdr npp = {};
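		/*
		 * The legacy pool ioctls always operate on the redirection
		 * pool; 'which' lives past the first sizeof(*pp) bytes of the
		 * netlink struct, so the memcpy() below leaves it intact.
		 */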
4534 
4535 		npp.which = PF_RDR;
4536 		memcpy(&npp, pp, sizeof(*pp));
4537 		error = pf_ioctl_add_addr(&npp);
4538 		break;
4539 	}
4540 
4541 	case DIOCGETADDRS: {
4542 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4543 		struct pf_nl_pooladdr npp = {};
4544 
4545 		npp.which = PF_RDR;
4546 		memcpy(&npp, pp, sizeof(*pp));
4547 		error = pf_ioctl_get_addrs(&npp);
4548 		memcpy(pp, &npp, sizeof(*pp));
4549 
4550 		break;
4551 	}
4552 
4553 	case DIOCGETADDR: {
4554 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4555 		struct pf_nl_pooladdr npp = {};
4556 
4557 		npp.which = PF_RDR;
4558 		memcpy(&npp, pp, sizeof(*pp));
4559 		error = pf_ioctl_get_addr(&npp);
4560 		memcpy(pp, &npp, sizeof(*pp));
4561 
4562 		break;
4563 	}
4564 
4565 	case DIOCCHANGEADDR: {
4566 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4567 		struct pf_kpool		*pool;
4568 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4569 		struct pf_kruleset	*ruleset;
4570 		struct pfi_kkif		*kif = NULL;
4571 
4572 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
4573 
4574 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4575 		    pca->action > PF_CHANGE_REMOVE) {
4576 			error = EINVAL;
4577 			goto fail;
4578 		}
4579 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4580 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4581 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4582 			error = EINVAL;
4583 			goto fail;
4584 		}
4585 		if (pca->addr.addr.p.dyn != NULL) {
4586 			error = EINVAL;
4587 			goto fail;
4588 		}
4589 
4590 		if (pca->action != PF_CHANGE_REMOVE) {
4591 #ifndef INET
4592 			if (pca->af == AF_INET) {
4593 				error = EAFNOSUPPORT;
4594 				goto fail;
4595 			}
4596 #endif /* INET */
4597 #ifndef INET6
4598 			if (pca->af == AF_INET6) {
4599 				error = EAFNOSUPPORT;
4600 				goto fail;
4601 			}
4602 #endif /* INET6 */
4603 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4604 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4605 			if (newpa->ifname[0])
4606 				kif = pf_kkif_create(M_WAITOK);
4607 			newpa->kif = NULL;
4608 		}
4609 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4610 		PF_RULES_WLOCK();
4611 		ruleset = pf_find_kruleset(pca->anchor);
4612 		if (ruleset == NULL)
4613 			ERROUT(EBUSY);
4614 
4615 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4616 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4617 		if (pool == NULL)
4618 			ERROUT(EBUSY);
4619 
4620 		if (pca->action != PF_CHANGE_REMOVE) {
4621 			if (newpa->ifname[0]) {
4622 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4623 				pfi_kkif_ref(newpa->kif);
4624 				kif = NULL;
4625 			}
4626 
4627 			switch (newpa->addr.type) {
4628 			case PF_ADDR_DYNIFTL:
4629 				error = pfi_dynaddr_setup(&newpa->addr,
4630 				    pca->af);
4631 				break;
4632 			case PF_ADDR_TABLE:
4633 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4634 				    newpa->addr.v.tblname);
4635 				if (newpa->addr.p.tbl == NULL)
4636 					error = ENOMEM;
4637 				break;
4638 			}
4639 			if (error)
4640 				goto DIOCCHANGEADDR_error;
4641 		}
4642 
4643 		switch (pca->action) {
4644 		case PF_CHANGE_ADD_HEAD:
4645 			oldpa = TAILQ_FIRST(&pool->list);
4646 			break;
4647 		case PF_CHANGE_ADD_TAIL:
4648 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4649 			break;
4650 		default:
4651 			oldpa = TAILQ_FIRST(&pool->list);
4652 			for (int i = 0; oldpa && i < pca->nr; i++)
4653 				oldpa = TAILQ_NEXT(oldpa, entries);
4654 
4655 			if (oldpa == NULL)
4656 				ERROUT(EINVAL);
4657 		}
4658 
4659 		if (pca->action == PF_CHANGE_REMOVE) {
4660 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4661 			switch (oldpa->addr.type) {
4662 			case PF_ADDR_DYNIFTL:
4663 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4664 				break;
4665 			case PF_ADDR_TABLE:
4666 				pfr_detach_table(oldpa->addr.p.tbl);
4667 				break;
4668 			}
4669 			if (oldpa->kif)
4670 				pfi_kkif_unref(oldpa->kif);
4671 			free(oldpa, M_PFRULE);
4672 		} else {
4673 			if (oldpa == NULL)
4674 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4675 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4676 			    pca->action == PF_CHANGE_ADD_BEFORE)
4677 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4678 			else
4679 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4680 				    newpa, entries);
4681 		}
4682 
4683 		pool->cur = TAILQ_FIRST(&pool->list);
4684 		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4685 		PF_RULES_WUNLOCK();
4686 		break;
4687 
4688 #undef ERROUT
4689 DIOCCHANGEADDR_error:
4690 		if (newpa != NULL) {
4691 			if (newpa->kif)
4692 				pfi_kkif_unref(newpa->kif);
4693 			free(newpa, M_PFRULE);
4694 		}
4695 		PF_RULES_WUNLOCK();
4696 		pf_kkif_free(kif);
4697 		break;
4698 	}
4699 
4700 	case DIOCGETRULESETS: {
4701 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4702 
4703 		pr->path[sizeof(pr->path) - 1] = '\0';
4704 
4705 		error = pf_ioctl_get_rulesets(pr);
4706 		break;
4707 	}
4708 
4709 	case DIOCGETRULESET: {
4710 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4711 
4712 		pr->path[sizeof(pr->path) - 1] = '\0';
4713 
4714 		error = pf_ioctl_get_ruleset(pr);
4715 		break;
4716 	}
4717 
4718 	case DIOCRCLRTABLES: {
4719 		struct pfioc_table *io = (struct pfioc_table *)addr;
4720 
4721 		if (io->pfrio_esize != 0) {
4722 			error = ENODEV;
4723 			goto fail;
4724 		}
4725 		PF_RULES_WLOCK();
4726 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4727 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4728 		PF_RULES_WUNLOCK();
4729 		break;
4730 	}
4731 
4732 	case DIOCRADDTABLES: {
4733 		struct pfioc_table *io = (struct pfioc_table *)addr;
4734 		struct pfr_table *pfrts;
4735 		size_t totlen;
4736 
4737 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4738 			error = ENODEV;
4739 			goto fail;
4740 		}
4741 
4742 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4743 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4744 			error = ENOMEM;
4745 			goto fail;
4746 		}
4747 
4748 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4749 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4750 		    M_PF, M_WAITOK);
4751 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4752 		if (error) {
4753 			free(pfrts, M_PF);
4754 			goto fail;
4755 		}
4756 		PF_RULES_WLOCK();
4757 		error = pfr_add_tables(pfrts, io->pfrio_size,
4758 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4759 		PF_RULES_WUNLOCK();
4760 		free(pfrts, M_PF);
4761 		break;
4762 	}
4763 
4764 	case DIOCRDELTABLES: {
4765 		struct pfioc_table *io = (struct pfioc_table *)addr;
4766 		struct pfr_table *pfrts;
4767 		size_t totlen;
4768 
4769 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4770 			error = ENODEV;
4771 			goto fail;
4772 		}
4773 
4774 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4775 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4776 			error = ENOMEM;
4777 			goto fail;
4778 		}
4779 
4780 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4781 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4782 		    M_PF, M_WAITOK);
4783 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4784 		if (error) {
4785 			free(pfrts, M_PF);
4786 			goto fail;
4787 		}
4788 		PF_RULES_WLOCK();
4789 		error = pfr_del_tables(pfrts, io->pfrio_size,
4790 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4791 		PF_RULES_WUNLOCK();
4792 		free(pfrts, M_PF);
4793 		break;
4794 	}
4795 
4796 	case DIOCRGETTABLES: {
4797 		struct pfioc_table *io = (struct pfioc_table *)addr;
4798 		struct pfr_table *pfrts;
4799 		size_t totlen;
4800 		int n;
4801 
4802 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4803 			error = ENODEV;
4804 			goto fail;
4805 		}
4806 		PF_RULES_RLOCK();
4807 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4808 		if (n < 0) {
4809 			PF_RULES_RUNLOCK();
4810 			error = EINVAL;
4811 			goto fail;
4812 		}
4813 		io->pfrio_size = min(io->pfrio_size, n);
4814 
4815 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4816 
4817 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4818 		    M_PF, M_NOWAIT | M_ZERO);
4819 		if (pfrts == NULL) {
4820 			error = ENOMEM;
4821 			PF_RULES_RUNLOCK();
4822 			goto fail;
4823 		}
4824 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4825 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4826 		PF_RULES_RUNLOCK();
4827 		if (error == 0)
4828 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4829 		free(pfrts, M_PF);
4830 		break;
4831 	}
4832 
4833 	case DIOCRGETTSTATS: {
4834 		struct pfioc_table *io = (struct pfioc_table *)addr;
4835 		struct pfr_tstats *pfrtstats;
4836 		size_t totlen;
4837 		int n;
4838 
4839 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4840 			error = ENODEV;
4841 			goto fail;
4842 		}
4843 		PF_TABLE_STATS_LOCK();
4844 		PF_RULES_RLOCK();
4845 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4846 		if (n < 0) {
4847 			PF_RULES_RUNLOCK();
4848 			PF_TABLE_STATS_UNLOCK();
4849 			error = EINVAL;
4850 			goto fail;
4851 		}
4852 		io->pfrio_size = min(io->pfrio_size, n);
4853 
4854 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4855 		pfrtstats = mallocarray(io->pfrio_size,
4856 		    sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
4857 		if (pfrtstats == NULL) {
4858 			error = ENOMEM;
4859 			PF_RULES_RUNLOCK();
4860 			PF_TABLE_STATS_UNLOCK();
4861 			goto fail;
4862 		}
4863 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4864 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4865 		PF_RULES_RUNLOCK();
4866 		PF_TABLE_STATS_UNLOCK();
4867 		if (error == 0)
4868 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4869 		free(pfrtstats, M_PF);
4870 		break;
4871 	}
4872 
4873 	case DIOCRCLRTSTATS: {
4874 		struct pfioc_table *io = (struct pfioc_table *)addr;
4875 		struct pfr_table *pfrts;
4876 		size_t totlen;
4877 
4878 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4879 			error = ENODEV;
4880 			goto fail;
4881 		}
4882 
4883 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4884 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4885 			/* We used to count tables and use the minimum required
4886 			 * size, so we didn't fail on overly large requests.
4887 			 * Keep doing so. */
4888 			io->pfrio_size = pf_ioctl_maxcount;
4889 			goto fail;
4890 		}
4891 
4892 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4893 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4894 		    M_PF, M_WAITOK);
4895 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4896 		if (error) {
4897 			free(pfrts, M_PF);
4898 			goto fail;
4899 		}
4900 
4901 		PF_TABLE_STATS_LOCK();
4902 		PF_RULES_RLOCK();
4903 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4904 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4905 		PF_RULES_RUNLOCK();
4906 		PF_TABLE_STATS_UNLOCK();
4907 		free(pfrts, M_PF);
4908 		break;
4909 	}
4910 
4911 	case DIOCRSETTFLAGS: {
4912 		struct pfioc_table *io = (struct pfioc_table *)addr;
4913 		struct pfr_table *pfrts;
4914 		size_t totlen;
4915 		int n;
4916 
4917 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4918 			error = ENODEV;
4919 			goto fail;
4920 		}
4921 
4922 		PF_RULES_RLOCK();
4923 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4924 		if (n < 0) {
4925 			PF_RULES_RUNLOCK();
4926 			error = EINVAL;
4927 			goto fail;
4928 		}
4929 
4930 		io->pfrio_size = min(io->pfrio_size, n);
4931 		PF_RULES_RUNLOCK();
4932 
4933 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4934 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4935 		    M_PF, M_WAITOK);
4936 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4937 		if (error) {
4938 			free(pfrts, M_PF);
4939 			goto fail;
4940 		}
4941 		PF_RULES_WLOCK();
4942 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4943 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4944 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4945 		PF_RULES_WUNLOCK();
4946 		free(pfrts, M_PF);
4947 		break;
4948 	}
4949 
4950 	case DIOCRCLRADDRS: {
4951 		struct pfioc_table *io = (struct pfioc_table *)addr;
4952 
4953 		if (io->pfrio_esize != 0) {
4954 			error = ENODEV;
4955 			goto fail;
4956 		}
4957 		PF_RULES_WLOCK();
4958 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4959 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4960 		PF_RULES_WUNLOCK();
4961 		break;
4962 	}
4963 
4964 	case DIOCRADDADDRS: {
4965 		struct pfioc_table *io = (struct pfioc_table *)addr;
4966 		struct pfr_addr *pfras;
4967 		size_t totlen;
4968 
4969 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4970 			error = ENODEV;
4971 			goto fail;
4972 		}
4973 		if (io->pfrio_size < 0 ||
4974 		    io->pfrio_size > pf_ioctl_maxcount ||
4975 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4976 			error = EINVAL;
4977 			goto fail;
4978 		}
4979 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4980 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4981 		    M_PF, M_WAITOK);
4982 		error = copyin(io->pfrio_buffer, pfras, totlen);
4983 		if (error) {
4984 			free(pfras, M_PF);
4985 			goto fail;
4986 		}
4987 		PF_RULES_WLOCK();
4988 		io->pfrio_nadd = 0;
4989 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4990 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4991 		    PFR_FLAG_USERIOCTL);
4992 		PF_RULES_WUNLOCK();
4993 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4994 			error = copyout(pfras, io->pfrio_buffer, totlen);
4995 		free(pfras, M_PF);
4996 		break;
4997 	}
4998 
4999 	case DIOCRDELADDRS: {
5000 		struct pfioc_table *io = (struct pfioc_table *)addr;
5001 		struct pfr_addr *pfras;
5002 		size_t totlen;
5003 
5004 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5005 			error = ENODEV;
5006 			goto fail;
5007 		}
5008 		if (io->pfrio_size < 0 ||
5009 		    io->pfrio_size > pf_ioctl_maxcount ||
5010 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5011 			error = EINVAL;
5012 			goto fail;
5013 		}
5014 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5015 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5016 		    M_PF, M_WAITOK);
5017 		error = copyin(io->pfrio_buffer, pfras, totlen);
5018 		if (error) {
5019 			free(pfras, M_PF);
5020 			goto fail;
5021 		}
5022 		PF_RULES_WLOCK();
5023 		error = pfr_del_addrs(&io->pfrio_table, pfras,
5024 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
5025 		    PFR_FLAG_USERIOCTL);
5026 		PF_RULES_WUNLOCK();
5027 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5028 			error = copyout(pfras, io->pfrio_buffer, totlen);
5029 		free(pfras, M_PF);
5030 		break;
5031 	}
5032 
5033 	case DIOCRSETADDRS: {
5034 		struct pfioc_table *io = (struct pfioc_table *)addr;
5035 		struct pfr_addr *pfras;
5036 		size_t totlen, count;
5037 
5038 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5039 			error = ENODEV;
5040 			goto fail;
5041 		}
5042 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
5043 			error = EINVAL;
5044 			goto fail;
5045 		}
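		/*
		 * With PFR_FLAG_FEEDBACK the same buffer carries the reply,
		 * so allocate for the larger of the two element counts.
		 */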
5046 		count = max(io->pfrio_size, io->pfrio_size2);
5047 		if (count > pf_ioctl_maxcount ||
5048 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
5049 			error = EINVAL;
5050 			goto fail;
5051 		}
5052 		totlen = count * sizeof(struct pfr_addr);
5053 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_PF,
5054 		    M_WAITOK);
5055 		error = copyin(io->pfrio_buffer, pfras, totlen);
5056 		if (error) {
5057 			free(pfras, M_PF);
5058 			goto fail;
5059 		}
5060 		PF_RULES_WLOCK();
5061 		error = pfr_set_addrs(&io->pfrio_table, pfras,
5062 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
5063 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5064 		    PFR_FLAG_USERIOCTL, 0);
5065 		PF_RULES_WUNLOCK();
5066 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5067 			error = copyout(pfras, io->pfrio_buffer, totlen);
5068 		free(pfras, M_PF);
5069 		break;
5070 	}
5071 
5072 	case DIOCRGETADDRS: {
5073 		struct pfioc_table *io = (struct pfioc_table *)addr;
5074 		struct pfr_addr *pfras;
5075 		size_t totlen;
5076 
5077 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5078 			error = ENODEV;
5079 			goto fail;
5080 		}
5081 		if (io->pfrio_size < 0 ||
5082 		    io->pfrio_size > pf_ioctl_maxcount ||
5083 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5084 			error = EINVAL;
5085 			goto fail;
5086 		}
5087 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5088 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5089 		    M_PF, M_WAITOK | M_ZERO);
5090 		PF_RULES_RLOCK();
5091 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5092 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5093 		PF_RULES_RUNLOCK();
5094 		if (error == 0)
5095 			error = copyout(pfras, io->pfrio_buffer, totlen);
5096 		free(pfras, M_PF);
5097 		break;
5098 	}
5099 
5100 	case DIOCRGETASTATS: {
5101 		struct pfioc_table *io = (struct pfioc_table *)addr;
5102 		struct pfr_astats *pfrastats;
5103 		size_t totlen;
5104 
5105 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5106 			error = ENODEV;
5107 			goto fail;
5108 		}
5109 		if (io->pfrio_size < 0 ||
5110 		    io->pfrio_size > pf_ioctl_maxcount ||
5111 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5112 			error = EINVAL;
5113 			goto fail;
5114 		}
5115 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5116 		pfrastats = mallocarray(io->pfrio_size,
5117 		    sizeof(struct pfr_astats), M_PF, M_WAITOK | M_ZERO);
5118 		PF_RULES_RLOCK();
5119 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5120 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5121 		PF_RULES_RUNLOCK();
5122 		if (error == 0)
5123 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5124 		free(pfrastats, M_PF);
5125 		break;
5126 	}
5127 
5128 	case DIOCRCLRASTATS: {
5129 		struct pfioc_table *io = (struct pfioc_table *)addr;
5130 		struct pfr_addr *pfras;
5131 		size_t totlen;
5132 
5133 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5134 			error = ENODEV;
5135 			goto fail;
5136 		}
5137 		if (io->pfrio_size < 0 ||
5138 		    io->pfrio_size > pf_ioctl_maxcount ||
5139 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5140 			error = EINVAL;
5141 			goto fail;
5142 		}
5143 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5144 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5145 		    M_PF, M_WAITOK);
5146 		error = copyin(io->pfrio_buffer, pfras, totlen);
5147 		if (error) {
5148 			free(pfras, M_PF);
5149 			goto fail;
5150 		}
5151 		PF_RULES_WLOCK();
5152 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5153 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5154 		    PFR_FLAG_USERIOCTL);
5155 		PF_RULES_WUNLOCK();
5156 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5157 			error = copyout(pfras, io->pfrio_buffer, totlen);
5158 		free(pfras, M_PF);
5159 		break;
5160 	}
5161 
5162 	case DIOCRTSTADDRS: {
5163 		struct pfioc_table *io = (struct pfioc_table *)addr;
5164 		struct pfr_addr *pfras;
5165 		size_t totlen;
5166 
5167 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5168 			error = ENODEV;
5169 			goto fail;
5170 		}
5171 		if (io->pfrio_size < 0 ||
5172 		    io->pfrio_size > pf_ioctl_maxcount ||
5173 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5174 			error = EINVAL;
5175 			goto fail;
5176 		}
5177 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5178 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5179 		    M_PF, M_WAITOK);
5180 		error = copyin(io->pfrio_buffer, pfras, totlen);
5181 		if (error) {
5182 			free(pfras, M_PF);
5183 			goto fail;
5184 		}
5185 		PF_RULES_RLOCK();
5186 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5187 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5188 		    PFR_FLAG_USERIOCTL);
5189 		PF_RULES_RUNLOCK();
5190 		if (error == 0)
5191 			error = copyout(pfras, io->pfrio_buffer, totlen);
5192 		free(pfras, M_PF);
5193 		break;
5194 	}
5195 
5196 	case DIOCRINADEFINE: {
5197 		struct pfioc_table *io = (struct pfioc_table *)addr;
5198 		struct pfr_addr *pfras;
5199 		size_t totlen;
5200 
5201 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5202 			error = ENODEV;
5203 			goto fail;
5204 		}
5205 		if (io->pfrio_size < 0 ||
5206 		    io->pfrio_size > pf_ioctl_maxcount ||
5207 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5208 			error = EINVAL;
5209 			goto fail;
5210 		}
5211 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5212 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5213 		    M_PF, M_WAITOK);
5214 		error = copyin(io->pfrio_buffer, pfras, totlen);
5215 		if (error) {
5216 			free(pfras, M_PF);
5217 			goto fail;
5218 		}
5219 		PF_RULES_WLOCK();
5220 		error = pfr_ina_define(&io->pfrio_table, pfras,
5221 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5222 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5223 		PF_RULES_WUNLOCK();
5224 		free(pfras, M_PF);
5225 		break;
5226 	}
5227 
5228 	case DIOCOSFPADD: {
5229 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5230 		PF_RULES_WLOCK();
5231 		error = pf_osfp_add(io);
5232 		PF_RULES_WUNLOCK();
5233 		break;
5234 	}
5235 
5236 	case DIOCOSFPGET: {
5237 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5238 		PF_RULES_RLOCK();
5239 		error = pf_osfp_get(io);
5240 		PF_RULES_RUNLOCK();
5241 		break;
5242 	}
5243 
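	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement a transaction over
	 * several rulesets: begin opens an inactive copy per element and
	 * hands back tickets, rollback discards them, and commit validates
	 * every ticket first so the whole set is swapped in atomically or
	 * not at all.
	 */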
5244 	case DIOCXBEGIN: {
5245 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5246 		struct pfioc_trans_e	*ioes, *ioe;
5247 		size_t			 totlen;
5248 		int			 i;
5249 
5250 		if (io->esize != sizeof(*ioe)) {
5251 			error = ENODEV;
5252 			goto fail;
5253 		}
5254 		if (io->size < 0 ||
5255 		    io->size > pf_ioctl_maxcount ||
5256 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5257 			error = EINVAL;
5258 			goto fail;
5259 		}
5260 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5261 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5262 		    M_PF, M_WAITOK);
5263 		error = copyin(io->array, ioes, totlen);
5264 		if (error) {
5265 			free(ioes, M_PF);
5266 			goto fail;
5267 		}
5268 		PF_RULES_WLOCK();
5269 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5270 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5271 			switch (ioe->rs_num) {
5272 			case PF_RULESET_ETH:
5273 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5274 					PF_RULES_WUNLOCK();
5275 					free(ioes, M_PF);
5276 					goto fail;
5277 				}
5278 				break;
5279 #ifdef ALTQ
5280 			case PF_RULESET_ALTQ:
5281 				if (ioe->anchor[0]) {
5282 					PF_RULES_WUNLOCK();
5283 					free(ioes, M_PF);
5284 					error = EINVAL;
5285 					goto fail;
5286 				}
5287 				if ((error = pf_begin_altq(&ioe->ticket))) {
5288 					PF_RULES_WUNLOCK();
5289 					free(ioes, M_PF);
5290 					goto fail;
5291 				}
5292 				break;
5293 #endif /* ALTQ */
5294 			case PF_RULESET_TABLE:
5295 			    {
5296 				struct pfr_table table;
5297 
5298 				bzero(&table, sizeof(table));
5299 				strlcpy(table.pfrt_anchor, ioe->anchor,
5300 				    sizeof(table.pfrt_anchor));
5301 				if ((error = pfr_ina_begin(&table,
5302 				    &ioe->ticket, NULL, 0))) {
5303 					PF_RULES_WUNLOCK();
5304 					free(ioes, M_PF);
5305 					goto fail;
5306 				}
5307 				break;
5308 			    }
5309 			default:
5310 				if ((error = pf_begin_rules(&ioe->ticket,
5311 				    ioe->rs_num, ioe->anchor))) {
5312 					PF_RULES_WUNLOCK();
5313 					free(ioes, M_PF);
5314 					goto fail;
5315 				}
5316 				break;
5317 			}
5318 		}
5319 		PF_RULES_WUNLOCK();
5320 		error = copyout(ioes, io->array, totlen);
5321 		free(ioes, M_PF);
5322 		break;
5323 	}
5324 
5325 	case DIOCXROLLBACK: {
5326 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5327 		struct pfioc_trans_e	*ioe, *ioes;
5328 		size_t			 totlen;
5329 		int			 i;
5330 
5331 		if (io->esize != sizeof(*ioe)) {
5332 			error = ENODEV;
5333 			goto fail;
5334 		}
5335 		if (io->size < 0 ||
5336 		    io->size > pf_ioctl_maxcount ||
5337 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5338 			error = EINVAL;
5339 			goto fail;
5340 		}
5341 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5342 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5343 		    M_PF, M_WAITOK);
5344 		error = copyin(io->array, ioes, totlen);
5345 		if (error) {
5346 			free(ioes, M_PF);
5347 			goto fail;
5348 		}
5349 		PF_RULES_WLOCK();
5350 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5351 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5352 			switch (ioe->rs_num) {
5353 			case PF_RULESET_ETH:
5354 				if ((error = pf_rollback_eth(ioe->ticket,
5355 				    ioe->anchor))) {
5356 					PF_RULES_WUNLOCK();
5357 					free(ioes, M_PF);
5358 					goto fail; /* really bad */
5359 				}
5360 				break;
5361 #ifdef ALTQ
5362 			case PF_RULESET_ALTQ:
5363 				if (ioe->anchor[0]) {
5364 					PF_RULES_WUNLOCK();
5365 					free(ioes, M_PF);
5366 					error = EINVAL;
5367 					goto fail;
5368 				}
5369 				if ((error = pf_rollback_altq(ioe->ticket))) {
5370 					PF_RULES_WUNLOCK();
5371 					free(ioes, M_PF);
5372 					goto fail; /* really bad */
5373 				}
5374 				break;
5375 #endif /* ALTQ */
5376 			case PF_RULESET_TABLE:
5377 			    {
5378 				struct pfr_table table;
5379 
5380 				bzero(&table, sizeof(table));
5381 				strlcpy(table.pfrt_anchor, ioe->anchor,
5382 				    sizeof(table.pfrt_anchor));
5383 				if ((error = pfr_ina_rollback(&table,
5384 				    ioe->ticket, NULL, 0))) {
5385 					PF_RULES_WUNLOCK();
5386 					free(ioes, M_PF);
5387 					goto fail; /* really bad */
5388 				}
5389 				break;
5390 			    }
5391 			default:
5392 				if ((error = pf_rollback_rules(ioe->ticket,
5393 				    ioe->rs_num, ioe->anchor))) {
5394 					PF_RULES_WUNLOCK();
5395 					free(ioes, M_PF);
5396 					goto fail; /* really bad */
5397 				}
5398 				break;
5399 			}
5400 		}
5401 		PF_RULES_WUNLOCK();
5402 		free(ioes, M_PF);
5403 		break;
5404 	}
5405 
5406 	case DIOCXCOMMIT: {
5407 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5408 		struct pfioc_trans_e	*ioe, *ioes;
5409 		struct pf_kruleset	*rs;
5410 		struct pf_keth_ruleset	*ers;
5411 		size_t			 totlen;
5412 		int			 i;
5413 
5414 		if (io->esize != sizeof(*ioe)) {
5415 			error = ENODEV;
5416 			goto fail;
5417 		}
5418 
5419 		if (io->size < 0 ||
5420 		    io->size > pf_ioctl_maxcount ||
5421 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5422 			error = EINVAL;
5423 			goto fail;
5424 		}
5425 
5426 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5427 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5428 		    M_PF, M_WAITOK);
5429 		error = copyin(io->array, ioes, totlen);
5430 		if (error) {
5431 			free(ioes, M_PF);
5432 			goto fail;
5433 		}
5434 		PF_RULES_WLOCK();
5435 		/* First make sure everything will succeed. */
5436 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5437 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5438 			switch (ioe->rs_num) {
5439 			case PF_RULESET_ETH:
5440 				ers = pf_find_keth_ruleset(ioe->anchor);
5441 				if (ers == NULL || ioe->ticket == 0 ||
5442 				    ioe->ticket != ers->inactive.ticket) {
5443 					PF_RULES_WUNLOCK();
5444 					free(ioes, M_PF);
5445 					error = EINVAL;
5446 					goto fail;
5447 				}
5448 				break;
5449 #ifdef ALTQ
5450 			case PF_RULESET_ALTQ:
5451 				if (ioe->anchor[0]) {
5452 					PF_RULES_WUNLOCK();
5453 					free(ioes, M_PF);
5454 					error = EINVAL;
5455 					goto fail;
5456 				}
5457 				if (!V_altqs_inactive_open || ioe->ticket !=
5458 				    V_ticket_altqs_inactive) {
5459 					PF_RULES_WUNLOCK();
5460 					free(ioes, M_PF);
5461 					error = EBUSY;
5462 					goto fail;
5463 				}
5464 				break;
5465 #endif /* ALTQ */
5466 			case PF_RULESET_TABLE:
5467 				rs = pf_find_kruleset(ioe->anchor);
5468 				if (rs == NULL || !rs->topen || ioe->ticket !=
5469 				    rs->tticket) {
5470 					PF_RULES_WUNLOCK();
5471 					free(ioes, M_PF);
5472 					error = EBUSY;
5473 					goto fail;
5474 				}
5475 				break;
5476 			default:
5477 				if (ioe->rs_num < 0 || ioe->rs_num >=
5478 				    PF_RULESET_MAX) {
5479 					PF_RULES_WUNLOCK();
5480 					free(ioes, M_PF);
5481 					error = EINVAL;
5482 					goto fail;
5483 				}
5484 				rs = pf_find_kruleset(ioe->anchor);
5485 				if (rs == NULL ||
5486 				    !rs->rules[ioe->rs_num].inactive.open ||
5487 				    rs->rules[ioe->rs_num].inactive.ticket !=
5488 				    ioe->ticket) {
5489 					PF_RULES_WUNLOCK();
5490 					free(ioes, M_PF);
5491 					error = EBUSY;
5492 					goto fail;
5493 				}
5494 				break;
5495 			}
5496 		}
5497 		/* Now do the commit - no errors should happen here. */
5498 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5499 			switch (ioe->rs_num) {
5500 			case PF_RULESET_ETH:
5501 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5502 					PF_RULES_WUNLOCK();
5503 					free(ioes, M_PF);
5504 					goto fail; /* really bad */
5505 				}
5506 				break;
5507 #ifdef ALTQ
5508 			case PF_RULESET_ALTQ:
5509 				if ((error = pf_commit_altq(ioe->ticket))) {
5510 					PF_RULES_WUNLOCK();
5511 					free(ioes, M_PF);
5512 					goto fail; /* really bad */
5513 				}
5514 				break;
5515 #endif /* ALTQ */
5516 			case PF_RULESET_TABLE:
5517 			    {
5518 				struct pfr_table table;
5519 
5520 				bzero(&table, sizeof(table));
5521 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5522 				    sizeof(table.pfrt_anchor));
5523 				if ((error = pfr_ina_commit(&table,
5524 				    ioe->ticket, NULL, NULL, 0))) {
5525 					PF_RULES_WUNLOCK();
5526 					free(ioes, M_PF);
5527 					goto fail; /* really bad */
5528 				}
5529 				break;
5530 			    }
5531 			default:
5532 				if ((error = pf_commit_rules(ioe->ticket,
5533 				    ioe->rs_num, ioe->anchor))) {
5534 					PF_RULES_WUNLOCK();
5535 					free(ioes, M_PF);
5536 					goto fail; /* really bad */
5537 				}
5538 				break;
5539 			}
5540 		}
5541 		PF_RULES_WUNLOCK();
5542 
5543 		/* Only hook into Ethernet traffic if we've got rules for it. */
5544 		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
5545 			hook_pf_eth();
5546 		else
5547 			dehook_pf_eth();
5548 
5549 		free(ioes, M_PF);
5550 		break;
5551 	}
5552 
5553 	case DIOCGETSRCNODES: {
5554 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5555 		struct pf_srchash	*sh;
5556 		struct pf_ksrc_node	*n;
5557 		struct pf_src_node	*p, *pstore;
5558 		uint32_t		 i, nr = 0;
5559 
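		/*
		 * First pass only counts nodes to size the reply; a psn_len of
		 * zero is the size probe.  The second pass below copies out at
		 * most psn_len bytes.
		 */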
5560 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5561 				i++, sh++) {
5562 			PF_HASHROW_LOCK(sh);
5563 			LIST_FOREACH(n, &sh->nodes, entry)
5564 				nr++;
5565 			PF_HASHROW_UNLOCK(sh);
5566 		}
5567 
5568 		psn->psn_len = min(psn->psn_len,
5569 		    sizeof(struct pf_src_node) * nr);
5570 
5571 		if (psn->psn_len == 0) {
5572 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5573 			goto fail;
5574 		}
5575 
5576 		nr = 0;
5577 
5578 		p = pstore = malloc(psn->psn_len, M_PF, M_WAITOK | M_ZERO);
5579 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5580 		    i++, sh++) {
5581 		    PF_HASHROW_LOCK(sh);
5582 		    LIST_FOREACH(n, &sh->nodes, entry) {
5583 
5584 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5585 				break;
5586 
5587 			pf_src_node_copy(n, p);
5588 
5589 			p++;
5590 			nr++;
5591 		    }
5592 		    PF_HASHROW_UNLOCK(sh);
5593 		}
5594 		error = copyout(pstore, psn->psn_src_nodes,
5595 		    sizeof(struct pf_src_node) * nr);
5596 		if (error) {
5597 			free(pstore, M_PF);
5598 			goto fail;
5599 		}
5600 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5601 		free(pstore, M_PF);
5602 		break;
5603 	}
5604 
5605 	case DIOCCLRSRCNODES: {
5606 		pf_kill_srcnodes(NULL);
5607 		break;
5608 	}
5609 
5610 	case DIOCKILLSRCNODES:
5611 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5612 		break;
5613 
5614 #ifdef COMPAT_FREEBSD13
5615 	case DIOCKEEPCOUNTERS_FREEBSD13:
5616 #endif
5617 	case DIOCKEEPCOUNTERS:
5618 		error = pf_keepcounters((struct pfioc_nv *)addr);
5619 		break;
5620 
5621 	case DIOCGETSYNCOOKIES:
5622 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5623 		break;
5624 
5625 	case DIOCSETSYNCOOKIES:
5626 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5627 		break;
5628 
5629 	case DIOCSETHOSTID: {
5630 		u_int32_t	*hostid = (u_int32_t *)addr;
5631 
5632 		PF_RULES_WLOCK();
5633 		if (*hostid == 0)
5634 			V_pf_status.hostid = arc4random();
5635 		else
5636 			V_pf_status.hostid = *hostid;
5637 		PF_RULES_WUNLOCK();
5638 		break;
5639 	}
5640 
5641 	case DIOCOSFPFLUSH:
5642 		PF_RULES_WLOCK();
5643 		pf_osfp_flush();
5644 		PF_RULES_WUNLOCK();
5645 		break;
5646 
5647 	case DIOCIGETIFACES: {
5648 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5649 		struct pfi_kif *ifstore;
5650 		size_t bufsiz;
5651 
5652 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5653 			error = ENODEV;
5654 			goto fail;
5655 		}
5656 
5657 		if (io->pfiio_size < 0 ||
5658 		    io->pfiio_size > pf_ioctl_maxcount ||
5659 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5660 			error = EINVAL;
5661 			goto fail;
5662 		}
5663 
5664 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5665 
5666 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5667 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5668 		    M_PF, M_WAITOK | M_ZERO);
5669 
5670 		PF_RULES_RLOCK();
5671 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5672 		PF_RULES_RUNLOCK();
5673 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5674 		free(ifstore, M_PF);
5675 		break;
5676 	}
5677 
5678 	case DIOCSETIFFLAG: {
5679 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5680 
5681 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5682 
5683 		PF_RULES_WLOCK();
5684 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5685 		PF_RULES_WUNLOCK();
5686 		break;
5687 	}
5688 
5689 	case DIOCCLRIFFLAG: {
5690 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5691 
5692 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5693 
5694 		PF_RULES_WLOCK();
5695 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5696 		PF_RULES_WUNLOCK();
5697 		break;
5698 	}
5699 
5700 	case DIOCSETREASS: {
5701 		u_int32_t	*reass = (u_int32_t *)addr;
5702 
5703 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5704 		/* Removing the DF flag without reassembly enabled is not a
5705 		 * valid combination. Disable reassembly in that case. */
5706 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5707 			V_pf_status.reass = 0;
5708 		break;
5709 	}
5710 
5711 	default:
5712 		error = ENODEV;
5713 		break;
5714 	}
5715 fail:
5716 	CURVNET_RESTORE();
5717 
5718 #undef ERROUT_IOCTL
5719 
5720 	return (error);
5721 }
5722 
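/*
 * Serialize a kernel state into the pfsync wire format.  Fields common
 * to both supported message versions are written through the pfs_1301
 * view of the union; version-specific fields are filled in by the
 * switch on msg_version below.
 */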
5723 void
5724 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5725 {
5726 	bzero(sp, sizeof(union pfsync_state_union));
5727 
5728 	/* copy from state key */
5729 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5730 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5731 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5732 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5733 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5734 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5735 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5736 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5737 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5738 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5739 
5740 	/* copy from state */
5741 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5742 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5743 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5744 	sp->pfs_1301.expire = pf_state_expires(st);
5745 	if (sp->pfs_1301.expire <= time_uptime)
5746 		sp->pfs_1301.expire = htonl(0);
5747 	else
5748 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5749 
5750 	sp->pfs_1301.direction = st->direction;
5751 	sp->pfs_1301.log = st->act.log;
5752 	sp->pfs_1301.timeout = st->timeout;
5753 
5754 	switch (msg_version) {
5755 		case PFSYNC_MSG_VERSION_1301:
5756 			sp->pfs_1301.state_flags = st->state_flags;
5757 			break;
5758 		case PFSYNC_MSG_VERSION_1400:
5759 			sp->pfs_1400.state_flags = htons(st->state_flags);
5760 			sp->pfs_1400.qid = htons(st->act.qid);
5761 			sp->pfs_1400.pqid = htons(st->act.pqid);
5762 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5763 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5764 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5765 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5766 			sp->pfs_1400.set_tos = st->act.set_tos;
5767 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5768 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5769 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5770 			sp->pfs_1400.rt = st->act.rt;
5771 			if (st->act.rt_kif)
5772 				strlcpy(sp->pfs_1400.rt_ifname,
5773 				    st->act.rt_kif->pfik_name,
5774 				    sizeof(sp->pfs_1400.rt_ifname));
5775 			break;
5776 		default:
5777 			panic("%s: Unsupported pfsync_msg_version %d",
5778 			    __func__, msg_version);
5779 	}
5780 
5781 	/*
5782 	 * XXX Why do we bother pfsyncing source node information if source
5783 	 * nodes are not synced? Showing users that there is source tracking
5784 	 * when there is none seems useless.
5785 	 */
5786 	if (st->sns[PF_SN_LIMIT] != NULL)
5787 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5788 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5789 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5790 
5791 	sp->pfs_1301.id = st->id;
5792 	sp->pfs_1301.creatorid = st->creatorid;
5793 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5794 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5795 
5796 	if (st->rule == NULL)
5797 		sp->pfs_1301.rule = htonl(-1);
5798 	else
5799 		sp->pfs_1301.rule = htonl(st->rule->nr);
5800 	if (st->anchor == NULL)
5801 		sp->pfs_1301.anchor = htonl(-1);
5802 	else
5803 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5804 	if (st->nat_rule == NULL)
5805 		sp->pfs_1301.nat_rule = htonl(-1);
5806 	else
5807 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5808 
5809 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5810 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5811 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5812 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5813 }
5814 
5815 void
5816 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5817 {
5818 	bzero(sp, sizeof(*sp));
5819 
5820 	sp->version = PF_STATE_VERSION;
5821 
5822 	/* copy from state key */
5823 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5824 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5825 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5826 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5827 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5828 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5829 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5830 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5831 	sp->proto = st->key[PF_SK_WIRE]->proto;
5832 	sp->af = st->key[PF_SK_WIRE]->af;
5833 
5834 	/* copy from state */
5835 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5836 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5837 	    sizeof(sp->orig_ifname));
5838 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
5839 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5840 	sp->expire = pf_state_expires(st);
5841 	if (sp->expire <= time_uptime)
5842 		sp->expire = htonl(0);
5843 	else
5844 		sp->expire = htonl(sp->expire - time_uptime);
5845 
5846 	sp->direction = st->direction;
5847 	sp->log = st->act.log;
5848 	sp->timeout = st->timeout;
5849 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5850 	sp->state_flags_compat = st->state_flags;
5851 	sp->state_flags = htons(st->state_flags);
5852 	if (st->sns[PF_SN_LIMIT] != NULL)
5853 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5854 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5855 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5856 	sp->id = st->id;
5857 	sp->creatorid = st->creatorid;
5858 	pf_state_peer_hton(&st->src, &sp->src);
5859 	pf_state_peer_hton(&st->dst, &sp->dst);
5860 
5861 	if (st->rule == NULL)
5862 		sp->rule = htonl(-1);
5863 	else
5864 		sp->rule = htonl(st->rule->nr);
5865 	if (st->anchor == NULL)
5866 		sp->anchor = htonl(-1);
5867 	else
5868 		sp->anchor = htonl(st->anchor->nr);
5869 	if (st->nat_rule == NULL)
5870 		sp->nat_rule = htonl(-1);
5871 	else
5872 		sp->nat_rule = htonl(st->nat_rule->nr);
5873 
5874 	sp->packets[0] = st->packets[0];
5875 	sp->packets[1] = st->packets[1];
5876 	sp->bytes[0] = st->bytes[0];
5877 	sp->bytes[1] = st->bytes[1];
5878 
5879 	sp->qid = htons(st->act.qid);
5880 	sp->pqid = htons(st->act.pqid);
5881 	sp->dnpipe = htons(st->act.dnpipe);
5882 	sp->dnrpipe = htons(st->act.dnrpipe);
5883 	sp->rtableid = htonl(st->act.rtableid);
5884 	sp->min_ttl = st->act.min_ttl;
5885 	sp->set_tos = st->act.set_tos;
5886 	sp->max_mss = htons(st->act.max_mss);
5887 	sp->rt = st->act.rt;
5888 	if (st->act.rt_kif)
5889 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5890 		    sizeof(sp->rt_ifname));
5891 	sp->set_prio[0] = st->act.set_prio[0];
5892 	sp->set_prio[1] = st->act.set_prio[1];
5893 
5894 }
5895 
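/*
 * Convert a table address wrapper for export to userspace: drop the
 * kernel table pointer and report the address count instead, or -1 if
 * the table is not active.  Inactive tables are resolved through their
 * root table first.
 */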
5896 static void
5897 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5898 {
5899 	struct pfr_ktable *kt;
5900 
5901 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5902 
5903 	kt = aw->p.tbl;
5904 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5905 		kt = kt->pfrkt_root;
5906 	aw->p.tbl = NULL;
5907 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5908 		kt->pfrkt_cnt : -1;
5909 }
5910 
5911 static int
5912 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5913     size_t number, char **names)
5914 {
5915 	nvlist_t        *nvc;
5916 
5917 	nvc = nvlist_create(0);
5918 	if (nvc == NULL)
5919 		return (ENOMEM);
5920 
5921 	for (int i = 0; i < number; i++) {
5922 		nvlist_append_number_array(nvc, "counters",
5923 		    counter_u64_fetch(counters[i]));
5924 		nvlist_append_string_array(nvc, "names",
5925 		    names[i]);
5926 		nvlist_append_number_array(nvc, "ids",
5927 		    i);
5928 	}
5929 	nvlist_add_nvlist(nvl, name, nvc);
5930 	nvlist_destroy(nvc);
5931 
5932 	return (0);
5933 }
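/*
 * Example (sketch): userspace can walk the parallel arrays built above
 * with the nv(9) accessors; "nvl" is assumed to be the unpacked status
 * nvlist returned by DIOCGETSTATUSNV.
 *
 *	size_t n;
 *	const nvlist_t *nvc = nvlist_get_nvlist(nvl, "counters");
 *	const uint64_t *vals = nvlist_get_number_array(nvc, "counters", &n);
 *	const char * const *names = nvlist_get_string_array(nvc, "names", &n);
 *	for (size_t i = 0; i < n; i++)
 *		printf("%s: %ju\n", names[i], (uintmax_t)vals[i]);
 */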
5934 
5935 static int
5936 pf_getstatus(struct pfioc_nv *nv)
5937 {
5938 	nvlist_t        *nvl = NULL, *nvc = NULL;
5939 	void            *nvlpacked = NULL;
5940 	int              error;
5941 	struct pf_status s;
5942 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5943 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5944 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5945 	time_t since;
5946 
5947 	PF_RULES_RLOCK_TRACKER;
5948 
5949 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5950 
5951 	PF_RULES_RLOCK();
5952 
5953 	nvl = nvlist_create(0);
5954 	if (nvl == NULL)
5955 		ERROUT(ENOMEM);
5956 
5957 	since = time_second - (time_uptime - V_pf_status.since);
5958 
5959 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5960 	nvlist_add_number(nvl, "since", since);
5961 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5962 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5963 	nvlist_add_number(nvl, "states", V_pf_status.states);
5964 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5965 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5966 	nvlist_add_bool(nvl, "syncookies_active",
5967 	    V_pf_status.syncookies_active);
5968 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5969 
5970 	/* counters */
5971 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5972 	    PFRES_MAX, pf_reasons);
5973 	if (error != 0)
5974 		ERROUT(error);
5975 
5976 	/* lcounters */
5977 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5978 	    KLCNT_MAX, pf_lcounter);
5979 	if (error != 0)
5980 		ERROUT(error);
5981 
5982 	/* fcounters */
5983 	nvc = nvlist_create(0);
5984 	if (nvc == NULL)
5985 		ERROUT(ENOMEM);
5986 
5987 	for (int i = 0; i < FCNT_MAX; i++) {
5988 		nvlist_append_number_array(nvc, "counters",
5989 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5990 		nvlist_append_string_array(nvc, "names",
5991 		    pf_fcounter[i]);
5992 		nvlist_append_number_array(nvc, "ids",
5993 		    i);
5994 	}
5995 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5996 	nvlist_destroy(nvc);
5997 	nvc = NULL;
5998 
5999 	/* scounters */
6000 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
6001 	    SCNT_MAX, pf_fcounter);
6002 	if (error != 0)
6003 		ERROUT(error);
6004 
6005 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
6006 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
6007 	    PF_MD5_DIGEST_LENGTH);
6008 
6009 	pfi_update_status(V_pf_status.ifname, &s);
6010 
6011 	/* pcounters / bcounters */
6012 	for (int i = 0; i < 2; i++) {
6013 		for (int j = 0; j < 2; j++) {
6014 			for (int k = 0; k < 2; k++) {
6015 				nvlist_append_number_array(nvl, "pcounters",
6016 				    s.pcounters[i][j][k]);
6017 			}
6018 			nvlist_append_number_array(nvl, "bcounters",
6019 			    s.bcounters[i][j]);
6020 		}
6021 	}
6022 
6023 	nvlpacked = nvlist_pack(nvl, &nv->len);
6024 	if (nvlpacked == NULL)
6025 		ERROUT(ENOMEM);
6026 
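	/* A zero-sized request is a size probe: return success with nv->len
	 * already set to the buffer size the caller must supply. */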
6027 	if (nv->size == 0)
6028 		ERROUT(0);
6029 	else if (nv->size < nv->len)
6030 		ERROUT(ENOSPC);
6031 
6032 	PF_RULES_RUNLOCK();
6033 	error = copyout(nvlpacked, nv->data, nv->len);
6034 	goto done;
6035 
6036 #undef ERROUT
6037 errout:
6038 	PF_RULES_RUNLOCK();
6039 done:
6040 	free(nvlpacked, M_NVLIST);
6041 	nvlist_destroy(nvc);
6042 	nvlist_destroy(nvl);
6043 
6044 	return (error);
6045 }
6046 
6047 /*
6048  * XXX - Check for version mismatch!!!
6049  */
6050 static void
6051 pf_clear_all_states(void)
6052 {
6053 	struct epoch_tracker	 et;
6054 	struct pf_kstate	*s;
6055 	u_int i;
6056 
6057 	NET_EPOCH_ENTER(et);
6058 	for (i = 0; i <= V_pf_hashmask; i++) {
6059 		struct pf_idhash *ih = &V_pf_idhash[i];
6060 relock:
6061 		PF_HASHROW_LOCK(ih);
6062 		LIST_FOREACH(s, &ih->states, entry) {
6063 			s->timeout = PFTM_PURGE;
6064 			/* Don't send out individual delete messages. */
6065 			s->state_flags |= PFSTATE_NOSYNC;
6066 			pf_remove_state(s);
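			/* pf_remove_state() drops the hash row lock, so the
			 * bucket has to be rescanned from the top. */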
6067 			goto relock;
6068 		}
6069 		PF_HASHROW_UNLOCK(ih);
6070 	}
6071 	NET_EPOCH_EXIT(et);
6072 }
6073 
6074 static int
6075 pf_clear_tables(void)
6076 {
6077 	struct pfioc_table io;
6078 	int error;
6079 
6080 	bzero(&io, sizeof(io));
6081 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
6082 
6083 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
6084 	    io.pfrio_flags);
6085 
6086 	return (error);
6087 }
6088 
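/*
 * Unlink source nodes matching psnk (all nodes when psnk is NULL),
 * clear any state pointers still referencing them, then free the nodes
 * and report the number killed back to the caller.
 */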
6089 static void
6090 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6091 {
6092 	struct pf_ksrc_node_list	 kill;
6093 	u_int 				 killed;
6094 
6095 	LIST_INIT(&kill);
6096 	for (int i = 0; i <= V_pf_srchashmask; i++) {
6097 		struct pf_srchash *sh = &V_pf_srchash[i];
6098 		struct pf_ksrc_node *sn, *tmp;
6099 
6100 		PF_HASHROW_LOCK(sh);
6101 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6102 			if (psnk == NULL ||
6103 			    (pf_match_addr(psnk->psnk_src.neg,
6104 			      &psnk->psnk_src.addr.v.a.addr,
6105 			      &psnk->psnk_src.addr.v.a.mask,
6106 			      &sn->addr, sn->af) &&
6107 			    pf_match_addr(psnk->psnk_dst.neg,
6108 			      &psnk->psnk_dst.addr.v.a.addr,
6109 			      &psnk->psnk_dst.addr.v.a.mask,
6110 			      &sn->raddr, sn->af))) {
6111 				pf_unlink_src_node(sn);
6112 				LIST_INSERT_HEAD(&kill, sn, entry);
6113 				sn->expire = 1;
6114 			}
6115 		PF_HASHROW_UNLOCK(sh);
6116 	}
6117 
6118 	for (int i = 0; i <= V_pf_hashmask; i++) {
6119 		struct pf_idhash *ih = &V_pf_idhash[i];
6120 		struct pf_kstate *s;
6121 
6122 		PF_HASHROW_LOCK(ih);
6123 		LIST_FOREACH(s, &ih->states, entry) {
6124 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
6125 			    sn_type++) {
6126 				if (s->sns[sn_type] &&
6127 				    s->sns[sn_type]->expire == 1) {
6128 					s->sns[sn_type] = NULL;
6129 				}
6130 			}
6131 		}
6132 		PF_HASHROW_UNLOCK(ih);
6133 	}
6134 
6135 	killed = pf_free_src_nodes(&kill);
6136 
6137 	if (psnk != NULL)
6138 		psnk->psnk_killed = killed;
6139 }
6140 
6141 static int
6142 pf_keepcounters(struct pfioc_nv *nv)
6143 {
6144 	nvlist_t	*nvl = NULL;
6145 	void		*nvlpacked = NULL;
6146 	int		 error = 0;
6147 
6148 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6149 
6150 	if (nv->len > pf_ioctl_maxcount)
6151 		ERROUT(ENOMEM);
6152 
6153 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6154 	error = copyin(nv->data, nvlpacked, nv->len);
6155 	if (error)
6156 		ERROUT(error);
6157 
6158 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6159 	if (nvl == NULL)
6160 		ERROUT(EBADMSG);
6161 
6162 	if (!nvlist_exists_bool(nvl, "keep_counters"))
6163 		ERROUT(EBADMSG);
6164 
6165 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6166 
6167 on_error:
6168 	nvlist_destroy(nvl);
6169 	free(nvlpacked, M_NVLIST);
6170 	return (error);
6171 }
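/*
 * Example (sketch): userspace asks pf to preserve rule counters across
 * the next ruleset replacement.  "dev" is a hypothetical /dev/pf
 * descriptor and "nv" a struct pfioc_nv.
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */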
6172 
6173 unsigned int
6174 pf_clear_states(const struct pf_kstate_kill *kill)
6175 {
6176 	struct pf_state_key_cmp	 match_key;
6177 	struct pf_kstate	*s;
6178 	struct pfi_kkif	*kif;
6179 	int		 idx;
6180 	unsigned int	 killed = 0, dir;
6181 
6182 	NET_EPOCH_ASSERT();
6183 
6184 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6185 		struct pf_idhash *ih = &V_pf_idhash[i];
6186 
6187 relock_DIOCCLRSTATES:
6188 		PF_HASHROW_LOCK(ih);
6189 		LIST_FOREACH(s, &ih->states, entry) {
6190 			/* For floating states look at the original kif. */
6191 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6192 
6193 			if (kill->psk_ifname[0] &&
6194 			    strcmp(kill->psk_ifname,
6195 			    kif->pfik_name))
6196 				continue;
6197 
6198 			if (kill->psk_kill_match) {
6199 				bzero(&match_key, sizeof(match_key));
6200 
6201 				if (s->direction == PF_OUT) {
6202 					dir = PF_IN;
6203 					idx = PF_SK_STACK;
6204 				} else {
6205 					dir = PF_OUT;
6206 					idx = PF_SK_WIRE;
6207 				}
6208 
6209 				match_key.af = s->key[idx]->af;
6210 				match_key.proto = s->key[idx]->proto;
6211 				pf_addrcpy(&match_key.addr[0],
6212 				    &s->key[idx]->addr[1], match_key.af);
6213 				match_key.port[0] = s->key[idx]->port[1];
6214 				pf_addrcpy(&match_key.addr[1],
6215 				    &s->key[idx]->addr[0], match_key.af);
6216 				match_key.port[1] = s->key[idx]->port[0];
6217 			}
6218 
6219 			/*
6220 			 * Don't send out individual
6221 			 * delete messages.
6222 			 */
6223 			s->state_flags |= PFSTATE_NOSYNC;
6224 			pf_remove_state(s);
6225 			killed++;
6226 
6227 			if (kill->psk_kill_match)
6228 				killed += pf_kill_matching_state(&match_key,
6229 				    dir);
6230 
6231 			goto relock_DIOCCLRSTATES;
6232 		}
6233 		PF_HASHROW_UNLOCK(ih);
6234 	}
6235 
6236 	if (V_pfsync_clear_states_ptr != NULL)
6237 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6238 
6239 	return (killed);
6240 }
6241 
6242 void
6243 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6244 {
6245 	struct pf_kstate	*s;
6246 
6247 	NET_EPOCH_ASSERT();
6248 	if (kill->psk_pfcmp.id) {
6249 		if (kill->psk_pfcmp.creatorid == 0)
6250 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6251 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6252 		    kill->psk_pfcmp.creatorid))) {
6253 			pf_remove_state(s);
6254 			*killed = 1;
6255 		}
6256 		return;
6257 	}
6258 
6259 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6260 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6261 }
6262 
6263 static int
6264 pf_killstates_nv(struct pfioc_nv *nv)
6265 {
6266 	struct pf_kstate_kill	 kill;
6267 	struct epoch_tracker	 et;
6268 	nvlist_t		*nvl = NULL;
6269 	void			*nvlpacked = NULL;
6270 	int			 error = 0;
6271 	unsigned int		 killed = 0;
6272 
6273 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6274 
6275 	if (nv->len > pf_ioctl_maxcount)
6276 		ERROUT(ENOMEM);
6277 
6278 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6279 	error = copyin(nv->data, nvlpacked, nv->len);
6280 	if (error)
6281 		ERROUT(error);
6282 
6283 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6284 	if (nvl == NULL)
6285 		ERROUT(EBADMSG);
6286 
6287 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6288 	if (error)
6289 		ERROUT(error);
6290 
6291 	NET_EPOCH_ENTER(et);
6292 	pf_killstates(&kill, &killed);
6293 	NET_EPOCH_EXIT(et);
6294 
6295 	free(nvlpacked, M_NVLIST);
6296 	nvlpacked = NULL;
6297 	nvlist_destroy(nvl);
6298 	nvl = nvlist_create(0);
6299 	if (nvl == NULL)
6300 		ERROUT(ENOMEM);
6301 
6302 	nvlist_add_number(nvl, "killed", killed);
6303 
6304 	nvlpacked = nvlist_pack(nvl, &nv->len);
6305 	if (nvlpacked == NULL)
6306 		ERROUT(ENOMEM);
6307 
6308 	if (nv->size == 0)
6309 		ERROUT(0);
6310 	else if (nv->size < nv->len)
6311 		ERROUT(ENOSPC);
6312 
6313 	error = copyout(nvlpacked, nv->data, nv->len);
6314 
6315 on_error:
6316 	nvlist_destroy(nvl);
6317 	free(nvlpacked, M_NVLIST);
6318 	return (error);
6319 }
6320 
6321 static int
6322 pf_clearstates_nv(struct pfioc_nv *nv)
6323 {
6324 	struct pf_kstate_kill	 kill;
6325 	struct epoch_tracker	 et;
6326 	nvlist_t		*nvl = NULL;
6327 	void			*nvlpacked = NULL;
6328 	int			 error = 0;
6329 	unsigned int		 killed;
6330 
6331 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6332 
6333 	if (nv->len > pf_ioctl_maxcount)
6334 		ERROUT(ENOMEM);
6335 
6336 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6337 	error = copyin(nv->data, nvlpacked, nv->len);
6338 	if (error)
6339 		ERROUT(error);
6340 
6341 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6342 	if (nvl == NULL)
6343 		ERROUT(EBADMSG);
6344 
6345 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6346 	if (error)
6347 		ERROUT(error);
6348 
6349 	NET_EPOCH_ENTER(et);
6350 	killed = pf_clear_states(&kill);
6351 	NET_EPOCH_EXIT(et);
6352 
6353 	free(nvlpacked, M_NVLIST);
6354 	nvlpacked = NULL;
6355 	nvlist_destroy(nvl);
6356 	nvl = nvlist_create(0);
6357 	if (nvl == NULL)
6358 		ERROUT(ENOMEM);
6359 
6360 	nvlist_add_number(nvl, "killed", killed);
6361 
6362 	nvlpacked = nvlist_pack(nvl, &nv->len);
6363 	if (nvlpacked == NULL)
6364 		ERROUT(ENOMEM);
6365 
6366 	if (nv->size == 0)
6367 		ERROUT(0);
6368 	else if (nv->size < nv->len)
6369 		ERROUT(ENOSPC);
6370 
6371 	error = copyout(nvlpacked, nv->data, nv->len);
6372 
6373 #undef ERROUT
6374 on_error:
6375 	nvlist_destroy(nvl);
6376 	free(nvlpacked, M_NVLIST);
6377 	return (error);
6378 }
6379 
6380 static int
6381 pf_getstate(struct pfioc_nv *nv)
6382 {
6383 	nvlist_t		*nvl = NULL, *nvls;
6384 	void			*nvlpacked = NULL;
6385 	struct pf_kstate	*s = NULL;
6386 	int			 error = 0;
6387 	uint64_t		 id, creatorid;
6388 
6389 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6390 
6391 	if (nv->len > pf_ioctl_maxcount)
6392 		ERROUT(ENOMEM);
6393 
6394 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6395 	error = copyin(nv->data, nvlpacked, nv->len);
6396 	if (error)
6397 		ERROUT(error);
6398 
6399 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6400 	if (nvl == NULL)
6401 		ERROUT(EBADMSG);
6402 
6403 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6404 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6405 
6406 	s = pf_find_state_byid(id, creatorid);
6407 	if (s == NULL)
6408 		ERROUT(ENOENT);
6409 
6410 	free(nvlpacked, M_NVLIST);
6411 	nvlpacked = NULL;
6412 	nvlist_destroy(nvl);
6413 	nvl = nvlist_create(0);
6414 	if (nvl == NULL)
6415 		ERROUT(ENOMEM);
6416 
6417 	nvls = pf_state_to_nvstate(s);
6418 	if (nvls == NULL)
6419 		ERROUT(ENOMEM);
6420 
6421 	nvlist_add_nvlist(nvl, "state", nvls);
6422 	nvlist_destroy(nvls);
6423 
6424 	nvlpacked = nvlist_pack(nvl, &nv->len);
6425 	if (nvlpacked == NULL)
6426 		ERROUT(ENOMEM);
6427 
6428 	if (nv->size == 0)
6429 		ERROUT(0);
6430 	else if (nv->size < nv->len)
6431 		ERROUT(ENOSPC);
6432 
6433 	error = copyout(nvlpacked, nv->data, nv->len);
6434 
6435 #undef ERROUT
6436 errout:
6437 	if (s != NULL)
6438 		PF_STATE_UNLOCK(s);
6439 	free(nvlpacked, M_NVLIST);
6440 	nvlist_destroy(nvl);
6441 	return (error);
6442 }
6443 
6444 /*
6445  * XXX - Check for version mismatch!!!
6446  */
6447 
6448 /*
6449  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6450  */
6451 static int
6452 shutdown_pf(void)
6453 {
6454 	int error = 0;
6455 	u_int32_t t[5];
6456 	char nn = '\0';
6457 	struct pf_kanchor *anchor, *tmp_anchor;
6458 	struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
6459 	int rs_num;
6460 
6461 	do {
6462 		/* Unlink rules of all user defined anchors */
6463 		RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
6464 		    tmp_anchor) {
6465 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6466 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6467 				    anchor->path)) != 0) {
6468 					DPFPRINTF(PF_DEBUG_MISC, "%s: "
6469 					    "anchor.path=%s rs_num=%d",
6470 					    __func__, anchor->path, rs_num);
6471 					goto error;	/* XXX: rollback? */
6472 				}
6473 			}
6474 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6475 				error = pf_commit_rules(t[rs_num], rs_num,
6476 				    anchor->path);
6477 				MPASS(error == 0);
6478 			}
6479 		}
6480 
6481 		/* Unlink rules of all user defined ether anchors */
6482 		RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
6483 		    &V_pf_keth_anchors, tmp_eth_anchor) {
6484 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6485 			    != 0) {
6486 				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
6487 				    "anchor.path=%s", __func__,
6488 				    eth_anchor->path);
6489 				goto error;
6490 			}
6491 			error = pf_commit_eth(t[0], eth_anchor->path);
6492 			MPASS(error == 0);
6493 		}
6494 
6495 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6496 		    != 0) {
6497 			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
6498 			break;
6499 		}
6500 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6501 		    != 0) {
6502 			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
6503 			break;		/* XXX: rollback? */
6504 		}
6505 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6506 		    != 0) {
6507 			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
6508 			break;		/* XXX: rollback? */
6509 		}
6510 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6511 		    != 0) {
6512 			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
6513 			break;		/* XXX: rollback? */
6514 		}
6515 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6516 		    != 0) {
6517 			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
6518 			break;		/* XXX: rollback? */
6519 		}
6520 
6521 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6522 		MPASS(error == 0);
6523 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6524 		MPASS(error == 0);
6525 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6526 		MPASS(error == 0);
6527 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6528 		MPASS(error == 0);
6529 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6530 		MPASS(error == 0);
6531 
6532 		if ((error = pf_clear_tables()) != 0)
6533 			break;
6534 
6535 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6536 			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
6537 			break;
6538 		}
6539 		error = pf_commit_eth(t[0], &nn);
6540 		MPASS(error == 0);
6541 
6542 #ifdef ALTQ
6543 		if ((error = pf_begin_altq(&t[0])) != 0) {
6544 			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
6545 			break;
6546 		}
6547 		pf_commit_altq(t[0]);
6548 #endif
6549 
6550 		pf_clear_all_states();
6551 
6552 		pf_kill_srcnodes(NULL);
6553 
6554 		/* status does not use malloced mem so no need to clean up */
6555 		/* fingerprints and interfaces have their own cleanup code */
6556 	} while (0);
6557 
6558 error:
6559 	return (error);
6560 }
6561 
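/*
 * Map a pf verdict onto a pfil(9) verdict.  A PF_PASS that left no mbuf
 * behind means pf took ownership of the packet (PFIL_CONSUMED); any
 * other verdict frees the mbuf, if present, and reports PFIL_DROPPED.
 */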
6562 static pfil_return_t
6563 pf_check_return(int chk, struct mbuf **m)
6564 {
6565 
6566 	switch (chk) {
6567 	case PF_PASS:
6568 		if (*m == NULL)
6569 			return (PFIL_CONSUMED);
6570 		else
6571 			return (PFIL_PASS);
6572 		break;
6573 	default:
6574 		if (*m != NULL) {
6575 			m_freem(*m);
6576 			*m = NULL;
6577 		}
6578 		return (PFIL_DROPPED);
6579 	}
6580 }
6581 
6582 static pfil_return_t
6583 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6584     void *ruleset __unused, struct inpcb *inp)
6585 {
6586 	int chk;
6587 
6588 	CURVNET_ASSERT_SET();
6589 
6590 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6591 
6592 	return (pf_check_return(chk, m));
6593 }
6594 
6595 static pfil_return_t
6596 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6597     void *ruleset __unused, struct inpcb *inp)
6598 {
6599 	int chk;
6600 
6601 	CURVNET_ASSERT_SET();
6602 
6603 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6604 
6605 	return (pf_check_return(chk, m));
6606 }
6607 
6608 #ifdef INET
6609 static pfil_return_t
6610 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6611     void *ruleset __unused, struct inpcb *inp)
6612 {
6613 	int chk;
6614 
6615 	CURVNET_ASSERT_SET();
6616 
6617 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6618 
6619 	return (pf_check_return(chk, m));
6620 }
6621 
6622 static pfil_return_t
6623 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6624     void *ruleset __unused, struct inpcb *inp)
6625 {
6626 	int chk;
6627 
6628 	CURVNET_ASSERT_SET();
6629 
6630 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6631 
6632 	return (pf_check_return(chk, m));
6633 }
6634 #endif
6635 
6636 #ifdef INET6
6637 static pfil_return_t
6638 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6639     void *ruleset __unused, struct inpcb *inp)
6640 {
6641 	int chk;
6642 
6643 	CURVNET_ASSERT_SET();
6644 
6645 	/*
6646 	 * In case of loopback traffic IPv6 uses the real interface in
6647 	 * order to support scoped addresses. In order to support stateful
6648 	 * filtering we have to change this to lo0, as is the case in IPv4.
6649 	 */
6650 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6651 	    m, inp, NULL);
6652 
6653 	return (pf_check_return(chk, m));
6654 }
6655 
6656 static pfil_return_t
6657 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6658     void *ruleset __unused, struct inpcb *inp)
6659 {
6660 	int chk;
6661 
6662 	CURVNET_ASSERT_SET();
6663 
6664 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6665 
6666 	return (pf_check_return(chk, m));
6667 }
6668 #endif /* INET6 */
6669 
6670 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6671 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6672 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6673 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6674 
6675 #ifdef INET
6676 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6677 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6678 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6679 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6680 #endif
6681 #ifdef INET6
6682 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6683 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6684 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6685 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6686 #endif
6687 
6688 static void
6689 hook_pf_eth(void)
6690 {
6691 	struct pfil_hook_args pha = {
6692 		.pa_version = PFIL_VERSION,
6693 		.pa_modname = "pf",
6694 		.pa_type = PFIL_TYPE_ETHERNET,
6695 	};
6696 	struct pfil_link_args pla = {
6697 		.pa_version = PFIL_VERSION,
6698 	};
6699 	int ret __diagused;
6700 
6701 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6702 		return;
6703 
6704 	pha.pa_mbuf_chk = pf_eth_check_in;
6705 	pha.pa_flags = PFIL_IN;
6706 	pha.pa_rulname = "eth-in";
6707 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6708 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6709 	pla.pa_head = V_link_pfil_head;
6710 	pla.pa_hook = V_pf_eth_in_hook;
6711 	ret = pfil_link(&pla);
6712 	MPASS(ret == 0);
6713 	pha.pa_mbuf_chk = pf_eth_check_out;
6714 	pha.pa_flags = PFIL_OUT;
6715 	pha.pa_rulname = "eth-out";
6716 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6717 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6718 	pla.pa_head = V_link_pfil_head;
6719 	pla.pa_hook = V_pf_eth_out_hook;
6720 	ret = pfil_link(&pla);
6721 	MPASS(ret == 0);
6722 
6723 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6724 }
6725 
6726 static void
6727 hook_pf(void)
6728 {
6729 	struct pfil_hook_args pha = {
6730 		.pa_version = PFIL_VERSION,
6731 		.pa_modname = "pf",
6732 	};
6733 	struct pfil_link_args pla = {
6734 		.pa_version = PFIL_VERSION,
6735 	};
6736 	int ret __diagused;
6737 
6738 	if (atomic_load_bool(&V_pf_pfil_hooked))
6739 		return;
6740 
6741 #ifdef INET
6742 	pha.pa_type = PFIL_TYPE_IP4;
6743 	pha.pa_mbuf_chk = pf_check_in;
6744 	pha.pa_flags = PFIL_IN;
6745 	pha.pa_rulname = "default-in";
6746 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6747 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6748 	pla.pa_head = V_inet_pfil_head;
6749 	pla.pa_hook = V_pf_ip4_in_hook;
6750 	ret = pfil_link(&pla);
6751 	MPASS(ret == 0);
6752 	pha.pa_mbuf_chk = pf_check_out;
6753 	pha.pa_flags = PFIL_OUT;
6754 	pha.pa_rulname = "default-out";
6755 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6756 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6757 	pla.pa_head = V_inet_pfil_head;
6758 	pla.pa_hook = V_pf_ip4_out_hook;
6759 	ret = pfil_link(&pla);
6760 	MPASS(ret == 0);
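	/*
	 * With pf_filter_local set, link the same output hook to the
	 * "local" pfil head as well, so pf's output rules also run there.
	 */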
6761 	if (V_pf_filter_local) {
6762 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6763 		pla.pa_head = V_inet_local_pfil_head;
6764 		pla.pa_hook = V_pf_ip4_out_hook;
6765 		ret = pfil_link(&pla);
6766 		MPASS(ret == 0);
6767 	}
6768 #endif
6769 #ifdef INET6
6770 	pha.pa_type = PFIL_TYPE_IP6;
6771 	pha.pa_mbuf_chk = pf_check6_in;
6772 	pha.pa_flags = PFIL_IN;
6773 	pha.pa_rulname = "default-in6";
6774 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6775 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6776 	pla.pa_head = V_inet6_pfil_head;
6777 	pla.pa_hook = V_pf_ip6_in_hook;
6778 	ret = pfil_link(&pla);
6779 	MPASS(ret == 0);
6780 	pha.pa_mbuf_chk = pf_check6_out;
6781 	pha.pa_rulname = "default-out6";
6782 	pha.pa_flags = PFIL_OUT;
6783 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6784 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6785 	pla.pa_head = V_inet6_pfil_head;
6786 	pla.pa_hook = V_pf_ip6_out_hook;
6787 	ret = pfil_link(&pla);
6788 	MPASS(ret == 0);
6789 	if (V_pf_filter_local) {
6790 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6791 		pla.pa_head = V_inet6_local_pfil_head;
6792 		pla.pa_hook = V_pf_ip6_out_hook;
6793 		ret = pfil_link(&pla);
6794 		MPASS(ret == 0);
6795 	}
6796 #endif
6797 
6798 	atomic_store_bool(&V_pf_pfil_hooked, true);
6799 }
6800 
6801 static void
6802 dehook_pf_eth(void)
6803 {
6804 
6805 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6806 		return;
6807 
6808 	pfil_remove_hook(V_pf_eth_in_hook);
6809 	pfil_remove_hook(V_pf_eth_out_hook);
6810 
6811 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6812 }
6813 
6814 static void
6815 dehook_pf(void)
6816 {
6817 
6818 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6819 		return;
6820 
6821 #ifdef INET
6822 	pfil_remove_hook(V_pf_ip4_in_hook);
6823 	pfil_remove_hook(V_pf_ip4_out_hook);
6824 #endif
6825 #ifdef INET6
6826 	pfil_remove_hook(V_pf_ip6_in_hook);
6827 	pfil_remove_hook(V_pf_ip6_out_hook);
6828 #endif
6829 
6830 	atomic_store_bool(&V_pf_pfil_hooked, false);
6831 }
6832 
6833 static void
6834 pf_load_vnet(void)
6835 {
6836 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6837 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6838 
6839 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6840 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6841 
6842 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6843 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6844 #ifdef ALTQ
6845 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6846 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6847 #endif
6848 
6849 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6850 
6851 	pfattach_vnet();
6852 	V_pf_vnet_active = 1;
6853 }
6854 
6855 static int
6856 pf_load(void)
6857 {
6858 	int error;
6859 
6860 	sx_init(&pf_end_lock, "pf end thread");
6861 
6862 	pf_mtag_initialize();
6863 
6864 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6865 	if (pf_dev == NULL)
6866 		return (ENOMEM);
6867 
6868 	pf_end_threads = 0;
6869 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6870 	if (error != 0)
6871 		return (error);
6872 
6873 	pfi_initialize();
6874 
6875 	return (0);
6876 }
6877 
6878 static void
6879 pf_unload_vnet(void)
6880 {
6881 	int ret __diagused;
6882 
6883 	V_pf_vnet_active = 0;
6884 	V_pf_status.running = 0;
6885 	dehook_pf();
6886 	dehook_pf_eth();
6887 
6888 	PF_RULES_WLOCK();
6889 	pf_syncookies_cleanup();
6890 	shutdown_pf();
6891 	PF_RULES_WUNLOCK();
6892 
6893 	ret = swi_remove(V_pf_swi_cookie);
6894 	MPASS(ret == 0);
6895 	ret = intr_event_destroy(V_pf_swi_ie);
6896 	MPASS(ret == 0);
6897 
6898 	pf_unload_vnet_purge();
6899 
6900 	pf_normalize_cleanup();
6901 	PF_RULES_WLOCK();
6902 	pfi_cleanup_vnet();
6903 	PF_RULES_WUNLOCK();
6904 	pfr_cleanup();
6905 	pf_osfp_flush();
6906 	pf_cleanup();
6907 	if (IS_DEFAULT_VNET(curvnet))
6908 		pf_mtag_cleanup();
6909 
6910 	pf_cleanup_tagset(&V_pf_tags);
6911 #ifdef ALTQ
6912 	pf_cleanup_tagset(&V_pf_qids);
6913 #endif
6914 	uma_zdestroy(V_pf_tag_z);
6915 
6916 #ifdef PF_WANT_32_TO_64_COUNTER
6917 	PF_RULES_WLOCK();
6918 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6919 
6920 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6921 	MPASS(V_pf_allkifcount == 0);
6922 
6923 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6924 	V_pf_allrulecount--;
6925 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6926 
6927 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6928 	MPASS(V_pf_allrulecount == 0);
6929 
6930 	PF_RULES_WUNLOCK();
6931 
6932 	free(V_pf_kifmarker, PFI_MTYPE);
6933 	free(V_pf_rulemarker, M_PFRULE);
6934 #endif
6935 
6936 	/* Free counters last as we updated them during shutdown. */
6937 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6938 	for (int i = 0; i < 2; i++) {
6939 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6940 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6941 	}
6942 	counter_u64_free(V_pf_default_rule.states_cur);
6943 	counter_u64_free(V_pf_default_rule.states_tot);
6944 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
6945 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
6946 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6947 
6948 	for (int i = 0; i < PFRES_MAX; i++)
6949 		counter_u64_free(V_pf_status.counters[i]);
6950 	for (int i = 0; i < KLCNT_MAX; i++)
6951 		counter_u64_free(V_pf_status.lcounters[i]);
6952 	for (int i = 0; i < FCNT_MAX; i++)
6953 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6954 	for (int i = 0; i < SCNT_MAX; i++)
6955 		counter_u64_free(V_pf_status.scounters[i]);
6956 	for (int i = 0; i < NCNT_MAX; i++)
6957 		counter_u64_free(V_pf_status.ncounters[i]);
6958 
6959 	rm_destroy(&V_pf_rules_lock);
6960 	sx_destroy(&V_pf_ioctl_lock);
6961 }
6962 
6963 static void
6964 pf_unload(void)
6965 {
6966 
6967 	sx_xlock(&pf_end_lock);
6968 	pf_end_threads = 1;
6969 	while (pf_end_threads < 2) {
6970 		wakeup_one(pf_purge_thread);
6971 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6972 	}
6973 	sx_xunlock(&pf_end_lock);
6974 
6975 	pf_nl_unregister();
6976 
6977 	if (pf_dev != NULL)
6978 		destroy_dev(pf_dev);
6979 
6980 	pfi_cleanup();
6981 
6982 	sx_destroy(&pf_end_lock);
6983 }
6984 
6985 static void
6986 vnet_pf_init(void *unused __unused)
6987 {
6988 
6989 	pf_load_vnet();
6990 }
6991 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6992     vnet_pf_init, NULL);
6993 
6994 static void
6995 vnet_pf_uninit(const void *unused __unused)
6996 {
6997 
6998 	pf_unload_vnet();
6999 }
7000 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
7001 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
7002     vnet_pf_uninit, NULL);
7003 
7004 static int
7005 pf_modevent(module_t mod, int type, void *data)
7006 {
7007 	int error = 0;
7008 
7009 	switch (type) {
7010 	case MOD_LOAD:
7011 		error = pf_load();
7012 		pf_nl_register();
7013 		break;
7014 	case MOD_UNLOAD:
7015 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
7016 		 * the vnet_pf_uninit()s */
7017 		break;
7018 	default:
7019 		error = EINVAL;
7020 		break;
7021 	}
7022 
7023 	return (error);
7024 }
7025 
7026 static moduledata_t pf_mod = {
7027 	"pf",
7028 	pf_modevent,
7029 	0
7030 };
7031 
7032 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
7033 MODULE_DEPEND(pf, netlink, 1, 1, 1);
7034 MODULE_DEPEND(pf, crypto, 1, 1, 1);
7035 MODULE_VERSION(pf, PF_MODVER);
7036