/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nl.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int             pf_krule_compare(struct pf_krule *,
				    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
MALLOC_DEFINE(M_PF, "pf", "pf(4)");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

#ifdef PF_DEFAULT_TO_DROP
VNET_DEFINE_STATIC(bool, default_to_drop) = true;
#else
VNET_DEFINE_STATIC(bool, default_to_drop);
#endif
#define	V_default_to_drop VNET(default_to_drop)
SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
    &VNET_NAME(default_to_drop), false,
    "Make the default rule drop all packets.");

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

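/*
 * Per-vnet initialization: set up status, limits, the default rule and its
 * counters, the default timeouts and the "pf send" software interrupt.
 */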
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
	V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
	V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
	V_pf_default_rule.nr = (uint32_t)-1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled by
	 * default. In FreeBSD we expect people to still use scrub rules and
	 * switch to the new syntax later. Only when they switch must they
	 * explicitly enable reassembly. We could change the default once the
	 * scrub rule functionality is removed some day in the future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

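/*
 * Look up the NAT, redirect or route address pool of a rule, identified by
 * anchor path, ruleset ticket, rule action and rule number.  With r_last
 * set, the last rule in the queue is used instead of the numbered one.
 */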
static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket, int which)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	switch (which) {
	case PF_RDR:
		return (&rule->rdr);
	case PF_NAT:
		return (&rule->nat);
	case PF_RT:
		return (&rule->route);
	default:
		panic("Unknown pool type %d", which);
	}
}

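/* Move all pool addresses from poola to the tail of poolb. */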
static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

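/* Release all references held by the pool addresses in poola and free them. */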
static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

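/*
 * Remove a rule from its queue and place it on the list of unlinked rules,
 * from where it is garbage collected once the last reference is gone.
 */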
static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

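/* Release all references held by an Ethernet rule and free it. */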
static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	if (rule->rcv_kif)
		pfi_kkif_unref(rule->rcv_kif);
	pf_remove_kanchor(rule);
	pf_empty_kpool(&rule->rdr.list);
	pf_empty_kpool(&rule->nat.list);
	pf_empty_kpool(&rule->route.list);

	pf_krule_free(rule);
}

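/*
 * Initialize a tag set.  The tunable hash size must be a power of two;
 * otherwise the default is used.  All tag IDs start out available.
 */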
static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

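/*
 * Return the tag for the given name, allocating a new one with reference
 * count 1 if the name is not yet known.  Returns 0 if no tag ID or no
 * memory is available.
 */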
static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

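/*
 * Begin a transaction on the inactive Ethernet ruleset: purge leftover
 * inactive rules and hand out a new ticket.
 */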
static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

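/*
 * Compute the skip steps for an Ethernet ruleset: for each rule and each
 * match criterion, record the next rule that differs in that criterion,
 * so evaluation can skip over rules that cannot match.
 */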
static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

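/*
 * Swap the inactive Ethernet ruleset into place and roll back the (now
 * inactive) previous one.
 */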
static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	return (pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : ""));
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

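/*
 * Begin an ALTQ transaction: purge the old inactive queue lists and hand
 * out a new ticket.
 */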
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

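/*
 * Swap the inactive ALTQ lists into place, attach and enable the new
 * disciplines and detach, disable and free the old ones.
 */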
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it was overridden by a
	 * new one; in that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
				IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_PF, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_PF);
}

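/*
 * Begin a ruleset transaction: allocate a fresh rule lookup tree, purge
 * leftover inactive rules and hand out a new ticket.
 */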
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		pf_rule_tree_free(tree);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
			    strlen(PF_OPTIMIZER_TABLE_PFX)))
				PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

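/*
 * Feed all match-relevant fields of a rule into an MD5 context.  The
 * resulting digest identifies the rule for pfsync matching and for
 * carrying counters across ruleset reloads.
 */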
static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX		ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

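/*
 * Swap the inactive ruleset into place.  For the main ruleset the pfsync
 * checksum is recalculated first; with keep_counters set, counters of
 * unchanged rules are carried over from the old ruleset.
 */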
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global  *old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_rollup_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_rollup_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_rollup_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_rollup_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_rollup_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	pf_rule_tree_free(old_tree);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.rcount) {
			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
			    entries) {
				pf_hash_rule_rolling(&ctx, rule);
			}
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

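/*
 * Export a kernel source node to its userland representation, converting
 * counters and absolute times to relative values.
 */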
static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule != NULL)
		out->rule.nr = in->rule->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	out->conn_rate.limit = in->conn_rate.limit;
	out->conn_rate.seconds = in->conn_rate.seconds;
	/* If there's no limit there's no counter_rate. */
	if (in->conn_rate.cr != NULL)
		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
			    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1:	{
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

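/* Return the n-th queue in the active ALTQ lists, or NULL if out of range. */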
1772 static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)1773 pf_altq_get_nth_active(u_int32_t n)
1774 {
1775 	struct pf_altq		*altq;
1776 	u_int32_t		 nr;
1777 
1778 	nr = 0;
1779 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1780 		if (nr == n)
1781 			return (altq);
1782 		nr++;
1783 	}
1784 
1785 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1786 		if (nr == n)
1787 			return (altq);
1788 		nr++;
1789 	}
1790 
1791 	return (NULL);
1792 }
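
/*
 * Note on indexing: the two active lists above form one flat index space,
 * interface attachments (V_pf_altq_ifs_active) first, then the queues
 * themselves (V_pf_altqs_active).  Callers enumerate all active entries
 * simply by counting up until NULL comes back, e.g.:
 *
 *	for (n = 0; (altq = pf_altq_get_nth_active(n)) != NULL; n++)
 *		...
 */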
1793 #endif /* ALTQ */
1794 
1795 struct pf_krule *
1796 pf_krule_alloc(void)
1797 {
1798 	struct pf_krule *rule;
1799 
1800 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1801 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1802 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1803 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1804 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1805 	    M_WAITOK | M_ZERO);
1806 	return (rule);
1807 }
1808 
1809 void
1810 pf_krule_free(struct pf_krule *rule)
1811 {
1812 #ifdef PF_WANT_32_TO_64_COUNTER
1813 	bool wowned;
1814 #endif
1815 
1816 	if (rule == NULL)
1817 		return;
1818 
1819 #ifdef PF_WANT_32_TO_64_COUNTER
1820 	if (rule->allrulelinked) {
1821 		wowned = PF_RULES_WOWNED();
1822 		if (!wowned)
1823 			PF_RULES_WLOCK();
1824 		LIST_REMOVE(rule, allrulelist);
1825 		V_pf_allrulecount--;
1826 		if (!wowned)
1827 			PF_RULES_WUNLOCK();
1828 	}
1829 #endif
1830 
1831 	pf_counter_u64_deinit(&rule->evaluations);
1832 	for (int i = 0; i < 2; i++) {
1833 		pf_counter_u64_deinit(&rule->packets[i]);
1834 		pf_counter_u64_deinit(&rule->bytes[i]);
1835 	}
1836 	counter_u64_free(rule->states_cur);
1837 	counter_u64_free(rule->states_tot);
1838 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
1839 		counter_u64_free(rule->src_nodes[sn_type]);
1840 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1841 
1842 	mtx_destroy(&rule->nat.mtx);
1843 	mtx_destroy(&rule->rdr.mtx);
1844 	mtx_destroy(&rule->route.mtx);
1845 	free(rule, M_PFRULE);
1846 }
1847 
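/*
 * Ownership sketch: pf_krule_alloc() returns a zeroed rule with only its
 * pool mutexes and per-CPU timestamp storage set up; counters, kif
 * references and pool lists are filled in later.  pf_ioctl_addrule()
 * takes ownership and calls pf_krule_free() itself on failure, so the
 * nvlist/netlink consumers in this file follow this pattern:
 *
 *	rule = pf_krule_alloc();
 *	error = pf_nvrule_to_krule(nvl, rule);		// populate
 *	if (error == 0)
 *		error = pf_ioctl_addrule(rule, ...);	// frees on error
 *	else
 *		pf_krule_free(rule);
 */
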
1848 void
1849 pf_krule_clear_counters(struct pf_krule *rule)
1850 {
1851 	pf_counter_u64_zero(&rule->evaluations);
1852 	for (int i = 0; i < 2; i++) {
1853 		pf_counter_u64_zero(&rule->packets[i]);
1854 		pf_counter_u64_zero(&rule->bytes[i]);
1855 	}
1856 	counter_u64_zero(rule->states_tot);
1857 }
1858 
1859 static void
1860 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1861     struct pf_pooladdr *pool)
1862 {
1863 
1864 	bzero(pool, sizeof(*pool));
1865 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1866 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1867 }
1868 
1869 static int
1870 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1871     struct pf_kpooladdr *kpool)
1872 {
1873 	int ret;
1874 
1875 	bzero(kpool, sizeof(*kpool));
1876 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1877 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1878 	    sizeof(kpool->ifname));
1879 	return (ret);
1880 }
1881 
1882 static void
1883 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1884 {
1885 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1886 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1887 
1888 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1889 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1890 
1891 	kpool->tblidx = pool->tblidx;
1892 	kpool->proxy_port[0] = pool->proxy_port[0];
1893 	kpool->proxy_port[1] = pool->proxy_port[1];
1894 	kpool->opts = pool->opts;
1895 }
1896 
1897 static int
1898 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1899 {
1900 	int ret;
1901 
1902 #ifndef INET
1903 	if (rule->af == AF_INET) {
1904 		return (EAFNOSUPPORT);
1905 	}
1906 #endif /* INET */
1907 #ifndef INET6
1908 	if (rule->af == AF_INET6) {
1909 		return (EAFNOSUPPORT);
1910 	}
1911 #endif /* INET6 */
1912 
1913 	ret = pf_check_rule_addr(&rule->src);
1914 	if (ret != 0)
1915 		return (ret);
1916 	ret = pf_check_rule_addr(&rule->dst);
1917 	if (ret != 0)
1918 		return (ret);
1919 
1920 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1921 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1922 
1923 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1924 	if (ret != 0)
1925 		return (ret);
1926 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1927 	if (ret != 0)
1928 		return (ret);
1929 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1930 	if (ret != 0)
1931 		return (ret);
1932 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1933 	if (ret != 0)
1934 		return (ret);
1935 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1936 	    sizeof(rule->tagname));
1937 	if (ret != 0)
1938 		return (ret);
1939 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1940 	    sizeof(rule->match_tagname));
1941 	if (ret != 0)
1942 		return (ret);
1943 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1944 	    sizeof(rule->overload_tblname));
1945 	if (ret != 0)
1946 		return (ret);
1947 
1948 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1949 
1950 	/* Don't allow userspace to set evaluations, packets or bytes. */
1951 	/* kif, anchor, overload_tbl are not copied over. */
1952 
1953 	krule->os_fingerprint = rule->os_fingerprint;
1954 
1955 	krule->rtableid = rule->rtableid;
1956 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1957 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1958 	krule->max_states = rule->max_states;
1959 	krule->max_src_nodes = rule->max_src_nodes;
1960 	krule->max_src_states = rule->max_src_states;
1961 	krule->max_src_conn = rule->max_src_conn;
1962 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1963 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1964 	krule->qid = rule->qid;
1965 	krule->pqid = rule->pqid;
1966 	krule->nr = rule->nr;
1967 	krule->prob = rule->prob;
1968 	krule->cuid = rule->cuid;
1969 	krule->cpid = rule->cpid;
1970 
1971 	krule->return_icmp = rule->return_icmp;
1972 	krule->return_icmp6 = rule->return_icmp6;
1973 	krule->max_mss = rule->max_mss;
1974 	krule->tag = rule->tag;
1975 	krule->match_tag = rule->match_tag;
1976 	krule->scrub_flags = rule->scrub_flags;
1977 
1978 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1979 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1980 
1981 	krule->rule_flag = rule->rule_flag;
1982 	krule->action = rule->action;
1983 	krule->direction = rule->direction;
1984 	krule->log = rule->log;
1985 	krule->logif = rule->logif;
1986 	krule->quick = rule->quick;
1987 	krule->ifnot = rule->ifnot;
1988 	krule->match_tag_not = rule->match_tag_not;
1989 	krule->natpass = rule->natpass;
1990 
1991 	krule->keep_state = rule->keep_state;
1992 	krule->af = rule->af;
1993 	krule->proto = rule->proto;
1994 	krule->type = rule->type;
1995 	krule->code = rule->code;
1996 	krule->flags = rule->flags;
1997 	krule->flagset = rule->flagset;
1998 	krule->min_ttl = rule->min_ttl;
1999 	krule->allow_opts = rule->allow_opts;
2000 	krule->rt = rule->rt;
2001 	krule->return_ttl = rule->return_ttl;
2002 	krule->tos = rule->tos;
2003 	krule->set_tos = rule->set_tos;
2004 
2005 	krule->flush = rule->flush;
2006 	krule->prio = rule->prio;
2007 	krule->set_prio[0] = rule->set_prio[0];
2008 	krule->set_prio[1] = rule->set_prio[1];
2009 
2010 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2011 
2012 	return (0);
2013 }
2014 
2015 int
2016 pf_ioctl_getrules(struct pfioc_rule *pr)
2017 {
2018 	struct pf_kruleset	*ruleset;
2019 	struct pf_krule		*tail;
2020 	int			 rs_num;
2021 
2022 	PF_RULES_WLOCK();
2023 	ruleset = pf_find_kruleset(pr->anchor);
2024 	if (ruleset == NULL) {
2025 		PF_RULES_WUNLOCK();
2026 		return (EINVAL);
2027 	}
2028 	rs_num = pf_get_ruleset_number(pr->rule.action);
2029 	if (rs_num >= PF_RULESET_MAX) {
2030 		PF_RULES_WUNLOCK();
2031 		return (EINVAL);
2032 	}
2033 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2034 	    pf_krulequeue);
2035 	if (tail)
2036 		pr->nr = tail->nr + 1;
2037 	else
2038 		pr->nr = 0;
2039 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2040 	PF_RULES_WUNLOCK();
2041 
2042 	return (0);
2043 }
2044 
2045 static int
2046 pf_rule_checkaf(struct pf_krule *r)
2047 {
2048 	switch (r->af) {
2049 	case 0:
2050 		if (r->rule_flag & PFRULE_AFTO)
2051 			return (EPFNOSUPPORT);
2052 		break;
2053 	case AF_INET:
2054 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
2055 			return (EPFNOSUPPORT);
2056 		break;
2057 #ifdef INET6
2058 	case AF_INET6:
2059 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
2060 			return (EPFNOSUPPORT);
2061 		break;
2062 #endif /* INET6 */
2063 	default:
2064 		return (EPFNOSUPPORT);
2065 	}
2066 
2067 	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
2068 		return (EPFNOSUPPORT);
2069 
2070 	return (0);
2071 }
2072 
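/*
 * Worked examples of the af/naf pairing enforced above ("af-to" rules,
 * i.e. PFRULE_AFTO, translate between the two address families):
 *
 *	af		naf		PFRULE_AFTO	verdict
 *	0		0		clear		OK
 *	AF_INET		AF_INET6	set		OK
 *	AF_INET6	AF_INET		set		OK
 *	AF_INET		AF_INET		set		EPFNOSUPPORT
 *	AF_INET		AF_INET6	clear		EPFNOSUPPORT
 */
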
2073 static int
2074 pf_validate_range(uint8_t op, uint16_t port[2])
2075 {
2076 	uint16_t a = ntohs(port[0]);
2077 	uint16_t b = ntohs(port[1]);
2078 
2079 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2080 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2081 	    (op == PF_OP_XRG && a > b))	   /* 34<>22, i.e. all */
2082 		return 1;
2083 	return 0;
2084 }
2085 
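/*
 * Worked examples for the checks above (ports arrive in network byte
 * order, hence the ntohs() before comparing):
 *
 *	op		a	b	rule syntax	verdict
 *	PF_OP_RRG	12	34	12:34		valid
 *	PF_OP_RRG	34	12	34:12		rejected, empty
 *	PF_OP_IRG	34	34	34><34		rejected, empty
 *	PF_OP_XRG	34	22	34<>22		rejected, matches all
 */
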
2086 int
2087 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2088     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2089     uid_t uid, pid_t pid)
2090 {
2091 	struct pf_kruleset	*ruleset;
2092 	struct pf_krule		*tail;
2093 	struct pf_kpooladdr	*pa;
2094 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2095 	int			 rs_num;
2096 	int			 error = 0;
2097 
2098 #define	ERROUT(x)		ERROUT_FUNCTION(errout, x)
2099 #define	ERROUT_UNLOCKED(x)	ERROUT_FUNCTION(errout_unlocked, x)
2100 
2101 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
2102 		ERROUT_UNLOCKED(EINVAL);
2103 
2104 	if ((error = pf_rule_checkaf(rule)))
2105 		ERROUT_UNLOCKED(error);
2106 	if (pf_validate_range(rule->src.port_op, rule->src.port))
2107 		ERROUT_UNLOCKED(EINVAL);
2108 	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
2109 		ERROUT_UNLOCKED(EINVAL);
2110 
2111 	if (rule->ifname[0])
2112 		kif = pf_kkif_create(M_WAITOK);
2113 	if (rule->rcv_ifname[0])
2114 		rcv_kif = pf_kkif_create(M_WAITOK);
2115 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2116 	for (int i = 0; i < 2; i++) {
2117 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2118 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2119 	}
2120 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2121 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2122 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
2123 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
2124 	rule->cuid = uid;
2125 	rule->cpid = pid;
2126 	TAILQ_INIT(&rule->rdr.list);
2127 	TAILQ_INIT(&rule->nat.list);
2128 	TAILQ_INIT(&rule->route.list);
2129 
2130 	PF_CONFIG_LOCK();
2131 	PF_RULES_WLOCK();
2132 #ifdef PF_WANT_32_TO_64_COUNTER
2133 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2134 	MPASS(!rule->allrulelinked);
2135 	rule->allrulelinked = true;
2136 	V_pf_allrulecount++;
2137 #endif
2138 	ruleset = pf_find_kruleset(anchor);
2139 	if (ruleset == NULL)
2140 		ERROUT(EINVAL);
2141 	rs_num = pf_get_ruleset_number(rule->action);
2142 	if (rs_num >= PF_RULESET_MAX)
2143 		ERROUT(EINVAL);
2144 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2145 		DPFPRINTF(PF_DEBUG_MISC,
2146 		    "ticket: %d != [%d]%d", ticket, rs_num,
2147 		    ruleset->rules[rs_num].inactive.ticket);
2148 		ERROUT(EBUSY);
2149 	}
2150 	if (pool_ticket != V_ticket_pabuf) {
2151 		DPFPRINTF(PF_DEBUG_MISC,
2152 		    "pool_ticket: %d != %d", pool_ticket,
2153 		    V_ticket_pabuf);
2154 		ERROUT(EBUSY);
2155 	}
2156 	/*
2157 	 * XXXMJG hack: there is no mechanism to ensure that the caller
2158 	 * started the transaction.  The ticket checked above may happen to
2159 	 * match by accident, even if nobody called DIOCXBEGIN, let alone this
2160 	 * process.  Partially work around that by checking whether the RB
2161 	 * tree has been allocated; see pf_begin_rules.
2162 	 */
2163 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2164 		ERROUT(EINVAL);
2165 	}
2166 
2167 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2168 	    pf_krulequeue);
2169 	if (tail)
2170 		rule->nr = tail->nr + 1;
2171 	else
2172 		rule->nr = 0;
2173 	if (rule->ifname[0]) {
2174 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2175 		kif = NULL;
2176 		pfi_kkif_ref(rule->kif);
2177 	} else
2178 		rule->kif = NULL;
2179 
2180 	if (rule->rcv_ifname[0]) {
2181 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2182 		rcv_kif = NULL;
2183 		pfi_kkif_ref(rule->rcv_kif);
2184 	} else
2185 		rule->rcv_kif = NULL;
2186 
2187 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2188 		ERROUT(EBUSY);
2189 #ifdef ALTQ
2190 	/* set queue IDs */
2191 	if (rule->qname[0] != 0) {
2192 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2193 			ERROUT(EBUSY);
2194 		else if (rule->pqname[0] != 0) {
2195 			if ((rule->pqid =
2196 			    pf_qname2qid(rule->pqname)) == 0)
2197 				ERROUT(EBUSY);
2198 		} else
2199 			rule->pqid = rule->qid;
2200 	}
2201 #endif
2202 	if (rule->tagname[0])
2203 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2204 			ERROUT(EBUSY);
2205 	if (rule->match_tagname[0])
2206 		if ((rule->match_tag =
2207 		    pf_tagname2tag(rule->match_tagname)) == 0)
2208 			ERROUT(EBUSY);
2209 	if (rule->rt && !rule->direction)
2210 		ERROUT(EINVAL);
2211 	if (!rule->log)
2212 		rule->logif = 0;
2213 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
2214 	   rule->pktrate.seconds))
2215 		ERROUT(ENOMEM);
2216 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2217 		ERROUT(ENOMEM);
2218 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2219 		ERROUT(ENOMEM);
2220 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2221 		ERROUT(EINVAL);
2222 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2223 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2224 	    rule->set_prio[1] > PF_PRIO_MAX))
2225 		ERROUT(EINVAL);
2226 	for (int i = 0; i < 3; i++) {
2227 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2228 			if (pa->addr.type == PF_ADDR_TABLE) {
2229 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2230 				    pa->addr.v.tblname);
2231 				if (pa->addr.p.tbl == NULL)
2232 					ERROUT(ENOMEM);
2233 			}
2234 	}
2235 
2236 	rule->overload_tbl = NULL;
2237 	if (rule->overload_tblname[0]) {
2238 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2239 		    rule->overload_tblname)) == NULL)
2240 			ERROUT(EINVAL);
2241 		else
2242 			rule->overload_tbl->pfrkt_flags |=
2243 			    PFR_TFLAG_ACTIVE;
2244 	}
2245 
2246 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2247 
2248 	/*
2249 	 * Old versions of pfctl provide route redirection pools in the single
2250 	 * common redirection pool rdr.  New versions use rdr only for
2251 	 * rdr-to rules.
2252 	 */
2253 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
2254 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
2255 	} else {
2256 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2257 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2258 	}
2259 
2260 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2261 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
2262 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
2263 		ERROUT(EINVAL);
2264 	}
2265 
2266 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
2267 		ERROUT(EINVAL);
2268 	}
2269 
2270 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
2271 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
2272 		ERROUT(EINVAL);
2273 	}
2274 
2275 	MPASS(error == 0);
2276 
2277 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2278 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2279 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2280 	rule->route.ipv6_nexthop_af = AF_INET6;
2281 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2282 	    rule, entries);
2283 	ruleset->rules[rs_num].inactive.rcount++;
2284 
2285 	PF_RULES_WUNLOCK();
2286 	pf_hash_rule(rule);
2287 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2288 		PF_RULES_WLOCK();
2289 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2290 		ruleset->rules[rs_num].inactive.rcount--;
2291 		pf_free_rule(rule);
2292 		rule = NULL;
2293 		ERROUT(EEXIST);
2294 	}
2295 	PF_CONFIG_UNLOCK();
2296 
2297 	return (0);
2298 
2299 #undef ERROUT
2300 #undef ERROUT_UNLOCKED
2301 errout:
2302 	PF_RULES_WUNLOCK();
2303 	PF_CONFIG_UNLOCK();
2304 errout_unlocked:
2305 	pf_kkif_free(rcv_kif);
2306 	pf_kkif_free(kif);
2307 	pf_krule_free(rule);
2308 	return (error);
2309 }
2310 
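/*
 * The ticket checks above enforce this userspace sequence (a minimal
 * sketch of the classic ioctl path; pfctl drives it through a
 * DIOCXBEGIN/DIOCXCOMMIT transaction):
 *
 *	struct pfioc_rule pr;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	// 1. DIOCXBEGIN (struct pfioc_trans) opens the transaction,
 *	//    returns the ruleset ticket and allocates the inactive rule
 *	//    tree this function insists on seeing.
 *	// 2. DIOCBEGINADDRS empties the three staging pools and returns
 *	//    the pool ticket.
 *	// 3. Zero or more DIOCADDADDR calls stage pool addresses.
 *	pr.ticket = ruleset_ticket;		// from step 1
 *	pr.pool_ticket = pool_ticket;		// from step 2
 *	if (ioctl(dev, DIOCADDRULE, &pr) == -1)
 *		err(1, "DIOCADDRULE");
 *	// 4. DIOCXCOMMIT atomically swaps inactive and active rulesets.
 */
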
2311 static bool
2312 pf_label_match(const struct pf_krule *rule, const char *label)
2313 {
2314 	int i = 0;
2315 
2316 	while (*rule->label[i]) {
2317 		if (strcmp(rule->label[i], label) == 0)
2318 			return (true);
2319 		i++;
2320 	}
2321 
2322 	return (false);
2323 }
2324 
2325 static unsigned int
2326 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2327 {
2328 	struct pf_kstate *s;
2329 	int more = 0;
2330 
2331 	s = pf_find_state_all(key, dir, &more);
2332 	if (s == NULL)
2333 		return (0);
2334 
2335 	if (more) {
2336 		PF_STATE_UNLOCK(s);
2337 		return (0);
2338 	}
2339 
2340 	pf_remove_state(s);
2341 	return (1);
2342 }
2343 
2344 static int
2345 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2346 {
2347 	struct pf_kstate	*s;
2348 	struct pf_state_key	*sk;
2349 	struct pf_addr		*srcaddr, *dstaddr;
2350 	struct pf_state_key_cmp	 match_key;
2351 	int			 idx, killed = 0;
2352 	unsigned int		 dir;
2353 	u_int16_t		 srcport, dstport;
2354 	struct pfi_kkif		*kif;
2355 
2356 relock_DIOCKILLSTATES:
2357 	PF_HASHROW_LOCK(ih);
2358 	LIST_FOREACH(s, &ih->states, entry) {
2359 		/* For floating states look at the original kif. */
2360 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2361 
2362 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2363 		if (s->direction == PF_OUT) {
2364 			srcaddr = &sk->addr[1];
2365 			dstaddr = &sk->addr[0];
2366 			srcport = sk->port[1];
2367 			dstport = sk->port[0];
2368 		} else {
2369 			srcaddr = &sk->addr[0];
2370 			dstaddr = &sk->addr[1];
2371 			srcport = sk->port[0];
2372 			dstport = sk->port[1];
2373 		}
2374 
2375 		if (psk->psk_af && sk->af != psk->psk_af)
2376 			continue;
2377 
2378 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2379 			continue;
2380 
2381 		if (! pf_match_addr(psk->psk_src.neg,
2382 		    &psk->psk_src.addr.v.a.addr,
2383 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2384 			continue;
2385 
2386 		if (! pf_match_addr(psk->psk_dst.neg,
2387 		    &psk->psk_dst.addr.v.a.addr,
2388 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2389 			continue;
2390 
2391 		if (! pf_match_addr(psk->psk_rt_addr.neg,
2392 		    &psk->psk_rt_addr.addr.v.a.addr,
2393 		    &psk->psk_rt_addr.addr.v.a.mask,
2394 		    &s->act.rt_addr, sk->af))
2395 			continue;
2396 
2397 		if (psk->psk_src.port_op != 0 &&
2398 		    ! pf_match_port(psk->psk_src.port_op,
2399 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2400 			continue;
2401 
2402 		if (psk->psk_dst.port_op != 0 &&
2403 		    ! pf_match_port(psk->psk_dst.port_op,
2404 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2405 			continue;
2406 
2407 		if (psk->psk_label[0] &&
2408 		    ! pf_label_match(s->rule, psk->psk_label))
2409 			continue;
2410 
2411 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2412 		    kif->pfik_name))
2413 			continue;
2414 
2415 		if (psk->psk_kill_match) {
2416 			/* Create the key to find matching states, with lock
2417 			 * held. */
2418 
2419 			bzero(&match_key, sizeof(match_key));
2420 
2421 			if (s->direction == PF_OUT) {
2422 				dir = PF_IN;
2423 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2424 			} else {
2425 				dir = PF_OUT;
2426 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2427 			}
2428 
2429 			match_key.af = s->key[idx]->af;
2430 			match_key.proto = s->key[idx]->proto;
2431 			pf_addrcpy(&match_key.addr[0],
2432 			    &s->key[idx]->addr[1], match_key.af);
2433 			match_key.port[0] = s->key[idx]->port[1];
2434 			pf_addrcpy(&match_key.addr[1],
2435 			    &s->key[idx]->addr[0], match_key.af);
2436 			match_key.port[1] = s->key[idx]->port[0];
2437 		}
2438 
2439 		pf_remove_state(s);
2440 		killed++;
2441 
2442 		if (psk->psk_kill_match)
2443 			killed += pf_kill_matching_state(&match_key, dir);
2444 
2445 		goto relock_DIOCKILLSTATES;
2446 	}
2447 	PF_HASHROW_UNLOCK(ih);
2448 
2449 	return (killed);
2450 }
2451 
2452 void
2453 unhandled_af(int af)
2454 {
2455 	panic("unhandled af %d", af);
2456 }
2457 
2458 int
2459 pf_start(void)
2460 {
2461 	int error = 0;
2462 
2463 	sx_xlock(&V_pf_ioctl_lock);
2464 	if (V_pf_status.running)
2465 		error = EEXIST;
2466 	else {
2467 		hook_pf();
2468 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2469 			hook_pf_eth();
2470 		V_pf_status.running = 1;
2471 		V_pf_status.since = time_uptime;
2472 		new_unrhdr64(&V_pf_stateid, time_second);
2473 
2474 		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
2475 	}
2476 	sx_xunlock(&V_pf_ioctl_lock);
2477 
2478 	return (error);
2479 }
2480 
2481 int
2482 pf_stop(void)
2483 {
2484 	int error = 0;
2485 
2486 	sx_xlock(&V_pf_ioctl_lock);
2487 	if (!V_pf_status.running)
2488 		error = ENOENT;
2489 	else {
2490 		V_pf_status.running = 0;
2491 		dehook_pf();
2492 		dehook_pf_eth();
2493 		V_pf_status.since = time_uptime;
2494 		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
2495 	}
2496 	sx_xunlock(&V_pf_ioctl_lock);
2497 
2498 	return (error);
2499 }
2500 
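/*
 * Neither entry point is idempotent: a second start reports EEXIST and a
 * second stop ENOENT, so callers may treat those errors as "already in
 * the requested state".  Minimal sketch of the userspace side (the
 * COMPAT_FREEBSD14 DIOCSTART/DIOCSTOP cases below land here):
 *
 *	if (ioctl(dev, DIOCSTART) == -1 && errno != EEXIST)
 *		err(1, "DIOCSTART");
 *	...
 *	if (ioctl(dev, DIOCSTOP) == -1 && errno != ENOENT)
 *		err(1, "DIOCSTOP");
 */
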
2501 void
2502 pf_ioctl_clear_status(void)
2503 {
2504 	PF_RULES_WLOCK();
2505 	for (int i = 0; i < PFRES_MAX; i++)
2506 		counter_u64_zero(V_pf_status.counters[i]);
2507 	for (int i = 0; i < FCNT_MAX; i++)
2508 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2509 	for (int i = 0; i < SCNT_MAX; i++)
2510 		counter_u64_zero(V_pf_status.scounters[i]);
2511 	for (int i = 0; i < KLCNT_MAX; i++)
2512 		counter_u64_zero(V_pf_status.lcounters[i]);
2513 	V_pf_status.since = time_uptime;
2514 	if (*V_pf_status.ifname)
2515 		pfi_update_status(V_pf_status.ifname, NULL);
2516 	PF_RULES_WUNLOCK();
2517 }
2518 
2519 int
2520 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2521 {
2522 	uint32_t old;
2523 
2524 	if (timeout < 0 || timeout >= PFTM_MAX ||
2525 	    seconds < 0)
2526 		return (EINVAL);
2527 
2528 	PF_RULES_WLOCK();
2529 	old = V_pf_default_rule.timeout[timeout];
2530 	if (timeout == PFTM_INTERVAL && seconds == 0)
2531 		seconds = 1;
2532 	V_pf_default_rule.timeout[timeout] = seconds;
2533 	if (timeout == PFTM_INTERVAL && seconds < old)
2534 		wakeup(pf_purge_thread);
2535 
2536 	if (prev_seconds != NULL)
2537 		*prev_seconds = old;
2538 
2539 	PF_RULES_WUNLOCK();
2540 
2541 	return (0);
2542 }
2543 
2544 int
2545 pf_ioctl_get_timeout(int timeout, int *seconds)
2546 {
2547 	PF_RULES_RLOCK_TRACKER;
2548 
2549 	if (timeout < 0 || timeout >= PFTM_MAX)
2550 		return (EINVAL);
2551 
2552 	PF_RULES_RLOCK();
2553 	*seconds = V_pf_default_rule.timeout[timeout];
2554 	PF_RULES_RUNLOCK();
2555 
2556 	return (0);
2557 }
2558 
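/*
 * Minimal sketch of the matching ioctl usage, assuming the classic
 * struct pfioc_tm interface.  Note the special cases above: a zero
 * PFTM_INTERVAL is clamped to 1 second, and shortening the interval
 * wakes the purge thread immediately.
 *
 *	struct pfioc_tm pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.timeout = PFTM_TCP_ESTABLISHED;
 *	pt.seconds = 3600;
 *	if (ioctl(dev, DIOCSETTIMEOUT, &pt) == -1)
 *		err(1, "DIOCSETTIMEOUT");
 *	// pt.seconds now reports the previous value (prev_seconds above)
 */
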
2559 int
2560 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2561 {
2562 
2563 	PF_RULES_WLOCK();
2564 	if (index < 0 || index >= PF_LIMIT_MAX ||
2565 	    V_pf_limits[index].zone == NULL) {
2566 		PF_RULES_WUNLOCK();
2567 		return (EINVAL);
2568 	}
2569 	uma_zone_set_max(V_pf_limits[index].zone,
2570 	    limit == 0 ? INT_MAX : limit);
2571 	if (old_limit != NULL)
2572 		*old_limit = V_pf_limits[index].limit;
2573 	V_pf_limits[index].limit = limit;
2574 	PF_RULES_WUNLOCK();
2575 
2576 	return (0);
2577 }
2578 
2579 int
2580 pf_ioctl_get_limit(int index, unsigned int *limit)
2581 {
2582 	PF_RULES_RLOCK_TRACKER;
2583 
2584 	if (index < 0 || index >= PF_LIMIT_MAX)
2585 		return (EINVAL);
2586 
2587 	PF_RULES_RLOCK();
2588 	*limit = V_pf_limits[index].limit;
2589 	PF_RULES_RUNLOCK();
2590 
2591 	return (0);
2592 }
2593 
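/*
 * Sketch of the matching DIOCSETLIMIT/DIOCGETLIMIT usage (struct
 * pfioc_limit).  A limit of 0 is mapped above to INT_MAX on the backing
 * UMA zone, i.e. effectively unlimited:
 *
 *	struct pfioc_limit pl;
 *
 *	memset(&pl, 0, sizeof(pl));
 *	pl.index = PF_LIMIT_STATES;
 *	pl.limit = 100000;
 *	if (ioctl(dev, DIOCSETLIMIT, &pl) == -1)
 *		err(1, "DIOCSETLIMIT");
 */
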
2594 int
2595 pf_ioctl_begin_addrs(uint32_t *ticket)
2596 {
2597 	PF_RULES_WLOCK();
2598 	pf_empty_kpool(&V_pf_pabuf[0]);
2599 	pf_empty_kpool(&V_pf_pabuf[1]);
2600 	pf_empty_kpool(&V_pf_pabuf[2]);
2601 	*ticket = ++V_ticket_pabuf;
2602 	PF_RULES_WUNLOCK();
2603 
2604 	return (0);
2605 }
2606 
2607 int
2608 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2609 {
2610 	struct pf_kpooladdr	*pa = NULL;
2611 	struct pfi_kkif		*kif = NULL;
2612 	int error;
2613 
2614 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2615 	    pp->which != PF_RT)
2616 		return (EINVAL);
2617 
2618 	switch (pp->af) {
2619 #ifdef INET
2620 	case AF_INET:
2621 		/* FALLTHROUGH */
2622 #endif /* INET */
2623 #ifdef INET6
2624 	case AF_INET6:
2625 		/* FALLTHROUGH */
2626 #endif /* INET6 */
2627 	case AF_UNSPEC:
2628 		break;
2629 	default:
2630 		return (EAFNOSUPPORT);
2631 	}
2632 
2633 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2634 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2635 	    pp->addr.addr.type != PF_ADDR_TABLE)
2636 		return (EINVAL);
2637 
2638 	if (pp->addr.addr.p.dyn != NULL)
2639 		return (EINVAL);
2640 
2641 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2642 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2643 	if (error != 0)
2644 		goto out;
2645 	if (pa->ifname[0])
2646 		kif = pf_kkif_create(M_WAITOK);
2647 	PF_RULES_WLOCK();
2648 	if (pp->ticket != V_ticket_pabuf) {
2649 		PF_RULES_WUNLOCK();
2650 		if (pa->ifname[0])
2651 			pf_kkif_free(kif);
2652 		error = EBUSY;
2653 		goto out;
2654 	}
2655 	if (pa->ifname[0]) {
2656 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2657 		kif = NULL;
2658 		pfi_kkif_ref(pa->kif);
2659 	} else
2660 		pa->kif = NULL;
2661 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2662 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2663 		if (pa->ifname[0])
2664 			pfi_kkif_unref(pa->kif);
2665 		PF_RULES_WUNLOCK();
2666 		goto out;
2667 	}
2668 	pa->af = pp->af;
2669 	switch (pp->which) {
2670 	case PF_NAT:
2671 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2672 		break;
2673 	case PF_RDR:
2674 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2675 		break;
2676 	case PF_RT:
2677 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2678 		break;
2679 	}
2680 	PF_RULES_WUNLOCK();
2681 
2682 	return (0);
2683 
2684 out:
2685 	free(pa, M_PFRULE);
2686 	return (error);
2687 }
2688 
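/*
 * The ticket guard above ties every staged address to the most recent
 * DIOCBEGINADDRS.  Sketch of staging one IPv4 redirect address over the
 * classic interface (the netlink path reaches this function with
 * pp->which set explicitly; the compat ioctl supplies PF_RDR):
 *
 *	struct pfioc_pooladdr pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)	// returns pp.ticket
 *		err(1, "DIOCBEGINADDRS");
 *	pp.af = AF_INET;
 *	pp.addr.addr.type = PF_ADDR_ADDRMASK;
 *	inet_pton(AF_INET, "192.0.2.1", &pp.addr.addr.v.a.addr.v4);
 *	memset(&pp.addr.addr.v.a.mask, 0xff, sizeof(struct in_addr));
 *	if (ioctl(dev, DIOCADDADDR, &pp) == -1)
 *		err(1, "DIOCADDADDR");
 */
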
2689 int
2690 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2691 {
2692 	struct pf_kpool		*pool;
2693 	struct pf_kpooladdr	*pa;
2694 
2695 	PF_RULES_RLOCK_TRACKER;
2696 
2697 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2698 	    pp->which != PF_RT)
2699 		return (EINVAL);
2700 
2701 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2702 	pp->nr = 0;
2703 
2704 	PF_RULES_RLOCK();
2705 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2706 	    pp->r_num, 0, 1, 0, pp->which);
2707 	if (pool == NULL) {
2708 		PF_RULES_RUNLOCK();
2709 		return (EBUSY);
2710 	}
2711 	TAILQ_FOREACH(pa, &pool->list, entries)
2712 		pp->nr++;
2713 	PF_RULES_RUNLOCK();
2714 
2715 	return (0);
2716 }
2717 
2718 int
2719 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2720 {
2721 	struct pf_kpool		*pool;
2722 	struct pf_kpooladdr	*pa;
2723 	u_int32_t		 nr = 0;
2724 
2725 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2726 	    pp->which != PF_RT)
2727 		return (EINVAL);
2728 
2729 	PF_RULES_RLOCK_TRACKER;
2730 
2731 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2732 
2733 	PF_RULES_RLOCK();
2734 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2735 	    pp->r_num, 0, 1, 1, pp->which);
2736 	if (pool == NULL) {
2737 		PF_RULES_RUNLOCK();
2738 		return (EBUSY);
2739 	}
2740 	pa = TAILQ_FIRST(&pool->list);
2741 	while ((pa != NULL) && (nr < pp->nr)) {
2742 		pa = TAILQ_NEXT(pa, entries);
2743 		nr++;
2744 	}
2745 	if (pa == NULL) {
2746 		PF_RULES_RUNLOCK();
2747 		return (EBUSY);
2748 	}
2749 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2750 	pp->af = pa->af;
2751 	pf_addr_copyout(&pp->addr.addr);
2752 	PF_RULES_RUNLOCK();
2753 
2754 	return (0);
2755 }
2756 
2757 int
2758 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2759 {
2760 	struct pf_kruleset	*ruleset;
2761 	struct pf_kanchor	*anchor;
2762 
2763 	PF_RULES_RLOCK_TRACKER;
2764 
2765 	pr->path[sizeof(pr->path) - 1] = '\0';
2766 
2767 	PF_RULES_RLOCK();
2768 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2769 		PF_RULES_RUNLOCK();
2770 		return (ENOENT);
2771 	}
2772 	pr->nr = 0;
2773 	if (ruleset == &pf_main_ruleset) {
2774 		/* XXX kludge for pf_main_ruleset */
2775 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2776 			if (anchor->parent == NULL)
2777 				pr->nr++;
2778 	} else {
2779 		RB_FOREACH(anchor, pf_kanchor_node,
2780 		    &ruleset->anchor->children)
2781 			pr->nr++;
2782 	}
2783 	PF_RULES_RUNLOCK();
2784 
2785 	return (0);
2786 }
2787 
2788 int
2789 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2790 {
2791 	struct pf_kruleset	*ruleset;
2792 	struct pf_kanchor	*anchor;
2793 	u_int32_t		 nr = 0;
2794 	int			 error = 0;
2795 
2796 	PF_RULES_RLOCK_TRACKER;
2797 
2798 	PF_RULES_RLOCK();
2799 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2800 		PF_RULES_RUNLOCK();
2801 		return (ENOENT);
2802 	}
2803 
2804 	pr->name[0] = '\0';
2805 	if (ruleset == &pf_main_ruleset) {
2806 		/* XXX kludge for pf_main_ruleset */
2807 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2808 			if (anchor->parent == NULL && nr++ == pr->nr) {
2809 				strlcpy(pr->name, anchor->name,
2810 				    sizeof(pr->name));
2811 				break;
2812 			}
2813 	} else {
2814 		RB_FOREACH(anchor, pf_kanchor_node,
2815 		    &ruleset->anchor->children)
2816 			if (nr++ == pr->nr) {
2817 				strlcpy(pr->name, anchor->name,
2818 				    sizeof(pr->name));
2819 				break;
2820 			}
2821 	}
2822 	if (!pr->name[0])
2823 		error = EBUSY;
2824 	PF_RULES_RUNLOCK();
2825 
2826 	return (error);
2827 }
2828 
2829 int
2830 pf_ioctl_natlook(struct pfioc_natlook *pnl)
2831 {
2832 	struct pf_state_key	*sk;
2833 	struct pf_kstate	*state;
2834 	struct pf_state_key_cmp	 key;
2835 	int			 m = 0, direction = pnl->direction;
2836 	int			 sidx, didx;
2837 
2838 	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
2839 	sidx = (direction == PF_IN) ? 1 : 0;
2840 	didx = (direction == PF_IN) ? 0 : 1;
2841 
2842 	if (!pnl->proto ||
2843 	    PF_AZERO(&pnl->saddr, pnl->af) ||
2844 	    PF_AZERO(&pnl->daddr, pnl->af) ||
2845 	    ((pnl->proto == IPPROTO_TCP ||
2846 	    pnl->proto == IPPROTO_UDP) &&
2847 	    (!pnl->dport || !pnl->sport)))
2848 		return (EINVAL);
2849 
2850 	switch (pnl->direction) {
2851 	case PF_IN:
2852 	case PF_OUT:
2853 	case PF_INOUT:
2854 		break;
2855 	default:
2856 		return (EINVAL);
2857 	}
2858 
2859 	switch (pnl->af) {
2860 #ifdef INET
2861 	case AF_INET:
2862 		break;
2863 #endif /* INET */
2864 #ifdef INET6
2865 	case AF_INET6:
2866 		break;
2867 #endif /* INET6 */
2868 	default:
2869 		return (EAFNOSUPPORT);
2870 	}
2871 
2872 	bzero(&key, sizeof(key));
2873 	key.af = pnl->af;
2874 	key.proto = pnl->proto;
2875 	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
2876 	key.port[sidx] = pnl->sport;
2877 	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2878 	key.port[didx] = pnl->dport;
2879 
2880 	state = pf_find_state_all(&key, direction, &m);
2881 	if (state == NULL)
2882 		return (ENOENT);
2883 
2884 	if (m > 1) {
2885 		PF_STATE_UNLOCK(state);
2886 		return (E2BIG);	/* more than one state */
2887 	}
2888 
2889 	sk = state->key[sidx];
2890 	pf_addrcpy(&pnl->rsaddr,
2891 	    &sk->addr[sidx], sk->af);
2892 	pnl->rsport = sk->port[sidx];
2893 	pf_addrcpy(&pnl->rdaddr,
2894 	    &sk->addr[didx], sk->af);
2895 	pnl->rdport = sk->port[didx];
2896 	PF_STATE_UNLOCK(state);
2897 
2898 	return (0);
2899 }
2900 
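/*
 * DIOCNATLOOK is the classic transparent-proxy hook: given the addresses
 * of a connection as the stack sees them, it returns the other side of
 * the state entry.  Sketch for a proxy that accepted an rdr-ed IPv4 TCP
 * connection and wants the original destination (trimmed; ftp-proxy is
 * the canonical consumer of this pattern):
 *
 *	struct pfioc_natlook nl;
 *
 *	memset(&nl, 0, sizeof(nl));
 *	nl.af = AF_INET;
 *	nl.proto = IPPROTO_TCP;
 *	nl.direction = PF_OUT;
 *	nl.saddr.v4 = client_sin.sin_addr;	// peer of the accepted fd
 *	nl.sport = client_sin.sin_port;
 *	nl.daddr.v4 = local_sin.sin_addr;	// our end of the socket
 *	nl.dport = local_sin.sin_port;
 *	if (ioctl(dev, DIOCNATLOOK, &nl) == -1)
 *		err(1, "DIOCNATLOOK");
 *	// nl.rdaddr/nl.rdport hold the pre-redirect destination
 */
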
2901 static int
2902 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2903 {
2904 	int			 error = 0;
2905 	PF_RULES_RLOCK_TRACKER;
2906 
2907 #define	ERROUT_IOCTL(target, x)					\
2908     do {								\
2909 	    error = (x);						\
2910 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2911 	    goto target;						\
2912     } while (0)
2913 
2914 
2915 	/* XXX keep in sync with switch() below */
2916 	if (securelevel_gt(td->td_ucred, 2))
2917 		switch (cmd) {
2918 		case DIOCGETRULES:
2919 		case DIOCGETRULENV:
2920 		case DIOCGETADDRS:
2921 		case DIOCGETADDR:
2922 		case DIOCGETSTATE:
2923 		case DIOCGETSTATENV:
2924 		case DIOCSETSTATUSIF:
2925 		case DIOCGETSTATUSNV:
2926 		case DIOCCLRSTATUS:
2927 		case DIOCNATLOOK:
2928 		case DIOCSETDEBUG:
2929 #ifdef COMPAT_FREEBSD14
2930 		case DIOCGETSTATES:
2931 		case DIOCGETSTATESV2:
2932 #endif
2933 		case DIOCGETTIMEOUT:
2934 		case DIOCCLRRULECTRS:
2935 		case DIOCGETLIMIT:
2936 		case DIOCGETALTQSV0:
2937 		case DIOCGETALTQSV1:
2938 		case DIOCGETALTQV0:
2939 		case DIOCGETALTQV1:
2940 		case DIOCGETQSTATSV0:
2941 		case DIOCGETQSTATSV1:
2942 		case DIOCGETRULESETS:
2943 		case DIOCGETRULESET:
2944 		case DIOCRGETTABLES:
2945 		case DIOCRGETTSTATS:
2946 		case DIOCRCLRTSTATS:
2947 		case DIOCRCLRADDRS:
2948 		case DIOCRADDADDRS:
2949 		case DIOCRDELADDRS:
2950 		case DIOCRSETADDRS:
2951 		case DIOCRGETADDRS:
2952 		case DIOCRGETASTATS:
2953 		case DIOCRCLRASTATS:
2954 		case DIOCRTSTADDRS:
2955 		case DIOCOSFPGET:
2956 		case DIOCGETSRCNODES:
2957 		case DIOCCLRSRCNODES:
2958 		case DIOCGETSYNCOOKIES:
2959 		case DIOCIGETIFACES:
2960 		case DIOCGIFSPEEDV0:
2961 		case DIOCGIFSPEEDV1:
2962 		case DIOCSETIFFLAG:
2963 		case DIOCCLRIFFLAG:
2964 		case DIOCGETETHRULES:
2965 		case DIOCGETETHRULE:
2966 		case DIOCGETETHRULESETS:
2967 		case DIOCGETETHRULESET:
2968 			break;
2969 		case DIOCRCLRTABLES:
2970 		case DIOCRADDTABLES:
2971 		case DIOCRDELTABLES:
2972 		case DIOCRSETTFLAGS:
2973 			if (((struct pfioc_table *)addr)->pfrio_flags &
2974 			    PFR_FLAG_DUMMY)
2975 				break; /* dummy operation ok */
2976 			return (EPERM);
2977 		default:
2978 			return (EPERM);
2979 		}
2980 
2981 	if (!(flags & FWRITE))
2982 		switch (cmd) {
2983 		case DIOCGETRULES:
2984 		case DIOCGETADDRS:
2985 		case DIOCGETADDR:
2986 		case DIOCGETSTATE:
2987 		case DIOCGETSTATENV:
2988 		case DIOCGETSTATUSNV:
2989 #ifdef COMPAT_FREEBSD14
2990 		case DIOCGETSTATES:
2991 		case DIOCGETSTATESV2:
2992 #endif
2993 		case DIOCGETTIMEOUT:
2994 		case DIOCGETLIMIT:
2995 		case DIOCGETALTQSV0:
2996 		case DIOCGETALTQSV1:
2997 		case DIOCGETALTQV0:
2998 		case DIOCGETALTQV1:
2999 		case DIOCGETQSTATSV0:
3000 		case DIOCGETQSTATSV1:
3001 		case DIOCGETRULESETS:
3002 		case DIOCGETRULESET:
3003 		case DIOCNATLOOK:
3004 		case DIOCRGETTABLES:
3005 		case DIOCRGETTSTATS:
3006 		case DIOCRGETADDRS:
3007 		case DIOCRGETASTATS:
3008 		case DIOCRTSTADDRS:
3009 		case DIOCOSFPGET:
3010 		case DIOCGETSRCNODES:
3011 		case DIOCGETSYNCOOKIES:
3012 		case DIOCIGETIFACES:
3013 		case DIOCGIFSPEEDV1:
3014 		case DIOCGIFSPEEDV0:
3015 		case DIOCGETRULENV:
3016 		case DIOCGETETHRULES:
3017 		case DIOCGETETHRULE:
3018 		case DIOCGETETHRULESETS:
3019 		case DIOCGETETHRULESET:
3020 			break;
3021 		case DIOCRCLRTABLES:
3022 		case DIOCRADDTABLES:
3023 		case DIOCRDELTABLES:
3024 		case DIOCRCLRTSTATS:
3025 		case DIOCRCLRADDRS:
3026 		case DIOCRADDADDRS:
3027 		case DIOCRDELADDRS:
3028 		case DIOCRSETADDRS:
3029 		case DIOCRSETTFLAGS:
3030 			if (((struct pfioc_table *)addr)->pfrio_flags &
3031 			    PFR_FLAG_DUMMY) {
3032 				flags |= FWRITE; /* need write lock for dummy */
3033 				break; /* dummy operation ok */
3034 			}
3035 			return (EACCES);
3036 		default:
3037 			return (EACCES);
3038 		}
3039 
3040 	CURVNET_SET(TD_TO_VNET(td));
3041 
3042 	switch (cmd) {
3043 #ifdef COMPAT_FREEBSD14
3044 	case DIOCSTART:
3045 		error = pf_start();
3046 		break;
3047 
3048 	case DIOCSTOP:
3049 		error = pf_stop();
3050 		break;
3051 #endif
3052 
3053 	case DIOCGETETHRULES: {
3054 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3055 		nvlist_t		*nvl;
3056 		void			*packed;
3057 		struct pf_keth_rule	*tail;
3058 		struct pf_keth_ruleset	*rs;
3059 		u_int32_t		 ticket, nr;
3060 		const char		*anchor = "";
3061 
3062 		nvl = NULL;
3063 		packed = NULL;
3064 
3065 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
3066 
3067 		if (nv->len > pf_ioctl_maxcount)
3068 			ERROUT(ENOMEM);
3069 
3070 		/* Copy the request in */
3071 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
3072 		error = copyin(nv->data, packed, nv->len);
3073 		if (error)
3074 			ERROUT(error);
3075 
3076 		nvl = nvlist_unpack(packed, nv->len, 0);
3077 		if (nvl == NULL)
3078 			ERROUT(EBADMSG);
3079 
3080 		if (! nvlist_exists_string(nvl, "anchor"))
3081 			ERROUT(EBADMSG);
3082 
3083 		anchor = nvlist_get_string(nvl, "anchor");
3084 
3085 		rs = pf_find_keth_ruleset(anchor);
3086 
3087 		nvlist_destroy(nvl);
3088 		nvl = NULL;
3089 		free(packed, M_NVLIST);
3090 		packed = NULL;
3091 
3092 		if (rs == NULL)
3093 			ERROUT(ENOENT);
3094 
3095 		/* Reply */
3096 		nvl = nvlist_create(0);
3097 		if (nvl == NULL)
3098 			ERROUT(ENOMEM);
3099 
3100 		PF_RULES_RLOCK();
3101 
3102 		ticket = rs->active.ticket;
3103 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
3104 		if (tail)
3105 			nr = tail->nr + 1;
3106 		else
3107 			nr = 0;
3108 
3109 		PF_RULES_RUNLOCK();
3110 
3111 		nvlist_add_number(nvl, "ticket", ticket);
3112 		nvlist_add_number(nvl, "nr", nr);
3113 
3114 		packed = nvlist_pack(nvl, &nv->len);
3115 		if (packed == NULL)
3116 			ERROUT(ENOMEM);
3117 
3118 		if (nv->size == 0)
3119 			ERROUT(0);
3120 		else if (nv->size < nv->len)
3121 			ERROUT(ENOSPC);
3122 
3123 		error = copyout(packed, nv->data, nv->len);
3124 
3125 #undef ERROUT
3126 DIOCGETETHRULES_error:
3127 		free(packed, M_NVLIST);
3128 		nvlist_destroy(nvl);
3129 		break;
3130 	}
3131 
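	/*
	 * All nvlist-based commands in this switch share one sizing
	 * contract, shown here for DIOCGETETHRULES: if nv.size is zero the
	 * call succeeds and only reports the required length in nv.len; if
	 * nv.size is too small it fails with ENOSPC.  A userspace consumer
	 * (sketch using libnv, error handling trimmed) therefore retries:
	 *
	 *	nvlist_t *nvl = nvlist_create(0);
	 *	struct pfioc_nv nv;
	 *
	 *	nvlist_add_string(nvl, "anchor", "");
	 *	nv.data = nvlist_pack(nvl, &nv.len);
	 *	nv.size = nv.len;
	 *	if (ioctl(dev, DIOCGETETHRULES, &nv) == -1 && errno == ENOSPC) {
	 *		nv.data = realloc(nv.data, nv.len);	// grow to nv.len
	 *		nv.size = nv.len;
	 *		(void)ioctl(dev, DIOCGETETHRULES, &nv);
	 *	}
	 *	nvl = nvlist_unpack(nv.data, nv.len, 0);	// "ticket", "nr"
	 */
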
3132 	case DIOCGETETHRULE: {
3133 		struct epoch_tracker	 et;
3134 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3135 		nvlist_t		*nvl = NULL;
3136 		void			*nvlpacked = NULL;
3137 		struct pf_keth_rule	*rule = NULL;
3138 		struct pf_keth_ruleset	*rs;
3139 		u_int32_t		 ticket, nr;
3140 		bool			 clear = false;
3141 		const char		*anchor;
3142 
3143 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3144 
3145 		if (nv->len > pf_ioctl_maxcount)
3146 			ERROUT(ENOMEM);
3147 
3148 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3149 		error = copyin(nv->data, nvlpacked, nv->len);
3150 		if (error)
3151 			ERROUT(error);
3152 
3153 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3154 		if (nvl == NULL)
3155 			ERROUT(EBADMSG);
3156 		if (! nvlist_exists_number(nvl, "ticket"))
3157 			ERROUT(EBADMSG);
3158 		ticket = nvlist_get_number(nvl, "ticket");
3159 		if (! nvlist_exists_string(nvl, "anchor"))
3160 			ERROUT(EBADMSG);
3161 		anchor = nvlist_get_string(nvl, "anchor");
3162 
3163 		if (nvlist_exists_bool(nvl, "clear"))
3164 			clear = nvlist_get_bool(nvl, "clear");
3165 
3166 		if (clear && !(flags & FWRITE))
3167 			ERROUT(EACCES);
3168 
3169 		if (! nvlist_exists_number(nvl, "nr"))
3170 			ERROUT(EBADMSG);
3171 		nr = nvlist_get_number(nvl, "nr");
3172 
3173 		PF_RULES_RLOCK();
3174 		rs = pf_find_keth_ruleset(anchor);
3175 		if (rs == NULL) {
3176 			PF_RULES_RUNLOCK();
3177 			ERROUT(ENOENT);
3178 		}
3179 		if (ticket != rs->active.ticket) {
3180 			PF_RULES_RUNLOCK();
3181 			ERROUT(EBUSY);
3182 		}
3183 
3184 		nvlist_destroy(nvl);
3185 		nvl = NULL;
3186 		free(nvlpacked, M_NVLIST);
3187 		nvlpacked = NULL;
3188 
3189 		rule = TAILQ_FIRST(rs->active.rules);
3190 		while ((rule != NULL) && (rule->nr != nr))
3191 			rule = TAILQ_NEXT(rule, entries);
3192 		if (rule == NULL) {
3193 			PF_RULES_RUNLOCK();
3194 			ERROUT(ENOENT);
3195 		}
3196 		/* Make sure rule can't go away. */
3197 		NET_EPOCH_ENTER(et);
3198 		PF_RULES_RUNLOCK();
3199 		nvl = pf_keth_rule_to_nveth_rule(rule);
3200 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3201 			NET_EPOCH_EXIT(et);
3202 			ERROUT(EBUSY);
3203 		}
3204 		NET_EPOCH_EXIT(et);
3205 		if (nvl == NULL)
3206 			ERROUT(ENOMEM);
3207 
3208 		nvlpacked = nvlist_pack(nvl, &nv->len);
3209 		if (nvlpacked == NULL)
3210 			ERROUT(ENOMEM);
3211 
3212 		if (nv->size == 0)
3213 			ERROUT(0);
3214 		else if (nv->size < nv->len)
3215 			ERROUT(ENOSPC);
3216 
3217 		error = copyout(nvlpacked, nv->data, nv->len);
3218 		if (error == 0 && clear) {
3219 			counter_u64_zero(rule->evaluations);
3220 			for (int i = 0; i < 2; i++) {
3221 				counter_u64_zero(rule->packets[i]);
3222 				counter_u64_zero(rule->bytes[i]);
3223 			}
3224 		}
3225 
3226 #undef ERROUT
3227 DIOCGETETHRULE_error:
3228 		free(nvlpacked, M_NVLIST);
3229 		nvlist_destroy(nvl);
3230 		break;
3231 	}
3232 
3233 	case DIOCADDETHRULE: {
3234 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3235 		nvlist_t		*nvl = NULL;
3236 		void			*nvlpacked = NULL;
3237 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3238 		struct pf_keth_ruleset	*ruleset = NULL;
3239 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3240 		const char		*anchor = "", *anchor_call = "";
3241 
3242 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3243 
3244 		if (nv->len > pf_ioctl_maxcount)
3245 			ERROUT(ENOMEM);
3246 
3247 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3248 		error = copyin(nv->data, nvlpacked, nv->len);
3249 		if (error)
3250 			ERROUT(error);
3251 
3252 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3253 		if (nvl == NULL)
3254 			ERROUT(EBADMSG);
3255 
3256 		if (! nvlist_exists_number(nvl, "ticket"))
3257 			ERROUT(EBADMSG);
3258 
3259 		if (nvlist_exists_string(nvl, "anchor"))
3260 			anchor = nvlist_get_string(nvl, "anchor");
3261 		if (nvlist_exists_string(nvl, "anchor_call"))
3262 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3263 
3264 		ruleset = pf_find_keth_ruleset(anchor);
3265 		if (ruleset == NULL)
3266 			ERROUT(EINVAL);
3267 
3268 		if (nvlist_get_number(nvl, "ticket") !=
3269 		    ruleset->inactive.ticket) {
3270 			DPFPRINTF(PF_DEBUG_MISC,
3271 			    "ticket: %d != %d",
3272 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3273 			    ruleset->inactive.ticket);
3274 			ERROUT(EBUSY);
3275 		}
3276 
3277 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3278 		rule->timestamp = NULL;
3279 
3280 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3281 		if (error != 0)
3282 			ERROUT(error);
3283 
3284 		if (rule->ifname[0])
3285 			kif = pf_kkif_create(M_WAITOK);
3286 		if (rule->bridge_to_name[0])
3287 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3288 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3289 		for (int i = 0; i < 2; i++) {
3290 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3291 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3292 		}
3293 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3294 		    M_WAITOK | M_ZERO);
3295 
3296 		PF_RULES_WLOCK();
3297 
3298 		if (rule->ifname[0]) {
3299 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3300 			pfi_kkif_ref(rule->kif);
3301 		} else
3302 			rule->kif = NULL;
3303 		if (rule->bridge_to_name[0]) {
3304 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3305 			    rule->bridge_to_name);
3306 			pfi_kkif_ref(rule->bridge_to);
3307 		} else
3308 			rule->bridge_to = NULL;
3309 
3310 #ifdef ALTQ
3311 		/* set queue IDs */
3312 		if (rule->qname[0] != 0) {
3313 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3314 				error = EBUSY;
3317 		}
3318 #endif
3319 		if (rule->tagname[0])
3320 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3321 				error = EBUSY;
3322 		if (rule->match_tagname[0])
3323 			if ((rule->match_tag = pf_tagname2tag(
3324 			    rule->match_tagname)) == 0)
3325 				error = EBUSY;
3326 
3327 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3328 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3329 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3330 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3331 
3332 		if (error) {
3333 			pf_free_eth_rule(rule);
3334 			PF_RULES_WUNLOCK();
3335 			ERROUT(error);
3336 		}
3337 
3338 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3339 			pf_free_eth_rule(rule);
3340 			PF_RULES_WUNLOCK();
3341 			ERROUT(EINVAL);
3342 		}
3343 
3344 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3345 		if (tail)
3346 			rule->nr = tail->nr + 1;
3347 		else
3348 			rule->nr = 0;
3349 
3350 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3351 
3352 		PF_RULES_WUNLOCK();
3353 
3354 #undef ERROUT
3355 DIOCADDETHRULE_error:
3356 		nvlist_destroy(nvl);
3357 		free(nvlpacked, M_NVLIST);
3358 		break;
3359 	}
3360 
3361 	case DIOCGETETHRULESETS: {
3362 		struct epoch_tracker	 et;
3363 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3364 		nvlist_t		*nvl = NULL;
3365 		void			*nvlpacked = NULL;
3366 		struct pf_keth_ruleset	*ruleset;
3367 		struct pf_keth_anchor	*anchor;
3368 		int			 nr = 0;
3369 
3370 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3371 
3372 		if (nv->len > pf_ioctl_maxcount)
3373 			ERROUT(ENOMEM);
3374 
3375 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3376 		error = copyin(nv->data, nvlpacked, nv->len);
3377 		if (error)
3378 			ERROUT(error);
3379 
3380 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3381 		if (nvl == NULL)
3382 			ERROUT(EBADMSG);
3383 		if (! nvlist_exists_string(nvl, "path"))
3384 			ERROUT(EBADMSG);
3385 
3386 		NET_EPOCH_ENTER(et);
3387 
3388 		if ((ruleset = pf_find_keth_ruleset(
3389 		    nvlist_get_string(nvl, "path"))) == NULL) {
3390 			NET_EPOCH_EXIT(et);
3391 			ERROUT(ENOENT);
3392 		}
3393 
3394 		if (ruleset->anchor == NULL) {
3395 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3396 				if (anchor->parent == NULL)
3397 					nr++;
3398 		} else {
3399 			RB_FOREACH(anchor, pf_keth_anchor_node,
3400 			    &ruleset->anchor->children)
3401 				nr++;
3402 		}
3403 
3404 		NET_EPOCH_EXIT(et);
3405 
3406 		nvlist_destroy(nvl);
3407 		nvl = NULL;
3408 		free(nvlpacked, M_NVLIST);
3409 		nvlpacked = NULL;
3410 
3411 		nvl = nvlist_create(0);
3412 		if (nvl == NULL)
3413 			ERROUT(ENOMEM);
3414 
3415 		nvlist_add_number(nvl, "nr", nr);
3416 
3417 		nvlpacked = nvlist_pack(nvl, &nv->len);
3418 		if (nvlpacked == NULL)
3419 			ERROUT(ENOMEM);
3420 
3421 		if (nv->size == 0)
3422 			ERROUT(0);
3423 		else if (nv->size < nv->len)
3424 			ERROUT(ENOSPC);
3425 
3426 		error = copyout(nvlpacked, nv->data, nv->len);
3427 
3428 #undef ERROUT
3429 DIOCGETETHRULESETS_error:
3430 		free(nvlpacked, M_NVLIST);
3431 		nvlist_destroy(nvl);
3432 		break;
3433 	}
3434 
3435 	case DIOCGETETHRULESET: {
3436 		struct epoch_tracker	 et;
3437 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3438 		nvlist_t		*nvl = NULL;
3439 		void			*nvlpacked = NULL;
3440 		struct pf_keth_ruleset	*ruleset;
3441 		struct pf_keth_anchor	*anchor;
3442 		int			 nr = 0, req_nr = 0;
3443 		bool			 found = false;
3444 
3445 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3446 
3447 		if (nv->len > pf_ioctl_maxcount)
3448 			ERROUT(ENOMEM);
3449 
3450 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3451 		error = copyin(nv->data, nvlpacked, nv->len);
3452 		if (error)
3453 			ERROUT(error);
3454 
3455 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3456 		if (nvl == NULL)
3457 			ERROUT(EBADMSG);
3458 		if (! nvlist_exists_string(nvl, "path"))
3459 			ERROUT(EBADMSG);
3460 		if (! nvlist_exists_number(nvl, "nr"))
3461 			ERROUT(EBADMSG);
3462 
3463 		req_nr = nvlist_get_number(nvl, "nr");
3464 
3465 		NET_EPOCH_ENTER(et);
3466 
3467 		if ((ruleset = pf_find_keth_ruleset(
3468 		    nvlist_get_string(nvl, "path"))) == NULL) {
3469 			NET_EPOCH_EXIT(et);
3470 			ERROUT(ENOENT);
3471 		}
3472 
3473 		nvlist_destroy(nvl);
3474 		nvl = NULL;
3475 		free(nvlpacked, M_NVLIST);
3476 		nvlpacked = NULL;
3477 
3478 		nvl = nvlist_create(0);
3479 		if (nvl == NULL) {
3480 			NET_EPOCH_EXIT(et);
3481 			ERROUT(ENOMEM);
3482 		}
3483 
3484 		if (ruleset->anchor == NULL) {
3485 			RB_FOREACH(anchor, pf_keth_anchor_global,
3486 			    &V_pf_keth_anchors) {
3487 				if (anchor->parent == NULL && nr++ == req_nr) {
3488 					found = true;
3489 					break;
3490 				}
3491 			}
3492 		} else {
3493 			RB_FOREACH(anchor, pf_keth_anchor_node,
3494 			     &ruleset->anchor->children) {
3495 				if (nr++ == req_nr) {
3496 					found = true;
3497 					break;
3498 				}
3499 			}
3500 		}
3501 
3502 		NET_EPOCH_EXIT(et);
3503 		if (found) {
3504 			nvlist_add_number(nvl, "nr", nr);
3505 			nvlist_add_string(nvl, "name", anchor->name);
3506 			if (ruleset->anchor)
3507 				nvlist_add_string(nvl, "path",
3508 				    ruleset->anchor->path);
3509 			else
3510 				nvlist_add_string(nvl, "path", "");
3511 		} else {
3512 			ERROUT(EBUSY);
3513 		}
3514 
3515 		nvlpacked = nvlist_pack(nvl, &nv->len);
3516 		if (nvlpacked == NULL)
3517 			ERROUT(ENOMEM);
3518 
3519 		if (nv->size == 0)
3520 			ERROUT(0);
3521 		else if (nv->size < nv->len)
3522 			ERROUT(ENOSPC);
3523 
3524 		error = copyout(nvlpacked, nv->data, nv->len);
3525 
3526 #undef ERROUT
3527 DIOCGETETHRULESET_error:
3528 		free(nvlpacked, M_NVLIST);
3529 		nvlist_destroy(nvl);
3530 		break;
3531 	}
3532 
3533 	case DIOCADDRULENV: {
3534 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3535 		nvlist_t	*nvl = NULL;
3536 		void		*nvlpacked = NULL;
3537 		struct pf_krule	*rule = NULL;
3538 		const char	*anchor = "", *anchor_call = "";
3539 		uint32_t	 ticket = 0, pool_ticket = 0;
3540 
3541 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3542 
3543 		if (nv->len > pf_ioctl_maxcount)
3544 			ERROUT(ENOMEM);
3545 
3546 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3547 		error = copyin(nv->data, nvlpacked, nv->len);
3548 		if (error)
3549 			ERROUT(error);
3550 
3551 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3552 		if (nvl == NULL)
3553 			ERROUT(EBADMSG);
3554 
3555 		if (! nvlist_exists_number(nvl, "ticket"))
3556 			ERROUT(EINVAL);
3557 		ticket = nvlist_get_number(nvl, "ticket");
3558 
3559 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3560 			ERROUT(EINVAL);
3561 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3562 
3563 		if (! nvlist_exists_nvlist(nvl, "rule"))
3564 			ERROUT(EINVAL);
3565 
3566 		rule = pf_krule_alloc();
3567 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3568 		    rule);
3569 		if (error)
3570 			ERROUT(error);
3571 
3572 		if (nvlist_exists_string(nvl, "anchor"))
3573 			anchor = nvlist_get_string(nvl, "anchor");
3574 		if (nvlist_exists_string(nvl, "anchor_call"))
3575 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3576 
3577 		if ((error = nvlist_error(nvl)))
3578 			ERROUT(error);
3579 
3580 		/* Frees rule on error */
3581 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3582 		    anchor_call, td->td_ucred->cr_ruid,
3583 		    td->td_proc ? td->td_proc->p_pid : 0);
3584 
3585 		nvlist_destroy(nvl);
3586 		free(nvlpacked, M_NVLIST);
3587 		break;
3588 #undef ERROUT
3589 DIOCADDRULENV_error:
3590 		pf_krule_free(rule);
3591 		nvlist_destroy(nvl);
3592 		free(nvlpacked, M_NVLIST);
3593 
3594 		break;
3595 	}
3596 	case DIOCADDRULE: {
3597 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3598 		struct pf_krule		*rule;
3599 
3600 		rule = pf_krule_alloc();
3601 		error = pf_rule_to_krule(&pr->rule, rule);
3602 		if (error != 0) {
3603 			pf_krule_free(rule);
3604 			goto fail;
3605 		}
3606 
3607 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3608 
3609 		/* Frees rule on error */
3610 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3611 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3612 		    td->td_proc ? td->td_proc->p_pid : 0);
3613 		break;
3614 	}
3615 
3616 	case DIOCGETRULES: {
3617 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3618 
3619 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3620 
3621 		error = pf_ioctl_getrules(pr);
3622 
3623 		break;
3624 	}
3625 
3626 	case DIOCGETRULENV: {
3627 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3628 		nvlist_t		*nvrule = NULL;
3629 		nvlist_t		*nvl = NULL;
3630 		struct pf_kruleset	*ruleset;
3631 		struct pf_krule		*rule;
3632 		void			*nvlpacked = NULL;
3633 		int			 rs_num, nr;
3634 		bool			 clear_counter = false;
3635 
3636 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3637 
3638 		if (nv->len > pf_ioctl_maxcount)
3639 			ERROUT(ENOMEM);
3640 
3641 		/* Copy the request in */
3642 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3643 		error = copyin(nv->data, nvlpacked, nv->len);
3644 		if (error)
3645 			ERROUT(error);
3646 
3647 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3648 		if (nvl == NULL)
3649 			ERROUT(EBADMSG);
3650 
3651 		if (! nvlist_exists_string(nvl, "anchor"))
3652 			ERROUT(EBADMSG);
3653 		if (! nvlist_exists_number(nvl, "ruleset"))
3654 			ERROUT(EBADMSG);
3655 		if (! nvlist_exists_number(nvl, "ticket"))
3656 			ERROUT(EBADMSG);
3657 		if (! nvlist_exists_number(nvl, "nr"))
3658 			ERROUT(EBADMSG);
3659 
3660 		if (nvlist_exists_bool(nvl, "clear_counter"))
3661 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3662 
3663 		if (clear_counter && !(flags & FWRITE))
3664 			ERROUT(EACCES);
3665 
3666 		nr = nvlist_get_number(nvl, "nr");
3667 
3668 		PF_RULES_WLOCK();
3669 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3670 		if (ruleset == NULL) {
3671 			PF_RULES_WUNLOCK();
3672 			ERROUT(ENOENT);
3673 		}
3674 
3675 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3676 		if (rs_num >= PF_RULESET_MAX) {
3677 			PF_RULES_WUNLOCK();
3678 			ERROUT(EINVAL);
3679 		}
3680 
3681 		if (nvlist_get_number(nvl, "ticket") !=
3682 		    ruleset->rules[rs_num].active.ticket) {
3683 			PF_RULES_WUNLOCK();
3684 			ERROUT(EBUSY);
3685 		}
3686 
3687 		if ((error = nvlist_error(nvl))) {
3688 			PF_RULES_WUNLOCK();
3689 			ERROUT(error);
3690 		}
3691 
3692 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3693 		while ((rule != NULL) && (rule->nr != nr))
3694 			rule = TAILQ_NEXT(rule, entries);
3695 		if (rule == NULL) {
3696 			PF_RULES_WUNLOCK();
3697 			ERROUT(EBUSY);
3698 		}
3699 
3700 		nvrule = pf_krule_to_nvrule(rule);
3701 
3702 		nvlist_destroy(nvl);
3703 		nvl = nvlist_create(0);
3704 		if (nvl == NULL) {
3705 			PF_RULES_WUNLOCK();
3706 			ERROUT(ENOMEM);
3707 		}
3708 		nvlist_add_number(nvl, "nr", nr);
3709 		nvlist_add_nvlist(nvl, "rule", nvrule);
3710 		nvlist_destroy(nvrule);
3711 		nvrule = NULL;
3712 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3713 			PF_RULES_WUNLOCK();
3714 			ERROUT(EBUSY);
3715 		}
3716 
3717 		free(nvlpacked, M_NVLIST);
3718 		nvlpacked = nvlist_pack(nvl, &nv->len);
3719 		if (nvlpacked == NULL) {
3720 			PF_RULES_WUNLOCK();
3721 			ERROUT(ENOMEM);
3722 		}
3723 
3724 		if (nv->size == 0) {
3725 			PF_RULES_WUNLOCK();
3726 			ERROUT(0);
3727 		} else if (nv->size < nv->len) {
3729 			PF_RULES_WUNLOCK();
3730 			ERROUT(ENOSPC);
3731 		}
3732 
3733 		if (clear_counter)
3734 			pf_krule_clear_counters(rule);
3735 
3736 		PF_RULES_WUNLOCK();
3737 
3738 		error = copyout(nvlpacked, nv->data, nv->len);
3739 
3740 #undef ERROUT
3741 DIOCGETRULENV_error:
3742 		free(nvlpacked, M_NVLIST);
3743 		nvlist_destroy(nvrule);
3744 		nvlist_destroy(nvl);
3745 
3746 		break;
3747 	}
3748 
3749 	case DIOCCHANGERULE: {
3750 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3751 		struct pf_kruleset	*ruleset;
3752 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3753 		struct pfi_kkif		*kif = NULL;
3754 		struct pf_kpooladdr	*pa;
3755 		u_int32_t		 nr = 0;
3756 		int			 rs_num;
3757 
3758 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3759 
3760 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3761 		    pcr->action > PF_CHANGE_GET_TICKET) {
3762 			error = EINVAL;
3763 			goto fail;
3764 		}
3765 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3766 			error = EINVAL;
3767 			goto fail;
3768 		}
3769 
3770 		if (pcr->action != PF_CHANGE_REMOVE) {
3771 			newrule = pf_krule_alloc();
3772 			error = pf_rule_to_krule(&pcr->rule, newrule);
3773 			if (error != 0) {
3774 				pf_krule_free(newrule);
3775 				goto fail;
3776 			}
3777 
3778 			if ((error = pf_rule_checkaf(newrule))) {
3779 				pf_krule_free(newrule);
3780 				goto fail;
3781 			}
3782 			if (newrule->ifname[0])
3783 				kif = pf_kkif_create(M_WAITOK);
3784 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3785 			for (int i = 0; i < 2; i++) {
3786 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3787 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3788 			}
3789 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3790 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3791 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
3792 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3793 			newrule->cuid = td->td_ucred->cr_ruid;
3794 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3795 			TAILQ_INIT(&newrule->nat.list);
3796 			TAILQ_INIT(&newrule->rdr.list);
3797 			TAILQ_INIT(&newrule->route.list);
3798 		}
3799 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3800 
3801 		PF_CONFIG_LOCK();
3802 		PF_RULES_WLOCK();
3803 #ifdef PF_WANT_32_TO_64_COUNTER
3804 		if (newrule != NULL) {
3805 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3806 			newrule->allrulelinked = true;
3807 			V_pf_allrulecount++;
3808 		}
3809 #endif
3810 
3811 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3812 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3813 		    pcr->pool_ticket != V_ticket_pabuf)
3814 			ERROUT(EBUSY);
3815 
3816 		ruleset = pf_find_kruleset(pcr->anchor);
3817 		if (ruleset == NULL)
3818 			ERROUT(EINVAL);
3819 
3820 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3821 		if (rs_num >= PF_RULESET_MAX)
3822 			ERROUT(EINVAL);
3823 
3824 		/*
3825 		 * XXXMJG: there is no guarantee that the ruleset was
3826 		 * created by the usual route of calling DIOCXBEGIN.
3827 		 * As a result it is possible the rule tree will not
3828 		 * be allocated yet. Hack around it by doing it here.
3829 		 * Note it is fine to let the tree persist in case of
3830 		 * error as it will be freed down the road on future
3831 		 * updates (if need be).
3832 		 */
3833 		if (ruleset->rules[rs_num].active.tree == NULL) {
3834 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3835 			if (ruleset->rules[rs_num].active.tree == NULL) {
3836 				ERROUT(ENOMEM);
3837 			}
3838 		}
3839 
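		/*
		 * PF_CHANGE_GET_TICKET bumps and returns the active ticket;
		 * every other action must present that ticket back, which
		 * serializes concurrent rule editors.
		 */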
3840 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3841 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3842 			ERROUT(0);
3843 		} else if (pcr->ticket !=
3844 			    ruleset->rules[rs_num].active.ticket)
3845 				ERROUT(EINVAL);
3846 
3847 		if (pcr->action != PF_CHANGE_REMOVE) {
3848 			if (newrule->ifname[0]) {
3849 				newrule->kif = pfi_kkif_attach(kif,
3850 				    newrule->ifname);
3851 				kif = NULL;
3852 				pfi_kkif_ref(newrule->kif);
3853 			} else
3854 				newrule->kif = NULL;
3855 
3856 			if (newrule->rtableid > 0 &&
3857 			    newrule->rtableid >= rt_numfibs)
3858 				error = EBUSY;
3859 
3860 #ifdef ALTQ
3861 			/* set queue IDs */
3862 			if (newrule->qname[0] != 0) {
3863 				if ((newrule->qid =
3864 				    pf_qname2qid(newrule->qname)) == 0)
3865 					error = EBUSY;
3866 				else if (newrule->pqname[0] != 0) {
3867 					if ((newrule->pqid =
3868 					    pf_qname2qid(newrule->pqname)) == 0)
3869 						error = EBUSY;
3870 				} else
3871 					newrule->pqid = newrule->qid;
3872 			}
3873 #endif /* ALTQ */
3874 			if (newrule->tagname[0])
3875 				if ((newrule->tag =
3876 				    pf_tagname2tag(newrule->tagname)) == 0)
3877 					error = EBUSY;
3878 			if (newrule->match_tagname[0])
3879 				if ((newrule->match_tag = pf_tagname2tag(
3880 				    newrule->match_tagname)) == 0)
3881 					error = EBUSY;
3882 			if (newrule->rt && !newrule->direction)
3883 				error = EINVAL;
3884 			if (!newrule->log)
3885 				newrule->logif = 0;
3886 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3887 				error = ENOMEM;
3888 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3889 				error = ENOMEM;
3890 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3891 				error = EINVAL;
3892 			for (int i = 0; i < 3; i++) {
3893 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3894 					if (pa->addr.type == PF_ADDR_TABLE) {
3895 						pa->addr.p.tbl =
3896 						    pfr_attach_table(ruleset,
3897 						    pa->addr.v.tblname);
3898 						if (pa->addr.p.tbl == NULL)
3899 							error = ENOMEM;
3900 					}
3901 			}
3902 
3903 			newrule->overload_tbl = NULL;
3904 			if (newrule->overload_tblname[0]) {
3905 				if ((newrule->overload_tbl = pfr_attach_table(
3906 				    ruleset, newrule->overload_tblname)) ==
3907 				    NULL)
3908 					error = EINVAL;
3909 				else
3910 					newrule->overload_tbl->pfrkt_flags |=
3911 					    PFR_TFLAG_ACTIVE;
3912 			}
3913 
3914 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3915 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3916 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
3917 			if (((((newrule->action == PF_NAT) ||
3918 			    (newrule->action == PF_RDR) ||
3919 			    (newrule->action == PF_BINAT) ||
3920 			    (newrule->rt > PF_NOPFROUTE)) &&
3921 			    !newrule->anchor)) &&
3922 			    (TAILQ_FIRST(&newrule->rdr.list) == NULL))
3923 				error = EINVAL;
3924 
3925 			if (error) {
3926 				pf_free_rule(newrule);
3927 				PF_RULES_WUNLOCK();
3928 				PF_CONFIG_UNLOCK();
3929 				goto fail;
3930 			}
3931 
3932 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3933 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3934 		}
3935 		pf_empty_kpool(&V_pf_pabuf[0]);
3936 		pf_empty_kpool(&V_pf_pabuf[1]);
3937 		pf_empty_kpool(&V_pf_pabuf[2]);
3938 
3939 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3940 			oldrule = TAILQ_FIRST(
3941 			    ruleset->rules[rs_num].active.ptr);
3942 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3943 			oldrule = TAILQ_LAST(
3944 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3945 		else {
3946 			oldrule = TAILQ_FIRST(
3947 			    ruleset->rules[rs_num].active.ptr);
3948 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3949 				oldrule = TAILQ_NEXT(oldrule, entries);
3950 			if (oldrule == NULL) {
3951 				if (newrule != NULL)
3952 					pf_free_rule(newrule);
3953 				PF_RULES_WUNLOCK();
3954 				PF_CONFIG_UNLOCK();
3955 				error = EINVAL;
3956 				goto fail;
3957 			}
3958 		}
3959 
3960 		if (pcr->action == PF_CHANGE_REMOVE) {
3961 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3962 			    oldrule);
3963 			RB_REMOVE(pf_krule_global,
3964 			    ruleset->rules[rs_num].active.tree, oldrule);
3965 			ruleset->rules[rs_num].active.rcount--;
3966 		} else {
3967 			pf_hash_rule(newrule);
3968 			if (RB_INSERT(pf_krule_global,
3969 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3970 				pf_free_rule(newrule);
3971 				PF_RULES_WUNLOCK();
3972 				PF_CONFIG_UNLOCK();
3973 				error = EEXIST;
3974 				goto fail;
3975 			}
3976 
3977 			if (oldrule == NULL)
3978 				TAILQ_INSERT_TAIL(
3979 				    ruleset->rules[rs_num].active.ptr,
3980 				    newrule, entries);
3981 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3982 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3983 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3984 			else
3985 				TAILQ_INSERT_AFTER(
3986 				    ruleset->rules[rs_num].active.ptr,
3987 				    oldrule, newrule, entries);
3988 			ruleset->rules[rs_num].active.rcount++;
3989 		}
3990 
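		/* Renumber the queue so rule->nr stays dense and ordered. */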
3991 		nr = 0;
3992 		TAILQ_FOREACH(oldrule,
3993 		    ruleset->rules[rs_num].active.ptr, entries)
3994 			oldrule->nr = nr++;
3995 
3996 		ruleset->rules[rs_num].active.ticket++;
3997 
3998 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3999 		pf_remove_if_empty_kruleset(ruleset);
4000 
4001 		PF_RULES_WUNLOCK();
4002 		PF_CONFIG_UNLOCK();
4003 		break;
4004 
4005 #undef ERROUT
4006 DIOCCHANGERULE_error:
4007 		PF_RULES_WUNLOCK();
4008 		PF_CONFIG_UNLOCK();
4009 		pf_krule_free(newrule);
4010 		pf_kkif_free(kif);
4011 		break;
4012 	}
4013 
4014 	case DIOCCLRSTATESNV: {
4015 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
4016 		break;
4017 	}
4018 
4019 	case DIOCKILLSTATESNV: {
4020 		error = pf_killstates_nv((struct pfioc_nv *)addr);
4021 		break;
4022 	}
4023 
4024 	case DIOCADDSTATE: {
4025 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
4026 		struct pfsync_state_1301	*sp = &ps->state;
4027 
4028 		if (sp->timeout >= PFTM_MAX) {
4029 			error = EINVAL;
4030 			goto fail;
4031 		}
4032 		if (V_pfsync_state_import_ptr != NULL) {
4033 			PF_RULES_RLOCK();
4034 			error = V_pfsync_state_import_ptr(
4035 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
4036 			    PFSYNC_MSG_VERSION_1301);
4037 			PF_RULES_RUNLOCK();
4038 		} else
4039 			error = EOPNOTSUPP;
4040 		break;
4041 	}
4042 
4043 	case DIOCGETSTATE: {
4044 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
4045 		struct pf_kstate	*s;
4046 
4047 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
4048 		if (s == NULL) {
4049 			error = ENOENT;
4050 			goto fail;
4051 		}
4052 
4053 		pfsync_state_export((union pfsync_state_union*)&ps->state,
4054 		    s, PFSYNC_MSG_VERSION_1301);
4055 		PF_STATE_UNLOCK(s);
4056 		break;
4057 	}
4058 
4059 	case DIOCGETSTATENV: {
4060 		error = pf_getstate((struct pfioc_nv *)addr);
4061 		break;
4062 	}
4063 
4064 #ifdef COMPAT_FREEBSD14
4065 	case DIOCGETSTATES: {
4066 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
4067 		struct pf_kstate	*s;
4068 		struct pfsync_state_1301	*pstore, *p;
4069 		int			 i, nr;
4070 		size_t			 slice_count = 16, count;
4071 		void			*out;
4072 
4073 		if (ps->ps_len <= 0) {
4074 			nr = uma_zone_get_cur(V_pf_state_z);
4075 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4076 			break;
4077 		}
4078 
4079 		out = ps->ps_states;
4080 		pstore = mallocarray(slice_count,
4081 		    sizeof(struct pfsync_state_1301), M_PF, M_WAITOK | M_ZERO);
4082 		nr = 0;
4083 
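		/*
		 * Walk the state table one hash bucket at a time. The
		 * export buffer holds 'slice_count' entries; if a bucket
		 * holds more, drop its lock, grow the buffer (we must not
		 * sleep in malloc(9) with the row lock held) and retry
		 * that bucket.
		 */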
4084 		for (i = 0; i <= V_pf_hashmask; i++) {
4085 			struct pf_idhash *ih = &V_pf_idhash[i];
4086 
4087 DIOCGETSTATES_retry:
4088 			p = pstore;
4089 
4090 			if (LIST_EMPTY(&ih->states))
4091 				continue;
4092 
4093 			PF_HASHROW_LOCK(ih);
4094 			count = 0;
4095 			LIST_FOREACH(s, &ih->states, entry) {
4096 				if (s->timeout == PFTM_UNLINKED)
4097 					continue;
4098 				count++;
4099 			}
4100 
4101 			if (count > slice_count) {
4102 				PF_HASHROW_UNLOCK(ih);
4103 				free(pstore, M_PF);
4104 				slice_count = count * 2;
4105 				pstore = mallocarray(slice_count,
4106 				    sizeof(struct pfsync_state_1301), M_PF,
4107 				    M_WAITOK | M_ZERO);
4108 				goto DIOCGETSTATES_retry;
4109 			}
4110 
4111 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
4112 				PF_HASHROW_UNLOCK(ih);
4113 				goto DIOCGETSTATES_full;
4114 			}
4115 
4116 			LIST_FOREACH(s, &ih->states, entry) {
4117 				if (s->timeout == PFTM_UNLINKED)
4118 					continue;
4119 
4120 				pfsync_state_export((union pfsync_state_union*)p,
4121 				    s, PFSYNC_MSG_VERSION_1301);
4122 				p++;
4123 				nr++;
4124 			}
4125 			PF_HASHROW_UNLOCK(ih);
4126 			error = copyout(pstore, out,
4127 			    sizeof(struct pfsync_state_1301) * count);
4128 			if (error) {
4129 				free(pstore, M_PF);
4130 				goto fail;
4131 			}
4132 			out = ps->ps_states + nr;
4133 		}
4134 DIOCGETSTATES_full:
4135 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4136 		free(pstore, M_PF);
4137 
4138 		break;
4139 	}
4140 
4141 	case DIOCGETSTATESV2: {
4142 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4143 		struct pf_kstate	*s;
4144 		struct pf_state_export	*pstore, *p;
4145 		int i, nr;
4146 		size_t slice_count = 16, count;
4147 		void *out;
4148 
4149 		if (ps->ps_req_version > PF_STATE_VERSION) {
4150 			error = ENOTSUP;
4151 			goto fail;
4152 		}
4153 
4154 		if (ps->ps_len <= 0) {
4155 			nr = uma_zone_get_cur(V_pf_state_z);
4156 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4157 			break;
4158 		}
4159 
4160 		out = ps->ps_states;
4161 		pstore = mallocarray(slice_count,
4162 		    sizeof(struct pf_state_export), M_PF, M_WAITOK | M_ZERO);
4163 		nr = 0;
4164 
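		/* Same per-bucket grow-and-retry walk as DIOCGETSTATES. */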
4165 		for (i = 0; i <= V_pf_hashmask; i++) {
4166 			struct pf_idhash *ih = &V_pf_idhash[i];
4167 
4168 DIOCGETSTATESV2_retry:
4169 			p = pstore;
4170 
4171 			if (LIST_EMPTY(&ih->states))
4172 				continue;
4173 
4174 			PF_HASHROW_LOCK(ih);
4175 			count = 0;
4176 			LIST_FOREACH(s, &ih->states, entry) {
4177 				if (s->timeout == PFTM_UNLINKED)
4178 					continue;
4179 				count++;
4180 			}
4181 
4182 			if (count > slice_count) {
4183 				PF_HASHROW_UNLOCK(ih);
4184 				free(pstore, M_PF);
4185 				slice_count = count * 2;
4186 				pstore = mallocarray(slice_count,
4187 				    sizeof(struct pf_state_export), M_PF,
4188 				    M_WAITOK | M_ZERO);
4189 				goto DIOCGETSTATESV2_retry;
4190 			}
4191 
4192 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
4193 				PF_HASHROW_UNLOCK(ih);
4194 				goto DIOCGETSTATESV2_full;
4195 			}
4196 
4197 			LIST_FOREACH(s, &ih->states, entry) {
4198 				if (s->timeout == PFTM_UNLINKED)
4199 					continue;
4200 
4201 				pf_state_export(p, s);
4202 				p++;
4203 				nr++;
4204 			}
4205 			PF_HASHROW_UNLOCK(ih);
4206 			error = copyout(pstore, out,
4207 			    sizeof(struct pf_state_export) * count);
4208 			if (error) {
4209 				free(pstore, M_PF);
4210 				goto fail;
4211 			}
4212 			out = ps->ps_states + nr;
4213 		}
4214 DIOCGETSTATESV2_full:
4215 		ps->ps_len = nr * sizeof(struct pf_state_export);
4216 		free(pstore, M_PF);
4217 
4218 		break;
4219 	}
4220 #endif
4221 	case DIOCGETSTATUSNV: {
4222 		error = pf_getstatus((struct pfioc_nv *)addr);
4223 		break;
4224 	}
4225 
4226 	case DIOCSETSTATUSIF: {
4227 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4228 
4229 		if (pi->ifname[0] == 0) {
4230 			bzero(V_pf_status.ifname, IFNAMSIZ);
4231 			break;
4232 		}
4233 		PF_RULES_WLOCK();
4234 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4235 		PF_RULES_WUNLOCK();
4236 		break;
4237 	}
4238 
4239 	case DIOCCLRSTATUS: {
4240 		pf_ioctl_clear_status();
4241 		break;
4242 	}
4243 
4244 	case DIOCNATLOOK: {
4245 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4246 
4247 		error = pf_ioctl_natlook(pnl);
4248 		break;
4249 	}
4250 
4251 	case DIOCSETTIMEOUT: {
4252 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4253 
4254 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4255 		    &pt->seconds);
4256 		break;
4257 	}
4258 
4259 	case DIOCGETTIMEOUT: {
4260 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4261 
4262 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4263 		break;
4264 	}
4265 
4266 	case DIOCGETLIMIT: {
4267 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4268 
4269 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4270 		break;
4271 	}
4272 
4273 	case DIOCSETLIMIT: {
4274 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4275 		unsigned int old_limit;
4276 
4277 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4278 		pl->limit = old_limit;
4279 		break;
4280 	}
4281 
4282 	case DIOCSETDEBUG: {
4283 		u_int32_t	*level = (u_int32_t *)addr;
4284 
4285 		PF_RULES_WLOCK();
4286 		V_pf_status.debug = *level;
4287 		PF_RULES_WUNLOCK();
4288 		break;
4289 	}
4290 
4291 	case DIOCCLRRULECTRS: {
4292 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4293 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4294 		struct pf_krule		*rule;
4295 
4296 		PF_RULES_WLOCK();
4297 		TAILQ_FOREACH(rule,
4298 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4299 			pf_counter_u64_zero(&rule->evaluations);
4300 			for (int i = 0; i < 2; i++) {
4301 				pf_counter_u64_zero(&rule->packets[i]);
4302 				pf_counter_u64_zero(&rule->bytes[i]);
4303 			}
4304 		}
4305 		PF_RULES_WUNLOCK();
4306 		break;
4307 	}
4308 
4309 	case DIOCGIFSPEEDV0:
4310 	case DIOCGIFSPEEDV1: {
4311 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4312 		struct pf_ifspeed_v1	ps;
4313 		struct ifnet		*ifp;
4314 
4315 		if (psp->ifname[0] == '\0') {
4316 			error = EINVAL;
4317 			goto fail;
4318 		}
4319 
4320 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4321 		if (error != 0)
4322 			goto fail;
4323 		ifp = ifunit(ps.ifname);
4324 		if (ifp != NULL) {
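			/*
			 * The v0 structure only carries a 32-bit baudrate,
			 * so clamp it; v1 callers also get the full 64-bit
			 * value.
			 */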
4325 			psp->baudrate32 =
4326 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4327 			if (cmd == DIOCGIFSPEEDV1)
4328 				psp->baudrate = ifp->if_baudrate;
4329 		} else {
4330 			error = EINVAL;
4331 		}
4332 		break;
4333 	}
4334 
4335 #ifdef ALTQ
4336 	case DIOCSTARTALTQ: {
4337 		struct pf_altq		*altq;
4338 
4339 		PF_RULES_WLOCK();
4340 		/* enable all altq interfaces on active list */
4341 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4342 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4343 				error = pf_enable_altq(altq);
4344 				if (error != 0)
4345 					break;
4346 			}
4347 		}
4348 		if (error == 0)
4349 			V_pf_altq_running = 1;
4350 		PF_RULES_WUNLOCK();
4351 		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
4352 		break;
4353 	}
4354 
4355 	case DIOCSTOPALTQ: {
4356 		struct pf_altq		*altq;
4357 
4358 		PF_RULES_WLOCK();
4359 		/* disable all altq interfaces on active list */
4360 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4361 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4362 				error = pf_disable_altq(altq);
4363 				if (error != 0)
4364 					break;
4365 			}
4366 		}
4367 		if (error == 0)
4368 			V_pf_altq_running = 0;
4369 		PF_RULES_WUNLOCK();
4370 		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
4371 		break;
4372 	}
4373 
4374 	case DIOCADDALTQV0:
4375 	case DIOCADDALTQV1: {
4376 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4377 		struct pf_altq		*altq, *a;
4378 		struct ifnet		*ifp;
4379 
4380 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4381 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4382 		if (error)
4383 			goto fail;
4384 		altq->local_flags = 0;
4385 
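		/*
		 * New ALTQ entries are staged on the inactive lists under
		 * the ticket issued by DIOCXBEGIN and only become active
		 * on commit.
		 */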
4386 		PF_RULES_WLOCK();
4387 		if (pa->ticket != V_ticket_altqs_inactive) {
4388 			PF_RULES_WUNLOCK();
4389 			free(altq, M_PFALTQ);
4390 			error = EBUSY;
4391 			goto fail;
4392 		}
4393 
4394 		/*
4395 		 * if this is for a queue, find the discipline and
4396 		 * copy the necessary fields
4397 		 */
4398 		if (altq->qname[0] != 0) {
4399 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4400 				PF_RULES_WUNLOCK();
4401 				error = EBUSY;
4402 				free(altq, M_PFALTQ);
4403 				goto fail;
4404 			}
4405 			altq->altq_disc = NULL;
4406 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4407 				if (strncmp(a->ifname, altq->ifname,
4408 				    IFNAMSIZ) == 0) {
4409 					altq->altq_disc = a->altq_disc;
4410 					break;
4411 				}
4412 			}
4413 		}
4414 
4415 		if ((ifp = ifunit(altq->ifname)) == NULL)
4416 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4417 		else
4418 			error = altq_add(ifp, altq);
4419 
4420 		if (error) {
4421 			PF_RULES_WUNLOCK();
4422 			free(altq, M_PFALTQ);
4423 			goto fail;
4424 		}
4425 
4426 		if (altq->qname[0] != 0)
4427 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4428 		else
4429 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4430 		/* version error check done on import above */
4431 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4432 		PF_RULES_WUNLOCK();
4433 		break;
4434 	}
4435 
4436 	case DIOCGETALTQSV0:
4437 	case DIOCGETALTQSV1: {
4438 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4439 		struct pf_altq		*altq;
4440 
4441 		PF_RULES_RLOCK();
4442 		pa->nr = 0;
4443 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4444 			pa->nr++;
4445 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4446 			pa->nr++;
4447 		pa->ticket = V_ticket_altqs_active;
4448 		PF_RULES_RUNLOCK();
4449 		break;
4450 	}
4451 
4452 	case DIOCGETALTQV0:
4453 	case DIOCGETALTQV1: {
4454 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4455 		struct pf_altq		*altq;
4456 
4457 		PF_RULES_RLOCK();
4458 		if (pa->ticket != V_ticket_altqs_active) {
4459 			PF_RULES_RUNLOCK();
4460 			error = EBUSY;
4461 			goto fail;
4462 		}
4463 		altq = pf_altq_get_nth_active(pa->nr);
4464 		if (altq == NULL) {
4465 			PF_RULES_RUNLOCK();
4466 			error = EBUSY;
4467 			goto fail;
4468 		}
4469 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4470 		PF_RULES_RUNLOCK();
4471 		break;
4472 	}
4473 
4474 	case DIOCCHANGEALTQV0:
4475 	case DIOCCHANGEALTQV1:
4476 		/* CHANGEALTQ not supported yet! */
4477 		error = ENODEV;
4478 		break;
4479 
4480 	case DIOCGETQSTATSV0:
4481 	case DIOCGETQSTATSV1: {
4482 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4483 		struct pf_altq		*altq;
4484 		int			 nbytes;
4485 		u_int32_t		 version;
4486 
4487 		PF_RULES_RLOCK();
4488 		if (pq->ticket != V_ticket_altqs_active) {
4489 			PF_RULES_RUNLOCK();
4490 			error = EBUSY;
4491 			goto fail;
4492 		}
4493 		nbytes = pq->nbytes;
4494 		altq = pf_altq_get_nth_active(pq->nr);
4495 		if (altq == NULL) {
4496 			PF_RULES_RUNLOCK();
4497 			error = EBUSY;
4498 			goto fail;
4499 		}
4500 
4501 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4502 			PF_RULES_RUNLOCK();
4503 			error = ENXIO;
4504 			goto fail;
4505 		}
4506 		PF_RULES_RUNLOCK();
4507 		if (cmd == DIOCGETQSTATSV0)
4508 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4509 		else
4510 			version = pq->version;
4511 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4512 		if (error == 0) {
4513 			pq->scheduler = altq->scheduler;
4514 			pq->nbytes = nbytes;
4515 		}
4516 		break;
4517 	}
4518 #endif /* ALTQ */
4519 
4520 	case DIOCBEGINADDRS: {
4521 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4522 
4523 		error = pf_ioctl_begin_addrs(&pp->ticket);
4524 		break;
4525 	}
4526 
4527 	case DIOCADDADDR: {
4528 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4529 		struct pf_nl_pooladdr npp = {};
4530 
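		/*
		 * The legacy ioctl always operates on the redirect pool.
		 * 'which' must live past the prefix pf_nl_pooladdr shares
		 * with pfioc_pooladdr, or the memcpy() below would clobber
		 * the value set here.
		 */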
4531 		npp.which = PF_RDR;
4532 		memcpy(&npp, pp, sizeof(*pp));
4533 		error = pf_ioctl_add_addr(&npp);
4534 		break;
4535 	}
4536 
4537 	case DIOCGETADDRS: {
4538 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4539 		struct pf_nl_pooladdr npp = {};
4540 
4541 		npp.which = PF_RDR;
4542 		memcpy(&npp, pp, sizeof(*pp));
4543 		error = pf_ioctl_get_addrs(&npp);
4544 		memcpy(pp, &npp, sizeof(*pp));
4545 
4546 		break;
4547 	}
4548 
4549 	case DIOCGETADDR: {
4550 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4551 		struct pf_nl_pooladdr npp = {};
4552 
4553 		npp.which = PF_RDR;
4554 		memcpy(&npp, pp, sizeof(*pp));
4555 		error = pf_ioctl_get_addr(&npp);
4556 		memcpy(pp, &npp, sizeof(*pp));
4557 
4558 		break;
4559 	}
4560 
4561 	case DIOCCHANGEADDR: {
4562 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4563 		struct pf_kpool		*pool;
4564 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4565 		struct pf_kruleset	*ruleset;
4566 		struct pfi_kkif		*kif = NULL;
4567 
4568 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
4569 
4570 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4571 		    pca->action > PF_CHANGE_REMOVE) {
4572 			error = EINVAL;
4573 			goto fail;
4574 		}
4575 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4576 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4577 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4578 			error = EINVAL;
4579 			goto fail;
4580 		}
4581 		if (pca->addr.addr.p.dyn != NULL) {
4582 			error = EINVAL;
4583 			goto fail;
4584 		}
4585 
4586 		if (pca->action != PF_CHANGE_REMOVE) {
4587 #ifndef INET
4588 			if (pca->af == AF_INET) {
4589 				error = EAFNOSUPPORT;
4590 				goto fail;
4591 			}
4592 #endif /* INET */
4593 #ifndef INET6
4594 			if (pca->af == AF_INET6) {
4595 				error = EAFNOSUPPORT;
4596 				goto fail;
4597 			}
4598 #endif /* INET6 */
4599 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4600 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4601 			if (newpa->ifname[0])
4602 				kif = pf_kkif_create(M_WAITOK);
4603 			newpa->kif = NULL;
4604 		}
4605 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4606 		PF_RULES_WLOCK();
4607 		ruleset = pf_find_kruleset(pca->anchor);
4608 		if (ruleset == NULL)
4609 			ERROUT(EBUSY);
4610 
4611 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4612 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4613 		if (pool == NULL)
4614 			ERROUT(EBUSY);
4615 
4616 		if (pca->action != PF_CHANGE_REMOVE) {
4617 			if (newpa->ifname[0]) {
4618 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4619 				pfi_kkif_ref(newpa->kif);
4620 				kif = NULL;
4621 			}
4622 
4623 			switch (newpa->addr.type) {
4624 			case PF_ADDR_DYNIFTL:
4625 				error = pfi_dynaddr_setup(&newpa->addr,
4626 				    pca->af);
4627 				break;
4628 			case PF_ADDR_TABLE:
4629 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4630 				    newpa->addr.v.tblname);
4631 				if (newpa->addr.p.tbl == NULL)
4632 					error = ENOMEM;
4633 				break;
4634 			}
4635 			if (error)
4636 				goto DIOCCHANGEADDR_error;
4637 		}
4638 
4639 		switch (pca->action) {
4640 		case PF_CHANGE_ADD_HEAD:
4641 			oldpa = TAILQ_FIRST(&pool->list);
4642 			break;
4643 		case PF_CHANGE_ADD_TAIL:
4644 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4645 			break;
4646 		default:
4647 			oldpa = TAILQ_FIRST(&pool->list);
4648 			for (int i = 0; oldpa && i < pca->nr; i++)
4649 				oldpa = TAILQ_NEXT(oldpa, entries);
4650 
4651 			if (oldpa == NULL)
4652 				ERROUT(EINVAL);
4653 		}
4654 
4655 		if (pca->action == PF_CHANGE_REMOVE) {
4656 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4657 			switch (oldpa->addr.type) {
4658 			case PF_ADDR_DYNIFTL:
4659 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4660 				break;
4661 			case PF_ADDR_TABLE:
4662 				pfr_detach_table(oldpa->addr.p.tbl);
4663 				break;
4664 			}
4665 			if (oldpa->kif)
4666 				pfi_kkif_unref(oldpa->kif);
4667 			free(oldpa, M_PFRULE);
4668 		} else {
4669 			if (oldpa == NULL)
4670 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4671 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4672 			    pca->action == PF_CHANGE_ADD_BEFORE)
4673 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4674 			else
4675 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4676 				    newpa, entries);
4677 		}
4678 
4679 		pool->cur = TAILQ_FIRST(&pool->list);
4680 		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4681 		PF_RULES_WUNLOCK();
4682 		break;
4683 
4684 #undef ERROUT
4685 DIOCCHANGEADDR_error:
4686 		if (newpa != NULL) {
4687 			if (newpa->kif)
4688 				pfi_kkif_unref(newpa->kif);
4689 			free(newpa, M_PFRULE);
4690 		}
4691 		PF_RULES_WUNLOCK();
4692 		pf_kkif_free(kif);
4693 		break;
4694 	}
4695 
4696 	case DIOCGETRULESETS: {
4697 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4698 
4699 		pr->path[sizeof(pr->path) - 1] = '\0';
4700 
4701 		error = pf_ioctl_get_rulesets(pr);
4702 		break;
4703 	}
4704 
4705 	case DIOCGETRULESET: {
4706 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4707 
4708 		pr->path[sizeof(pr->path) - 1] = '\0';
4709 
4710 		error = pf_ioctl_get_ruleset(pr);
4711 		break;
4712 	}
4713 
4714 	case DIOCRCLRTABLES: {
4715 		struct pfioc_table *io = (struct pfioc_table *)addr;
4716 
4717 		if (io->pfrio_esize != 0) {
4718 			error = ENODEV;
4719 			goto fail;
4720 		}
4721 		PF_RULES_WLOCK();
4722 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4723 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4724 		PF_RULES_WUNLOCK();
4725 		break;
4726 	}
4727 
4728 	case DIOCRADDTABLES: {
4729 		struct pfioc_table *io = (struct pfioc_table *)addr;
4730 		struct pfr_table *pfrts;
4731 		size_t totlen;
4732 
4733 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4734 			error = ENODEV;
4735 			goto fail;
4736 		}
4737 
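		/*
		 * Bound the element count and reject requests whose byte
		 * size would overflow before calling mallocarray(9).
		 */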
4738 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4739 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4740 			error = ENOMEM;
4741 			goto fail;
4742 		}
4743 
4744 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4745 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4746 		    M_PF, M_WAITOK);
4747 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4748 		if (error) {
4749 			free(pfrts, M_PF);
4750 			goto fail;
4751 		}
4752 		PF_RULES_WLOCK();
4753 		error = pfr_add_tables(pfrts, io->pfrio_size,
4754 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4755 		PF_RULES_WUNLOCK();
4756 		free(pfrts, M_PF);
4757 		break;
4758 	}
4759 
4760 	case DIOCRDELTABLES: {
4761 		struct pfioc_table *io = (struct pfioc_table *)addr;
4762 		struct pfr_table *pfrts;
4763 		size_t totlen;
4764 
4765 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4766 			error = ENODEV;
4767 			goto fail;
4768 		}
4769 
4770 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4771 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4772 			error = ENOMEM;
4773 			goto fail;
4774 		}
4775 
4776 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4777 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4778 		    M_PF, M_WAITOK);
4779 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4780 		if (error) {
4781 			free(pfrts, M_PF);
4782 			goto fail;
4783 		}
4784 		PF_RULES_WLOCK();
4785 		error = pfr_del_tables(pfrts, io->pfrio_size,
4786 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4787 		PF_RULES_WUNLOCK();
4788 		free(pfrts, M_PF);
4789 		break;
4790 	}
4791 
4792 	case DIOCRGETTABLES: {
4793 		struct pfioc_table *io = (struct pfioc_table *)addr;
4794 		struct pfr_table *pfrts;
4795 		size_t totlen;
4796 		int n;
4797 
4798 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4799 			error = ENODEV;
4800 			goto fail;
4801 		}
4802 		PF_RULES_RLOCK();
4803 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4804 		if (n < 0) {
4805 			PF_RULES_RUNLOCK();
4806 			error = EINVAL;
4807 			goto fail;
4808 		}
4809 		io->pfrio_size = min(io->pfrio_size, n);
4810 
4811 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4812 
4813 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4814 		    M_PF, M_NOWAIT | M_ZERO);
4815 		if (pfrts == NULL) {
4816 			error = ENOMEM;
4817 			PF_RULES_RUNLOCK();
4818 			goto fail;
4819 		}
4820 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4821 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4822 		PF_RULES_RUNLOCK();
4823 		if (error == 0)
4824 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4825 		free(pfrts, M_PF);
4826 		break;
4827 	}
4828 
4829 	case DIOCRGETTSTATS: {
4830 		struct pfioc_table *io = (struct pfioc_table *)addr;
4831 		struct pfr_tstats *pfrtstats;
4832 		size_t totlen;
4833 		int n;
4834 
4835 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4836 			error = ENODEV;
4837 			goto fail;
4838 		}
4839 		PF_TABLE_STATS_LOCK();
4840 		PF_RULES_RLOCK();
4841 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4842 		if (n < 0) {
4843 			PF_RULES_RUNLOCK();
4844 			PF_TABLE_STATS_UNLOCK();
4845 			error = EINVAL;
4846 			goto fail;
4847 		}
4848 		io->pfrio_size = min(io->pfrio_size, n);
4849 
4850 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4851 		pfrtstats = mallocarray(io->pfrio_size,
4852 		    sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
4853 		if (pfrtstats == NULL) {
4854 			error = ENOMEM;
4855 			PF_RULES_RUNLOCK();
4856 			PF_TABLE_STATS_UNLOCK();
4857 			goto fail;
4858 		}
4859 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4860 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4861 		PF_RULES_RUNLOCK();
4862 		PF_TABLE_STATS_UNLOCK();
4863 		if (error == 0)
4864 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4865 		free(pfrtstats, M_PF);
4866 		break;
4867 	}
4868 
4869 	case DIOCRCLRTSTATS: {
4870 		struct pfioc_table *io = (struct pfioc_table *)addr;
4871 		struct pfr_table *pfrts;
4872 		size_t totlen;
4873 
4874 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4875 			error = ENODEV;
4876 			goto fail;
4877 		}
4878 
4879 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4880 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4881 			/* We used to count tables and use the minimum required
4882 			 * size, so we didn't fail on overly large requests.
4883 			 * Keep doing so. */
4884 			io->pfrio_size = pf_ioctl_maxcount;
4885 			goto fail;
4886 		}
4887 
4888 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4889 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4890 		    M_PF, M_WAITOK);
4891 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4892 		if (error) {
4893 			free(pfrts, M_PF);
4894 			goto fail;
4895 		}
4896 
4897 		PF_TABLE_STATS_LOCK();
4898 		PF_RULES_RLOCK();
4899 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4900 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4901 		PF_RULES_RUNLOCK();
4902 		PF_TABLE_STATS_UNLOCK();
4903 		free(pfrts, M_PF);
4904 		break;
4905 	}
4906 
4907 	case DIOCRSETTFLAGS: {
4908 		struct pfioc_table *io = (struct pfioc_table *)addr;
4909 		struct pfr_table *pfrts;
4910 		size_t totlen;
4911 		int n;
4912 
4913 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4914 			error = ENODEV;
4915 			goto fail;
4916 		}
4917 
4918 		PF_RULES_RLOCK();
4919 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4920 		if (n < 0) {
4921 			PF_RULES_RUNLOCK();
4922 			error = EINVAL;
4923 			goto fail;
4924 		}
4925 
4926 		io->pfrio_size = min(io->pfrio_size, n);
4927 		PF_RULES_RUNLOCK();
4928 
4929 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4930 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4931 		    M_PF, M_WAITOK);
4932 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4933 		if (error) {
4934 			free(pfrts, M_PF);
4935 			goto fail;
4936 		}
4937 		PF_RULES_WLOCK();
4938 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4939 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4940 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4941 		PF_RULES_WUNLOCK();
4942 		free(pfrts, M_PF);
4943 		break;
4944 	}
4945 
4946 	case DIOCRCLRADDRS: {
4947 		struct pfioc_table *io = (struct pfioc_table *)addr;
4948 
4949 		if (io->pfrio_esize != 0) {
4950 			error = ENODEV;
4951 			goto fail;
4952 		}
4953 		PF_RULES_WLOCK();
4954 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4955 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4956 		PF_RULES_WUNLOCK();
4957 		break;
4958 	}
4959 
4960 	case DIOCRADDADDRS: {
4961 		struct pfioc_table *io = (struct pfioc_table *)addr;
4962 		struct pfr_addr *pfras;
4963 		size_t totlen;
4964 
4965 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4966 			error = ENODEV;
4967 			goto fail;
4968 		}
4969 		if (io->pfrio_size < 0 ||
4970 		    io->pfrio_size > pf_ioctl_maxcount ||
4971 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4972 			error = EINVAL;
4973 			goto fail;
4974 		}
4975 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4976 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4977 		    M_PF, M_WAITOK);
4978 		error = copyin(io->pfrio_buffer, pfras, totlen);
4979 		if (error) {
4980 			free(pfras, M_PF);
4981 			goto fail;
4982 		}
4983 		PF_RULES_WLOCK();
4984 		io->pfrio_nadd = 0;
4985 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4986 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4987 		    PFR_FLAG_USERIOCTL);
4988 		PF_RULES_WUNLOCK();
4989 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4990 			error = copyout(pfras, io->pfrio_buffer, totlen);
4991 		free(pfras, M_PF);
4992 		break;
4993 	}
4994 
4995 	case DIOCRDELADDRS: {
4996 		struct pfioc_table *io = (struct pfioc_table *)addr;
4997 		struct pfr_addr *pfras;
4998 		size_t totlen;
4999 
5000 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5001 			error = ENODEV;
5002 			goto fail;
5003 		}
5004 		if (io->pfrio_size < 0 ||
5005 		    io->pfrio_size > pf_ioctl_maxcount ||
5006 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5007 			error = EINVAL;
5008 			goto fail;
5009 		}
5010 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5011 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5012 		    M_PF, M_WAITOK);
5013 		error = copyin(io->pfrio_buffer, pfras, totlen);
5014 		if (error) {
5015 			free(pfras, M_PF);
5016 			goto fail;
5017 		}
5018 		PF_RULES_WLOCK();
5019 		error = pfr_del_addrs(&io->pfrio_table, pfras,
5020 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
5021 		    PFR_FLAG_USERIOCTL);
5022 		PF_RULES_WUNLOCK();
5023 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5024 			error = copyout(pfras, io->pfrio_buffer, totlen);
5025 		free(pfras, M_PF);
5026 		break;
5027 	}
5028 
5029 	case DIOCRSETADDRS: {
5030 		struct pfioc_table *io = (struct pfioc_table *)addr;
5031 		struct pfr_addr *pfras;
5032 		size_t totlen, count;
5033 
5034 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5035 			error = ENODEV;
5036 			goto fail;
5037 		}
5038 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
5039 			error = EINVAL;
5040 			goto fail;
5041 		}
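		/*
		 * The same user buffer carries the new address list in and
		 * the feedback copy out, so size it for whichever is
		 * larger.
		 */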
5042 		count = max(io->pfrio_size, io->pfrio_size2);
5043 		if (count > pf_ioctl_maxcount ||
5044 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
5045 			error = EINVAL;
5046 			goto fail;
5047 		}
5048 		totlen = count * sizeof(struct pfr_addr);
5049 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_PF,
5050 		    M_WAITOK);
5051 		error = copyin(io->pfrio_buffer, pfras, totlen);
5052 		if (error) {
5053 			free(pfras, M_PF);
5054 			goto fail;
5055 		}
5056 		PF_RULES_WLOCK();
5057 		error = pfr_set_addrs(&io->pfrio_table, pfras,
5058 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
5059 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5060 		    PFR_FLAG_USERIOCTL, 0);
5061 		PF_RULES_WUNLOCK();
5062 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5063 			error = copyout(pfras, io->pfrio_buffer, totlen);
5064 		free(pfras, M_PF);
5065 		break;
5066 	}
5067 
5068 	case DIOCRGETADDRS: {
5069 		struct pfioc_table *io = (struct pfioc_table *)addr;
5070 		struct pfr_addr *pfras;
5071 		size_t totlen;
5072 
5073 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5074 			error = ENODEV;
5075 			goto fail;
5076 		}
5077 		if (io->pfrio_size < 0 ||
5078 		    io->pfrio_size > pf_ioctl_maxcount ||
5079 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5080 			error = EINVAL;
5081 			goto fail;
5082 		}
5083 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5084 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5085 		    M_PF, M_WAITOK | M_ZERO);
5086 		PF_RULES_RLOCK();
5087 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5088 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5089 		PF_RULES_RUNLOCK();
5090 		if (error == 0)
5091 			error = copyout(pfras, io->pfrio_buffer, totlen);
5092 		free(pfras, M_PF);
5093 		break;
5094 	}
5095 
5096 	case DIOCRGETASTATS: {
5097 		struct pfioc_table *io = (struct pfioc_table *)addr;
5098 		struct pfr_astats *pfrastats;
5099 		size_t totlen;
5100 
5101 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5102 			error = ENODEV;
5103 			goto fail;
5104 		}
5105 		if (io->pfrio_size < 0 ||
5106 		    io->pfrio_size > pf_ioctl_maxcount ||
5107 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5108 			error = EINVAL;
5109 			goto fail;
5110 		}
5111 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5112 		pfrastats = mallocarray(io->pfrio_size,
5113 		    sizeof(struct pfr_astats), M_PF, M_WAITOK | M_ZERO);
5114 		PF_RULES_RLOCK();
5115 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5116 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5117 		PF_RULES_RUNLOCK();
5118 		if (error == 0)
5119 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5120 		free(pfrastats, M_PF);
5121 		break;
5122 	}
5123 
5124 	case DIOCRCLRASTATS: {
5125 		struct pfioc_table *io = (struct pfioc_table *)addr;
5126 		struct pfr_addr *pfras;
5127 		size_t totlen;
5128 
5129 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5130 			error = ENODEV;
5131 			goto fail;
5132 		}
5133 		if (io->pfrio_size < 0 ||
5134 		    io->pfrio_size > pf_ioctl_maxcount ||
5135 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5136 			error = EINVAL;
5137 			goto fail;
5138 		}
5139 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5140 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5141 		    M_PF, M_WAITOK);
5142 		error = copyin(io->pfrio_buffer, pfras, totlen);
5143 		if (error) {
5144 			free(pfras, M_PF);
5145 			goto fail;
5146 		}
5147 		PF_RULES_WLOCK();
5148 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5149 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5150 		    PFR_FLAG_USERIOCTL);
5151 		PF_RULES_WUNLOCK();
5152 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5153 			error = copyout(pfras, io->pfrio_buffer, totlen);
5154 		free(pfras, M_PF);
5155 		break;
5156 	}
5157 
5158 	case DIOCRTSTADDRS: {
5159 		struct pfioc_table *io = (struct pfioc_table *)addr;
5160 		struct pfr_addr *pfras;
5161 		size_t totlen;
5162 
5163 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5164 			error = ENODEV;
5165 			goto fail;
5166 		}
5167 		if (io->pfrio_size < 0 ||
5168 		    io->pfrio_size > pf_ioctl_maxcount ||
5169 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5170 			error = EINVAL;
5171 			goto fail;
5172 		}
5173 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5174 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5175 		    M_PF, M_WAITOK);
5176 		error = copyin(io->pfrio_buffer, pfras, totlen);
5177 		if (error) {
5178 			free(pfras, M_PF);
5179 			goto fail;
5180 		}
5181 		PF_RULES_RLOCK();
5182 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5183 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5184 		    PFR_FLAG_USERIOCTL);
5185 		PF_RULES_RUNLOCK();
5186 		if (error == 0)
5187 			error = copyout(pfras, io->pfrio_buffer, totlen);
5188 		free(pfras, M_PF);
5189 		break;
5190 	}
5191 
5192 	case DIOCRINADEFINE: {
5193 		struct pfioc_table *io = (struct pfioc_table *)addr;
5194 		struct pfr_addr *pfras;
5195 		size_t totlen;
5196 
5197 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5198 			error = ENODEV;
5199 			goto fail;
5200 		}
5201 		if (io->pfrio_size < 0 ||
5202 		    io->pfrio_size > pf_ioctl_maxcount ||
5203 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5204 			error = EINVAL;
5205 			goto fail;
5206 		}
5207 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5208 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5209 		    M_PF, M_WAITOK);
5210 		error = copyin(io->pfrio_buffer, pfras, totlen);
5211 		if (error) {
5212 			free(pfras, M_PF);
5213 			goto fail;
5214 		}
5215 		PF_RULES_WLOCK();
5216 		error = pfr_ina_define(&io->pfrio_table, pfras,
5217 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5218 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5219 		PF_RULES_WUNLOCK();
5220 		free(pfras, M_PF);
5221 		break;
5222 	}
5223 
5224 	case DIOCOSFPADD: {
5225 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5226 		PF_RULES_WLOCK();
5227 		error = pf_osfp_add(io);
5228 		PF_RULES_WUNLOCK();
5229 		break;
5230 	}
5231 
5232 	case DIOCOSFPGET: {
5233 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5234 		PF_RULES_RLOCK();
5235 		error = pf_osfp_get(io);
5236 		PF_RULES_RUNLOCK();
5237 		break;
5238 	}
5239 
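	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement pfctl's atomic
	 * ruleset replacement: XBEGIN opens an inactive copy of each
	 * listed ruleset and returns one ticket per entry, XROLLBACK
	 * discards those copies, and XCOMMIT validates every ticket
	 * before swapping them all to active at once.
	 */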
5240 	case DIOCXBEGIN: {
5241 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5242 		struct pfioc_trans_e	*ioes, *ioe;
5243 		size_t			 totlen;
5244 		int			 i;
5245 
5246 		if (io->esize != sizeof(*ioe)) {
5247 			error = ENODEV;
5248 			goto fail;
5249 		}
5250 		if (io->size < 0 ||
5251 		    io->size > pf_ioctl_maxcount ||
5252 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5253 			error = EINVAL;
5254 			goto fail;
5255 		}
5256 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5257 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5258 		    M_PF, M_WAITOK);
5259 		error = copyin(io->array, ioes, totlen);
5260 		if (error) {
5261 			free(ioes, M_PF);
5262 			goto fail;
5263 		}
5264 		PF_RULES_WLOCK();
5265 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5266 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5267 			switch (ioe->rs_num) {
5268 			case PF_RULESET_ETH:
5269 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5270 					PF_RULES_WUNLOCK();
5271 					free(ioes, M_PF);
5272 					goto fail;
5273 				}
5274 				break;
5275 #ifdef ALTQ
5276 			case PF_RULESET_ALTQ:
5277 				if (ioe->anchor[0]) {
5278 					PF_RULES_WUNLOCK();
5279 					free(ioes, M_PF);
5280 					error = EINVAL;
5281 					goto fail;
5282 				}
5283 				if ((error = pf_begin_altq(&ioe->ticket))) {
5284 					PF_RULES_WUNLOCK();
5285 					free(ioes, M_PF);
5286 					goto fail;
5287 				}
5288 				break;
5289 #endif /* ALTQ */
5290 			case PF_RULESET_TABLE:
5291 			    {
5292 				struct pfr_table table;
5293 
5294 				bzero(&table, sizeof(table));
5295 				strlcpy(table.pfrt_anchor, ioe->anchor,
5296 				    sizeof(table.pfrt_anchor));
5297 				if ((error = pfr_ina_begin(&table,
5298 				    &ioe->ticket, NULL, 0))) {
5299 					PF_RULES_WUNLOCK();
5300 					free(ioes, M_PF);
5301 					goto fail;
5302 				}
5303 				break;
5304 			    }
5305 			default:
5306 				if ((error = pf_begin_rules(&ioe->ticket,
5307 				    ioe->rs_num, ioe->anchor))) {
5308 					PF_RULES_WUNLOCK();
5309 					free(ioes, M_PF);
5310 					goto fail;
5311 				}
5312 				break;
5313 			}
5314 		}
5315 		PF_RULES_WUNLOCK();
5316 		error = copyout(ioes, io->array, totlen);
5317 		free(ioes, M_PF);
5318 		break;
5319 	}
5320 
5321 	case DIOCXROLLBACK: {
5322 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5323 		struct pfioc_trans_e	*ioe, *ioes;
5324 		size_t			 totlen;
5325 		int			 i;
5326 
5327 		if (io->esize != sizeof(*ioe)) {
5328 			error = ENODEV;
5329 			goto fail;
5330 		}
5331 		if (io->size < 0 ||
5332 		    io->size > pf_ioctl_maxcount ||
5333 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5334 			error = EINVAL;
5335 			goto fail;
5336 		}
5337 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5338 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5339 		    M_PF, M_WAITOK);
5340 		error = copyin(io->array, ioes, totlen);
5341 		if (error) {
5342 			free(ioes, M_PF);
5343 			goto fail;
5344 		}
5345 		PF_RULES_WLOCK();
5346 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5347 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5348 			switch (ioe->rs_num) {
5349 			case PF_RULESET_ETH:
5350 				if ((error = pf_rollback_eth(ioe->ticket,
5351 				    ioe->anchor))) {
5352 					PF_RULES_WUNLOCK();
5353 					free(ioes, M_PF);
5354 					goto fail; /* really bad */
5355 				}
5356 				break;
5357 #ifdef ALTQ
5358 			case PF_RULESET_ALTQ:
5359 				if (ioe->anchor[0]) {
5360 					PF_RULES_WUNLOCK();
5361 					free(ioes, M_PF);
5362 					error = EINVAL;
5363 					goto fail;
5364 				}
5365 				if ((error = pf_rollback_altq(ioe->ticket))) {
5366 					PF_RULES_WUNLOCK();
5367 					free(ioes, M_PF);
5368 					goto fail; /* really bad */
5369 				}
5370 				break;
5371 #endif /* ALTQ */
5372 			case PF_RULESET_TABLE:
5373 			    {
5374 				struct pfr_table table;
5375 
5376 				bzero(&table, sizeof(table));
5377 				strlcpy(table.pfrt_anchor, ioe->anchor,
5378 				    sizeof(table.pfrt_anchor));
5379 				if ((error = pfr_ina_rollback(&table,
5380 				    ioe->ticket, NULL, 0))) {
5381 					PF_RULES_WUNLOCK();
5382 					free(ioes, M_PF);
5383 					goto fail; /* really bad */
5384 				}
5385 				break;
5386 			    }
5387 			default:
5388 				if ((error = pf_rollback_rules(ioe->ticket,
5389 				    ioe->rs_num, ioe->anchor))) {
5390 					PF_RULES_WUNLOCK();
5391 					free(ioes, M_PF);
5392 					goto fail; /* really bad */
5393 				}
5394 				break;
5395 			}
5396 		}
5397 		PF_RULES_WUNLOCK();
5398 		free(ioes, M_PF);
5399 		break;
5400 	}
5401 
5402 	case DIOCXCOMMIT: {
5403 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5404 		struct pfioc_trans_e	*ioe, *ioes;
5405 		struct pf_kruleset	*rs;
5406 		struct pf_keth_ruleset	*ers;
5407 		size_t			 totlen;
5408 		int			 i;
5409 
5410 		if (io->esize != sizeof(*ioe)) {
5411 			error = ENODEV;
5412 			goto fail;
5413 		}
5414 
5415 		if (io->size < 0 ||
5416 		    io->size > pf_ioctl_maxcount ||
5417 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5418 			error = EINVAL;
5419 			goto fail;
5420 		}
5421 
5422 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5423 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5424 		    M_PF, M_WAITOK);
5425 		error = copyin(io->array, ioes, totlen);
5426 		if (error) {
5427 			free(ioes, M_PF);
5428 			goto fail;
5429 		}
5430 		PF_RULES_WLOCK();
5431 		/* First make sure everything will succeed, so the commit
5431 		 * cannot fail halfway through. */
5432 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5433 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5434 			switch (ioe->rs_num) {
5435 			case PF_RULESET_ETH:
5436 				ers = pf_find_keth_ruleset(ioe->anchor);
5437 				if (ers == NULL || ioe->ticket == 0 ||
5438 				    ioe->ticket != ers->inactive.ticket) {
5439 					PF_RULES_WUNLOCK();
5440 					free(ioes, M_PF);
5441 					error = EINVAL;
5442 					goto fail;
5443 				}
5444 				break;
5445 #ifdef ALTQ
5446 			case PF_RULESET_ALTQ:
5447 				if (ioe->anchor[0]) {
5448 					PF_RULES_WUNLOCK();
5449 					free(ioes, M_PF);
5450 					error = EINVAL;
5451 					goto fail;
5452 				}
5453 				if (!V_altqs_inactive_open || ioe->ticket !=
5454 				    V_ticket_altqs_inactive) {
5455 					PF_RULES_WUNLOCK();
5456 					free(ioes, M_PF);
5457 					error = EBUSY;
5458 					goto fail;
5459 				}
5460 				break;
5461 #endif /* ALTQ */
5462 			case PF_RULESET_TABLE:
5463 				rs = pf_find_kruleset(ioe->anchor);
5464 				if (rs == NULL || !rs->topen || ioe->ticket !=
5465 				    rs->tticket) {
5466 					PF_RULES_WUNLOCK();
5467 					free(ioes, M_PF);
5468 					error = EBUSY;
5469 					goto fail;
5470 				}
5471 				break;
5472 			default:
5473 				if (ioe->rs_num < 0 || ioe->rs_num >=
5474 				    PF_RULESET_MAX) {
5475 					PF_RULES_WUNLOCK();
5476 					free(ioes, M_PF);
5477 					error = EINVAL;
5478 					goto fail;
5479 				}
5480 				rs = pf_find_kruleset(ioe->anchor);
5481 				if (rs == NULL ||
5482 				    !rs->rules[ioe->rs_num].inactive.open ||
5483 				    rs->rules[ioe->rs_num].inactive.ticket !=
5484 				    ioe->ticket) {
5485 					PF_RULES_WUNLOCK();
5486 					free(ioes, M_PF);
5487 					error = EBUSY;
5488 					goto fail;
5489 				}
5490 				break;
5491 			}
5492 		}
5493 		/* Now do the commit - no errors should happen here. */
5494 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5495 			switch (ioe->rs_num) {
5496 			case PF_RULESET_ETH:
5497 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5498 					PF_RULES_WUNLOCK();
5499 					free(ioes, M_PF);
5500 					goto fail; /* really bad */
5501 				}
5502 				break;
5503 #ifdef ALTQ
5504 			case PF_RULESET_ALTQ:
5505 				if ((error = pf_commit_altq(ioe->ticket))) {
5506 					PF_RULES_WUNLOCK();
5507 					free(ioes, M_PF);
5508 					goto fail; /* really bad */
5509 				}
5510 				break;
5511 #endif /* ALTQ */
5512 			case PF_RULESET_TABLE:
5513 			    {
5514 				struct pfr_table table;
5515 
5516 				bzero(&table, sizeof(table));
5517 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5518 				    sizeof(table.pfrt_anchor));
5519 				if ((error = pfr_ina_commit(&table,
5520 				    ioe->ticket, NULL, NULL, 0))) {
5521 					PF_RULES_WUNLOCK();
5522 					free(ioes, M_PF);
5523 					goto fail; /* really bad */
5524 				}
5525 				break;
5526 			    }
5527 			default:
5528 				if ((error = pf_commit_rules(ioe->ticket,
5529 				    ioe->rs_num, ioe->anchor))) {
5530 					PF_RULES_WUNLOCK();
5531 					free(ioes, M_PF);
5532 					goto fail; /* really bad */
5533 				}
5534 				break;
5535 			}
5536 		}
5537 		PF_RULES_WUNLOCK();
5538 
5539 		/* Only hook into Ethernet traffic if we've got rules for it. */
5540 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5541 			hook_pf_eth();
5542 		else
5543 			dehook_pf_eth();
5544 
5545 		free(ioes, M_PF);
5546 		break;
5547 	}
5548 
5549 	case DIOCGETSRCNODES: {
5550 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5551 		struct pf_srchash	*sh;
5552 		struct pf_ksrc_node	*n;
5553 		struct pf_src_node	*p, *pstore;
5554 		uint32_t		 i, nr = 0;
5555 
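		/*
		 * Count the source nodes first: with psn_len == 0 this is
		 * a pure size probe, otherwise the count bounds the copy
		 * below.
		 */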
5556 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5557 				i++, sh++) {
5558 			PF_HASHROW_LOCK(sh);
5559 			LIST_FOREACH(n, &sh->nodes, entry)
5560 				nr++;
5561 			PF_HASHROW_UNLOCK(sh);
5562 		}
5563 
5564 		psn->psn_len = min(psn->psn_len,
5565 		    sizeof(struct pf_src_node) * nr);
5566 
5567 		if (psn->psn_len == 0) {
5568 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5569 			goto fail;
5570 		}
5571 
5572 		nr = 0;
5573 
5574 		p = pstore = malloc(psn->psn_len, M_PF, M_WAITOK | M_ZERO);
5575 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5576 		    i++, sh++) {
5577 		    PF_HASHROW_LOCK(sh);
5578 		    LIST_FOREACH(n, &sh->nodes, entry) {
5579 
5580 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5581 				break;
5582 
5583 			pf_src_node_copy(n, p);
5584 
5585 			p++;
5586 			nr++;
5587 		    }
5588 		    PF_HASHROW_UNLOCK(sh);
5589 		}
5590 		error = copyout(pstore, psn->psn_src_nodes,
5591 		    sizeof(struct pf_src_node) * nr);
5592 		if (error) {
5593 			free(pstore, M_PF);
5594 			goto fail;
5595 		}
5596 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5597 		free(pstore, M_PF);
5598 		break;
5599 	}
5600 
5601 	case DIOCCLRSRCNODES: {
5602 		pf_kill_srcnodes(NULL);
5603 		break;
5604 	}
5605 
5606 	case DIOCKILLSRCNODES:
5607 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5608 		break;
5609 
5610 #ifdef COMPAT_FREEBSD13
5611 	case DIOCKEEPCOUNTERS_FREEBSD13:
5612 #endif
5613 	case DIOCKEEPCOUNTERS:
5614 		error = pf_keepcounters((struct pfioc_nv *)addr);
5615 		break;
5616 
5617 	case DIOCGETSYNCOOKIES:
5618 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5619 		break;
5620 
5621 	case DIOCSETSYNCOOKIES:
5622 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5623 		break;
5624 
5625 	case DIOCSETHOSTID: {
5626 		u_int32_t	*hostid = (u_int32_t *)addr;
5627 
5628 		PF_RULES_WLOCK();
5629 		if (*hostid == 0)
5630 			V_pf_status.hostid = arc4random();
5631 		else
5632 			V_pf_status.hostid = *hostid;
5633 		PF_RULES_WUNLOCK();
5634 		break;
5635 	}
5636 
5637 	case DIOCOSFPFLUSH:
5638 		PF_RULES_WLOCK();
5639 		pf_osfp_flush();
5640 		PF_RULES_WUNLOCK();
5641 		break;
5642 
5643 	case DIOCIGETIFACES: {
5644 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5645 		struct pfi_kif *ifstore;
5646 		size_t bufsiz;
5647 
5648 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5649 			error = ENODEV;
5650 			goto fail;
5651 		}
5652 
5653 		if (io->pfiio_size < 0 ||
5654 		    io->pfiio_size > pf_ioctl_maxcount ||
5655 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5656 			error = EINVAL;
5657 			goto fail;
5658 		}
5659 
5660 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5661 
5662 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5663 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5664 		    M_PF, M_WAITOK | M_ZERO);
5665 
5666 		PF_RULES_RLOCK();
5667 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5668 		PF_RULES_RUNLOCK();
5669 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5670 		free(ifstore, M_PF);
5671 		break;
5672 	}
5673 
5674 	case DIOCSETIFFLAG: {
5675 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5676 
5677 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5678 
5679 		PF_RULES_WLOCK();
5680 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5681 		PF_RULES_WUNLOCK();
5682 		break;
5683 	}
5684 
5685 	case DIOCCLRIFFLAG: {
5686 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5687 
5688 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5689 
5690 		PF_RULES_WLOCK();
5691 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5692 		PF_RULES_WUNLOCK();
5693 		break;
5694 	}
5695 
5696 	case DIOCSETREASS: {
5697 		u_int32_t	*reass = (u_int32_t *)addr;
5698 
5699 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5700 		/* Removing the DF flag without reassembly enabled is not a
5701 		 * valid combination; disable reassembly in that case. */
5702 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5703 			V_pf_status.reass = 0;
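		/*
		 * The stored value therefore reduces to one of three
		 * settings: 0 (disabled), PF_REASS_ENABLED, or
		 * PF_REASS_ENABLED|PF_REASS_NODF.
		 */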
5704 		break;
5705 	}
5706 
5707 	default:
5708 		error = ENODEV;
5709 		break;
5710 	}
5711 fail:
5712 	CURVNET_RESTORE();
5713 
5714 #undef ERROUT_IOCTL
5715 
5716 	return (error);
5717 }
5718 
5719 void
5720 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5721 {
5722 	bzero(sp, sizeof(union pfsync_state_union));
5723 
5724 	/* copy from state key */
5725 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5726 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5727 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5728 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5729 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5730 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5731 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5732 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5733 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5734 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5735 
5736 	/* copy from state */
5737 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5738 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5739 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5740 	sp->pfs_1301.expire = pf_state_expires(st);
5741 	if (sp->pfs_1301.expire <= time_uptime)
5742 		sp->pfs_1301.expire = htonl(0);
5743 	else
5744 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5745 
5746 	sp->pfs_1301.direction = st->direction;
5747 	sp->pfs_1301.log = st->act.log;
5748 	sp->pfs_1301.timeout = st->timeout;
5749 
5750 	switch (msg_version) {
5751 		case PFSYNC_MSG_VERSION_1301:
5752 			sp->pfs_1301.state_flags = st->state_flags;
5753 			break;
5754 		case PFSYNC_MSG_VERSION_1400:
5755 			sp->pfs_1400.state_flags = htons(st->state_flags);
5756 			sp->pfs_1400.qid = htons(st->act.qid);
5757 			sp->pfs_1400.pqid = htons(st->act.pqid);
5758 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5759 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5760 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5761 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5762 			sp->pfs_1400.set_tos = st->act.set_tos;
5763 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5764 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5765 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5766 			sp->pfs_1400.rt = st->act.rt;
5767 			if (st->act.rt_kif)
5768 				strlcpy(sp->pfs_1400.rt_ifname,
5769 				    st->act.rt_kif->pfik_name,
5770 				    sizeof(sp->pfs_1400.rt_ifname));
5771 			break;
5772 		default:
5773 			panic("%s: Unsupported pfsync_msg_version %d",
5774 			    __func__, msg_version);
5775 	}
5776 
5777 	/*
5778 	 * XXX Why do we bother pfsyncing source node information if source
5779 	 * nodes are not synced? Showing users that there is source tracking
5780 	 * when there is none seems useless.
5781 	 */
5782 	if (st->sns[PF_SN_LIMIT] != NULL)
5783 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5784 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
5785 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5786 
5787 	sp->pfs_1301.id = st->id;
5788 	sp->pfs_1301.creatorid = st->creatorid;
5789 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5790 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5791 
5792 	if (st->rule == NULL)
5793 		sp->pfs_1301.rule = htonl(-1);
5794 	else
5795 		sp->pfs_1301.rule = htonl(st->rule->nr);
5796 	if (st->anchor == NULL)
5797 		sp->pfs_1301.anchor = htonl(-1);
5798 	else
5799 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5800 	if (st->nat_rule == NULL)
5801 		sp->pfs_1301.nat_rule = htonl(-1);
5802 	else
5803 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5804 
5805 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5806 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5807 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5808 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5809 }
5810 
5811 void
5812 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5813 {
5814 	bzero(sp, sizeof(*sp));
5815 
5816 	sp->version = PF_STATE_VERSION;
5817 
5818 	/* copy from state key */
5819 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5820 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5821 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5822 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5823 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5824 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5825 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5826 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5827 	sp->proto = st->key[PF_SK_WIRE]->proto;
5828 	sp->af = st->key[PF_SK_WIRE]->af;
5829 
5830 	/* copy from state */
5831 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5832 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5833 	    sizeof(sp->orig_ifname));
5834 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
5835 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5836 	sp->expire = pf_state_expires(st);
5837 	if (sp->expire <= time_uptime)
5838 		sp->expire = htonl(0);
5839 	else
5840 		sp->expire = htonl(sp->expire - time_uptime);
5841 
5842 	sp->direction = st->direction;
5843 	sp->log = st->act.log;
5844 	sp->timeout = st->timeout;
5845 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5846 	sp->state_flags_compat = st->state_flags;
5847 	sp->state_flags = htons(st->state_flags);
5848 	if (st->sns[PF_SN_LIMIT] != NULL)
5849 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5850 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5851 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5852 	sp->id = st->id;
5853 	sp->creatorid = st->creatorid;
5854 	pf_state_peer_hton(&st->src, &sp->src);
5855 	pf_state_peer_hton(&st->dst, &sp->dst);
5856 
5857 	if (st->rule == NULL)
5858 		sp->rule = htonl(-1);
5859 	else
5860 		sp->rule = htonl(st->rule->nr);
5861 	if (st->anchor == NULL)
5862 		sp->anchor = htonl(-1);
5863 	else
5864 		sp->anchor = htonl(st->anchor->nr);
5865 	if (st->nat_rule == NULL)
5866 		sp->nat_rule = htonl(-1);
5867 	else
5868 		sp->nat_rule = htonl(st->nat_rule->nr);
5869 
5870 	sp->packets[0] = st->packets[0];
5871 	sp->packets[1] = st->packets[1];
5872 	sp->bytes[0] = st->bytes[0];
5873 	sp->bytes[1] = st->bytes[1];
5874 
5875 	sp->qid = htons(st->act.qid);
5876 	sp->pqid = htons(st->act.pqid);
5877 	sp->dnpipe = htons(st->act.dnpipe);
5878 	sp->dnrpipe = htons(st->act.dnrpipe);
5879 	sp->rtableid = htonl(st->act.rtableid);
5880 	sp->min_ttl = st->act.min_ttl;
5881 	sp->set_tos = st->act.set_tos;
5882 	sp->max_mss = htons(st->act.max_mss);
5883 	sp->rt = st->act.rt;
5884 	if (st->act.rt_kif)
5885 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5886 		    sizeof(sp->rt_ifname));
5887 	sp->set_prio[0] = st->act.set_prio[0];
5888 	sp->set_prio[1] = st->act.set_prio[1];
5889 
5890 }
5891 
5892 static void
5893 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5894 {
5895 	struct pfr_ktable *kt;
5896 
5897 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5898 
5899 	kt = aw->p.tbl;
5900 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5901 		kt = kt->pfrkt_root;
5902 	aw->p.tbl = NULL;
5903 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5904 		kt->pfrkt_cnt : -1;
5905 }
5906 
5907 static int
5908 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5909     size_t number, char **names)
5910 {
5911 	nvlist_t        *nvc;
5912 
5913 	nvc = nvlist_create(0);
5914 	if (nvc == NULL)
5915 		return (ENOMEM);
5916 
5917 	for (size_t i = 0; i < number; i++) {
5918 		nvlist_append_number_array(nvc, "counters",
5919 		    counter_u64_fetch(counters[i]));
5920 		nvlist_append_string_array(nvc, "names",
5921 		    names[i]);
5922 		nvlist_append_number_array(nvc, "ids",
5923 		    i);
5924 	}
5925 	nvlist_add_nvlist(nvl, name, nvc);
5926 	nvlist_destroy(nvc);
5927 
5928 	return (0);
5929 }
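/*
 * The nvlist built above carries three parallel arrays.  A consumer sketch
 * using the nv(9) accessors (illustrative only; "nvl" is assumed to be the
 * unpacked status nvlist obtained via DIOCGETSTATUSNV):
 *
 *	size_t n;
 *	const nvlist_t *nvc = nvlist_get_nvlist(nvl, "counters");
 *	const uint64_t *vals = nvlist_get_number_array(nvc, "counters", &n);
 *	const char * const *names = nvlist_get_string_array(nvc, "names", &n);
 */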
5930 
5931 static int
5932 pf_getstatus(struct pfioc_nv *nv)
5933 {
5934 	nvlist_t        *nvl = NULL, *nvc = NULL;
5935 	void            *nvlpacked = NULL;
5936 	int              error;
5937 	struct pf_status s;
5938 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5939 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5940 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5941 	time_t since;
5942 
5943 	PF_RULES_RLOCK_TRACKER;
5944 
5945 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5946 
5947 	PF_RULES_RLOCK();
5948 
5949 	nvl = nvlist_create(0);
5950 	if (nvl == NULL)
5951 		ERROUT(ENOMEM);
5952 
5953 	since = time_second - (time_uptime - V_pf_status.since);
5954 
5955 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5956 	nvlist_add_number(nvl, "since", since);
5957 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5958 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5959 	nvlist_add_number(nvl, "states", V_pf_status.states);
5960 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5961 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5962 	nvlist_add_bool(nvl, "syncookies_active",
5963 	    V_pf_status.syncookies_active);
5964 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5965 
5966 	/* counters */
5967 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5968 	    PFRES_MAX, pf_reasons);
5969 	if (error != 0)
5970 		ERROUT(error);
5971 
5972 	/* lcounters */
5973 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5974 	    KLCNT_MAX, pf_lcounter);
5975 	if (error != 0)
5976 		ERROUT(error);
5977 
5978 	/* fcounters */
5979 	nvc = nvlist_create(0);
5980 	if (nvc == NULL)
5981 		ERROUT(ENOMEM);
5982 
5983 	for (int i = 0; i < FCNT_MAX; i++) {
5984 		nvlist_append_number_array(nvc, "counters",
5985 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5986 		nvlist_append_string_array(nvc, "names",
5987 		    pf_fcounter[i]);
5988 		nvlist_append_number_array(nvc, "ids",
5989 		    i);
5990 	}
5991 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5992 	nvlist_destroy(nvc);
5993 	nvc = NULL;
5994 
5995 	/* scounters */
5996 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5997 	    SCNT_MAX, pf_fcounter);
5998 	if (error != 0)
5999 		ERROUT(error);
6000 
6001 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
6002 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
6003 	    PF_MD5_DIGEST_LENGTH);
6004 
6005 	pfi_update_status(V_pf_status.ifname, &s);
6006 
6007 	/* pcounters / bcounters */
6008 	for (int i = 0; i < 2; i++) {
6009 		for (int j = 0; j < 2; j++) {
6010 			for (int k = 0; k < 2; k++) {
6011 				nvlist_append_number_array(nvl, "pcounters",
6012 				    s.pcounters[i][j][k]);
6013 			}
6014 			nvlist_append_number_array(nvl, "bcounters",
6015 			    s.bcounters[i][j]);
6016 		}
6017 	}
6018 
6019 	nvlpacked = nvlist_pack(nvl, &nv->len);
6020 	if (nvlpacked == NULL)
6021 		ERROUT(ENOMEM);
6022 
6023 	if (nv->size == 0)
6024 		ERROUT(0);
6025 	else if (nv->size < nv->len)
6026 		ERROUT(ENOSPC);
6027 
6028 	PF_RULES_RUNLOCK();
6029 	error = copyout(nvlpacked, nv->data, nv->len);
6030 	goto done;
6031 
6032 #undef ERROUT
6033 errout:
6034 	PF_RULES_RUNLOCK();
6035 done:
6036 	free(nvlpacked, M_NVLIST);
6037 	nvlist_destroy(nvc);
6038 	nvlist_destroy(nvl);
6039 
6040 	return (error);
6041 }
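/*
 * Note that pf_getstatus() supports size probing: a DIOCGETSTATUSNV call
 * with nv->size == 0 succeeds without a copyout and leaves the required
 * buffer length in nv->len, so userland can allocate and retry.
 */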
6042 
6043 /*
6044  * XXX - Check for version mismatch!!!
6045  */
6046 static void
6047 pf_clear_all_states(void)
6048 {
6049 	struct epoch_tracker	 et;
6050 	struct pf_kstate	*s;
6051 	u_int i;
6052 
6053 	NET_EPOCH_ENTER(et);
6054 	for (i = 0; i <= V_pf_hashmask; i++) {
6055 		struct pf_idhash *ih = &V_pf_idhash[i];
6056 relock:
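		/*
		 * pf_remove_state() returns with the hash row unlocked, so
		 * the list may have changed underneath us; re-take the lock
		 * and restart the walk from the row head after each removal.
		 */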
6057 		PF_HASHROW_LOCK(ih);
6058 		LIST_FOREACH(s, &ih->states, entry) {
6059 			s->timeout = PFTM_PURGE;
6060 			/* Don't send out individual delete messages. */
6061 			s->state_flags |= PFSTATE_NOSYNC;
6062 			pf_remove_state(s);
6063 			goto relock;
6064 		}
6065 		PF_HASHROW_UNLOCK(ih);
6066 	}
6067 	NET_EPOCH_EXIT(et);
6068 }
6069 
6070 static int
6071 pf_clear_tables(void)
6072 {
6073 	struct pfioc_table io;
6074 	int error;
6075 
6076 	bzero(&io, sizeof(io));
6077 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
6078 
6079 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
6080 	    io.pfrio_flags);
6081 
6082 	return (error);
6083 }
6084 
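/*
 * Unlink and free source nodes in two passes: first collect matching nodes
 * from every srchash row onto a kill list and mark them (expire = 1), then
 * clear any dangling sns[] pointers in the states before the nodes are
 * freed.  A NULL psnk matches, and thus kills, every source node.
 */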
6085 static void
6086 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6087 {
6088 	struct pf_ksrc_node_list	 kill;
6089 	u_int 				 killed;
6090 
6091 	LIST_INIT(&kill);
6092 	for (int i = 0; i <= V_pf_srchashmask; i++) {
6093 		struct pf_srchash *sh = &V_pf_srchash[i];
6094 		struct pf_ksrc_node *sn, *tmp;
6095 
6096 		PF_HASHROW_LOCK(sh);
6097 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6098 			if (psnk == NULL ||
6099 			    (pf_match_addr(psnk->psnk_src.neg,
6100 			      &psnk->psnk_src.addr.v.a.addr,
6101 			      &psnk->psnk_src.addr.v.a.mask,
6102 			      &sn->addr, sn->af) &&
6103 			    pf_match_addr(psnk->psnk_dst.neg,
6104 			      &psnk->psnk_dst.addr.v.a.addr,
6105 			      &psnk->psnk_dst.addr.v.a.mask,
6106 			      &sn->raddr, sn->af))) {
6107 				pf_unlink_src_node(sn);
6108 				LIST_INSERT_HEAD(&kill, sn, entry);
6109 				sn->expire = 1;
6110 			}
6111 		PF_HASHROW_UNLOCK(sh);
6112 	}
6113 
6114 	for (int i = 0; i <= V_pf_hashmask; i++) {
6115 		struct pf_idhash *ih = &V_pf_idhash[i];
6116 		struct pf_kstate *s;
6117 
6118 		PF_HASHROW_LOCK(ih);
6119 		LIST_FOREACH(s, &ih->states, entry) {
6120 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
6121 			    sn_type++) {
6122 				if (s->sns[sn_type] &&
6123 				    s->sns[sn_type]->expire == 1) {
6124 					s->sns[sn_type] = NULL;
6125 				}
6126 			}
6127 		}
6128 		PF_HASHROW_UNLOCK(ih);
6129 	}
6130 
6131 	killed = pf_free_src_nodes(&kill);
6132 
6133 	if (psnk != NULL)
6134 		psnk->psnk_killed = killed;
6135 }
6136 
6137 static int
6138 pf_keepcounters(struct pfioc_nv *nv)
6139 {
6140 	nvlist_t	*nvl = NULL;
6141 	void		*nvlpacked = NULL;
6142 	int		 error = 0;
6143 
6144 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6145 
6146 	if (nv->len > pf_ioctl_maxcount)
6147 		ERROUT(ENOMEM);
6148 
6149 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6150 	error = copyin(nv->data, nvlpacked, nv->len);
6151 	if (error)
6152 		ERROUT(error);
6153 
6154 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6155 	if (nvl == NULL)
6156 		ERROUT(EBADMSG);
6157 
6158 	if (!nvlist_exists_bool(nvl, "keep_counters"))
6159 		ERROUT(EBADMSG);
6160 
6161 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6162 
6163 on_error:
6164 	nvlist_destroy(nvl);
6165 	free(nvlpacked, M_NVLIST);
6166 	return (error);
6167 }
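/*
 * A minimal userland sketch of driving pf_keepcounters() (illustrative
 * only, error handling omitted; "dev" is assumed to be an open /dev/pf
 * descriptor):
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	struct pfioc_nv nv;
 *
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */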
6168 
6169 unsigned int
6170 pf_clear_states(const struct pf_kstate_kill *kill)
6171 {
6172 	struct pf_state_key_cmp	 match_key;
6173 	struct pf_kstate	*s;
6174 	struct pfi_kkif	*kif;
6175 	int		 idx;
6176 	unsigned int	 killed = 0, dir;
6177 
6178 	NET_EPOCH_ASSERT();
6179 
6180 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6181 		struct pf_idhash *ih = &V_pf_idhash[i];
6182 
6183 relock_DIOCCLRSTATES:
6184 		PF_HASHROW_LOCK(ih);
6185 		LIST_FOREACH(s, &ih->states, entry) {
6186 			/* For floating states look at the original kif. */
6187 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6188 
6189 			if (kill->psk_ifname[0] &&
6190 			    strcmp(kill->psk_ifname,
6191 			    kif->pfik_name))
6192 				continue;
6193 
6194 			if (kill->psk_kill_match) {
6195 				bzero(&match_key, sizeof(match_key));
6196 
6197 				if (s->direction == PF_OUT) {
6198 					dir = PF_IN;
6199 					idx = PF_SK_STACK;
6200 				} else {
6201 					dir = PF_OUT;
6202 					idx = PF_SK_WIRE;
6203 				}
6204 
6205 				match_key.af = s->key[idx]->af;
6206 				match_key.proto = s->key[idx]->proto;
6207 				pf_addrcpy(&match_key.addr[0],
6208 				    &s->key[idx]->addr[1], match_key.af);
6209 				match_key.port[0] = s->key[idx]->port[1];
6210 				pf_addrcpy(&match_key.addr[1],
6211 				    &s->key[idx]->addr[0], match_key.af);
6212 				match_key.port[1] = s->key[idx]->port[0];
6213 			}
6214 
6215 			/*
6216 			 * Don't send out individual
6217 			 * delete messages.
6218 			 */
6219 			s->state_flags |= PFSTATE_NOSYNC;
6220 			pf_remove_state(s);
6221 			killed++;
6222 
6223 			if (kill->psk_kill_match)
6224 				killed += pf_kill_matching_state(&match_key,
6225 				    dir);
6226 
6227 			goto relock_DIOCCLRSTATES;
6228 		}
6229 		PF_HASHROW_UNLOCK(ih);
6230 	}
6231 
6232 	if (V_pfsync_clear_states_ptr != NULL)
6233 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6234 
6235 	return (killed);
6236 }
6237 
6238 void
6239 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6240 {
6241 	struct pf_kstate	*s;
6242 
6243 	NET_EPOCH_ASSERT();
6244 	if (kill->psk_pfcmp.id) {
6245 		if (kill->psk_pfcmp.creatorid == 0)
6246 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6247 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6248 		    kill->psk_pfcmp.creatorid))) {
6249 			pf_remove_state(s);
6250 			*killed = 1;
6251 		}
6252 		return;
6253 	}
6254 
6255 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6256 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6257 }
6258 
6259 static int
6260 pf_killstates_nv(struct pfioc_nv *nv)
6261 {
6262 	struct pf_kstate_kill	 kill;
6263 	struct epoch_tracker	 et;
6264 	nvlist_t		*nvl = NULL;
6265 	void			*nvlpacked = NULL;
6266 	int			 error = 0;
6267 	unsigned int		 killed = 0;
6268 
6269 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6270 
6271 	if (nv->len > pf_ioctl_maxcount)
6272 		ERROUT(ENOMEM);
6273 
6274 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6275 	error = copyin(nv->data, nvlpacked, nv->len);
6276 	if (error)
6277 		ERROUT(error);
6278 
6279 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6280 	if (nvl == NULL)
6281 		ERROUT(EBADMSG);
6282 
6283 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6284 	if (error)
6285 		ERROUT(error);
6286 
6287 	NET_EPOCH_ENTER(et);
6288 	pf_killstates(&kill, &killed);
6289 	NET_EPOCH_EXIT(et);
6290 
6291 	free(nvlpacked, M_NVLIST);
6292 	nvlpacked = NULL;
6293 	nvlist_destroy(nvl);
6294 	nvl = nvlist_create(0);
6295 	if (nvl == NULL)
6296 		ERROUT(ENOMEM);
6297 
6298 	nvlist_add_number(nvl, "killed", killed);
6299 
6300 	nvlpacked = nvlist_pack(nvl, &nv->len);
6301 	if (nvlpacked == NULL)
6302 		ERROUT(ENOMEM);
6303 
6304 	if (nv->size == 0)
6305 		ERROUT(0);
6306 	else if (nv->size < nv->len)
6307 		ERROUT(ENOSPC);
6308 
6309 	error = copyout(nvlpacked, nv->data, nv->len);
6310 
6311 on_error:
6312 	nvlist_destroy(nvl);
6313 	free(nvlpacked, M_NVLIST);
6314 	return (error);
6315 }
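/*
 * The response side of this request/response pattern: after a successful
 * DIOCKILLSTATESNV call the packed nvlist in nv->data carries a single
 * "killed" number, which a caller would read back after unpacking, e.g.
 * with nvlist_get_number(nvl, "killed").
 */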
6316 
6317 static int
6318 pf_clearstates_nv(struct pfioc_nv *nv)
6319 {
6320 	struct pf_kstate_kill	 kill;
6321 	struct epoch_tracker	 et;
6322 	nvlist_t		*nvl = NULL;
6323 	void			*nvlpacked = NULL;
6324 	int			 error = 0;
6325 	unsigned int		 killed;
6326 
6327 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6328 
6329 	if (nv->len > pf_ioctl_maxcount)
6330 		ERROUT(ENOMEM);
6331 
6332 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6333 	error = copyin(nv->data, nvlpacked, nv->len);
6334 	if (error)
6335 		ERROUT(error);
6336 
6337 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6338 	if (nvl == NULL)
6339 		ERROUT(EBADMSG);
6340 
6341 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6342 	if (error)
6343 		ERROUT(error);
6344 
6345 	NET_EPOCH_ENTER(et);
6346 	killed = pf_clear_states(&kill);
6347 	NET_EPOCH_EXIT(et);
6348 
6349 	free(nvlpacked, M_NVLIST);
6350 	nvlpacked = NULL;
6351 	nvlist_destroy(nvl);
6352 	nvl = nvlist_create(0);
6353 	if (nvl == NULL)
6354 		ERROUT(ENOMEM);
6355 
6356 	nvlist_add_number(nvl, "killed", killed);
6357 
6358 	nvlpacked = nvlist_pack(nvl, &nv->len);
6359 	if (nvlpacked == NULL)
6360 		ERROUT(ENOMEM);
6361 
6362 	if (nv->size == 0)
6363 		ERROUT(0);
6364 	else if (nv->size < nv->len)
6365 		ERROUT(ENOSPC);
6366 
6367 	error = copyout(nvlpacked, nv->data, nv->len);
6368 
6369 #undef ERROUT
6370 on_error:
6371 	nvlist_destroy(nvl);
6372 	free(nvlpacked, M_NVLIST);
6373 	return (error);
6374 }
6375 
6376 static int
6377 pf_getstate(struct pfioc_nv *nv)
6378 {
6379 	nvlist_t		*nvl = NULL, *nvls;
6380 	void			*nvlpacked = NULL;
6381 	struct pf_kstate	*s = NULL;
6382 	int			 error = 0;
6383 	uint64_t		 id, creatorid;
6384 
6385 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6386 
6387 	if (nv->len > pf_ioctl_maxcount)
6388 		ERROUT(ENOMEM);
6389 
6390 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6391 	error = copyin(nv->data, nvlpacked, nv->len);
6392 	if (error)
6393 		ERROUT(error);
6394 
6395 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6396 	if (nvl == NULL)
6397 		ERROUT(EBADMSG);
6398 
6399 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6400 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6401 
6402 	s = pf_find_state_byid(id, creatorid);
6403 	if (s == NULL)
6404 		ERROUT(ENOENT);
6405 
6406 	free(nvlpacked, M_NVLIST);
6407 	nvlpacked = NULL;
6408 	nvlist_destroy(nvl);
6409 	nvl = nvlist_create(0);
6410 	if (nvl == NULL)
6411 		ERROUT(ENOMEM);
6412 
6413 	nvls = pf_state_to_nvstate(s);
6414 	if (nvls == NULL)
6415 		ERROUT(ENOMEM);
6416 
6417 	nvlist_add_nvlist(nvl, "state", nvls);
6418 	nvlist_destroy(nvls);
6419 
6420 	nvlpacked = nvlist_pack(nvl, &nv->len);
6421 	if (nvlpacked == NULL)
6422 		ERROUT(ENOMEM);
6423 
6424 	if (nv->size == 0)
6425 		ERROUT(0);
6426 	else if (nv->size < nv->len)
6427 		ERROUT(ENOSPC);
6428 
6429 	error = copyout(nvlpacked, nv->data, nv->len);
6430 
6431 #undef ERROUT
6432 errout:
6433 	if (s != NULL)
6434 		PF_STATE_UNLOCK(s);
6435 	free(nvlpacked, M_NVLIST);
6436 	nvlist_destroy(nvl);
6437 	return (error);
6438 }
6439 
6440 /*
6441  * XXX - Check for version mismatch!!!
6442  */
6443 
6444 /*
6445  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6446  */
6447 static int
6448 shutdown_pf(void)
6449 {
6450 	int error = 0;
6451 	u_int32_t t[5];
6452 	char nn = '\0';
6453 	struct pf_kanchor *anchor, *tmp_anchor;
6454 	struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
6455 	int rs_num;
6456 
6457 	do {
6458 		/* Unlink rules of all user defined anchors */
6459 		RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
6460 		    tmp_anchor) {
6461 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6462 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6463 				    anchor->path)) != 0) {
6464 					DPFPRINTF(PF_DEBUG_MISC, "%s: "
6465 					    "anchor.path=%s rs_num=%d",
6466 					    __func__, anchor->path, rs_num);
6467 					goto error;	/* XXX: rollback? */
6468 				}
6469 			}
6470 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6471 				error = pf_commit_rules(t[rs_num], rs_num,
6472 				    anchor->path);
6473 				MPASS(error == 0);
6474 			}
6475 		}
6476 
6477 		/* Unlink rules of all user defined ether anchors */
6478 		RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
6479 		    &V_pf_keth_anchors, tmp_eth_anchor) {
6480 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6481 			    != 0) {
6482 				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
6483 				    "anchor.path=%s", __func__,
6484 				    eth_anchor->path);
6485 				goto error;
6486 			}
6487 			error = pf_commit_eth(t[0], eth_anchor->path);
6488 			MPASS(error == 0);
6489 		}
6490 
6491 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6492 		    != 0) {
6493 			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
6494 			break;
6495 		}
6496 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6497 		    != 0) {
6498 			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
6499 			break;		/* XXX: rollback? */
6500 		}
6501 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6502 		    != 0) {
6503 			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
6504 			break;		/* XXX: rollback? */
6505 		}
6506 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6507 		    != 0) {
6508 			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
6509 			break;		/* XXX: rollback? */
6510 		}
6511 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6512 		    != 0) {
6513 			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
6514 			break;		/* XXX: rollback? */
6515 		}
6516 
6517 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6518 		MPASS(error == 0);
6519 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6520 		MPASS(error == 0);
6521 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6522 		MPASS(error == 0);
6523 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6524 		MPASS(error == 0);
6525 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6526 		MPASS(error == 0);
6527 
6528 		if ((error = pf_clear_tables()) != 0)
6529 			break;
6530 
6531 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6532 			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
6533 			break;
6534 		}
6535 		error = pf_commit_eth(t[0], &nn);
6536 		MPASS(error == 0);
6537 
6538 #ifdef ALTQ
6539 		if ((error = pf_begin_altq(&t[0])) != 0) {
6540 			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
6541 			break;
6542 		}
6543 		pf_commit_altq(t[0]);
6544 #endif
6545 
6546 		pf_clear_all_states();
6547 
6548 		pf_kill_srcnodes(NULL);
6549 
6550 		/* Status uses no malloced memory, so no cleanup is needed. */
6551 		/* Fingerprints and interfaces have their own cleanup code. */
6552 	} while (0);
6553 
6554 error:
6555 	return (error);
6556 }
6557 
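/*
 * Translate a pf_test*() verdict into a pfil(9) return value: PF_PASS with
 * the mbuf taken (*m == NULL) becomes PFIL_CONSUMED, PF_PASS with the mbuf
 * intact becomes PFIL_PASS, and any other verdict frees the mbuf and
 * reports PFIL_DROPPED.
 */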
6558 static pfil_return_t
6559 pf_check_return(int chk, struct mbuf **m)
6560 {
6561 
6562 	switch (chk) {
6563 	case PF_PASS:
6564 		if (*m == NULL)
6565 			return (PFIL_CONSUMED);
6566 		else
6567 			return (PFIL_PASS);
6568 		break;
6569 	default:
6570 		if (*m != NULL) {
6571 			m_freem(*m);
6572 			*m = NULL;
6573 		}
6574 		return (PFIL_DROPPED);
6575 	}
6576 }
6577 
6578 static pfil_return_t
6579 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6580     void *ruleset __unused, struct inpcb *inp)
6581 {
6582 	int chk;
6583 
6584 	CURVNET_ASSERT_SET();
6585 
6586 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6587 
6588 	return (pf_check_return(chk, m));
6589 }
6590 
6591 static pfil_return_t
6592 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6593     void *ruleset __unused, struct inpcb *inp)
6594 {
6595 	int chk;
6596 
6597 	CURVNET_ASSERT_SET();
6598 
6599 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6600 
6601 	return (pf_check_return(chk, m));
6602 }
6603 
6604 #ifdef INET
6605 static pfil_return_t
6606 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6607     void *ruleset __unused, struct inpcb *inp)
6608 {
6609 	int chk;
6610 
6611 	CURVNET_ASSERT_SET();
6612 
6613 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6614 
6615 	return (pf_check_return(chk, m));
6616 }
6617 
6618 static pfil_return_t
6619 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6620     void *ruleset __unused, struct inpcb *inp)
6621 {
6622 	int chk;
6623 
6624 	CURVNET_ASSERT_SET();
6625 
6626 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6627 
6628 	return (pf_check_return(chk, m));
6629 }
6630 #endif
6631 
6632 #ifdef INET6
6633 static pfil_return_t
6634 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6635     void *ruleset __unused, struct inpcb *inp)
6636 {
6637 	int chk;
6638 
6639 	CURVNET_ASSERT_SET();
6640 
6641 	/*
6642 	 * In case of loopback traffic IPv6 uses the real interface in
6643 	 * order to support scoped addresses. To support stateful filtering
6644 	 * we have to change this to lo0, as is already the case for IPv4.
6645 	 */
6646 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6647 	    m, inp, NULL);
6648 
6649 	return (pf_check_return(chk, m));
6650 }
6651 
6652 static pfil_return_t
6653 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6654     void *ruleset __unused, struct inpcb *inp)
6655 {
6656 	int chk;
6657 
6658 	CURVNET_ASSERT_SET();
6659 
6660 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6661 
6662 	return (pf_check_return(chk, m));
6663 }
6664 #endif /* INET6 */
6665 
6666 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6667 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6668 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6669 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6670 
6671 #ifdef INET
6672 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6673 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6674 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6675 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6676 #endif
6677 #ifdef INET6
6678 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6679 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6680 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6681 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6682 #endif
6683 
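/*
 * pfil(9) registration is a two-step affair: pfil_add_hook() creates the
 * hook object and pfil_link() attaches it to a packet-filter head -- the
 * link-layer head here, and the inet/inet6 heads in hook_pf() below.
 */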
6684 static void
6685 hook_pf_eth(void)
6686 {
6687 	struct pfil_hook_args pha = {
6688 		.pa_version = PFIL_VERSION,
6689 		.pa_modname = "pf",
6690 		.pa_type = PFIL_TYPE_ETHERNET,
6691 	};
6692 	struct pfil_link_args pla = {
6693 		.pa_version = PFIL_VERSION,
6694 	};
6695 	int ret __diagused;
6696 
6697 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6698 		return;
6699 
6700 	pha.pa_mbuf_chk = pf_eth_check_in;
6701 	pha.pa_flags = PFIL_IN;
6702 	pha.pa_rulname = "eth-in";
6703 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6704 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6705 	pla.pa_head = V_link_pfil_head;
6706 	pla.pa_hook = V_pf_eth_in_hook;
6707 	ret = pfil_link(&pla);
6708 	MPASS(ret == 0);
6709 	pha.pa_mbuf_chk = pf_eth_check_out;
6710 	pha.pa_flags = PFIL_OUT;
6711 	pha.pa_rulname = "eth-out";
6712 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6713 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6714 	pla.pa_head = V_link_pfil_head;
6715 	pla.pa_hook = V_pf_eth_out_hook;
6716 	ret = pfil_link(&pla);
6717 	MPASS(ret == 0);
6718 
6719 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6720 }
6721 
6722 static void
6723 hook_pf(void)
6724 {
6725 	struct pfil_hook_args pha = {
6726 		.pa_version = PFIL_VERSION,
6727 		.pa_modname = "pf",
6728 	};
6729 	struct pfil_link_args pla = {
6730 		.pa_version = PFIL_VERSION,
6731 	};
6732 	int ret __diagused;
6733 
6734 	if (atomic_load_bool(&V_pf_pfil_hooked))
6735 		return;
6736 
6737 #ifdef INET
6738 	pha.pa_type = PFIL_TYPE_IP4;
6739 	pha.pa_mbuf_chk = pf_check_in;
6740 	pha.pa_flags = PFIL_IN;
6741 	pha.pa_rulname = "default-in";
6742 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6743 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6744 	pla.pa_head = V_inet_pfil_head;
6745 	pla.pa_hook = V_pf_ip4_in_hook;
6746 	ret = pfil_link(&pla);
6747 	MPASS(ret == 0);
6748 	pha.pa_mbuf_chk = pf_check_out;
6749 	pha.pa_flags = PFIL_OUT;
6750 	pha.pa_rulname = "default-out";
6751 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6752 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6753 	pla.pa_head = V_inet_pfil_head;
6754 	pla.pa_hook = V_pf_ip4_out_hook;
6755 	ret = pfil_link(&pla);
6756 	MPASS(ret == 0);
6757 	if (V_pf_filter_local) {
6758 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6759 		pla.pa_head = V_inet_local_pfil_head;
6760 		pla.pa_hook = V_pf_ip4_out_hook;
6761 		ret = pfil_link(&pla);
6762 		MPASS(ret == 0);
6763 	}
6764 #endif
6765 #ifdef INET6
6766 	pha.pa_type = PFIL_TYPE_IP6;
6767 	pha.pa_mbuf_chk = pf_check6_in;
6768 	pha.pa_flags = PFIL_IN;
6769 	pha.pa_rulname = "default-in6";
6770 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6771 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6772 	pla.pa_head = V_inet6_pfil_head;
6773 	pla.pa_hook = V_pf_ip6_in_hook;
6774 	ret = pfil_link(&pla);
6775 	MPASS(ret == 0);
6776 	pha.pa_mbuf_chk = pf_check6_out;
6777 	pha.pa_rulname = "default-out6";
6778 	pha.pa_flags = PFIL_OUT;
6779 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6780 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6781 	pla.pa_head = V_inet6_pfil_head;
6782 	pla.pa_hook = V_pf_ip6_out_hook;
6783 	ret = pfil_link(&pla);
6784 	MPASS(ret == 0);
6785 	if (V_pf_filter_local) {
6786 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6787 		pla.pa_head = V_inet6_local_pfil_head;
6788 		pla.pa_hook = V_pf_ip6_out_hook;
6789 		ret = pfil_link(&pla);
6790 		MPASS(ret == 0);
6791 	}
6792 #endif
6793 
6794 	atomic_store_bool(&V_pf_pfil_hooked, true);
6795 }
6796 
6797 static void
6798 dehook_pf_eth(void)
6799 {
6800 
6801 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6802 		return;
6803 
6804 	pfil_remove_hook(V_pf_eth_in_hook);
6805 	pfil_remove_hook(V_pf_eth_out_hook);
6806 
6807 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6808 }
6809 
6810 static void
6811 dehook_pf(void)
6812 {
6813 
6814 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6815 		return;
6816 
6817 #ifdef INET
6818 	pfil_remove_hook(V_pf_ip4_in_hook);
6819 	pfil_remove_hook(V_pf_ip4_out_hook);
6820 #endif
6821 #ifdef INET6
6822 	pfil_remove_hook(V_pf_ip6_in_hook);
6823 	pfil_remove_hook(V_pf_ip6_out_hook);
6824 #endif
6825 
6826 	atomic_store_bool(&V_pf_pfil_hooked, false);
6827 }
6828 
6829 static void
6830 pf_load_vnet(void)
6831 {
6832 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6833 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6834 
6835 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6836 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6837 
6838 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6839 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6840 #ifdef ALTQ
6841 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6842 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6843 #endif
6844 
6845 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6846 
6847 	pfattach_vnet();
6848 	V_pf_vnet_active = 1;
6849 }
6850 
6851 static int
6852 pf_load(void)
6853 {
6854 	int error;
6855 
6856 	sx_init(&pf_end_lock, "pf end thread");
6857 
6858 	pf_mtag_initialize();
6859 
6860 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6861 	if (pf_dev == NULL)
6862 		return (ENOMEM);
6863 
6864 	pf_end_threads = 0;
6865 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6866 	if (error != 0)
6867 		return (error);
6868 
6869 	pfi_initialize();
6870 
6871 	return (0);
6872 }
6873 
6874 static void
6875 pf_unload_vnet(void)
6876 {
6877 	int ret __diagused;
6878 
6879 	V_pf_vnet_active = 0;
6880 	V_pf_status.running = 0;
6881 	dehook_pf();
6882 	dehook_pf_eth();
6883 
6884 	PF_RULES_WLOCK();
6885 	pf_syncookies_cleanup();
6886 	shutdown_pf();
6887 	PF_RULES_WUNLOCK();
6888 
6889 	ret = swi_remove(V_pf_swi_cookie);
6890 	MPASS(ret == 0);
6891 	ret = intr_event_destroy(V_pf_swi_ie);
6892 	MPASS(ret == 0);
6893 
6894 	pf_unload_vnet_purge();
6895 
6896 	pf_normalize_cleanup();
6897 	PF_RULES_WLOCK();
6898 	pfi_cleanup_vnet();
6899 	PF_RULES_WUNLOCK();
6900 	pfr_cleanup();
6901 	pf_osfp_flush();
6902 	pf_cleanup();
6903 	if (IS_DEFAULT_VNET(curvnet))
6904 		pf_mtag_cleanup();
6905 
6906 	pf_cleanup_tagset(&V_pf_tags);
6907 #ifdef ALTQ
6908 	pf_cleanup_tagset(&V_pf_qids);
6909 #endif
6910 	uma_zdestroy(V_pf_tag_z);
6911 
6912 #ifdef PF_WANT_32_TO_64_COUNTER
6913 	PF_RULES_WLOCK();
6914 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6915 
6916 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6917 	MPASS(V_pf_allkifcount == 0);
6918 
6919 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6920 	V_pf_allrulecount--;
6921 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6922 
6923 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6924 	MPASS(V_pf_allrulecount == 0);
6925 
6926 	PF_RULES_WUNLOCK();
6927 
6928 	free(V_pf_kifmarker, PFI_MTYPE);
6929 	free(V_pf_rulemarker, M_PFRULE);
6930 #endif
6931 
6932 	/* Free counters last as we updated them during shutdown. */
6933 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6934 	for (int i = 0; i < 2; i++) {
6935 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6936 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6937 	}
6938 	counter_u64_free(V_pf_default_rule.states_cur);
6939 	counter_u64_free(V_pf_default_rule.states_tot);
6940 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
6941 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
6942 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6943 
6944 	for (int i = 0; i < PFRES_MAX; i++)
6945 		counter_u64_free(V_pf_status.counters[i]);
6946 	for (int i = 0; i < KLCNT_MAX; i++)
6947 		counter_u64_free(V_pf_status.lcounters[i]);
6948 	for (int i = 0; i < FCNT_MAX; i++)
6949 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6950 	for (int i = 0; i < SCNT_MAX; i++)
6951 		counter_u64_free(V_pf_status.scounters[i]);
6952 
6953 	rm_destroy(&V_pf_rules_lock);
6954 	sx_destroy(&V_pf_ioctl_lock);
6955 }
6956 
6957 static void
6958 pf_unload(void)
6959 {
6960 
6961 	sx_xlock(&pf_end_lock);
6962 	pf_end_threads = 1;
6963 	while (pf_end_threads < 2) {
6964 		wakeup_one(pf_purge_thread);
6965 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6966 	}
6967 	sx_xunlock(&pf_end_lock);
6968 
6969 	pf_nl_unregister();
6970 
6971 	if (pf_dev != NULL)
6972 		destroy_dev(pf_dev);
6973 
6974 	pfi_cleanup();
6975 
6976 	sx_destroy(&pf_end_lock);
6977 }
6978 
6979 static void
6980 vnet_pf_init(void *unused __unused)
6981 {
6982 
6983 	pf_load_vnet();
6984 }
6985 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6986     vnet_pf_init, NULL);
6987 
6988 static void
6989 vnet_pf_uninit(const void *unused __unused)
6990 {
6991 
6992 	pf_unload_vnet();
6993 }
6994 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6995 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6996     vnet_pf_uninit, NULL);
6997 
6998 static int
6999 pf_modevent(module_t mod, int type, void *data)
7000 {
7001 	int error = 0;
7002 
7003 	switch (type) {
7004 	case MOD_LOAD:
7005 		error = pf_load();
7006 		pf_nl_register();
7007 		break;
7008 	case MOD_UNLOAD:
7009 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
7010 		 * the vnet_pf_uninit()s */
7011 		break;
7012 	default:
7013 		error = EINVAL;
7014 		break;
7015 	}
7016 
7017 	return (error);
7018 }
7019 
7020 static moduledata_t pf_mod = {
7021 	"pf",
7022 	pf_modevent,
7023 	0
7024 };
7025 
7026 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
7027 MODULE_DEPEND(pf, netlink, 1, 1, 1);
7028 MODULE_DEPEND(pf, crypto, 1, 1, 1);
7029 MODULE_VERSION(pf, PF_MODVER);
7030