xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision e8eb3096d8d140edc506aab40761d1f00bf3e9e8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static void		 pf_rollback_eth_cb(struct epoch_context *);
111 static int		 pf_rollback_eth(uint32_t, const char *);
112 static int		 pf_commit_eth(uint32_t, const char *);
113 static void		 pf_free_eth_rule(struct pf_keth_rule *);
114 #ifdef ALTQ
115 static int		 pf_begin_altq(u_int32_t *);
116 static int		 pf_rollback_altq(u_int32_t);
117 static int		 pf_commit_altq(u_int32_t);
118 static int		 pf_enable_altq(struct pf_altq *);
119 static int		 pf_disable_altq(struct pf_altq *);
120 static uint16_t		 pf_qname2qid(const char *);
121 static void		 pf_qid_unref(uint16_t);
122 #endif /* ALTQ */
123 static int		 pf_begin_rules(u_int32_t *, int, const char *);
124 static int		 pf_rollback_rules(u_int32_t, int, char *);
125 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
126 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
127 static void		 pf_hash_rule(struct pf_krule *);
128 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
129 static int		 pf_commit_rules(u_int32_t, int, char *);
130 static int		 pf_addr_setup(struct pf_kruleset *,
131 			    struct pf_addr_wrap *, sa_family_t);
132 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
133 			    struct pf_src_node *);
134 #ifdef ALTQ
135 static int		 pf_export_kaltq(struct pf_altq *,
136 			    struct pfioc_altq_v1 *, size_t);
137 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
138 			    struct pf_altq *, size_t);
139 #endif /* ALTQ */
140 
141 VNET_DEFINE(struct pf_krule,	pf_default_rule);
142 
143 static __inline int             pf_krule_compare(struct pf_krule *,
144 				    struct pf_krule *);
145 
146 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
147 
148 #ifdef ALTQ
149 VNET_DEFINE_STATIC(int,		pf_altq_running);
150 #define	V_pf_altq_running	VNET(pf_altq_running)
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 struct pf_tagname {
155 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
156 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
157 	char			name[PF_TAG_NAME_SIZE];
158 	uint16_t		tag;
159 	int			ref;
160 };
161 
162 struct pf_tagset {
163 	TAILQ_HEAD(, pf_tagname)	*namehash;
164 	TAILQ_HEAD(, pf_tagname)	*taghash;
165 	unsigned int			 mask;
166 	uint32_t			 seed;
167 	BITSET_DEFINE(, TAGID_MAX)	 avail;
168 };
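/*
 * A tagset maps tag names to small integer IDs and back: "namehash"
 * buckets entries by a hash of the name, "taghash" buckets them by the
 * ID itself, and "avail" tracks which IDs in [1..TAGID_MAX] are still
 * free.
 */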
169 
170 VNET_DEFINE(struct pf_tagset, pf_tags);
171 #define	V_pf_tags	VNET(pf_tags)
172 static unsigned int	pf_rule_tag_hashsize;
173 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
174 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
175     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
176     "Size of pf(4) rule tag hashtable");
177 
178 #ifdef ALTQ
179 VNET_DEFINE(struct pf_tagset, pf_qids);
180 #define	V_pf_qids	VNET(pf_qids)
181 static unsigned int	pf_queue_tag_hashsize;
182 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
183 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
184     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
185     "Size of pf(4) queue tag hashtable");
186 #endif
187 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
188 #define	V_pf_tag_z		 VNET(pf_tag_z)
189 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
190 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
191 
192 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
193 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
194 #endif
195 
196 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
197 #define V_pf_filter_local	VNET(pf_filter_local)
198 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
199     &VNET_NAME(pf_filter_local), false,
200     "Enable filtering for packets delivered to local network stack");
201 
202 #ifdef PF_DEFAULT_TO_DROP
203 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
204 #else
205 VNET_DEFINE_STATIC(bool, default_to_drop);
206 #endif
207 #define	V_default_to_drop VNET(default_to_drop)
208 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
209     &VNET_NAME(default_to_drop), false,
210     "Make the default rule drop all packets.");
211 
212 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
213 			    unsigned int);
214 static void		 pf_cleanup_tagset(struct pf_tagset *);
215 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
216 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
217 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
218 static u_int16_t	 pf_tagname2tag(const char *);
219 static void		 tag_unref(struct pf_tagset *, u_int16_t);
220 
221 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
222 
223 struct cdev *pf_dev;
224 
225 /*
226  * XXX - These are new and need to be checked when moving to a new version
227  */
228 static void		 pf_clear_all_states(void);
229 static int		 pf_killstates_row(struct pf_kstate_kill *,
230 			    struct pf_idhash *);
231 static int		 pf_killstates_nv(struct pfioc_nv *);
232 static int		 pf_clearstates_nv(struct pfioc_nv *);
233 static int		 pf_getstate(struct pfioc_nv *);
234 static int		 pf_getstatus(struct pfioc_nv *);
235 static int		 pf_clear_tables(void);
236 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
237 static int		 pf_keepcounters(struct pfioc_nv *);
238 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
239 
240 /*
241  * Wrapper functions for pfil(9) hooks
242  */
243 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
244     int flags, void *ruleset __unused, struct inpcb *inp);
245 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
246     int flags, void *ruleset __unused, struct inpcb *inp);
247 #ifdef INET
248 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
249     int flags, void *ruleset __unused, struct inpcb *inp);
250 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
251     int flags, void *ruleset __unused, struct inpcb *inp);
252 #endif
253 #ifdef INET6
254 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
255     int flags, void *ruleset __unused, struct inpcb *inp);
256 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
257     int flags, void *ruleset __unused, struct inpcb *inp);
258 #endif
259 
260 static void		hook_pf_eth(void);
261 static void		hook_pf(void);
262 static void		dehook_pf_eth(void);
263 static void		dehook_pf(void);
264 static int		shutdown_pf(void);
265 static int		pf_load(void);
266 static void		pf_unload(void);
267 
268 static struct cdevsw pf_cdevsw = {
269 	.d_ioctl =	pfioctl,
270 	.d_name =	PF_NAME,
271 	.d_version =	D_VERSION,
272 };
273 
274 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
275 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
276 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
277 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
278 
279 /*
280  * We need a flag, separate from "hooked" and "running", to know when
281  * the VNET is "valid".  We primarily need this to control (global)
282  * external events, e.g., eventhandlers.
283  */
284 VNET_DEFINE(int, pf_vnet_active);
285 #define V_pf_vnet_active	VNET(pf_vnet_active)
286 
287 int pf_end_threads;
288 struct proc *pf_purge_proc;
289 
290 VNET_DEFINE(struct rmlock, pf_rules_lock);
291 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
292 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
293 struct sx			pf_end_lock;
294 
295 /* pfsync */
296 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
297 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
298 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
299 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
300 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
301 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
302 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
303 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
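/*
 * These pointers stay NULL until the pfsync(4) and pflow(4) modules are
 * loaded and register their implementations; callers check for NULL, so
 * both modules remain optional.
 */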
304 
305 /* pflog */
306 pflog_packet_t			*pflog_packet_ptr = NULL;
307 
308 /*
309  * Copy a user-provided string, returning an error if truncation would occur.
310  * Avoid scanning past "sz" bytes in the source string since there's no
311  * guarantee that it's nul-terminated.
312  */
313 static int
314 pf_user_strcpy(char *dst, const char *src, size_t sz)
315 {
316 	if (strnlen(src, sz) == sz)
317 		return (EINVAL);
318 	(void)strlcpy(dst, src, sz);
319 	return (0);
320 }
321 
322 static void
323 pfattach_vnet(void)
324 {
325 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
326 
327 	bzero(&V_pf_status, sizeof(V_pf_status));
328 
329 	pf_initialize();
330 	pfr_initialize();
331 	pfi_initialize_vnet();
332 	pf_normalize_init();
333 	pf_syncookies_init();
334 
335 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
336 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
337 
338 	RB_INIT(&V_pf_anchors);
339 	pf_init_kruleset(&pf_main_ruleset);
340 
341 	pf_init_keth(V_pf_keth);
342 
343 	/* default rule should never be garbage collected */
344 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
345 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
346 	V_pf_default_rule.nr = -1;
347 	V_pf_default_rule.rtableid = -1;
348 
349 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
350 	for (int i = 0; i < 2; i++) {
351 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
352 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
353 	}
354 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
355 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
356 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
357 
358 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
359 	    M_WAITOK | M_ZERO);
360 
361 #ifdef PF_WANT_32_TO_64_COUNTER
362 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
363 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
364 	PF_RULES_WLOCK();
365 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
366 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
367 	V_pf_allrulecount++;
368 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
369 	PF_RULES_WUNLOCK();
370 #endif
371 
372 	/* initialize default timeouts */
373 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
374 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
375 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
376 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
377 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
378 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
379 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
380 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
381 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
382 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
383 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
384 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
385 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
386 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
387 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
389 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
390 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
391 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
392 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
393 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
394 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
395 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
396 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
397 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
398 
399 	V_pf_status.debug = PF_DEBUG_URGENT;
400 	/*
401 	 * XXX This is different from OpenBSD, where reassembly is enabled by
402 	 * default. In FreeBSD we expect people to still use scrub rules and
403 	 * switch to the new syntax later. Only when they switch must they
404 	 * explicitly enable reassembly. We could change the default once the
405 	 * scrub rule functionality is hopefully removed some day in the future.
406 	 */
407 	V_pf_status.reass = 0;
408 
409 	V_pf_pfil_hooked = false;
410 	V_pf_pfil_eth_hooked = false;
411 
412 	/* XXX do our best to avoid a conflict */
413 	V_pf_status.hostid = arc4random();
414 
415 	for (int i = 0; i < PFRES_MAX; i++)
416 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < KLCNT_MAX; i++)
418 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
419 	for (int i = 0; i < FCNT_MAX; i++)
420 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
421 	for (int i = 0; i < SCNT_MAX; i++)
422 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
423 
424 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
425 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
426 		/* XXXGL: leaked all above. */
427 		return;
428 }
429 
430 static struct pf_kpool *
431 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
432     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
433     u_int8_t check_ticket)
434 {
435 	struct pf_kruleset	*ruleset;
436 	struct pf_krule		*rule;
437 	int			 rs_num;
438 
439 	ruleset = pf_find_kruleset(anchor);
440 	if (ruleset == NULL)
441 		return (NULL);
442 	rs_num = pf_get_ruleset_number(rule_action);
443 	if (rs_num >= PF_RULESET_MAX)
444 		return (NULL);
445 	if (active) {
446 		if (check_ticket && ticket !=
447 		    ruleset->rules[rs_num].active.ticket)
448 			return (NULL);
449 		if (r_last)
450 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
451 			    pf_krulequeue);
452 		else
453 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
454 	} else {
455 		if (check_ticket && ticket !=
456 		    ruleset->rules[rs_num].inactive.ticket)
457 			return (NULL);
458 		if (r_last)
459 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
460 			    pf_krulequeue);
461 		else
462 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
463 	}
464 	if (!r_last) {
465 		while ((rule != NULL) && (rule->nr != rule_number))
466 			rule = TAILQ_NEXT(rule, entries);
467 	}
468 	if (rule == NULL)
469 		return (NULL);
470 
471 	return (&rule->rpool);
472 }
473 
474 static void
475 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
476 {
477 	struct pf_kpooladdr	*mv_pool_pa;
478 
479 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
480 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
481 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
482 	}
483 }
484 
485 static void
486 pf_empty_kpool(struct pf_kpalist *poola)
487 {
488 	struct pf_kpooladdr *pa;
489 
490 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
491 		switch (pa->addr.type) {
492 		case PF_ADDR_DYNIFTL:
493 			pfi_dynaddr_remove(pa->addr.p.dyn);
494 			break;
495 		case PF_ADDR_TABLE:
496 			/* XXX: this could be unfinished pooladdr on pabuf */
497 			if (pa->addr.p.tbl != NULL)
498 				pfr_detach_table(pa->addr.p.tbl);
499 			break;
500 		}
501 		if (pa->kif)
502 			pfi_kkif_unref(pa->kif);
503 		TAILQ_REMOVE(poola, pa, entries);
504 		free(pa, M_PFRULE);
505 	}
506 }
507 
508 static void
509 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
510 {
511 
512 	PF_RULES_WASSERT();
513 	PF_UNLNKDRULES_ASSERT();
514 
515 	TAILQ_REMOVE(rulequeue, rule, entries);
516 
517 	rule->rule_ref |= PFRULE_REFS;
518 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
519 }
520 
521 static void
522 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
523 {
524 
525 	PF_RULES_WASSERT();
526 
527 	PF_UNLNKDRULES_LOCK();
528 	pf_unlink_rule_locked(rulequeue, rule);
529 	PF_UNLNKDRULES_UNLOCK();
530 }
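/*
 * Note that unlinking does not free the rule: it is parked on
 * V_pf_unlinked_rules and reclaimed later by pf's purge thread, once it
 * is no longer referenced.
 */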
531 
532 static void
533 pf_free_eth_rule(struct pf_keth_rule *rule)
534 {
535 	PF_RULES_WASSERT();
536 
537 	if (rule == NULL)
538 		return;
539 
540 	if (rule->tag)
541 		tag_unref(&V_pf_tags, rule->tag);
542 	if (rule->match_tag)
543 		tag_unref(&V_pf_tags, rule->match_tag);
544 #ifdef ALTQ
545 	pf_qid_unref(rule->qid);
546 #endif
547 
548 	if (rule->bridge_to)
549 		pfi_kkif_unref(rule->bridge_to);
550 	if (rule->kif)
551 		pfi_kkif_unref(rule->kif);
552 
553 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
554 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
555 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
556 		pfr_detach_table(rule->ipdst.addr.p.tbl);
557 
558 	counter_u64_free(rule->evaluations);
559 	for (int i = 0; i < 2; i++) {
560 		counter_u64_free(rule->packets[i]);
561 		counter_u64_free(rule->bytes[i]);
562 	}
563 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
564 	pf_keth_anchor_remove(rule);
565 
566 	free(rule, M_PFRULE);
567 }
568 
569 void
570 pf_free_rule(struct pf_krule *rule)
571 {
572 
573 	PF_RULES_WASSERT();
574 	PF_CONFIG_ASSERT();
575 
576 	if (rule->tag)
577 		tag_unref(&V_pf_tags, rule->tag);
578 	if (rule->match_tag)
579 		tag_unref(&V_pf_tags, rule->match_tag);
580 #ifdef ALTQ
581 	if (rule->pqid != rule->qid)
582 		pf_qid_unref(rule->pqid);
583 	pf_qid_unref(rule->qid);
584 #endif
585 	switch (rule->src.addr.type) {
586 	case PF_ADDR_DYNIFTL:
587 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
588 		break;
589 	case PF_ADDR_TABLE:
590 		pfr_detach_table(rule->src.addr.p.tbl);
591 		break;
592 	}
593 	switch (rule->dst.addr.type) {
594 	case PF_ADDR_DYNIFTL:
595 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
596 		break;
597 	case PF_ADDR_TABLE:
598 		pfr_detach_table(rule->dst.addr.p.tbl);
599 		break;
600 	}
601 	if (rule->overload_tbl)
602 		pfr_detach_table(rule->overload_tbl);
603 	if (rule->kif)
604 		pfi_kkif_unref(rule->kif);
605 	if (rule->rcv_kif)
606 		pfi_kkif_unref(rule->rcv_kif);
607 	pf_kanchor_remove(rule);
608 	pf_empty_kpool(&rule->rpool.list);
609 
610 	pf_krule_free(rule);
611 }
612 
613 static void
614 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
615     unsigned int default_size)
616 {
617 	unsigned int i;
618 	unsigned int hashsize;
619 
620 	if (*tunable_size == 0 || !powerof2(*tunable_size))
621 		*tunable_size = default_size;
622 
623 	hashsize = *tunable_size;
624 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
625 	    M_WAITOK);
626 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
627 	    M_WAITOK);
628 	ts->mask = hashsize - 1;
629 	ts->seed = arc4random();
630 	for (i = 0; i < hashsize; i++) {
631 		TAILQ_INIT(&ts->namehash[i]);
632 		TAILQ_INIT(&ts->taghash[i]);
633 	}
634 	BIT_FILL(TAGID_MAX, &ts->avail);
635 }
636 
637 static void
638 pf_cleanup_tagset(struct pf_tagset *ts)
639 {
640 	unsigned int i;
641 	unsigned int hashsize;
642 	struct pf_tagname *t, *tmp;
643 
644 	/*
645 	 * Only need to clean up one of the hashes as each tag is hashed
646 	 * into each table.
647 	 */
648 	hashsize = ts->mask + 1;
649 	for (i = 0; i < hashsize; i++)
650 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
651 			uma_zfree(V_pf_tag_z, t);
652 
653 	free(ts->namehash, M_PFHASH);
654 	free(ts->taghash, M_PFHASH);
655 }
656 
657 static uint16_t
658 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
659 {
660 	size_t len;
661 
662 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
663 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
664 }
665 
666 static uint16_t
667 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
668 {
669 
670 	return (tag & ts->mask);
671 }
672 
673 static u_int16_t
674 tagname2tag(struct pf_tagset *ts, const char *tagname)
675 {
676 	struct pf_tagname	*tag;
677 	u_int32_t		 index;
678 	u_int16_t		 new_tagid;
679 
680 	PF_RULES_WASSERT();
681 
682 	index = tagname2hashindex(ts, tagname);
683 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
684 		if (strcmp(tagname, tag->name) == 0) {
685 			tag->ref++;
686 			return (tag->tag);
687 		}
688 
689 	/*
690 	 * new entry
691 	 *
692 	 * to avoid fragmentation, we do a linear search from the beginning
693 	 * and take the first free slot we find.
694 	 */
695 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
696 	/*
697 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
698 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
699 	 * set.  It may also return a bit number greater than TAGID_MAX due
700 	 * to rounding of the number of bits in the vector up to a multiple
701 	 * of the vector word size at declaration/allocation time.
702 	 */
703 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
704 		return (0);
705 
706 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
707 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
708 
709 	/* allocate and fill new struct pf_tagname */
710 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
711 	if (tag == NULL)
712 		return (0);
713 	strlcpy(tag->name, tagname, sizeof(tag->name));
714 	tag->tag = new_tagid;
715 	tag->ref = 1;
716 
717 	/* Insert into namehash */
718 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
719 
720 	/* Insert into taghash */
721 	index = tag2hashindex(ts, new_tagid);
722 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
723 
724 	return (tag->tag);
725 }
726 
727 static void
728 tag_unref(struct pf_tagset *ts, u_int16_t tag)
729 {
730 	struct pf_tagname	*t;
731 	uint16_t		 index;
732 
733 	PF_RULES_WASSERT();
734 
735 	index = tag2hashindex(ts, tag);
736 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
737 		if (tag == t->tag) {
738 			if (--t->ref == 0) {
739 				TAILQ_REMOVE(&ts->taghash[index], t,
740 				    taghash_entries);
741 				index = tagname2hashindex(ts, t->name);
742 				TAILQ_REMOVE(&ts->namehash[index], t,
743 				    namehash_entries);
744 				/* Bits are 0-based for BIT_SET() */
745 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
746 				uma_zfree(V_pf_tag_z, t);
747 			}
748 			break;
749 		}
750 }
751 
752 static uint16_t
753 pf_tagname2tag(const char *tagname)
754 {
755 	return (tagname2tag(&V_pf_tags, tagname));
756 }
757 
758 static int
759 pf_begin_eth(uint32_t *ticket, const char *anchor)
760 {
761 	struct pf_keth_rule *rule, *tmp;
762 	struct pf_keth_ruleset *rs;
763 
764 	PF_RULES_WASSERT();
765 
766 	rs = pf_find_or_create_keth_ruleset(anchor);
767 	if (rs == NULL)
768 		return (EINVAL);
769 
770 	/* Purge old inactive rules. */
771 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
772 	    tmp) {
773 		TAILQ_REMOVE(rs->inactive.rules, rule,
774 		    entries);
775 		pf_free_eth_rule(rule);
776 	}
777 
778 	*ticket = ++rs->inactive.ticket;
779 	rs->inactive.open = 1;
780 
781 	return (0);
782 }
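/*
 * Ethernet rule updates are transactional: pf_begin_eth() clears the
 * inactive list and hands back a ticket, new rules are staged on that
 * list under the ticket, and pf_commit_eth() swaps the staged list in,
 * or returns EBUSY if the ticket has gone stale.  pf_rollback_eth()
 * discards a staged list without committing.
 */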
783 
784 static void
785 pf_rollback_eth_cb(struct epoch_context *ctx)
786 {
787 	struct pf_keth_ruleset *rs;
788 
789 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
790 
791 	CURVNET_SET(rs->vnet);
792 
793 	PF_RULES_WLOCK();
794 	pf_rollback_eth(rs->inactive.ticket,
795 	    rs->anchor ? rs->anchor->path : "");
796 	PF_RULES_WUNLOCK();
797 
798 	CURVNET_RESTORE();
799 }
800 
801 static int
802 pf_rollback_eth(uint32_t ticket, const char *anchor)
803 {
804 	struct pf_keth_rule *rule, *tmp;
805 	struct pf_keth_ruleset *rs;
806 
807 	PF_RULES_WASSERT();
808 
809 	rs = pf_find_keth_ruleset(anchor);
810 	if (rs == NULL)
811 		return (EINVAL);
812 
813 	if (!rs->inactive.open ||
814 	    ticket != rs->inactive.ticket)
815 		return (0);
816 
817 	/* Purge old inactive rules. */
818 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
819 	    tmp) {
820 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
821 		pf_free_eth_rule(rule);
822 	}
823 
824 	rs->inactive.open = 0;
825 
826 	pf_remove_if_empty_keth_ruleset(rs);
827 
828 	return (0);
829 }
830 
831 #define	PF_SET_SKIP_STEPS(i)					\
832 	do {							\
833 		while (head[i] != cur) {			\
834 			head[i]->skip[i].ptr = cur;		\
835 			head[i] = TAILQ_NEXT(head[i], entries);	\
836 		}						\
837 	} while (0)
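/*
 * Skip steps let evaluation jump over runs of rules that share a field
 * value: for each field in PFE_SKIP_*, every rule in a run gets a skip
 * pointer to the first later rule whose value for that field differs,
 * so one failed comparison skips the whole run.
 */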
838 
839 static void
840 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
841 {
842 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
843 	int i;
844 
845 	cur = TAILQ_FIRST(rules);
846 	prev = cur;
847 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
848 		head[i] = cur;
849 	while (cur != NULL) {
850 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
851 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
852 		if (cur->direction != prev->direction)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
854 		if (cur->proto != prev->proto)
855 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
856 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
857 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
858 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
859 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
860 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
861 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
862 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
863 		if (cur->ipdst.neg != prev->ipdst.neg ||
864 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
865 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
866 
867 		prev = cur;
868 		cur = TAILQ_NEXT(cur, entries);
869 	}
870 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
871 		PF_SET_SKIP_STEPS(i);
872 }
873 
874 static int
875 pf_commit_eth(uint32_t ticket, const char *anchor)
876 {
877 	struct pf_keth_ruleq *rules;
878 	struct pf_keth_ruleset *rs;
879 
880 	rs = pf_find_keth_ruleset(anchor);
881 	if (rs == NULL) {
882 		return (EINVAL);
883 	}
884 
885 	if (!rs->inactive.open ||
886 	    ticket != rs->inactive.ticket)
887 		return (EBUSY);
888 
889 	PF_RULES_WASSERT();
890 
891 	pf_eth_calc_skip_steps(rs->inactive.rules);
892 
893 	rules = rs->active.rules;
894 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
895 	rs->inactive.rules = rules;
896 	rs->inactive.ticket = rs->active.ticket;
897 
898 	/* Clean up inactive rules (i.e. previously active rules), only when
899 	 * we're sure they're no longer used. */
900 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
901 
902 	return (0);
903 }
904 
905 #ifdef ALTQ
906 static uint16_t
907 pf_qname2qid(const char *qname)
908 {
909 	return (tagname2tag(&V_pf_qids, qname));
910 }
911 
912 static void
913 pf_qid_unref(uint16_t qid)
914 {
915 	tag_unref(&V_pf_qids, qid);
916 }
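/*
 * Queue names reuse the tag allocator: a queue ID is simply a tag drawn
 * from V_pf_qids, which is why PF_QNAME_SIZE must equal
 * PF_TAG_NAME_SIZE (see the #error check above).
 */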
917 
918 static int
919 pf_begin_altq(u_int32_t *ticket)
920 {
921 	struct pf_altq	*altq, *tmp;
922 	int		 error = 0;
923 
924 	PF_RULES_WASSERT();
925 
926 	/* Purge the old altq lists */
927 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
928 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
929 			/* detach and destroy the discipline */
930 			error = altq_remove(altq);
931 		}
932 		free(altq, M_PFALTQ);
933 	}
934 	TAILQ_INIT(V_pf_altq_ifs_inactive);
935 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
936 		pf_qid_unref(altq->qid);
937 		free(altq, M_PFALTQ);
938 	}
939 	TAILQ_INIT(V_pf_altqs_inactive);
940 	if (error)
941 		return (error);
942 	*ticket = ++V_ticket_altqs_inactive;
943 	V_altqs_inactive_open = 1;
944 	return (0);
945 }
946 
947 static int
948 pf_rollback_altq(u_int32_t ticket)
949 {
950 	struct pf_altq	*altq, *tmp;
951 	int		 error = 0;
952 
953 	PF_RULES_WASSERT();
954 
955 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
956 		return (0);
957 	/* Purge the old altq lists */
958 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
959 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
960 			/* detach and destroy the discipline */
961 			error = altq_remove(altq);
962 		}
963 		free(altq, M_PFALTQ);
964 	}
965 	TAILQ_INIT(V_pf_altq_ifs_inactive);
966 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
967 		pf_qid_unref(altq->qid);
968 		free(altq, M_PFALTQ);
969 	}
970 	TAILQ_INIT(V_pf_altqs_inactive);
971 	V_altqs_inactive_open = 0;
972 	return (error);
973 }
974 
975 static int
976 pf_commit_altq(u_int32_t ticket)
977 {
978 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
979 	struct pf_altq		*altq, *tmp;
980 	int			 err, error = 0;
981 
982 	PF_RULES_WASSERT();
983 
984 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
985 		return (EBUSY);
986 
987 	/* swap altqs, keep the old. */
988 	old_altqs = V_pf_altqs_active;
989 	old_altq_ifs = V_pf_altq_ifs_active;
990 	V_pf_altqs_active = V_pf_altqs_inactive;
991 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
992 	V_pf_altqs_inactive = old_altqs;
993 	V_pf_altq_ifs_inactive = old_altq_ifs;
994 	V_ticket_altqs_active = V_ticket_altqs_inactive;
995 
996 	/* Attach new disciplines */
997 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
998 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
999 			/* attach the discipline */
1000 			error = altq_pfattach(altq);
1001 			if (error == 0 && V_pf_altq_running)
1002 				error = pf_enable_altq(altq);
1003 			if (error != 0)
1004 				return (error);
1005 		}
1006 	}
1007 
1008 	/* Purge the old altq lists */
1009 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1010 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1011 			/* detach and destroy the discipline */
1012 			if (V_pf_altq_running)
1013 				error = pf_disable_altq(altq);
1014 			err = altq_pfdetach(altq);
1015 			if (err != 0 && error == 0)
1016 				error = err;
1017 			err = altq_remove(altq);
1018 			if (err != 0 && error == 0)
1019 				error = err;
1020 		}
1021 		free(altq, M_PFALTQ);
1022 	}
1023 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1024 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1025 		pf_qid_unref(altq->qid);
1026 		free(altq, M_PFALTQ);
1027 	}
1028 	TAILQ_INIT(V_pf_altqs_inactive);
1029 
1030 	V_altqs_inactive_open = 0;
1031 	return (error);
1032 }
1033 
1034 static int
1035 pf_enable_altq(struct pf_altq *altq)
1036 {
1037 	struct ifnet		*ifp;
1038 	struct tb_profile	 tb;
1039 	int			 error = 0;
1040 
1041 	if ((ifp = ifunit(altq->ifname)) == NULL)
1042 		return (EINVAL);
1043 
1044 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1045 		error = altq_enable(&ifp->if_snd);
1046 
1047 	/* set tokenbucket regulator */
1048 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1049 		tb.rate = altq->ifbandwidth;
1050 		tb.depth = altq->tbrsize;
1051 		error = tbr_set(&ifp->if_snd, &tb);
1052 	}
1053 
1054 	return (error);
1055 }
1056 
1057 static int
1058 pf_disable_altq(struct pf_altq *altq)
1059 {
1060 	struct ifnet		*ifp;
1061 	struct tb_profile	 tb;
1062 	int			 error;
1063 
1064 	if ((ifp = ifunit(altq->ifname)) == NULL)
1065 		return (EINVAL);
1066 
1067 	/*
1068 	 * If the discipline is no longer referenced, it has been overridden
1069 	 * by a new one; in that case, just return.
1070 	 */
1071 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1072 		return (0);
1073 
1074 	error = altq_disable(&ifp->if_snd);
1075 
1076 	if (error == 0) {
1077 		/* clear tokenbucket regulator */
1078 		tb.rate = 0;
1079 		error = tbr_set(&ifp->if_snd, &tb);
1080 	}
1081 
1082 	return (error);
1083 }
1084 
1085 static int
1086 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1087     struct pf_altq *altq)
1088 {
1089 	struct ifnet	*ifp1;
1090 	int		 error = 0;
1091 
1092 	/* Deactivate the interface in question */
1093 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1094 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1095 	    (remove && ifp1 == ifp)) {
1096 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1097 	} else {
1098 		error = altq_add(ifp1, altq);
1099 
1100 		if (ticket != V_ticket_altqs_inactive)
1101 			error = EBUSY;
1102 
1103 		if (error)
1104 			free(altq, M_PFALTQ);
1105 	}
1106 
1107 	return (error);
1108 }
1109 
1110 void
1111 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1112 {
1113 	struct pf_altq	*a1, *a2, *a3;
1114 	u_int32_t	 ticket;
1115 	int		 error = 0;
1116 
1117 	/*
1118 	 * No need to re-evaluate the configuration for events on interfaces
1119 	 * that do not support ALTQ, as it's not possible for such
1120 	 * interfaces to be part of the configuration.
1121 	 */
1122 	if (!ALTQ_IS_READY(&ifp->if_snd))
1123 		return;
1124 
1125 	/* Interrupt userland queue modifications */
1126 	if (V_altqs_inactive_open)
1127 		pf_rollback_altq(V_ticket_altqs_inactive);
1128 
1129 	/* Start new altq ruleset */
1130 	if (pf_begin_altq(&ticket))
1131 		return;
1132 
1133 	/* Copy the current active set */
1134 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1135 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1136 		if (a2 == NULL) {
1137 			error = ENOMEM;
1138 			break;
1139 		}
1140 		bcopy(a1, a2, sizeof(struct pf_altq));
1141 
1142 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1143 		if (error)
1144 			break;
1145 
1146 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1147 	}
1148 	if (error)
1149 		goto out;
1150 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1151 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1152 		if (a2 == NULL) {
1153 			error = ENOMEM;
1154 			break;
1155 		}
1156 		bcopy(a1, a2, sizeof(struct pf_altq));
1157 
1158 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1159 			error = EBUSY;
1160 			free(a2, M_PFALTQ);
1161 			break;
1162 		}
1163 		a2->altq_disc = NULL;
1164 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1165 			if (strncmp(a3->ifname, a2->ifname,
1166 				IFNAMSIZ) == 0) {
1167 				a2->altq_disc = a3->altq_disc;
1168 				break;
1169 			}
1170 		}
1171 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1172 		if (error)
1173 			break;
1174 
1175 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1176 	}
1177 
1178 out:
1179 	if (error != 0)
1180 		pf_rollback_altq(ticket);
1181 	else
1182 		pf_commit_altq(ticket);
1183 }
1184 #endif /* ALTQ */
1185 
1186 static struct pf_krule_global *
1187 pf_rule_tree_alloc(int flags)
1188 {
1189 	struct pf_krule_global *tree;
1190 
1191 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1192 	if (tree == NULL)
1193 		return (NULL);
1194 	RB_INIT(tree);
1195 	return (tree);
1196 }
1197 
1198 static void
1199 pf_rule_tree_free(struct pf_krule_global *tree)
1200 {
1201 
1202 	free(tree, M_TEMP);
1203 }
1204 
1205 static int
1206 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1207 {
1208 	struct pf_krule_global *tree;
1209 	struct pf_kruleset	*rs;
1210 	struct pf_krule		*rule;
1211 
1212 	PF_RULES_WASSERT();
1213 
1214 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1215 		return (EINVAL);
1216 	tree = pf_rule_tree_alloc(M_NOWAIT);
1217 	if (tree == NULL)
1218 		return (ENOMEM);
1219 	rs = pf_find_or_create_kruleset(anchor);
1220 	if (rs == NULL) {
1221 		free(tree, M_TEMP);
1222 		return (EINVAL);
1223 	}
1224 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1225 	rs->rules[rs_num].inactive.tree = tree;
1226 
1227 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1228 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1229 		rs->rules[rs_num].inactive.rcount--;
1230 	}
1231 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1232 	rs->rules[rs_num].inactive.open = 1;
1233 	return (0);
1234 }
1235 
1236 static int
1237 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1238 {
1239 	struct pf_kruleset	*rs;
1240 	struct pf_krule		*rule;
1241 
1242 	PF_RULES_WASSERT();
1243 
1244 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1245 		return (EINVAL);
1246 	rs = pf_find_kruleset(anchor);
1247 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1248 	    rs->rules[rs_num].inactive.ticket != ticket)
1249 		return (0);
1250 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1251 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1252 		rs->rules[rs_num].inactive.rcount--;
1253 	}
1254 	rs->rules[rs_num].inactive.open = 0;
1255 	return (0);
1256 }
1257 
1258 #define PF_MD5_UPD(st, elm)						\
1259 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1260 
1261 #define PF_MD5_UPD_STR(st, elm)						\
1262 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1263 
1264 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1265 		(stor) = htonl((st)->elm);				\
1266 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1267 } while (0)
1268 
1269 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1270 		(stor) = htons((st)->elm);				\
1271 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1272 } while (0)
1273 
1274 static void
1275 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1276 {
1277 	PF_MD5_UPD(pfr, addr.type);
1278 	switch (pfr->addr.type) {
1279 		case PF_ADDR_DYNIFTL:
1280 			PF_MD5_UPD(pfr, addr.v.ifname);
1281 			PF_MD5_UPD(pfr, addr.iflags);
1282 			break;
1283 		case PF_ADDR_TABLE:
1284 			PF_MD5_UPD(pfr, addr.v.tblname);
1285 			break;
1286 		case PF_ADDR_ADDRMASK:
1287 			/* XXX ignore af? */
1288 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1289 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1290 			break;
1291 	}
1292 
1293 	PF_MD5_UPD(pfr, port[0]);
1294 	PF_MD5_UPD(pfr, port[1]);
1295 	PF_MD5_UPD(pfr, neg);
1296 	PF_MD5_UPD(pfr, port_op);
1297 }
1298 
1299 static void
1300 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1301 {
1302 	u_int16_t x;
1303 	u_int32_t y;
1304 
1305 	pf_hash_rule_addr(ctx, &rule->src);
1306 	pf_hash_rule_addr(ctx, &rule->dst);
1307 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1308 		PF_MD5_UPD_STR(rule, label[i]);
1309 	PF_MD5_UPD_STR(rule, ifname);
1310 	PF_MD5_UPD_STR(rule, rcv_ifname);
1311 	PF_MD5_UPD_STR(rule, match_tagname);
1312 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1313 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1314 	PF_MD5_UPD_HTONL(rule, prob, y);
1315 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1316 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1317 	PF_MD5_UPD(rule, uid.op);
1318 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1319 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1320 	PF_MD5_UPD(rule, gid.op);
1321 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1322 	PF_MD5_UPD(rule, action);
1323 	PF_MD5_UPD(rule, direction);
1324 	PF_MD5_UPD(rule, af);
1325 	PF_MD5_UPD(rule, quick);
1326 	PF_MD5_UPD(rule, ifnot);
1327 	PF_MD5_UPD(rule, match_tag_not);
1328 	PF_MD5_UPD(rule, natpass);
1329 	PF_MD5_UPD(rule, keep_state);
1330 	PF_MD5_UPD(rule, proto);
1331 	PF_MD5_UPD(rule, type);
1332 	PF_MD5_UPD(rule, code);
1333 	PF_MD5_UPD(rule, flags);
1334 	PF_MD5_UPD(rule, flagset);
1335 	PF_MD5_UPD(rule, allow_opts);
1336 	PF_MD5_UPD(rule, rt);
1337 	PF_MD5_UPD(rule, tos);
1338 	PF_MD5_UPD(rule, scrub_flags);
1339 	PF_MD5_UPD(rule, min_ttl);
1340 	PF_MD5_UPD(rule, set_tos);
1341 	if (rule->anchor != NULL)
1342 		PF_MD5_UPD_STR(rule, anchor->path);
1343 }
1344 
1345 static void
1346 pf_hash_rule(struct pf_krule *rule)
1347 {
1348 	MD5_CTX		ctx;
1349 
1350 	MD5Init(&ctx);
1351 	pf_hash_rule_rolling(&ctx, rule);
1352 	MD5Final(rule->md5sum, &ctx);
1353 }
1354 
1355 static int
1356 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1357 {
1358 
1359 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1360 }
1361 
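/*
 * Across a commit, rules are matched by the MD5 digest computed over
 * their significant fields (pf_hash_rule()); when the keep_counters
 * option is set, counters from the outgoing rule with the same digest
 * are rolled into its replacement below.
 */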
1362 static int
1363 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1364 {
1365 	struct pf_kruleset	*rs;
1366 	struct pf_krule		*rule, **old_array, *old_rule;
1367 	struct pf_krulequeue	*old_rules;
1368 	struct pf_krule_global  *old_tree;
1369 	int			 error;
1370 	u_int32_t		 old_rcount;
1371 
1372 	PF_RULES_WASSERT();
1373 
1374 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1375 		return (EINVAL);
1376 	rs = pf_find_kruleset(anchor);
1377 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1378 	    ticket != rs->rules[rs_num].inactive.ticket)
1379 		return (EBUSY);
1380 
1381 	/* Calculate checksum for the main ruleset */
1382 	if (rs == &pf_main_ruleset) {
1383 		error = pf_setup_pfsync_matching(rs);
1384 		if (error != 0)
1385 			return (error);
1386 	}
1387 
1388 	/* Swap rules, keep the old. */
1389 	old_rules = rs->rules[rs_num].active.ptr;
1390 	old_rcount = rs->rules[rs_num].active.rcount;
1391 	old_array = rs->rules[rs_num].active.ptr_array;
1392 	old_tree = rs->rules[rs_num].active.tree;
1393 
1394 	rs->rules[rs_num].active.ptr =
1395 	    rs->rules[rs_num].inactive.ptr;
1396 	rs->rules[rs_num].active.ptr_array =
1397 	    rs->rules[rs_num].inactive.ptr_array;
1398 	rs->rules[rs_num].active.tree =
1399 	    rs->rules[rs_num].inactive.tree;
1400 	rs->rules[rs_num].active.rcount =
1401 	    rs->rules[rs_num].inactive.rcount;
1402 
1403 	/* Attempt to preserve counter information. */
1404 	if (V_pf_status.keep_counters && old_tree != NULL) {
1405 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1406 		    entries) {
1407 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1408 			if (old_rule == NULL) {
1409 				continue;
1410 			}
1411 			pf_counter_u64_critical_enter();
1412 			pf_counter_u64_rollup_protected(&rule->evaluations,
1413 			    pf_counter_u64_fetch(&old_rule->evaluations));
1414 			pf_counter_u64_rollup_protected(&rule->packets[0],
1415 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1416 			pf_counter_u64_rollup_protected(&rule->packets[1],
1417 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1418 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1419 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1420 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1421 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1422 			pf_counter_u64_critical_exit();
1423 		}
1424 	}
1425 
1426 	rs->rules[rs_num].inactive.ptr = old_rules;
1427 	rs->rules[rs_num].inactive.ptr_array = old_array;
1428 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1429 	rs->rules[rs_num].inactive.rcount = old_rcount;
1430 
1431 	rs->rules[rs_num].active.ticket =
1432 	    rs->rules[rs_num].inactive.ticket;
1433 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1434 
1435 	/* Purge the old rule list. */
1436 	PF_UNLNKDRULES_LOCK();
1437 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1438 		pf_unlink_rule_locked(old_rules, rule);
1439 	PF_UNLNKDRULES_UNLOCK();
1440 	if (rs->rules[rs_num].inactive.ptr_array)
1441 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1442 	rs->rules[rs_num].inactive.ptr_array = NULL;
1443 	rs->rules[rs_num].inactive.rcount = 0;
1444 	rs->rules[rs_num].inactive.open = 0;
1445 	pf_remove_if_empty_kruleset(rs);
1446 	free(old_tree, M_TEMP);
1447 
1448 	return (0);
1449 }
1450 
1451 static int
1452 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1453 {
1454 	MD5_CTX			 ctx;
1455 	struct pf_krule		*rule;
1456 	int			 rs_cnt;
1457 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1458 
1459 	MD5Init(&ctx);
1460 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1461 		/* XXX PF_RULESET_SCRUB as well? */
1462 		if (rs_cnt == PF_RULESET_SCRUB)
1463 			continue;
1464 
1465 		if (rs->rules[rs_cnt].inactive.ptr_array)
1466 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1467 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1468 
1469 		if (rs->rules[rs_cnt].inactive.rcount) {
1470 			rs->rules[rs_cnt].inactive.ptr_array =
1471 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1472 			    sizeof(struct pf_rule **),
1473 			    M_TEMP, M_NOWAIT);
1474 
1475 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1476 				return (ENOMEM);
1477 		}
1478 
1479 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1480 		    entries) {
1481 			pf_hash_rule_rolling(&ctx, rule);
1482 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1483 		}
1484 	}
1485 
1486 	MD5Final(digest, &ctx);
1487 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1488 	return (0);
1489 }
1490 
1491 static int
1492 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1493 {
1494 	int error = 0;
1495 
1496 	switch (addr->type) {
1497 	case PF_ADDR_TABLE:
1498 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1499 		if (addr->p.tbl == NULL)
1500 			error = ENOMEM;
1501 		break;
1502 	default:
1503 		error = EINVAL;
1504 	}
1505 
1506 	return (error);
1507 }
1508 
1509 static int
1510 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1511     sa_family_t af)
1512 {
1513 	int error = 0;
1514 
1515 	switch (addr->type) {
1516 	case PF_ADDR_TABLE:
1517 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1518 		if (addr->p.tbl == NULL)
1519 			error = ENOMEM;
1520 		break;
1521 	case PF_ADDR_DYNIFTL:
1522 		error = pfi_dynaddr_setup(addr, af);
1523 		break;
1524 	}
1525 
1526 	return (error);
1527 }
1528 
1529 void
1530 pf_addr_copyout(struct pf_addr_wrap *addr)
1531 {
1532 
1533 	switch (addr->type) {
1534 	case PF_ADDR_DYNIFTL:
1535 		pfi_dynaddr_copyout(addr);
1536 		break;
1537 	case PF_ADDR_TABLE:
1538 		pf_tbladdr_copyout(addr);
1539 		break;
1540 	}
1541 }
1542 
1543 static void
1544 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1545 {
1546 	int	secs = time_uptime, diff;
1547 
1548 	bzero(out, sizeof(struct pf_src_node));
1549 
1550 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1551 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1552 
1553 	if (in->rule != NULL)
1554 		out->rule.nr = in->rule->nr;
1555 
1556 	for (int i = 0; i < 2; i++) {
1557 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1558 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1559 	}
1560 
1561 	out->states = in->states;
1562 	out->conn = in->conn;
1563 	out->af = in->af;
1564 	out->ruletype = in->ruletype;
1565 
1566 	out->creation = secs - in->creation;
1567 	if (out->expire > secs)
1568 		out->expire -= secs;
1569 	else
1570 		out->expire = 0;
1571 
1572 	/* Adjust the connection rate estimate. */
1573 	out->conn_rate = in->conn_rate;
1574 	diff = secs - in->conn_rate.last;
1575 	if (diff >= in->conn_rate.seconds)
1576 		out->conn_rate.count = 0;
1577 	else
1578 		out->conn_rate.count -=
1579 		    in->conn_rate.count * diff /
1580 		    in->conn_rate.seconds;
1581 }
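/*
 * Example of the decay above: with conn_rate.seconds = 10,
 * conn_rate.count = 30 and diff = 5 (half the window elapsed), the
 * exported count is reduced by 30 * 5 / 10 = 15.
 */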
1582 
1583 #ifdef ALTQ
1584 /*
1585  * Handle export of struct pf_kaltq to user binaries that may be using any
1586  * version of struct pf_altq.
1587  */
1588 static int
1589 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1590 {
1591 	u_int32_t version;
1592 
1593 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1594 		version = 0;
1595 	else
1596 		version = pa->version;
1597 
1598 	if (version > PFIOC_ALTQ_VERSION)
1599 		return (EINVAL);
1600 
1601 #define ASSIGN(x) exported_q->x = q->x
1602 #define COPY(x) \
1603 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1604 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1605 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
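/*
 * The v0 structure has narrower fields than the kernel's struct
 * pf_altq, so wider bandwidth and service-curve values are saturated
 * to the field width on export rather than silently truncated.
 */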
1606 
1607 	switch (version) {
1608 	case 0: {
1609 		struct pf_altq_v0 *exported_q =
1610 		    &((struct pfioc_altq_v0 *)pa)->altq;
1611 
1612 		COPY(ifname);
1613 
1614 		ASSIGN(scheduler);
1615 		ASSIGN(tbrsize);
1616 		exported_q->tbrsize = SATU16(q->tbrsize);
1617 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1618 
1619 		COPY(qname);
1620 		COPY(parent);
1621 		ASSIGN(parent_qid);
1622 		exported_q->bandwidth = SATU32(q->bandwidth);
1623 		ASSIGN(priority);
1624 		ASSIGN(local_flags);
1625 
1626 		ASSIGN(qlimit);
1627 		ASSIGN(flags);
1628 
1629 		if (q->scheduler == ALTQT_HFSC) {
1630 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1631 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1632 			    SATU32(q->pq_u.hfsc_opts.x)
1633 
1634 			ASSIGN_OPT_SATU32(rtsc_m1);
1635 			ASSIGN_OPT(rtsc_d);
1636 			ASSIGN_OPT_SATU32(rtsc_m2);
1637 
1638 			ASSIGN_OPT_SATU32(lssc_m1);
1639 			ASSIGN_OPT(lssc_d);
1640 			ASSIGN_OPT_SATU32(lssc_m2);
1641 
1642 			ASSIGN_OPT_SATU32(ulsc_m1);
1643 			ASSIGN_OPT(ulsc_d);
1644 			ASSIGN_OPT_SATU32(ulsc_m2);
1645 
1646 			ASSIGN_OPT(flags);
1647 
1648 #undef ASSIGN_OPT
1649 #undef ASSIGN_OPT_SATU32
1650 		} else
1651 			COPY(pq_u);
1652 
1653 		ASSIGN(qid);
1654 		break;
1655 	}
1656 	case 1:	{
1657 		struct pf_altq_v1 *exported_q =
1658 		    &((struct pfioc_altq_v1 *)pa)->altq;
1659 
1660 		COPY(ifname);
1661 
1662 		ASSIGN(scheduler);
1663 		ASSIGN(tbrsize);
1664 		ASSIGN(ifbandwidth);
1665 
1666 		COPY(qname);
1667 		COPY(parent);
1668 		ASSIGN(parent_qid);
1669 		ASSIGN(bandwidth);
1670 		ASSIGN(priority);
1671 		ASSIGN(local_flags);
1672 
1673 		ASSIGN(qlimit);
1674 		ASSIGN(flags);
1675 		COPY(pq_u);
1676 
1677 		ASSIGN(qid);
1678 		break;
1679 	}
1680 	default:
1681 		panic("%s: unhandled struct pfioc_altq version", __func__);
1682 		break;
1683 	}
1684 
1685 #undef ASSIGN
1686 #undef COPY
1687 #undef SATU16
1688 #undef SATU32
1689 
1690 	return (0);
1691 }
1692 
1693 /*
1694  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1695  * that may be using any version of it.
1696  */
1697 static int
1698 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1699 {
1700 	u_int32_t version;
1701 
1702 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1703 		version = 0;
1704 	else
1705 		version = pa->version;
1706 
1707 	if (version > PFIOC_ALTQ_VERSION)
1708 		return (EINVAL);
1709 
1710 #define ASSIGN(x) q->x = imported_q->x
1711 #define COPY(x) \
1712 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1713 
1714 	switch (version) {
1715 	case 0: {
1716 		struct pf_altq_v0 *imported_q =
1717 		    &((struct pfioc_altq_v0 *)pa)->altq;
1718 
1719 		COPY(ifname);
1720 
1721 		ASSIGN(scheduler);
1722 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1723 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1724 
1725 		COPY(qname);
1726 		COPY(parent);
1727 		ASSIGN(parent_qid);
1728 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1729 		ASSIGN(priority);
1730 		ASSIGN(local_flags);
1731 
1732 		ASSIGN(qlimit);
1733 		ASSIGN(flags);
1734 
1735 		if (imported_q->scheduler == ALTQT_HFSC) {
1736 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1737 
1738 			/*
1739 			 * The m1 and m2 parameters are being copied from
1740 			 * 32-bit to 64-bit.
1741 			 */
1742 			ASSIGN_OPT(rtsc_m1);
1743 			ASSIGN_OPT(rtsc_d);
1744 			ASSIGN_OPT(rtsc_m2);
1745 
1746 			ASSIGN_OPT(lssc_m1);
1747 			ASSIGN_OPT(lssc_d);
1748 			ASSIGN_OPT(lssc_m2);
1749 
1750 			ASSIGN_OPT(ulsc_m1);
1751 			ASSIGN_OPT(ulsc_d);
1752 			ASSIGN_OPT(ulsc_m2);
1753 
1754 			ASSIGN_OPT(flags);
1755 
1756 #undef ASSIGN_OPT
1757 		} else
1758 			COPY(pq_u);
1759 
1760 		ASSIGN(qid);
1761 		break;
1762 	}
1763 	case 1: {
1764 		struct pf_altq_v1 *imported_q =
1765 		    &((struct pfioc_altq_v1 *)pa)->altq;
1766 
1767 		COPY(ifname);
1768 
1769 		ASSIGN(scheduler);
1770 		ASSIGN(tbrsize);
1771 		ASSIGN(ifbandwidth);
1772 
1773 		COPY(qname);
1774 		COPY(parent);
1775 		ASSIGN(parent_qid);
1776 		ASSIGN(bandwidth);
1777 		ASSIGN(priority);
1778 		ASSIGN(local_flags);
1779 
1780 		ASSIGN(qlimit);
1781 		ASSIGN(flags);
1782 		COPY(pq_u);
1783 
1784 		ASSIGN(qid);
1785 		break;
1786 	}
1787 	default:
1788 		panic("%s: unhandled struct pfioc_altq version", __func__);
1789 		break;
1790 	}
1791 
1792 #undef ASSIGN
1793 #undef COPY
1794 
1795 	return (0);
1796 }
1797 
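/*
 * Return the n-th active ALTQ entry, counting the per-interface ALTQs
 * first and the queues second; NULL if n is out of range.
 */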
1798 static struct pf_altq *
1799 pf_altq_get_nth_active(u_int32_t n)
1800 {
1801 	struct pf_altq		*altq;
1802 	u_int32_t		 nr;
1803 
1804 	nr = 0;
1805 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1806 		if (nr == n)
1807 			return (altq);
1808 		nr++;
1809 	}
1810 
1811 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1812 		if (nr == n)
1813 			return (altq);
1814 		nr++;
1815 	}
1816 
1817 	return (NULL);
1818 }
1819 #endif /* ALTQ */
1820 
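/*
 * Allocate a zeroed kernel rule with its redirection pool mutex and
 * per-CPU timestamp storage already initialized.  The counterpart is
 * pf_krule_free() below.
 */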
1821 struct pf_krule *
1822 pf_krule_alloc(void)
1823 {
1824 	struct pf_krule *rule;
1825 
1826 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1827 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1828 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1829 	    M_WAITOK | M_ZERO);
1830 	return (rule);
1831 }
1832 
1833 void
1834 pf_krule_free(struct pf_krule *rule)
1835 {
1836 #ifdef PF_WANT_32_TO_64_COUNTER
1837 	bool wowned;
1838 #endif
1839 
1840 	if (rule == NULL)
1841 		return;
1842 
1843 #ifdef PF_WANT_32_TO_64_COUNTER
1844 	if (rule->allrulelinked) {
1845 		wowned = PF_RULES_WOWNED();
1846 		if (!wowned)
1847 			PF_RULES_WLOCK();
1848 		LIST_REMOVE(rule, allrulelist);
1849 		V_pf_allrulecount--;
1850 		if (!wowned)
1851 			PF_RULES_WUNLOCK();
1852 	}
1853 #endif
1854 
1855 	pf_counter_u64_deinit(&rule->evaluations);
1856 	for (int i = 0; i < 2; i++) {
1857 		pf_counter_u64_deinit(&rule->packets[i]);
1858 		pf_counter_u64_deinit(&rule->bytes[i]);
1859 	}
1860 	counter_u64_free(rule->states_cur);
1861 	counter_u64_free(rule->states_tot);
1862 	counter_u64_free(rule->src_nodes);
1863 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1864 
1865 	mtx_destroy(&rule->rpool.mtx);
1866 	free(rule, M_PFRULE);
1867 }
1868 
1869 void
1870 pf_krule_clear_counters(struct pf_krule *rule)
1871 {
1872 	pf_counter_u64_zero(&rule->evaluations);
1873 	for (int i = 0; i < 2; i++) {
1874 		pf_counter_u64_zero(&rule->packets[i]);
1875 		pf_counter_u64_zero(&rule->bytes[i]);
1876 	}
1877 	counter_u64_zero(rule->states_tot);
1878 }
1879 
1880 static void
1881 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1882     struct pf_pooladdr *pool)
1883 {
1884 
1885 	bzero(pool, sizeof(*pool));
1886 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1887 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1888 }
1889 
1890 static int
1891 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1892     struct pf_kpooladdr *kpool)
1893 {
1894 	int ret;
1895 
1896 	bzero(kpool, sizeof(*kpool));
1897 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1898 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1899 	    sizeof(kpool->ifname));
1900 	return (ret);
1901 }
1902 
1903 static void
1904 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1905 {
1906 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1907 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1908 
1909 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1910 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1911 
1912 	kpool->tblidx = pool->tblidx;
1913 	kpool->proxy_port[0] = pool->proxy_port[0];
1914 	kpool->proxy_port[1] = pool->proxy_port[1];
1915 	kpool->opts = pool->opts;
1916 }
1917 
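/*
 * Convert a struct pf_rule supplied by userspace into a kernel
 * struct pf_krule.  The source and destination addresses are validated
 * with pf_check_rule_addr(), and string fields are copied via
 * pf_user_strcpy() so that malformed input is rejected with an error
 * rather than silently truncated.  Counters and kernel-only pointers
 * (kif, anchor, overload_tbl) are deliberately not taken from userspace.
 */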
1918 static int
1919 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1920 {
1921 	int ret;
1922 
1923 #ifndef INET
1924 	if (rule->af == AF_INET) {
1925 		return (EAFNOSUPPORT);
1926 	}
1927 #endif /* INET */
1928 #ifndef INET6
1929 	if (rule->af == AF_INET6) {
1930 		return (EAFNOSUPPORT);
1931 	}
1932 #endif /* INET6 */
1933 
1934 	ret = pf_check_rule_addr(&rule->src);
1935 	if (ret != 0)
1936 		return (ret);
1937 	ret = pf_check_rule_addr(&rule->dst);
1938 	if (ret != 0)
1939 		return (ret);
1940 
1941 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1942 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1943 
1944 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1945 	if (ret != 0)
1946 		return (ret);
1947 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1948 	if (ret != 0)
1949 		return (ret);
1950 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1951 	if (ret != 0)
1952 		return (ret);
1953 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1954 	if (ret != 0)
1955 		return (ret);
1956 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1957 	    sizeof(rule->tagname));
1958 	if (ret != 0)
1959 		return (ret);
1960 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1961 	    sizeof(rule->match_tagname));
1962 	if (ret != 0)
1963 		return (ret);
1964 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1965 	    sizeof(rule->overload_tblname));
1966 	if (ret != 0)
1967 		return (ret);
1968 
1969 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
1970 
1971 	/* Don't allow userspace to set evaluations, packets or bytes. */
1972 	/* kif, anchor, overload_tbl are not copied over. */
1973 
1974 	krule->os_fingerprint = rule->os_fingerprint;
1975 
1976 	krule->rtableid = rule->rtableid;
1977 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1978 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1979 	krule->max_states = rule->max_states;
1980 	krule->max_src_nodes = rule->max_src_nodes;
1981 	krule->max_src_states = rule->max_src_states;
1982 	krule->max_src_conn = rule->max_src_conn;
1983 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1984 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1985 	krule->qid = rule->qid;
1986 	krule->pqid = rule->pqid;
1987 	krule->nr = rule->nr;
1988 	krule->prob = rule->prob;
1989 	krule->cuid = rule->cuid;
1990 	krule->cpid = rule->cpid;
1991 
1992 	krule->return_icmp = rule->return_icmp;
1993 	krule->return_icmp6 = rule->return_icmp6;
1994 	krule->max_mss = rule->max_mss;
1995 	krule->tag = rule->tag;
1996 	krule->match_tag = rule->match_tag;
1997 	krule->scrub_flags = rule->scrub_flags;
1998 
1999 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2000 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2001 
2002 	krule->rule_flag = rule->rule_flag;
2003 	krule->action = rule->action;
2004 	krule->direction = rule->direction;
2005 	krule->log = rule->log;
2006 	krule->logif = rule->logif;
2007 	krule->quick = rule->quick;
2008 	krule->ifnot = rule->ifnot;
2009 	krule->match_tag_not = rule->match_tag_not;
2010 	krule->natpass = rule->natpass;
2011 
2012 	krule->keep_state = rule->keep_state;
2013 	krule->af = rule->af;
2014 	krule->proto = rule->proto;
2015 	krule->type = rule->type;
2016 	krule->code = rule->code;
2017 	krule->flags = rule->flags;
2018 	krule->flagset = rule->flagset;
2019 	krule->min_ttl = rule->min_ttl;
2020 	krule->allow_opts = rule->allow_opts;
2021 	krule->rt = rule->rt;
2022 	krule->return_ttl = rule->return_ttl;
2023 	krule->tos = rule->tos;
2024 	krule->set_tos = rule->set_tos;
2025 
2026 	krule->flush = rule->flush;
2027 	krule->prio = rule->prio;
2028 	krule->set_prio[0] = rule->set_prio[0];
2029 	krule->set_prio[1] = rule->set_prio[1];
2030 
2031 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2032 
2033 	return (0);
2034 }
2035 
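/*
 * DIOCGETRULES helper: report the number of rules in the requested
 * anchor/ruleset through pr->nr and the current active ticket through
 * pr->ticket.
 */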
2036 int
2037 pf_ioctl_getrules(struct pfioc_rule *pr)
2038 {
2039 	struct pf_kruleset	*ruleset;
2040 	struct pf_krule		*tail;
2041 	int			 rs_num;
2042 
2043 	PF_RULES_WLOCK();
2044 	ruleset = pf_find_kruleset(pr->anchor);
2045 	if (ruleset == NULL) {
2046 		PF_RULES_WUNLOCK();
2047 		return (EINVAL);
2048 	}
2049 	rs_num = pf_get_ruleset_number(pr->rule.action);
2050 	if (rs_num >= PF_RULESET_MAX) {
2051 		PF_RULES_WUNLOCK();
2052 		return (EINVAL);
2053 	}
2054 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2055 	    pf_krulequeue);
2056 	if (tail)
2057 		pr->nr = tail->nr + 1;
2058 	else
2059 		pr->nr = 0;
2060 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2061 	PF_RULES_WUNLOCK();
2062 
2063 	return (0);
2064 }
2065 
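/*
 * Take ownership of a fully constructed rule and link it into the
 * inactive ruleset named by 'anchor'.  Both the ruleset ticket and the
 * pool address ticket are validated, the rule's interfaces, queues, tags
 * and tables are attached, and the rule is hashed into the global tree to
 * reject exact duplicates (EEXIST).  The rule is consumed either way: on
 * error it is freed, so callers such as the DIOCADDRULE and DIOCADDRULENV
 * cases below must not touch it afterwards.
 */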
2066 int
2067 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2068     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2069     uid_t uid, pid_t pid)
2070 {
2071 	struct pf_kruleset	*ruleset;
2072 	struct pf_krule		*tail;
2073 	struct pf_kpooladdr	*pa;
2074 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2075 	int			 rs_num;
2076 	int			 error = 0;
2077 
2078 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2079 		error = EINVAL;
2080 		goto errout_unlocked;
2081 	}
2082 
2083 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2084 
2085 	if (rule->ifname[0])
2086 		kif = pf_kkif_create(M_WAITOK);
2087 	if (rule->rcv_ifname[0])
2088 		rcv_kif = pf_kkif_create(M_WAITOK);
2089 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2090 	for (int i = 0; i < 2; i++) {
2091 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2092 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2093 	}
2094 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2095 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2096 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2097 	rule->cuid = uid;
2098 	rule->cpid = pid;
2099 	TAILQ_INIT(&rule->rpool.list);
2100 
2101 	PF_CONFIG_LOCK();
2102 	PF_RULES_WLOCK();
2103 #ifdef PF_WANT_32_TO_64_COUNTER
2104 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2105 	MPASS(!rule->allrulelinked);
2106 	rule->allrulelinked = true;
2107 	V_pf_allrulecount++;
2108 #endif
2109 	ruleset = pf_find_kruleset(anchor);
2110 	if (ruleset == NULL)
2111 		ERROUT(EINVAL);
2112 	rs_num = pf_get_ruleset_number(rule->action);
2113 	if (rs_num >= PF_RULESET_MAX)
2114 		ERROUT(EINVAL);
2115 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2116 		DPFPRINTF(PF_DEBUG_MISC,
2117 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2118 		    ruleset->rules[rs_num].inactive.ticket));
2119 		ERROUT(EBUSY);
2120 	}
2121 	if (pool_ticket != V_ticket_pabuf) {
2122 		DPFPRINTF(PF_DEBUG_MISC,
2123 		    ("pool_ticket: %d != %d\n", pool_ticket,
2124 		    V_ticket_pabuf));
2125 		ERROUT(EBUSY);
2126 	}
2127 	/*
2128 	 * XXXMJG hack: there is no mechanism to ensure the caller started the
2129 	 * transaction.  The ticket checked above may happen to match by
2130 	 * accident, even if nobody called DIOCXBEGIN, let alone this process.
2131 	 * Partially work around it by checking if the RB tree got allocated,
2132 	 * see pf_begin_rules.
2133 	 */
2134 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2135 		ERROUT(EINVAL);
2136 	}
2137 
2138 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2139 	    pf_krulequeue);
2140 	if (tail)
2141 		rule->nr = tail->nr + 1;
2142 	else
2143 		rule->nr = 0;
2144 	if (rule->ifname[0]) {
2145 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2146 		kif = NULL;
2147 		pfi_kkif_ref(rule->kif);
2148 	} else
2149 		rule->kif = NULL;
2150 
2151 	if (rule->rcv_ifname[0]) {
2152 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2153 		rcv_kif = NULL;
2154 		pfi_kkif_ref(rule->rcv_kif);
2155 	} else
2156 		rule->rcv_kif = NULL;
2157 
2158 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2159 		error = EBUSY;
2160 
2161 #ifdef ALTQ
2162 	/* set queue IDs */
2163 	if (rule->qname[0] != 0) {
2164 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2165 			error = EBUSY;
2166 		else if (rule->pqname[0] != 0) {
2167 			if ((rule->pqid =
2168 			    pf_qname2qid(rule->pqname)) == 0)
2169 				error = EBUSY;
2170 		} else
2171 			rule->pqid = rule->qid;
2172 	}
2173 #endif
2174 	if (rule->tagname[0])
2175 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2176 			error = EBUSY;
2177 	if (rule->match_tagname[0])
2178 		if ((rule->match_tag =
2179 		    pf_tagname2tag(rule->match_tagname)) == 0)
2180 			error = EBUSY;
2181 	if (rule->rt && !rule->direction)
2182 		error = EINVAL;
2183 	if (!rule->log)
2184 		rule->logif = 0;
2185 	if (rule->logif >= PFLOGIFS_MAX)
2186 		error = EINVAL;
2187 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2188 		error = ENOMEM;
2189 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2190 		error = ENOMEM;
2191 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2192 		error = EINVAL;
2193 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2194 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2195 	    rule->set_prio[1] > PF_PRIO_MAX))
2196 		error = EINVAL;
2197 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2198 		if (pa->addr.type == PF_ADDR_TABLE) {
2199 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2200 			    pa->addr.v.tblname);
2201 			if (pa->addr.p.tbl == NULL)
2202 				error = ENOMEM;
2203 		}
2204 
2205 	rule->overload_tbl = NULL;
2206 	if (rule->overload_tblname[0]) {
2207 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2208 		    rule->overload_tblname)) == NULL)
2209 			error = EINVAL;
2210 		else
2211 			rule->overload_tbl->pfrkt_flags |=
2212 			    PFR_TFLAG_ACTIVE;
2213 	}
2214 
2215 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2216 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2217 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2218 	    (rule->rt > PF_NOPFROUTE)) &&
2219 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2220 		error = EINVAL;
2221 
2222 	if (rule->action == PF_PASS && rule->rpool.opts & PF_POOL_STICKYADDR &&
2223 	    !rule->keep_state) {
2224 		error = EINVAL;
2225 	}
2226 
2227 	if (error) {
2228 		pf_free_rule(rule);
2229 		rule = NULL;
2230 		ERROUT(error);
2231 	}
2232 
2233 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2234 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2235 	    rule, entries);
2236 	ruleset->rules[rs_num].inactive.rcount++;
2237 
2238 	PF_RULES_WUNLOCK();
2239 	pf_hash_rule(rule);
2240 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2241 		PF_RULES_WLOCK();
2242 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2243 		ruleset->rules[rs_num].inactive.rcount--;
2244 		pf_free_rule(rule);
2245 		rule = NULL;
2246 		ERROUT(EEXIST);
2247 	}
2248 	PF_CONFIG_UNLOCK();
2249 
2250 	return (0);
2251 
2252 #undef ERROUT
2253 errout:
2254 	PF_RULES_WUNLOCK();
2255 	PF_CONFIG_UNLOCK();
2256 errout_unlocked:
2257 	pf_kkif_free(rcv_kif);
2258 	pf_kkif_free(kif);
2259 	pf_krule_free(rule);
2260 	return (error);
2261 }
2262 
2263 static bool
2264 pf_label_match(const struct pf_krule *rule, const char *label)
2265 {
2266 	int i = 0;
2267 
2268 	while (*rule->label[i]) {
2269 		if (strcmp(rule->label[i], label) == 0)
2270 			return (true);
2271 		i++;
2272 	}
2273 
2274 	return (false);
2275 }
2276 
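/*
 * Unlink the state matching 'key' in direction 'dir'.  If the lookup is
 * ambiguous (more than one state matches), nothing is killed and zero is
 * returned.
 */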
2277 static unsigned int
2278 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2279 {
2280 	struct pf_kstate *s;
2281 	int more = 0;
2282 
2283 	s = pf_find_state_all(key, dir, &more);
2284 	if (s == NULL)
2285 		return (0);
2286 
2287 	if (more) {
2288 		PF_STATE_UNLOCK(s);
2289 		return (0);
2290 	}
2291 
2292 	pf_unlink_state(s);
2293 	return (1);
2294 }
2295 
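/*
 * Scan one state hash row and unlink every state matching the criteria
 * in 'psk' (address family, protocol, addresses, ports, label and
 * interface).  The row lock is dropped whenever a state is unlinked, so
 * the scan restarts from the top after each kill.  With psk_kill_match
 * set, the state belonging to the opposite direction of the same
 * connection is killed as well.  Returns the number of states killed.
 */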
2296 static int
2297 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2298 {
2299 	struct pf_kstate	*s;
2300 	struct pf_state_key	*sk;
2301 	struct pf_addr		*srcaddr, *dstaddr;
2302 	struct pf_state_key_cmp	 match_key;
2303 	int			 idx, killed = 0;
2304 	unsigned int		 dir;
2305 	u_int16_t		 srcport, dstport;
2306 	struct pfi_kkif		*kif;
2307 
2308 relock_DIOCKILLSTATES:
2309 	PF_HASHROW_LOCK(ih);
2310 	LIST_FOREACH(s, &ih->states, entry) {
2311 		/* For floating states look at the original kif. */
2312 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2313 
2314 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2315 		if (s->direction == PF_OUT) {
2316 			srcaddr = &sk->addr[1];
2317 			dstaddr = &sk->addr[0];
2318 			srcport = sk->port[1];
2319 			dstport = sk->port[0];
2320 		} else {
2321 			srcaddr = &sk->addr[0];
2322 			dstaddr = &sk->addr[1];
2323 			srcport = sk->port[0];
2324 			dstport = sk->port[1];
2325 		}
2326 
2327 		if (psk->psk_af && sk->af != psk->psk_af)
2328 			continue;
2329 
2330 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2331 			continue;
2332 
2333 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2334 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2335 			continue;
2336 
2337 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2338 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2339 			continue;
2340 
2341 		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
2342 		    &psk->psk_rt_addr.addr.v.a.addr,
2343 		    &psk->psk_rt_addr.addr.v.a.mask,
2344 		    &s->rt_addr, sk->af))
2345 			continue;
2346 
2347 		if (psk->psk_src.port_op != 0 &&
2348 		    ! pf_match_port(psk->psk_src.port_op,
2349 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2350 			continue;
2351 
2352 		if (psk->psk_dst.port_op != 0 &&
2353 		    ! pf_match_port(psk->psk_dst.port_op,
2354 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2355 			continue;
2356 
2357 		if (psk->psk_label[0] &&
2358 		    ! pf_label_match(s->rule, psk->psk_label))
2359 			continue;
2360 
2361 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2362 		    kif->pfik_name))
2363 			continue;
2364 
2365 		if (psk->psk_kill_match) {
2366 			/* Create the key to find matching states, with lock
2367 			 * held. */
2368 
2369 			bzero(&match_key, sizeof(match_key));
2370 
2371 			if (s->direction == PF_OUT) {
2372 				dir = PF_IN;
2373 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2374 			} else {
2375 				dir = PF_OUT;
2376 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2377 			}
2378 
2379 			match_key.af = s->key[idx]->af;
2380 			match_key.proto = s->key[idx]->proto;
2381 			PF_ACPY(&match_key.addr[0],
2382 			    &s->key[idx]->addr[1], match_key.af);
2383 			match_key.port[0] = s->key[idx]->port[1];
2384 			PF_ACPY(&match_key.addr[1],
2385 			    &s->key[idx]->addr[0], match_key.af);
2386 			match_key.port[1] = s->key[idx]->port[0];
2387 		}
2388 
2389 		pf_unlink_state(s);
2390 		killed++;
2391 
2392 		if (psk->psk_kill_match)
2393 			killed += pf_kill_matching_state(&match_key, dir);
2394 
2395 		goto relock_DIOCKILLSTATES;
2396 	}
2397 	PF_HASHROW_UNLOCK(ih);
2398 
2399 	return (killed);
2400 }
2401 
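/*
 * Enable pf: hook the packet filter (and the Ethernet filter, if any
 * Ethernet rules are active) into pfil and mark the status as running.
 * Returns EEXIST if pf is already running.  The compat DIOCSTART case in
 * pfioctl() below is one caller.
 */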
2402 int
2403 pf_start(void)
2404 {
2405 	int error = 0;
2406 
2407 	sx_xlock(&V_pf_ioctl_lock);
2408 	if (V_pf_status.running)
2409 		error = EEXIST;
2410 	else {
2411 		hook_pf();
2412 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2413 			hook_pf_eth();
2414 		V_pf_status.running = 1;
2415 		V_pf_status.since = time_second;
2416 		new_unrhdr64(&V_pf_stateid, time_second);
2417 
2418 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2419 	}
2420 	sx_xunlock(&V_pf_ioctl_lock);
2421 
2422 	return (error);
2423 }
2424 
2425 int
2426 pf_stop(void)
2427 {
2428 	int error = 0;
2429 
2430 	sx_xlock(&V_pf_ioctl_lock);
2431 	if (!V_pf_status.running)
2432 		error = ENOENT;
2433 	else {
2434 		V_pf_status.running = 0;
2435 		dehook_pf();
2436 		dehook_pf_eth();
2437 		V_pf_status.since = time_second;
2438 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2439 	}
2440 	sx_xunlock(&V_pf_ioctl_lock);
2441 
2442 	return (error);
2443 }
2444 
2445 void
2446 pf_ioctl_clear_status(void)
2447 {
2448 	PF_RULES_WLOCK();
2449 	for (int i = 0; i < PFRES_MAX; i++)
2450 		counter_u64_zero(V_pf_status.counters[i]);
2451 	for (int i = 0; i < FCNT_MAX; i++)
2452 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2453 	for (int i = 0; i < SCNT_MAX; i++)
2454 		counter_u64_zero(V_pf_status.scounters[i]);
2455 	for (int i = 0; i < KLCNT_MAX; i++)
2456 		counter_u64_zero(V_pf_status.lcounters[i]);
2457 	V_pf_status.since = time_second;
2458 	if (*V_pf_status.ifname)
2459 		pfi_update_status(V_pf_status.ifname, NULL);
2460 	PF_RULES_WUNLOCK();
2461 }
2462 
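/*
 * Set one of the default-rule timeouts, returning the previous value
 * through 'prev_seconds' when it is non-NULL.  A zero PFTM_INTERVAL is
 * bumped to one second, and shrinking the interval wakes the purge thread
 * so the new value takes effect immediately.
 */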
2463 int
2464 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2465 {
2466 	uint32_t old;
2467 
2468 	if (timeout < 0 || timeout >= PFTM_MAX ||
2469 	    seconds < 0)
2470 		return (EINVAL);
2471 
2472 	PF_RULES_WLOCK();
2473 	old = V_pf_default_rule.timeout[timeout];
2474 	if (timeout == PFTM_INTERVAL && seconds == 0)
2475 		seconds = 1;
2476 	V_pf_default_rule.timeout[timeout] = seconds;
2477 	if (timeout == PFTM_INTERVAL && seconds < old)
2478 		wakeup(pf_purge_thread);
2479 
2480 	if (prev_seconds != NULL)
2481 		*prev_seconds = old;
2482 
2483 	PF_RULES_WUNLOCK();
2484 
2485 	return (0);
2486 }
2487 
2488 int
2489 pf_ioctl_get_timeout(int timeout, int *seconds)
2490 {
2491 	PF_RULES_RLOCK_TRACKER;
2492 
2493 	if (timeout < 0 || timeout >= PFTM_MAX)
2494 		return (EINVAL);
2495 
2496 	PF_RULES_RLOCK();
2497 	*seconds = V_pf_default_rule.timeout[timeout];
2498 	PF_RULES_RUNLOCK();
2499 
2500 	return (0);
2501 }
2502 
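/*
 * Set the hard limit on one of the pf memory pools by capping the backing
 * UMA zone, and report the previous limit through 'old_limit' when it is
 * non-NULL.
 */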
2503 int
2504 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2505 {
2506 
2507 	PF_RULES_WLOCK();
2508 	if (index < 0 || index >= PF_LIMIT_MAX ||
2509 	    V_pf_limits[index].zone == NULL) {
2510 		PF_RULES_WUNLOCK();
2511 		return (EINVAL);
2512 	}
2513 	uma_zone_set_max(V_pf_limits[index].zone, limit);
2514 	if (old_limit != NULL)
2515 		*old_limit = V_pf_limits[index].limit;
2516 	V_pf_limits[index].limit = limit;
2517 	PF_RULES_WUNLOCK();
2518 
2519 	return (0);
2520 }
2521 
2522 int
2523 pf_ioctl_get_limit(int index, unsigned int *limit)
2524 {
2525 	PF_RULES_RLOCK_TRACKER;
2526 
2527 	if (index < 0 || index >= PF_LIMIT_MAX)
2528 		return (EINVAL);
2529 
2530 	PF_RULES_RLOCK();
2531 	*limit = V_pf_limits[index].limit;
2532 	PF_RULES_RUNLOCK();
2533 
2534 	return (0);
2535 }
2536 
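/*
 * Start a new pool address transaction: empty the scratch pool buffer and
 * hand out a fresh ticket.  pf_ioctl_add_addr() and pf_ioctl_addrule()
 * later verify that the caller still holds this ticket.
 */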
2537 int
2538 pf_ioctl_begin_addrs(uint32_t *ticket)
2539 {
2540 	PF_RULES_WLOCK();
2541 	pf_empty_kpool(&V_pf_pabuf);
2542 	*ticket = ++V_ticket_pabuf;
2543 	PF_RULES_WUNLOCK();
2544 
2545 	return (0);
2546 }
2547 
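/*
 * Append one pool address to the scratch buffer opened by
 * pf_ioctl_begin_addrs().  Only ADDRMASK, DYNIFTL and TABLE address types
 * are accepted and the ticket must still match; dynamic interface
 * addresses are resolved here, so rule insertion can later simply move
 * the finished list into the rule's pool.
 */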
2548 int
2549 pf_ioctl_add_addr(struct pfioc_pooladdr *pp)
2550 {
2551 	struct pf_kpooladdr	*pa = NULL;
2552 	struct pfi_kkif		*kif = NULL;
2553 	int error;
2554 
2555 #ifndef INET
2556 	if (pp->af == AF_INET)
2557 		return (EAFNOSUPPORT);
2558 #endif /* INET */
2559 #ifndef INET6
2560 	if (pp->af == AF_INET6)
2561 		return (EAFNOSUPPORT);
2562 #endif /* INET6 */
2563 
2564 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2565 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2566 	    pp->addr.addr.type != PF_ADDR_TABLE)
2567 		return (EINVAL);
2568 
2569 	if (pp->addr.addr.p.dyn != NULL)
2570 		return (EINVAL);
2571 
2572 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2573 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2574 	if (error != 0)
2575 		goto out;
2576 	if (pa->ifname[0])
2577 		kif = pf_kkif_create(M_WAITOK);
2578 	PF_RULES_WLOCK();
2579 	if (pp->ticket != V_ticket_pabuf) {
2580 		PF_RULES_WUNLOCK();
2581 		if (pa->ifname[0])
2582 			pf_kkif_free(kif);
2583 		error = EBUSY;
2584 		goto out;
2585 	}
2586 	if (pa->ifname[0]) {
2587 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2588 		kif = NULL;
2589 		pfi_kkif_ref(pa->kif);
2590 	} else
2591 		pa->kif = NULL;
2592 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2593 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2594 		if (pa->ifname[0])
2595 			pfi_kkif_unref(pa->kif);
2596 		PF_RULES_WUNLOCK();
2597 		goto out;
2598 	}
2599 	TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2600 	PF_RULES_WUNLOCK();
2601 
2602 	return (0);
2603 
2604 out:
2605 	free(pa, M_PFRULE);
2606 	return (error);
2607 }
2608 
2609 int
2610 pf_ioctl_get_addrs(struct pfioc_pooladdr *pp)
2611 {
2612 	struct pf_kpool		*pool;
2613 	struct pf_kpooladdr	*pa;
2614 
2615 	PF_RULES_RLOCK_TRACKER;
2616 
2617 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2618 	pp->nr = 0;
2619 
2620 	PF_RULES_RLOCK();
2621 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2622 	    pp->r_num, 0, 1, 0);
2623 	if (pool == NULL) {
2624 		PF_RULES_RUNLOCK();
2625 		return (EBUSY);
2626 	}
2627 	TAILQ_FOREACH(pa, &pool->list, entries)
2628 		pp->nr++;
2629 	PF_RULES_RUNLOCK();
2630 
2631 	return (0);
2632 }
2633 
2634 int
2635 pf_ioctl_get_addr(struct pfioc_pooladdr *pp)
2636 {
2637 	struct pf_kpool		*pool;
2638 	struct pf_kpooladdr	*pa;
2639 	u_int32_t		 nr = 0;
2640 
2641 	PF_RULES_RLOCK_TRACKER;
2642 
2643 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2644 
2645 	PF_RULES_RLOCK();
2646 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2647 	    pp->r_num, 0, 1, 1);
2648 	if (pool == NULL) {
2649 		PF_RULES_RUNLOCK();
2650 		return (EBUSY);
2651 	}
2652 	pa = TAILQ_FIRST(&pool->list);
2653 	while ((pa != NULL) && (nr < pp->nr)) {
2654 		pa = TAILQ_NEXT(pa, entries);
2655 		nr++;
2656 	}
2657 	if (pa == NULL) {
2658 		PF_RULES_RUNLOCK();
2659 		return (EBUSY);
2660 	}
2661 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2662 	pf_addr_copyout(&pp->addr.addr);
2663 	PF_RULES_RUNLOCK();
2664 
2665 	return (0);
2666 }
2667 
2668 int
2669 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2670 {
2671 	struct pf_kruleset	*ruleset;
2672 	struct pf_kanchor	*anchor;
2673 
2674 	PF_RULES_RLOCK_TRACKER;
2675 
2676 	pr->path[sizeof(pr->path) - 1] = 0;
2677 
2678 	PF_RULES_RLOCK();
2679 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2680 		PF_RULES_RUNLOCK();
2681 		return (ENOENT);
2682 	}
2683 	pr->nr = 0;
2684 	if (ruleset->anchor == NULL) {
2685 		/* XXX kludge for pf_main_ruleset */
2686 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2687 			if (anchor->parent == NULL)
2688 				pr->nr++;
2689 	} else {
2690 		RB_FOREACH(anchor, pf_kanchor_node,
2691 		    &ruleset->anchor->children)
2692 			pr->nr++;
2693 	}
2694 	PF_RULES_RUNLOCK();
2695 
2696 	return (0);
2697 }
2698 
2699 int
2700 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2701 {
2702 	struct pf_kruleset	*ruleset;
2703 	struct pf_kanchor	*anchor;
2704 	u_int32_t		 nr = 0;
2705 	int			 error = 0;
2706 
2707 	PF_RULES_RLOCK_TRACKER;
2708 
2709 	PF_RULES_RLOCK();
2710 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2711 		PF_RULES_RUNLOCK();
2712 		return (ENOENT);
2713 	}
2714 
2715 	pr->name[0] = 0;
2716 	if (ruleset->anchor == NULL) {
2717 		/* XXX kludge for pf_main_ruleset */
2718 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2719 			if (anchor->parent == NULL && nr++ == pr->nr) {
2720 				strlcpy(pr->name, anchor->name,
2721 				    sizeof(pr->name));
2722 				break;
2723 			}
2724 	} else {
2725 		RB_FOREACH(anchor, pf_kanchor_node,
2726 		    &ruleset->anchor->children)
2727 			if (nr++ == pr->nr) {
2728 				strlcpy(pr->name, anchor->name,
2729 				    sizeof(pr->name));
2730 				break;
2731 			}
2732 	}
2733 	if (!pr->name[0])
2734 		error = EBUSY;
2735 	PF_RULES_RUNLOCK();
2736 
2737 	return (error);
2738 }
2739 
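/*
 * pfioctl() is the ioctl entry point for the /dev/pf character device.
 * At securelevel > 2 only a restricted, mostly read-only subset of
 * commands is admitted, and descriptors opened without FWRITE are limited
 * to the read-only ones; table commands carrying PFR_FLAG_DUMMY are the
 * exception in both checks.  A minimal userland caller (illustrative
 * sketch, not part of this file) looks roughly like:
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *	struct pfioc_rule pr;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	if (ioctl(dev, DIOCGETRULES, &pr) == -1)
 *		err(1, "DIOCGETRULES");
 *	printf("%u rules, ticket %u\n", pr.nr, pr.ticket);
 */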
2740 static int
2741 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2742 {
2743 	int			 error = 0;
2744 	PF_RULES_RLOCK_TRACKER;
2745 
2746 #define	ERROUT_IOCTL(target, x)					\
2747     do {								\
2748 	    error = (x);						\
2749 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2750 	    goto target;						\
2751     } while (0)
2752 
2753 
2754 	/* XXX keep in sync with switch() below */
2755 	if (securelevel_gt(td->td_ucred, 2))
2756 		switch (cmd) {
2757 		case DIOCGETRULES:
2758 		case DIOCGETRULENV:
2759 		case DIOCGETADDRS:
2760 		case DIOCGETADDR:
2761 		case DIOCGETSTATE:
2762 		case DIOCGETSTATENV:
2763 		case DIOCSETSTATUSIF:
2764 		case DIOCGETSTATUSNV:
2765 		case DIOCCLRSTATUS:
2766 		case DIOCNATLOOK:
2767 		case DIOCSETDEBUG:
2768 #ifdef COMPAT_FREEBSD14
2769 		case DIOCGETSTATES:
2770 		case DIOCGETSTATESV2:
2771 #endif
2772 		case DIOCGETTIMEOUT:
2773 		case DIOCCLRRULECTRS:
2774 		case DIOCGETLIMIT:
2775 		case DIOCGETALTQSV0:
2776 		case DIOCGETALTQSV1:
2777 		case DIOCGETALTQV0:
2778 		case DIOCGETALTQV1:
2779 		case DIOCGETQSTATSV0:
2780 		case DIOCGETQSTATSV1:
2781 		case DIOCGETRULESETS:
2782 		case DIOCGETRULESET:
2783 		case DIOCRGETTABLES:
2784 		case DIOCRGETTSTATS:
2785 		case DIOCRCLRTSTATS:
2786 		case DIOCRCLRADDRS:
2787 		case DIOCRADDADDRS:
2788 		case DIOCRDELADDRS:
2789 		case DIOCRSETADDRS:
2790 		case DIOCRGETADDRS:
2791 		case DIOCRGETASTATS:
2792 		case DIOCRCLRASTATS:
2793 		case DIOCRTSTADDRS:
2794 		case DIOCOSFPGET:
2795 		case DIOCGETSRCNODES:
2796 		case DIOCCLRSRCNODES:
2797 		case DIOCGETSYNCOOKIES:
2798 		case DIOCIGETIFACES:
2799 		case DIOCGIFSPEEDV0:
2800 		case DIOCGIFSPEEDV1:
2801 		case DIOCSETIFFLAG:
2802 		case DIOCCLRIFFLAG:
2803 		case DIOCGETETHRULES:
2804 		case DIOCGETETHRULE:
2805 		case DIOCGETETHRULESETS:
2806 		case DIOCGETETHRULESET:
2807 			break;
2808 		case DIOCRCLRTABLES:
2809 		case DIOCRADDTABLES:
2810 		case DIOCRDELTABLES:
2811 		case DIOCRSETTFLAGS:
2812 			if (((struct pfioc_table *)addr)->pfrio_flags &
2813 			    PFR_FLAG_DUMMY)
2814 				break; /* dummy operation ok */
2815 			return (EPERM);
2816 		default:
2817 			return (EPERM);
2818 		}
2819 
2820 	if (!(flags & FWRITE))
2821 		switch (cmd) {
2822 		case DIOCGETRULES:
2823 		case DIOCGETADDRS:
2824 		case DIOCGETADDR:
2825 		case DIOCGETSTATE:
2826 		case DIOCGETSTATENV:
2827 		case DIOCGETSTATUSNV:
2828 #ifdef COMPAT_FREEBSD14
2829 		case DIOCGETSTATES:
2830 		case DIOCGETSTATESV2:
2831 #endif
2832 		case DIOCGETTIMEOUT:
2833 		case DIOCGETLIMIT:
2834 		case DIOCGETALTQSV0:
2835 		case DIOCGETALTQSV1:
2836 		case DIOCGETALTQV0:
2837 		case DIOCGETALTQV1:
2838 		case DIOCGETQSTATSV0:
2839 		case DIOCGETQSTATSV1:
2840 		case DIOCGETRULESETS:
2841 		case DIOCGETRULESET:
2842 		case DIOCNATLOOK:
2843 		case DIOCRGETTABLES:
2844 		case DIOCRGETTSTATS:
2845 		case DIOCRGETADDRS:
2846 		case DIOCRGETASTATS:
2847 		case DIOCRTSTADDRS:
2848 		case DIOCOSFPGET:
2849 		case DIOCGETSRCNODES:
2850 		case DIOCGETSYNCOOKIES:
2851 		case DIOCIGETIFACES:
2852 		case DIOCGIFSPEEDV1:
2853 		case DIOCGIFSPEEDV0:
2854 		case DIOCGETRULENV:
2855 		case DIOCGETETHRULES:
2856 		case DIOCGETETHRULE:
2857 		case DIOCGETETHRULESETS:
2858 		case DIOCGETETHRULESET:
2859 			break;
2860 		case DIOCRCLRTABLES:
2861 		case DIOCRADDTABLES:
2862 		case DIOCRDELTABLES:
2863 		case DIOCRCLRTSTATS:
2864 		case DIOCRCLRADDRS:
2865 		case DIOCRADDADDRS:
2866 		case DIOCRDELADDRS:
2867 		case DIOCRSETADDRS:
2868 		case DIOCRSETTFLAGS:
2869 			if (((struct pfioc_table *)addr)->pfrio_flags &
2870 			    PFR_FLAG_DUMMY) {
2871 				flags |= FWRITE; /* need write lock for dummy */
2872 				break; /* dummy operation ok */
2873 			}
2874 			return (EACCES);
2875 		default:
2876 			return (EACCES);
2877 		}
2878 
2879 	CURVNET_SET(TD_TO_VNET(td));
2880 
2881 	switch (cmd) {
2882 #ifdef COMPAT_FREEBSD14
2883 	case DIOCSTART:
2884 		error = pf_start();
2885 		break;
2886 
2887 	case DIOCSTOP:
2888 		error = pf_stop();
2889 		break;
2890 #endif
2891 
2892 	case DIOCGETETHRULES: {
2893 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2894 		nvlist_t		*nvl;
2895 		void			*packed;
2896 		struct pf_keth_rule	*tail;
2897 		struct pf_keth_ruleset	*rs;
2898 		u_int32_t		 ticket, nr;
2899 		const char		*anchor = "";
2900 
2901 		nvl = NULL;
2902 		packed = NULL;
2903 
2904 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2905 
2906 		if (nv->len > pf_ioctl_maxcount)
2907 			ERROUT(ENOMEM);
2908 
2909 		/* Copy the request in */
2910 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2911 		error = copyin(nv->data, packed, nv->len);
2912 		if (error)
2913 			ERROUT(error);
2914 
2915 		nvl = nvlist_unpack(packed, nv->len, 0);
2916 		if (nvl == NULL)
2917 			ERROUT(EBADMSG);
2918 
2919 		if (! nvlist_exists_string(nvl, "anchor"))
2920 			ERROUT(EBADMSG);
2921 
2922 		anchor = nvlist_get_string(nvl, "anchor");
2923 
2924 		rs = pf_find_keth_ruleset(anchor);
2925 
2926 		nvlist_destroy(nvl);
2927 		nvl = NULL;
2928 		free(packed, M_NVLIST);
2929 		packed = NULL;
2930 
2931 		if (rs == NULL)
2932 			ERROUT(ENOENT);
2933 
2934 		/* Reply */
2935 		nvl = nvlist_create(0);
2936 		if (nvl == NULL)
2937 			ERROUT(ENOMEM);
2938 
2939 		PF_RULES_RLOCK();
2940 
2941 		ticket = rs->active.ticket;
2942 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2943 		if (tail)
2944 			nr = tail->nr + 1;
2945 		else
2946 			nr = 0;
2947 
2948 		PF_RULES_RUNLOCK();
2949 
2950 		nvlist_add_number(nvl, "ticket", ticket);
2951 		nvlist_add_number(nvl, "nr", nr);
2952 
2953 		packed = nvlist_pack(nvl, &nv->len);
2954 		if (packed == NULL)
2955 			ERROUT(ENOMEM);
2956 
2957 		if (nv->size == 0)
2958 			ERROUT(0);
2959 		else if (nv->size < nv->len)
2960 			ERROUT(ENOSPC);
2961 
2962 		error = copyout(packed, nv->data, nv->len);
2963 
2964 #undef ERROUT
2965 DIOCGETETHRULES_error:
2966 		free(packed, M_NVLIST);
2967 		nvlist_destroy(nvl);
2968 		break;
2969 	}
2970 
2971 	case DIOCGETETHRULE: {
2972 		struct epoch_tracker	 et;
2973 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2974 		nvlist_t		*nvl = NULL;
2975 		void			*nvlpacked = NULL;
2976 		struct pf_keth_rule	*rule = NULL;
2977 		struct pf_keth_ruleset	*rs;
2978 		u_int32_t		 ticket, nr;
2979 		bool			 clear = false;
2980 		const char		*anchor;
2981 
2982 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2983 
2984 		if (nv->len > pf_ioctl_maxcount)
2985 			ERROUT(ENOMEM);
2986 
2987 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2988 		error = copyin(nv->data, nvlpacked, nv->len);
2989 		if (error)
2990 			ERROUT(error);
2991 
2992 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2993 		if (nvl == NULL)
2994 			ERROUT(EBADMSG);
2995 		if (! nvlist_exists_number(nvl, "ticket"))
2996 			ERROUT(EBADMSG);
2997 		ticket = nvlist_get_number(nvl, "ticket");
2998 		if (! nvlist_exists_string(nvl, "anchor"))
2999 			ERROUT(EBADMSG);
3000 		anchor = nvlist_get_string(nvl, "anchor");
3001 
3002 		if (nvlist_exists_bool(nvl, "clear"))
3003 			clear = nvlist_get_bool(nvl, "clear");
3004 
3005 		if (clear && !(flags & FWRITE))
3006 			ERROUT(EACCES);
3007 
3008 		if (! nvlist_exists_number(nvl, "nr"))
3009 			ERROUT(EBADMSG);
3010 		nr = nvlist_get_number(nvl, "nr");
3011 
3012 		PF_RULES_RLOCK();
3013 		rs = pf_find_keth_ruleset(anchor);
3014 		if (rs == NULL) {
3015 			PF_RULES_RUNLOCK();
3016 			ERROUT(ENOENT);
3017 		}
3018 		if (ticket != rs->active.ticket) {
3019 			PF_RULES_RUNLOCK();
3020 			ERROUT(EBUSY);
3021 		}
3022 
3023 		nvlist_destroy(nvl);
3024 		nvl = NULL;
3025 		free(nvlpacked, M_NVLIST);
3026 		nvlpacked = NULL;
3027 
3028 		rule = TAILQ_FIRST(rs->active.rules);
3029 		while ((rule != NULL) && (rule->nr != nr))
3030 			rule = TAILQ_NEXT(rule, entries);
3031 		if (rule == NULL) {
3032 			PF_RULES_RUNLOCK();
3033 			ERROUT(ENOENT);
3034 		}
3035 		/* Make sure rule can't go away. */
3036 		NET_EPOCH_ENTER(et);
3037 		PF_RULES_RUNLOCK();
3038 		nvl = pf_keth_rule_to_nveth_rule(rule);
3039 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3040 			NET_EPOCH_EXIT(et);
3041 			ERROUT(EBUSY);
3042 		}
3043 		NET_EPOCH_EXIT(et);
3044 		if (nvl == NULL)
3045 			ERROUT(ENOMEM);
3046 
3047 		nvlpacked = nvlist_pack(nvl, &nv->len);
3048 		if (nvlpacked == NULL)
3049 			ERROUT(ENOMEM);
3050 
3051 		if (nv->size == 0)
3052 			ERROUT(0);
3053 		else if (nv->size < nv->len)
3054 			ERROUT(ENOSPC);
3055 
3056 		error = copyout(nvlpacked, nv->data, nv->len);
3057 		if (error == 0 && clear) {
3058 			counter_u64_zero(rule->evaluations);
3059 			for (int i = 0; i < 2; i++) {
3060 				counter_u64_zero(rule->packets[i]);
3061 				counter_u64_zero(rule->bytes[i]);
3062 			}
3063 		}
3064 
3065 #undef ERROUT
3066 DIOCGETETHRULE_error:
3067 		free(nvlpacked, M_NVLIST);
3068 		nvlist_destroy(nvl);
3069 		break;
3070 	}
3071 
3072 	case DIOCADDETHRULE: {
3073 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3074 		nvlist_t		*nvl = NULL;
3075 		void			*nvlpacked = NULL;
3076 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3077 		struct pf_keth_ruleset	*ruleset = NULL;
3078 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3079 		const char		*anchor = "", *anchor_call = "";
3080 
3081 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3082 
3083 		if (nv->len > pf_ioctl_maxcount)
3084 			ERROUT(ENOMEM);
3085 
3086 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3087 		error = copyin(nv->data, nvlpacked, nv->len);
3088 		if (error)
3089 			ERROUT(error);
3090 
3091 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3092 		if (nvl == NULL)
3093 			ERROUT(EBADMSG);
3094 
3095 		if (! nvlist_exists_number(nvl, "ticket"))
3096 			ERROUT(EBADMSG);
3097 
3098 		if (nvlist_exists_string(nvl, "anchor"))
3099 			anchor = nvlist_get_string(nvl, "anchor");
3100 		if (nvlist_exists_string(nvl, "anchor_call"))
3101 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3102 
3103 		ruleset = pf_find_keth_ruleset(anchor);
3104 		if (ruleset == NULL)
3105 			ERROUT(EINVAL);
3106 
3107 		if (nvlist_get_number(nvl, "ticket") !=
3108 		    ruleset->inactive.ticket) {
3109 			DPFPRINTF(PF_DEBUG_MISC,
3110 			    ("ticket: %d != %d\n",
3111 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3112 			    ruleset->inactive.ticket));
3113 			ERROUT(EBUSY);
3114 		}
3115 
3116 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3117 		rule->timestamp = NULL;
3118 
3119 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3120 		if (error != 0)
3121 			ERROUT(error);
3122 
3123 		if (rule->ifname[0])
3124 			kif = pf_kkif_create(M_WAITOK);
3125 		if (rule->bridge_to_name[0])
3126 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3127 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3128 		for (int i = 0; i < 2; i++) {
3129 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3130 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3131 		}
3132 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3133 		    M_WAITOK | M_ZERO);
3134 
3135 		PF_RULES_WLOCK();
3136 
3137 		if (rule->ifname[0]) {
3138 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3139 			pfi_kkif_ref(rule->kif);
3140 		} else
3141 			rule->kif = NULL;
3142 		if (rule->bridge_to_name[0]) {
3143 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3144 			    rule->bridge_to_name);
3145 			pfi_kkif_ref(rule->bridge_to);
3146 		} else
3147 			rule->bridge_to = NULL;
3148 
3149 #ifdef ALTQ
3150 		/* set queue IDs */
3151 		if (rule->qname[0] != 0) {
3152 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3153 				error = EBUSY;
3154 			else
3155 				rule->qid = rule->qid;
3156 		}
3157 #endif
3158 		if (rule->tagname[0])
3159 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3160 				error = EBUSY;
3161 		if (rule->match_tagname[0])
3162 			if ((rule->match_tag = pf_tagname2tag(
3163 			    rule->match_tagname)) == 0)
3164 				error = EBUSY;
3165 
3166 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3167 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3168 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3169 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3170 
3171 		if (error) {
3172 			pf_free_eth_rule(rule);
3173 			PF_RULES_WUNLOCK();
3174 			ERROUT(error);
3175 		}
3176 
3177 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3178 			pf_free_eth_rule(rule);
3179 			PF_RULES_WUNLOCK();
3180 			ERROUT(EINVAL);
3181 		}
3182 
3183 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3184 		if (tail)
3185 			rule->nr = tail->nr + 1;
3186 		else
3187 			rule->nr = 0;
3188 
3189 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3190 
3191 		PF_RULES_WUNLOCK();
3192 
3193 #undef ERROUT
3194 DIOCADDETHRULE_error:
3195 		nvlist_destroy(nvl);
3196 		free(nvlpacked, M_NVLIST);
3197 		break;
3198 	}
3199 
3200 	case DIOCGETETHRULESETS: {
3201 		struct epoch_tracker	 et;
3202 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3203 		nvlist_t		*nvl = NULL;
3204 		void			*nvlpacked = NULL;
3205 		struct pf_keth_ruleset	*ruleset;
3206 		struct pf_keth_anchor	*anchor;
3207 		int			 nr = 0;
3208 
3209 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3210 
3211 		if (nv->len > pf_ioctl_maxcount)
3212 			ERROUT(ENOMEM);
3213 
3214 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3215 		error = copyin(nv->data, nvlpacked, nv->len);
3216 		if (error)
3217 			ERROUT(error);
3218 
3219 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3220 		if (nvl == NULL)
3221 			ERROUT(EBADMSG);
3222 		if (! nvlist_exists_string(nvl, "path"))
3223 			ERROUT(EBADMSG);
3224 
3225 		NET_EPOCH_ENTER(et);
3226 
3227 		if ((ruleset = pf_find_keth_ruleset(
3228 		    nvlist_get_string(nvl, "path"))) == NULL) {
3229 			NET_EPOCH_EXIT(et);
3230 			ERROUT(ENOENT);
3231 		}
3232 
3233 		if (ruleset->anchor == NULL) {
3234 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3235 				if (anchor->parent == NULL)
3236 					nr++;
3237 		} else {
3238 			RB_FOREACH(anchor, pf_keth_anchor_node,
3239 			    &ruleset->anchor->children)
3240 				nr++;
3241 		}
3242 
3243 		NET_EPOCH_EXIT(et);
3244 
3245 		nvlist_destroy(nvl);
3246 		nvl = NULL;
3247 		free(nvlpacked, M_NVLIST);
3248 		nvlpacked = NULL;
3249 
3250 		nvl = nvlist_create(0);
3251 		if (nvl == NULL)
3252 			ERROUT(ENOMEM);
3253 
3254 		nvlist_add_number(nvl, "nr", nr);
3255 
3256 		nvlpacked = nvlist_pack(nvl, &nv->len);
3257 		if (nvlpacked == NULL)
3258 			ERROUT(ENOMEM);
3259 
3260 		if (nv->size == 0)
3261 			ERROUT(0);
3262 		else if (nv->size < nv->len)
3263 			ERROUT(ENOSPC);
3264 
3265 		error = copyout(nvlpacked, nv->data, nv->len);
3266 
3267 #undef ERROUT
3268 DIOCGETETHRULESETS_error:
3269 		free(nvlpacked, M_NVLIST);
3270 		nvlist_destroy(nvl);
3271 		break;
3272 	}
3273 
3274 	case DIOCGETETHRULESET: {
3275 		struct epoch_tracker	 et;
3276 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3277 		nvlist_t		*nvl = NULL;
3278 		void			*nvlpacked = NULL;
3279 		struct pf_keth_ruleset	*ruleset;
3280 		struct pf_keth_anchor	*anchor;
3281 		int			 nr = 0, req_nr = 0;
3282 		bool			 found = false;
3283 
3284 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3285 
3286 		if (nv->len > pf_ioctl_maxcount)
3287 			ERROUT(ENOMEM);
3288 
3289 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3290 		error = copyin(nv->data, nvlpacked, nv->len);
3291 		if (error)
3292 			ERROUT(error);
3293 
3294 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3295 		if (nvl == NULL)
3296 			ERROUT(EBADMSG);
3297 		if (! nvlist_exists_string(nvl, "path"))
3298 			ERROUT(EBADMSG);
3299 		if (! nvlist_exists_number(nvl, "nr"))
3300 			ERROUT(EBADMSG);
3301 
3302 		req_nr = nvlist_get_number(nvl, "nr");
3303 
3304 		NET_EPOCH_ENTER(et);
3305 
3306 		if ((ruleset = pf_find_keth_ruleset(
3307 		    nvlist_get_string(nvl, "path"))) == NULL) {
3308 			NET_EPOCH_EXIT(et);
3309 			ERROUT(ENOENT);
3310 		}
3311 
3312 		nvlist_destroy(nvl);
3313 		nvl = NULL;
3314 		free(nvlpacked, M_NVLIST);
3315 		nvlpacked = NULL;
3316 
3317 		nvl = nvlist_create(0);
3318 		if (nvl == NULL) {
3319 			NET_EPOCH_EXIT(et);
3320 			ERROUT(ENOMEM);
3321 		}
3322 
3323 		if (ruleset->anchor == NULL) {
3324 			RB_FOREACH(anchor, pf_keth_anchor_global,
3325 			    &V_pf_keth_anchors) {
3326 				if (anchor->parent == NULL && nr++ == req_nr) {
3327 					found = true;
3328 					break;
3329 				}
3330 			}
3331 		} else {
3332 			RB_FOREACH(anchor, pf_keth_anchor_node,
3333 			     &ruleset->anchor->children) {
3334 				if (nr++ == req_nr) {
3335 					found = true;
3336 					break;
3337 				}
3338 			}
3339 		}
3340 
3341 		NET_EPOCH_EXIT(et);
3342 		if (found) {
3343 			nvlist_add_number(nvl, "nr", nr);
3344 			nvlist_add_string(nvl, "name", anchor->name);
3345 			if (ruleset->anchor)
3346 				nvlist_add_string(nvl, "path",
3347 				    ruleset->anchor->path);
3348 			else
3349 				nvlist_add_string(nvl, "path", "");
3350 		} else {
3351 			ERROUT(EBUSY);
3352 		}
3353 
3354 		nvlpacked = nvlist_pack(nvl, &nv->len);
3355 		if (nvlpacked == NULL)
3356 			ERROUT(ENOMEM);
3357 
3358 		if (nv->size == 0)
3359 			ERROUT(0);
3360 		else if (nv->size < nv->len)
3361 			ERROUT(ENOSPC);
3362 
3363 		error = copyout(nvlpacked, nv->data, nv->len);
3364 
3365 #undef ERROUT
3366 DIOCGETETHRULESET_error:
3367 		free(nvlpacked, M_NVLIST);
3368 		nvlist_destroy(nvl);
3369 		break;
3370 	}
3371 
3372 	case DIOCADDRULENV: {
3373 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3374 		nvlist_t	*nvl = NULL;
3375 		void		*nvlpacked = NULL;
3376 		struct pf_krule	*rule = NULL;
3377 		const char	*anchor = "", *anchor_call = "";
3378 		uint32_t	 ticket = 0, pool_ticket = 0;
3379 
3380 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3381 
3382 		if (nv->len > pf_ioctl_maxcount)
3383 			ERROUT(ENOMEM);
3384 
3385 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3386 		error = copyin(nv->data, nvlpacked, nv->len);
3387 		if (error)
3388 			ERROUT(error);
3389 
3390 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3391 		if (nvl == NULL)
3392 			ERROUT(EBADMSG);
3393 
3394 		if (! nvlist_exists_number(nvl, "ticket"))
3395 			ERROUT(EINVAL);
3396 		ticket = nvlist_get_number(nvl, "ticket");
3397 
3398 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3399 			ERROUT(EINVAL);
3400 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3401 
3402 		if (! nvlist_exists_nvlist(nvl, "rule"))
3403 			ERROUT(EINVAL);
3404 
3405 		rule = pf_krule_alloc();
3406 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3407 		    rule);
3408 		if (error)
3409 			ERROUT(error);
3410 
3411 		if (nvlist_exists_string(nvl, "anchor"))
3412 			anchor = nvlist_get_string(nvl, "anchor");
3413 		if (nvlist_exists_string(nvl, "anchor_call"))
3414 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3415 
3416 		if ((error = nvlist_error(nvl)))
3417 			ERROUT(error);
3418 
3419 		/* Frees rule on error */
3420 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3421 		    anchor_call, td->td_ucred->cr_ruid,
3422 		    td->td_proc ? td->td_proc->p_pid : 0);
3423 
3424 		nvlist_destroy(nvl);
3425 		free(nvlpacked, M_NVLIST);
3426 		break;
3427 #undef ERROUT
3428 DIOCADDRULENV_error:
3429 		pf_krule_free(rule);
3430 		nvlist_destroy(nvl);
3431 		free(nvlpacked, M_NVLIST);
3432 
3433 		break;
3434 	}
3435 	case DIOCADDRULE: {
3436 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3437 		struct pf_krule		*rule;
3438 
3439 		rule = pf_krule_alloc();
3440 		error = pf_rule_to_krule(&pr->rule, rule);
3441 		if (error != 0) {
3442 			pf_krule_free(rule);
3443 			break;
3444 		}
3445 
3446 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3447 
3448 		/* Frees rule on error */
3449 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3450 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3451 		    td->td_proc ? td->td_proc->p_pid : 0);
3452 		break;
3453 	}
3454 
3455 	case DIOCGETRULES: {
3456 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3457 
3458 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3459 
3460 		error = pf_ioctl_getrules(pr);
3461 
3462 		break;
3463 	}
3464 
3465 	case DIOCGETRULENV: {
3466 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3467 		nvlist_t		*nvrule = NULL;
3468 		nvlist_t		*nvl = NULL;
3469 		struct pf_kruleset	*ruleset;
3470 		struct pf_krule		*rule;
3471 		void			*nvlpacked = NULL;
3472 		int			 rs_num, nr;
3473 		bool			 clear_counter = false;
3474 
3475 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3476 
3477 		if (nv->len > pf_ioctl_maxcount)
3478 			ERROUT(ENOMEM);
3479 
3480 		/* Copy the request in */
3481 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3482 		error = copyin(nv->data, nvlpacked, nv->len);
3483 		if (error)
3484 			ERROUT(error);
3485 
3486 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3487 		if (nvl == NULL)
3488 			ERROUT(EBADMSG);
3489 
3490 		if (! nvlist_exists_string(nvl, "anchor"))
3491 			ERROUT(EBADMSG);
3492 		if (! nvlist_exists_number(nvl, "ruleset"))
3493 			ERROUT(EBADMSG);
3494 		if (! nvlist_exists_number(nvl, "ticket"))
3495 			ERROUT(EBADMSG);
3496 		if (! nvlist_exists_number(nvl, "nr"))
3497 			ERROUT(EBADMSG);
3498 
3499 		if (nvlist_exists_bool(nvl, "clear_counter"))
3500 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3501 
3502 		if (clear_counter && !(flags & FWRITE))
3503 			ERROUT(EACCES);
3504 
3505 		nr = nvlist_get_number(nvl, "nr");
3506 
3507 		PF_RULES_WLOCK();
3508 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3509 		if (ruleset == NULL) {
3510 			PF_RULES_WUNLOCK();
3511 			ERROUT(ENOENT);
3512 		}
3513 
3514 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3515 		if (rs_num >= PF_RULESET_MAX) {
3516 			PF_RULES_WUNLOCK();
3517 			ERROUT(EINVAL);
3518 		}
3519 
3520 		if (nvlist_get_number(nvl, "ticket") !=
3521 		    ruleset->rules[rs_num].active.ticket) {
3522 			PF_RULES_WUNLOCK();
3523 			ERROUT(EBUSY);
3524 		}
3525 
3526 		if ((error = nvlist_error(nvl))) {
3527 			PF_RULES_WUNLOCK();
3528 			ERROUT(error);
3529 		}
3530 
3531 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3532 		while ((rule != NULL) && (rule->nr != nr))
3533 			rule = TAILQ_NEXT(rule, entries);
3534 		if (rule == NULL) {
3535 			PF_RULES_WUNLOCK();
3536 			ERROUT(EBUSY);
3537 		}
3538 
3539 		nvrule = pf_krule_to_nvrule(rule);
3540 
3541 		nvlist_destroy(nvl);
3542 		nvl = nvlist_create(0);
3543 		if (nvl == NULL) {
3544 			PF_RULES_WUNLOCK();
3545 			ERROUT(ENOMEM);
3546 		}
3547 		nvlist_add_number(nvl, "nr", nr);
3548 		nvlist_add_nvlist(nvl, "rule", nvrule);
3549 		nvlist_destroy(nvrule);
3550 		nvrule = NULL;
3551 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3552 			PF_RULES_WUNLOCK();
3553 			ERROUT(EBUSY);
3554 		}
3555 
3556 		free(nvlpacked, M_NVLIST);
3557 		nvlpacked = nvlist_pack(nvl, &nv->len);
3558 		if (nvlpacked == NULL) {
3559 			PF_RULES_WUNLOCK();
3560 			ERROUT(ENOMEM);
3561 		}
3562 
3563 		if (nv->size == 0) {
3564 			PF_RULES_WUNLOCK();
3565 			ERROUT(0);
3566 		}
3567 		else if (nv->size < nv->len) {
3568 			PF_RULES_WUNLOCK();
3569 			ERROUT(ENOSPC);
3570 		}
3571 
3572 		if (clear_counter)
3573 			pf_krule_clear_counters(rule);
3574 
3575 		PF_RULES_WUNLOCK();
3576 
3577 		error = copyout(nvlpacked, nv->data, nv->len);
3578 
3579 #undef ERROUT
3580 DIOCGETRULENV_error:
3581 		free(nvlpacked, M_NVLIST);
3582 		nvlist_destroy(nvrule);
3583 		nvlist_destroy(nvl);
3584 
3585 		break;
3586 	}
3587 
3588 	case DIOCCHANGERULE: {
3589 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3590 		struct pf_kruleset	*ruleset;
3591 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3592 		struct pfi_kkif		*kif = NULL;
3593 		struct pf_kpooladdr	*pa;
3594 		u_int32_t		 nr = 0;
3595 		int			 rs_num;
3596 
3597 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3598 
3599 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3600 		    pcr->action > PF_CHANGE_GET_TICKET) {
3601 			error = EINVAL;
3602 			break;
3603 		}
3604 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3605 			error = EINVAL;
3606 			break;
3607 		}
3608 
3609 		if (pcr->action != PF_CHANGE_REMOVE) {
3610 			newrule = pf_krule_alloc();
3611 			error = pf_rule_to_krule(&pcr->rule, newrule);
3612 			if (error != 0) {
3613 				pf_krule_free(newrule);
3614 				break;
3615 			}
3616 
3617 			if (newrule->ifname[0])
3618 				kif = pf_kkif_create(M_WAITOK);
3619 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3620 			for (int i = 0; i < 2; i++) {
3621 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3622 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3623 			}
3624 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3625 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3626 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3627 			newrule->cuid = td->td_ucred->cr_ruid;
3628 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3629 			TAILQ_INIT(&newrule->rpool.list);
3630 		}
3631 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3632 
3633 		PF_CONFIG_LOCK();
3634 		PF_RULES_WLOCK();
3635 #ifdef PF_WANT_32_TO_64_COUNTER
3636 		if (newrule != NULL) {
3637 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3638 			newrule->allrulelinked = true;
3639 			V_pf_allrulecount++;
3640 		}
3641 #endif
3642 
3643 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3644 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3645 		    pcr->pool_ticket != V_ticket_pabuf)
3646 			ERROUT(EBUSY);
3647 
3648 		ruleset = pf_find_kruleset(pcr->anchor);
3649 		if (ruleset == NULL)
3650 			ERROUT(EINVAL);
3651 
3652 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3653 		if (rs_num >= PF_RULESET_MAX)
3654 			ERROUT(EINVAL);
3655 
3656 		/*
3657 		 * XXXMJG: there is no guarantee that the ruleset was
3658 		 * created by the usual route of calling DIOCXBEGIN.
3659 		 * As a result it is possible the rule tree will not
3660 		 * be allocated yet. Hack around it by doing it here.
3661 		 * Note it is fine to let the tree persist in case of
3662 		 * error as it will be freed down the road on future
3663 		 * updates (if need be).
3664 		 */
3665 		if (ruleset->rules[rs_num].active.tree == NULL) {
3666 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3667 			if (ruleset->rules[rs_num].active.tree == NULL) {
3668 				ERROUT(ENOMEM);
3669 			}
3670 		}
3671 
3672 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3673 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3674 			ERROUT(0);
3675 		} else if (pcr->ticket !=
3676 			    ruleset->rules[rs_num].active.ticket)
3677 				ERROUT(EINVAL);
3678 
3679 		if (pcr->action != PF_CHANGE_REMOVE) {
3680 			if (newrule->ifname[0]) {
3681 				newrule->kif = pfi_kkif_attach(kif,
3682 				    newrule->ifname);
3683 				kif = NULL;
3684 				pfi_kkif_ref(newrule->kif);
3685 			} else
3686 				newrule->kif = NULL;
3687 
3688 			if (newrule->rtableid > 0 &&
3689 			    newrule->rtableid >= rt_numfibs)
3690 				error = EBUSY;
3691 
3692 #ifdef ALTQ
3693 			/* set queue IDs */
3694 			if (newrule->qname[0] != 0) {
3695 				if ((newrule->qid =
3696 				    pf_qname2qid(newrule->qname)) == 0)
3697 					error = EBUSY;
3698 				else if (newrule->pqname[0] != 0) {
3699 					if ((newrule->pqid =
3700 					    pf_qname2qid(newrule->pqname)) == 0)
3701 						error = EBUSY;
3702 				} else
3703 					newrule->pqid = newrule->qid;
3704 			}
3705 #endif /* ALTQ */
3706 			if (newrule->tagname[0])
3707 				if ((newrule->tag =
3708 				    pf_tagname2tag(newrule->tagname)) == 0)
3709 					error = EBUSY;
3710 			if (newrule->match_tagname[0])
3711 				if ((newrule->match_tag = pf_tagname2tag(
3712 				    newrule->match_tagname)) == 0)
3713 					error = EBUSY;
3714 			if (newrule->rt && !newrule->direction)
3715 				error = EINVAL;
3716 			if (!newrule->log)
3717 				newrule->logif = 0;
3718 			if (newrule->logif >= PFLOGIFS_MAX)
3719 				error = EINVAL;
3720 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3721 				error = ENOMEM;
3722 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3723 				error = ENOMEM;
3724 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3725 				error = EINVAL;
3726 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3727 				if (pa->addr.type == PF_ADDR_TABLE) {
3728 					pa->addr.p.tbl =
3729 					    pfr_attach_table(ruleset,
3730 					    pa->addr.v.tblname);
3731 					if (pa->addr.p.tbl == NULL)
3732 						error = ENOMEM;
3733 				}
3734 
3735 			newrule->overload_tbl = NULL;
3736 			if (newrule->overload_tblname[0]) {
3737 				if ((newrule->overload_tbl = pfr_attach_table(
3738 				    ruleset, newrule->overload_tblname)) ==
3739 				    NULL)
3740 					error = EINVAL;
3741 				else
3742 					newrule->overload_tbl->pfrkt_flags |=
3743 					    PFR_TFLAG_ACTIVE;
3744 			}
3745 
3746 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3747 			if (((((newrule->action == PF_NAT) ||
3748 			    (newrule->action == PF_RDR) ||
3749 			    (newrule->action == PF_BINAT) ||
3750 			    (newrule->rt > PF_NOPFROUTE)) &&
3751 			    !newrule->anchor)) &&
3752 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3753 				error = EINVAL;
3754 
3755 			if (error) {
3756 				pf_free_rule(newrule);
3757 				PF_RULES_WUNLOCK();
3758 				PF_CONFIG_UNLOCK();
3759 				break;
3760 			}
3761 
3762 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3763 		}
3764 		pf_empty_kpool(&V_pf_pabuf);
3765 
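		/* Locate the existing rule that the requested change is relative to. */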
3766 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3767 			oldrule = TAILQ_FIRST(
3768 			    ruleset->rules[rs_num].active.ptr);
3769 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3770 			oldrule = TAILQ_LAST(
3771 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3772 		else {
3773 			oldrule = TAILQ_FIRST(
3774 			    ruleset->rules[rs_num].active.ptr);
3775 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3776 				oldrule = TAILQ_NEXT(oldrule, entries);
3777 			if (oldrule == NULL) {
3778 				if (newrule != NULL)
3779 					pf_free_rule(newrule);
3780 				PF_RULES_WUNLOCK();
3781 				PF_CONFIG_UNLOCK();
3782 				error = EINVAL;
3783 				break;
3784 			}
3785 		}
3786 
3787 		if (pcr->action == PF_CHANGE_REMOVE) {
3788 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3789 			    oldrule);
3790 			RB_REMOVE(pf_krule_global,
3791 			    ruleset->rules[rs_num].active.tree, oldrule);
3792 			ruleset->rules[rs_num].active.rcount--;
3793 		} else {
3794 			pf_hash_rule(newrule);
3795 			if (RB_INSERT(pf_krule_global,
3796 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3797 				pf_free_rule(newrule);
3798 				PF_RULES_WUNLOCK();
3799 				PF_CONFIG_UNLOCK();
3800 				error = EEXIST;
3801 				break;
3802 			}
3803 
3804 			if (oldrule == NULL)
3805 				TAILQ_INSERT_TAIL(
3806 				    ruleset->rules[rs_num].active.ptr,
3807 				    newrule, entries);
3808 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3809 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3810 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3811 			else
3812 				TAILQ_INSERT_AFTER(
3813 				    ruleset->rules[rs_num].active.ptr,
3814 				    oldrule, newrule, entries);
3815 			ruleset->rules[rs_num].active.rcount++;
3816 		}
3817 
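		/* Renumber the active rules, bump the ruleset ticket and recompute skip steps. */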
3818 		nr = 0;
3819 		TAILQ_FOREACH(oldrule,
3820 		    ruleset->rules[rs_num].active.ptr, entries)
3821 			oldrule->nr = nr++;
3822 
3823 		ruleset->rules[rs_num].active.ticket++;
3824 
3825 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3826 		pf_remove_if_empty_kruleset(ruleset);
3827 
3828 		PF_RULES_WUNLOCK();
3829 		PF_CONFIG_UNLOCK();
3830 		break;
3831 
3832 #undef ERROUT
3833 DIOCCHANGERULE_error:
3834 		PF_RULES_WUNLOCK();
3835 		PF_CONFIG_UNLOCK();
3836 		pf_krule_free(newrule);
3837 		pf_kkif_free(kif);
3838 		break;
3839 	}
3840 
3841 	case DIOCCLRSTATESNV: {
3842 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3843 		break;
3844 	}
3845 
3846 	case DIOCKILLSTATESNV: {
3847 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3848 		break;
3849 	}
3850 
3851 	case DIOCADDSTATE: {
3852 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3853 		struct pfsync_state_1301	*sp = &ps->state;
3854 
3855 		if (sp->timeout >= PFTM_MAX) {
3856 			error = EINVAL;
3857 			break;
3858 		}
3859 		if (V_pfsync_state_import_ptr != NULL) {
3860 			PF_RULES_RLOCK();
3861 			error = V_pfsync_state_import_ptr(
3862 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3863 			    PFSYNC_MSG_VERSION_1301);
3864 			PF_RULES_RUNLOCK();
3865 		} else
3866 			error = EOPNOTSUPP;
3867 		break;
3868 	}
3869 
3870 	case DIOCGETSTATE: {
3871 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3872 		struct pf_kstate	*s;
3873 
3874 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3875 		if (s == NULL) {
3876 			error = ENOENT;
3877 			break;
3878 		}
3879 
3880 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3881 		    s, PFSYNC_MSG_VERSION_1301);
3882 		PF_STATE_UNLOCK(s);
3883 		break;
3884 	}
3885 
3886 	case DIOCGETSTATENV: {
3887 		error = pf_getstate((struct pfioc_nv *)addr);
3888 		break;
3889 	}
3890 
3891 #ifdef COMPAT_FREEBSD14
3892 	case DIOCGETSTATES: {
3893 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3894 		struct pf_kstate	*s;
3895 		struct pfsync_state_1301	*pstore, *p;
3896 		int			 i, nr;
3897 		size_t			 slice_count = 16, count;
3898 		void			*out;
3899 
3900 		if (ps->ps_len <= 0) {
3901 			nr = uma_zone_get_cur(V_pf_state_z);
3902 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3903 			break;
3904 		}
3905 
3906 		out = ps->ps_states;
3907 		pstore = mallocarray(slice_count,
3908 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3909 		nr = 0;
3910 
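		/*
		 * Walk each state hash row; if a row holds more states than
		 * the current slice, grow the temporary buffer and retry.
		 */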
3911 		for (i = 0; i <= V_pf_hashmask; i++) {
3912 			struct pf_idhash *ih = &V_pf_idhash[i];
3913 
3914 DIOCGETSTATES_retry:
3915 			p = pstore;
3916 
3917 			if (LIST_EMPTY(&ih->states))
3918 				continue;
3919 
3920 			PF_HASHROW_LOCK(ih);
3921 			count = 0;
3922 			LIST_FOREACH(s, &ih->states, entry) {
3923 				if (s->timeout == PFTM_UNLINKED)
3924 					continue;
3925 				count++;
3926 			}
3927 
3928 			if (count > slice_count) {
3929 				PF_HASHROW_UNLOCK(ih);
3930 				free(pstore, M_TEMP);
3931 				slice_count = count * 2;
3932 				pstore = mallocarray(slice_count,
3933 				    sizeof(struct pfsync_state_1301), M_TEMP,
3934 				    M_WAITOK | M_ZERO);
3935 				goto DIOCGETSTATES_retry;
3936 			}
3937 
3938 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3939 				PF_HASHROW_UNLOCK(ih);
3940 				goto DIOCGETSTATES_full;
3941 			}
3942 
3943 			LIST_FOREACH(s, &ih->states, entry) {
3944 				if (s->timeout == PFTM_UNLINKED)
3945 					continue;
3946 
3947 				pfsync_state_export((union pfsync_state_union*)p,
3948 				    s, PFSYNC_MSG_VERSION_1301);
3949 				p++;
3950 				nr++;
3951 			}
3952 			PF_HASHROW_UNLOCK(ih);
3953 			error = copyout(pstore, out,
3954 			    sizeof(struct pfsync_state_1301) * count);
3955 			if (error)
3956 				break;
3957 			out = ps->ps_states + nr;
3958 		}
3959 DIOCGETSTATES_full:
3960 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3961 		free(pstore, M_TEMP);
3962 
3963 		break;
3964 	}
3965 
3966 	case DIOCGETSTATESV2: {
3967 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3968 		struct pf_kstate	*s;
3969 		struct pf_state_export	*pstore, *p;
3970 		int i, nr;
3971 		size_t slice_count = 16, count;
3972 		void *out;
3973 
3974 		if (ps->ps_req_version > PF_STATE_VERSION) {
3975 			error = ENOTSUP;
3976 			break;
3977 		}
3978 
3979 		if (ps->ps_len <= 0) {
3980 			nr = uma_zone_get_cur(V_pf_state_z);
3981 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3982 			break;
3983 		}
3984 
3985 		out = ps->ps_states;
3986 		pstore = mallocarray(slice_count,
3987 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3988 		nr = 0;
3989 
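		/*
		 * Same per-row walk as DIOCGETSTATES, but exporting the
		 * versioned struct pf_state_export.
		 */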
3990 		for (i = 0; i <= V_pf_hashmask; i++) {
3991 			struct pf_idhash *ih = &V_pf_idhash[i];
3992 
3993 DIOCGETSTATESV2_retry:
3994 			p = pstore;
3995 
3996 			if (LIST_EMPTY(&ih->states))
3997 				continue;
3998 
3999 			PF_HASHROW_LOCK(ih);
4000 			count = 0;
4001 			LIST_FOREACH(s, &ih->states, entry) {
4002 				if (s->timeout == PFTM_UNLINKED)
4003 					continue;
4004 				count++;
4005 			}
4006 
4007 			if (count > slice_count) {
4008 				PF_HASHROW_UNLOCK(ih);
4009 				free(pstore, M_TEMP);
4010 				slice_count = count * 2;
4011 				pstore = mallocarray(slice_count,
4012 				    sizeof(struct pf_state_export), M_TEMP,
4013 				    M_WAITOK | M_ZERO);
4014 				goto DIOCGETSTATESV2_retry;
4015 			}
4016 
4017 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4018 				PF_HASHROW_UNLOCK(ih);
4019 				goto DIOCGETSTATESV2_full;
4020 			}
4021 
4022 			LIST_FOREACH(s, &ih->states, entry) {
4023 				if (s->timeout == PFTM_UNLINKED)
4024 					continue;
4025 
4026 				pf_state_export(p, s);
4027 				p++;
4028 				nr++;
4029 			}
4030 			PF_HASHROW_UNLOCK(ih);
4031 			error = copyout(pstore, out,
4032 			    sizeof(struct pf_state_export) * count);
4033 			if (error)
4034 				break;
4035 			out = ps->ps_states + nr;
4036 		}
4037 DIOCGETSTATESV2_full:
4038 		ps->ps_len = nr * sizeof(struct pf_state_export);
4039 		free(pstore, M_TEMP);
4040 
4041 		break;
4042 	}
4043 #endif
4044 	case DIOCGETSTATUSNV: {
4045 		error = pf_getstatus((struct pfioc_nv *)addr);
4046 		break;
4047 	}
4048 
4049 	case DIOCSETSTATUSIF: {
4050 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4051 
4052 		if (pi->ifname[0] == 0) {
4053 			bzero(V_pf_status.ifname, IFNAMSIZ);
4054 			break;
4055 		}
4056 		PF_RULES_WLOCK();
4057 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4058 		PF_RULES_WUNLOCK();
4059 		break;
4060 	}
4061 
4062 	case DIOCCLRSTATUS: {
4063 		pf_ioctl_clear_status();
4064 		break;
4065 	}
4066 
4067 	case DIOCNATLOOK: {
4068 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4069 		struct pf_state_key	*sk;
4070 		struct pf_kstate	*state;
4071 		struct pf_state_key_cmp	 key;
4072 		int			 m = 0, direction = pnl->direction;
4073 		int			 sidx, didx;
4074 
4075 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
4076 		sidx = (direction == PF_IN) ? 1 : 0;
4077 		didx = (direction == PF_IN) ? 0 : 1;
4078 
4079 		if (!pnl->proto ||
4080 		    PF_AZERO(&pnl->saddr, pnl->af) ||
4081 		    PF_AZERO(&pnl->daddr, pnl->af) ||
4082 		    ((pnl->proto == IPPROTO_TCP ||
4083 		    pnl->proto == IPPROTO_UDP) &&
4084 		    (!pnl->dport || !pnl->sport)))
4085 			error = EINVAL;
4086 		else {
4087 			bzero(&key, sizeof(key));
4088 			key.af = pnl->af;
4089 			key.proto = pnl->proto;
4090 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4091 			key.port[sidx] = pnl->sport;
4092 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4093 			key.port[didx] = pnl->dport;
4094 
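			/*
			 * The lookup must match exactly one state; its key
			 * supplies the translated addresses and ports.
			 */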
4095 			state = pf_find_state_all(&key, direction, &m);
4096 			if (state == NULL) {
4097 				error = ENOENT;
4098 			} else {
4099 				if (m > 1) {
4100 					PF_STATE_UNLOCK(state);
4101 					error = E2BIG;	/* more than one state */
4102 				} else {
4103 					sk = state->key[sidx];
4104 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4105 					pnl->rsport = sk->port[sidx];
4106 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4107 					pnl->rdport = sk->port[didx];
4108 					PF_STATE_UNLOCK(state);
4109 				}
4110 			}
4111 		}
4112 		break;
4113 	}
4114 
4115 	case DIOCSETTIMEOUT: {
4116 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4117 
4118 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4119 		    &pt->seconds);
4120 		break;
4121 	}
4122 
4123 	case DIOCGETTIMEOUT: {
4124 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4125 
4126 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4127 		break;
4128 	}
4129 
4130 	case DIOCGETLIMIT: {
4131 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4132 
4133 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4134 		break;
4135 	}
4136 
4137 	case DIOCSETLIMIT: {
4138 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4139 		unsigned int old_limit;
4140 
4141 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4142 		pl->limit = old_limit;
4143 		break;
4144 	}
4145 
4146 	case DIOCSETDEBUG: {
4147 		u_int32_t	*level = (u_int32_t *)addr;
4148 
4149 		PF_RULES_WLOCK();
4150 		V_pf_status.debug = *level;
4151 		PF_RULES_WUNLOCK();
4152 		break;
4153 	}
4154 
4155 	case DIOCCLRRULECTRS: {
4156 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4157 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4158 		struct pf_krule		*rule;
4159 
4160 		PF_RULES_WLOCK();
4161 		TAILQ_FOREACH(rule,
4162 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4163 			pf_counter_u64_zero(&rule->evaluations);
4164 			for (int i = 0; i < 2; i++) {
4165 				pf_counter_u64_zero(&rule->packets[i]);
4166 				pf_counter_u64_zero(&rule->bytes[i]);
4167 			}
4168 		}
4169 		PF_RULES_WUNLOCK();
4170 		break;
4171 	}
4172 
4173 	case DIOCGIFSPEEDV0:
4174 	case DIOCGIFSPEEDV1: {
4175 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4176 		struct pf_ifspeed_v1	ps;
4177 		struct ifnet		*ifp;
4178 
4179 		if (psp->ifname[0] == '\0') {
4180 			error = EINVAL;
4181 			break;
4182 		}
4183 
4184 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4185 		if (error != 0)
4186 			break;
4187 		ifp = ifunit(ps.ifname);
4188 		if (ifp != NULL) {
4189 			psp->baudrate32 =
4190 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4191 			if (cmd == DIOCGIFSPEEDV1)
4192 				psp->baudrate = ifp->if_baudrate;
4193 		} else {
4194 			error = EINVAL;
4195 		}
4196 		break;
4197 	}
4198 
4199 #ifdef ALTQ
4200 	case DIOCSTARTALTQ: {
4201 		struct pf_altq		*altq;
4202 
4203 		PF_RULES_WLOCK();
4204 		/* enable all altq interfaces on active list */
4205 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4206 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4207 				error = pf_enable_altq(altq);
4208 				if (error != 0)
4209 					break;
4210 			}
4211 		}
4212 		if (error == 0)
4213 			V_pf_altq_running = 1;
4214 		PF_RULES_WUNLOCK();
4215 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4216 		break;
4217 	}
4218 
4219 	case DIOCSTOPALTQ: {
4220 		struct pf_altq		*altq;
4221 
4222 		PF_RULES_WLOCK();
4223 		/* disable all altq interfaces on active list */
4224 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4225 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4226 				error = pf_disable_altq(altq);
4227 				if (error != 0)
4228 					break;
4229 			}
4230 		}
4231 		if (error == 0)
4232 			V_pf_altq_running = 0;
4233 		PF_RULES_WUNLOCK();
4234 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4235 		break;
4236 	}
4237 
4238 	case DIOCADDALTQV0:
4239 	case DIOCADDALTQV1: {
4240 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4241 		struct pf_altq		*altq, *a;
4242 		struct ifnet		*ifp;
4243 
4244 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4245 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4246 		if (error)
4247 			break;
4248 		altq->local_flags = 0;
4249 
4250 		PF_RULES_WLOCK();
4251 		if (pa->ticket != V_ticket_altqs_inactive) {
4252 			PF_RULES_WUNLOCK();
4253 			free(altq, M_PFALTQ);
4254 			error = EBUSY;
4255 			break;
4256 		}
4257 
4258 		/*
4259 		 * if this is for a queue, find the discipline and
4260 		 * copy the necessary fields
4261 		 */
4262 		if (altq->qname[0] != 0) {
4263 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4264 				PF_RULES_WUNLOCK();
4265 				error = EBUSY;
4266 				free(altq, M_PFALTQ);
4267 				break;
4268 			}
4269 			altq->altq_disc = NULL;
4270 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4271 				if (strncmp(a->ifname, altq->ifname,
4272 				    IFNAMSIZ) == 0) {
4273 					altq->altq_disc = a->altq_disc;
4274 					break;
4275 				}
4276 			}
4277 		}
4278 
4279 		if ((ifp = ifunit(altq->ifname)) == NULL)
4280 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4281 		else
4282 			error = altq_add(ifp, altq);
4283 
4284 		if (error) {
4285 			PF_RULES_WUNLOCK();
4286 			free(altq, M_PFALTQ);
4287 			break;
4288 		}
4289 
4290 		if (altq->qname[0] != 0)
4291 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4292 		else
4293 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4294 		/* version error check done on import above */
4295 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4296 		PF_RULES_WUNLOCK();
4297 		break;
4298 	}
4299 
4300 	case DIOCGETALTQSV0:
4301 	case DIOCGETALTQSV1: {
4302 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4303 		struct pf_altq		*altq;
4304 
4305 		PF_RULES_RLOCK();
4306 		pa->nr = 0;
4307 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4308 			pa->nr++;
4309 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4310 			pa->nr++;
4311 		pa->ticket = V_ticket_altqs_active;
4312 		PF_RULES_RUNLOCK();
4313 		break;
4314 	}
4315 
4316 	case DIOCGETALTQV0:
4317 	case DIOCGETALTQV1: {
4318 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4319 		struct pf_altq		*altq;
4320 
4321 		PF_RULES_RLOCK();
4322 		if (pa->ticket != V_ticket_altqs_active) {
4323 			PF_RULES_RUNLOCK();
4324 			error = EBUSY;
4325 			break;
4326 		}
4327 		altq = pf_altq_get_nth_active(pa->nr);
4328 		if (altq == NULL) {
4329 			PF_RULES_RUNLOCK();
4330 			error = EBUSY;
4331 			break;
4332 		}
4333 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4334 		PF_RULES_RUNLOCK();
4335 		break;
4336 	}
4337 
4338 	case DIOCCHANGEALTQV0:
4339 	case DIOCCHANGEALTQV1:
4340 		/* CHANGEALTQ not supported yet! */
4341 		error = ENODEV;
4342 		break;
4343 
4344 	case DIOCGETQSTATSV0:
4345 	case DIOCGETQSTATSV1: {
4346 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4347 		struct pf_altq		*altq;
4348 		int			 nbytes;
4349 		u_int32_t		 version;
4350 
4351 		PF_RULES_RLOCK();
4352 		if (pq->ticket != V_ticket_altqs_active) {
4353 			PF_RULES_RUNLOCK();
4354 			error = EBUSY;
4355 			break;
4356 		}
4357 		nbytes = pq->nbytes;
4358 		altq = pf_altq_get_nth_active(pq->nr);
4359 		if (altq == NULL) {
4360 			PF_RULES_RUNLOCK();
4361 			error = EBUSY;
4362 			break;
4363 		}
4364 
4365 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4366 			PF_RULES_RUNLOCK();
4367 			error = ENXIO;
4368 			break;
4369 		}
4370 		PF_RULES_RUNLOCK();
4371 		if (cmd == DIOCGETQSTATSV0)
4372 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4373 		else
4374 			version = pq->version;
4375 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4376 		if (error == 0) {
4377 			pq->scheduler = altq->scheduler;
4378 			pq->nbytes = nbytes;
4379 		}
4380 		break;
4381 	}
4382 #endif /* ALTQ */
4383 
4384 	case DIOCBEGINADDRS: {
4385 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4386 
4387 		error = pf_ioctl_begin_addrs(&pp->ticket);
4388 		break;
4389 	}
4390 
4391 	case DIOCADDADDR: {
4392 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4393 
4394 		error = pf_ioctl_add_addr(pp);
4395 		break;
4396 	}
4397 
4398 	case DIOCGETADDRS: {
4399 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4400 
4401 		error = pf_ioctl_get_addrs(pp);
4402 		break;
4403 	}
4404 
4405 	case DIOCGETADDR: {
4406 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4407 
4408 		error = pf_ioctl_get_addr(pp);
4409 		break;
4410 	}
4411 
4412 	case DIOCCHANGEADDR: {
4413 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4414 		struct pf_kpool		*pool;
4415 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4416 		struct pf_kruleset	*ruleset;
4417 		struct pfi_kkif		*kif = NULL;
4418 
4419 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4420 
4421 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4422 		    pca->action > PF_CHANGE_REMOVE) {
4423 			error = EINVAL;
4424 			break;
4425 		}
4426 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4427 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4428 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4429 			error = EINVAL;
4430 			break;
4431 		}
4432 		if (pca->addr.addr.p.dyn != NULL) {
4433 			error = EINVAL;
4434 			break;
4435 		}
4436 
4437 		if (pca->action != PF_CHANGE_REMOVE) {
4438 #ifndef INET
4439 			if (pca->af == AF_INET) {
4440 				error = EAFNOSUPPORT;
4441 				break;
4442 			}
4443 #endif /* INET */
4444 #ifndef INET6
4445 			if (pca->af == AF_INET6) {
4446 				error = EAFNOSUPPORT;
4447 				break;
4448 			}
4449 #endif /* INET6 */
4450 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4451 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4452 			if (newpa->ifname[0])
4453 				kif = pf_kkif_create(M_WAITOK);
4454 			newpa->kif = NULL;
4455 		}
4456 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4457 		PF_RULES_WLOCK();
4458 		ruleset = pf_find_kruleset(pca->anchor);
4459 		if (ruleset == NULL)
4460 			ERROUT(EBUSY);
4461 
4462 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4463 		    pca->r_num, pca->r_last, 1, 1);
4464 		if (pool == NULL)
4465 			ERROUT(EBUSY);
4466 
4467 		if (pca->action != PF_CHANGE_REMOVE) {
4468 			if (newpa->ifname[0]) {
4469 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4470 				pfi_kkif_ref(newpa->kif);
4471 				kif = NULL;
4472 			}
4473 
4474 			switch (newpa->addr.type) {
4475 			case PF_ADDR_DYNIFTL:
4476 				error = pfi_dynaddr_setup(&newpa->addr,
4477 				    pca->af);
4478 				break;
4479 			case PF_ADDR_TABLE:
4480 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4481 				    newpa->addr.v.tblname);
4482 				if (newpa->addr.p.tbl == NULL)
4483 					error = ENOMEM;
4484 				break;
4485 			}
4486 			if (error)
4487 				goto DIOCCHANGEADDR_error;
4488 		}
4489 
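		/* Locate the existing pool address that the change is relative to. */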
4490 		switch (pca->action) {
4491 		case PF_CHANGE_ADD_HEAD:
4492 			oldpa = TAILQ_FIRST(&pool->list);
4493 			break;
4494 		case PF_CHANGE_ADD_TAIL:
4495 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4496 			break;
4497 		default:
4498 			oldpa = TAILQ_FIRST(&pool->list);
4499 			for (int i = 0; oldpa && i < pca->nr; i++)
4500 				oldpa = TAILQ_NEXT(oldpa, entries);
4501 
4502 			if (oldpa == NULL)
4503 				ERROUT(EINVAL);
4504 		}
4505 
4506 		if (pca->action == PF_CHANGE_REMOVE) {
4507 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4508 			switch (oldpa->addr.type) {
4509 			case PF_ADDR_DYNIFTL:
4510 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4511 				break;
4512 			case PF_ADDR_TABLE:
4513 				pfr_detach_table(oldpa->addr.p.tbl);
4514 				break;
4515 			}
4516 			if (oldpa->kif)
4517 				pfi_kkif_unref(oldpa->kif);
4518 			free(oldpa, M_PFRULE);
4519 		} else {
4520 			if (oldpa == NULL)
4521 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4522 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4523 			    pca->action == PF_CHANGE_ADD_BEFORE)
4524 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4525 			else
4526 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4527 				    newpa, entries);
4528 		}
4529 
4530 		pool->cur = TAILQ_FIRST(&pool->list);
4531 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4532 		PF_RULES_WUNLOCK();
4533 		break;
4534 
4535 #undef ERROUT
4536 DIOCCHANGEADDR_error:
4537 		if (newpa != NULL) {
4538 			if (newpa->kif)
4539 				pfi_kkif_unref(newpa->kif);
4540 			free(newpa, M_PFRULE);
4541 		}
4542 		PF_RULES_WUNLOCK();
4543 		pf_kkif_free(kif);
4544 		break;
4545 	}
4546 
4547 	case DIOCGETRULESETS: {
4548 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4549 
4550 		pr->path[sizeof(pr->path) - 1] = 0;
4551 
4552 		error = pf_ioctl_get_rulesets(pr);
4553 		break;
4554 	}
4555 
4556 	case DIOCGETRULESET: {
4557 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4558 
4559 		pr->path[sizeof(pr->path) - 1] = 0;
4560 
4561 		error = pf_ioctl_get_ruleset(pr);
4562 		break;
4563 	}
4564 
4565 	case DIOCRCLRTABLES: {
4566 		struct pfioc_table *io = (struct pfioc_table *)addr;
4567 
4568 		if (io->pfrio_esize != 0) {
4569 			error = ENODEV;
4570 			break;
4571 		}
4572 		PF_RULES_WLOCK();
4573 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4574 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4575 		PF_RULES_WUNLOCK();
4576 		break;
4577 	}
4578 
4579 	case DIOCRADDTABLES: {
4580 		struct pfioc_table *io = (struct pfioc_table *)addr;
4581 		struct pfr_table *pfrts;
4582 		size_t totlen;
4583 
4584 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4585 			error = ENODEV;
4586 			break;
4587 		}
4588 
4589 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4590 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4591 			error = ENOMEM;
4592 			break;
4593 		}
4594 
4595 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4596 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4597 		    M_TEMP, M_WAITOK);
4598 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4599 		if (error) {
4600 			free(pfrts, M_TEMP);
4601 			break;
4602 		}
4603 		PF_RULES_WLOCK();
4604 		error = pfr_add_tables(pfrts, io->pfrio_size,
4605 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4606 		PF_RULES_WUNLOCK();
4607 		free(pfrts, M_TEMP);
4608 		break;
4609 	}
4610 
4611 	case DIOCRDELTABLES: {
4612 		struct pfioc_table *io = (struct pfioc_table *)addr;
4613 		struct pfr_table *pfrts;
4614 		size_t totlen;
4615 
4616 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4617 			error = ENODEV;
4618 			break;
4619 		}
4620 
4621 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4622 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4623 			error = ENOMEM;
4624 			break;
4625 		}
4626 
4627 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4628 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4629 		    M_TEMP, M_WAITOK);
4630 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4631 		if (error) {
4632 			free(pfrts, M_TEMP);
4633 			break;
4634 		}
4635 		PF_RULES_WLOCK();
4636 		error = pfr_del_tables(pfrts, io->pfrio_size,
4637 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4638 		PF_RULES_WUNLOCK();
4639 		free(pfrts, M_TEMP);
4640 		break;
4641 	}
4642 
4643 	case DIOCRGETTABLES: {
4644 		struct pfioc_table *io = (struct pfioc_table *)addr;
4645 		struct pfr_table *pfrts;
4646 		size_t totlen;
4647 		int n;
4648 
4649 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4650 			error = ENODEV;
4651 			break;
4652 		}
4653 		PF_RULES_RLOCK();
4654 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4655 		if (n < 0) {
4656 			PF_RULES_RUNLOCK();
4657 			error = EINVAL;
4658 			break;
4659 		}
4660 		io->pfrio_size = min(io->pfrio_size, n);
4661 
4662 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4663 
4664 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4665 		    M_TEMP, M_NOWAIT | M_ZERO);
4666 		if (pfrts == NULL) {
4667 			error = ENOMEM;
4668 			PF_RULES_RUNLOCK();
4669 			break;
4670 		}
4671 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4672 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4673 		PF_RULES_RUNLOCK();
4674 		if (error == 0)
4675 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4676 		free(pfrts, M_TEMP);
4677 		break;
4678 	}
4679 
4680 	case DIOCRGETTSTATS: {
4681 		struct pfioc_table *io = (struct pfioc_table *)addr;
4682 		struct pfr_tstats *pfrtstats;
4683 		size_t totlen;
4684 		int n;
4685 
4686 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4687 			error = ENODEV;
4688 			break;
4689 		}
4690 		PF_TABLE_STATS_LOCK();
4691 		PF_RULES_RLOCK();
4692 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4693 		if (n < 0) {
4694 			PF_RULES_RUNLOCK();
4695 			PF_TABLE_STATS_UNLOCK();
4696 			error = EINVAL;
4697 			break;
4698 		}
4699 		io->pfrio_size = min(io->pfrio_size, n);
4700 
4701 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4702 		pfrtstats = mallocarray(io->pfrio_size,
4703 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4704 		if (pfrtstats == NULL) {
4705 			error = ENOMEM;
4706 			PF_RULES_RUNLOCK();
4707 			PF_TABLE_STATS_UNLOCK();
4708 			break;
4709 		}
4710 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4711 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4712 		PF_RULES_RUNLOCK();
4713 		PF_TABLE_STATS_UNLOCK();
4714 		if (error == 0)
4715 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4716 		free(pfrtstats, M_TEMP);
4717 		break;
4718 	}
4719 
4720 	case DIOCRCLRTSTATS: {
4721 		struct pfioc_table *io = (struct pfioc_table *)addr;
4722 		struct pfr_table *pfrts;
4723 		size_t totlen;
4724 
4725 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4726 			error = ENODEV;
4727 			break;
4728 		}
4729 
4730 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4731 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4732 			/* We used to count tables and use the minimum required
4733 			 * size, so we didn't fail on overly large requests.
4734 			 * Keep doing so. */
4735 			io->pfrio_size = pf_ioctl_maxcount;
4736 			break;
4737 		}
4738 
4739 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4740 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4741 		    M_TEMP, M_WAITOK);
4742 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4743 		if (error) {
4744 			free(pfrts, M_TEMP);
4745 			break;
4746 		}
4747 
4748 		PF_TABLE_STATS_LOCK();
4749 		PF_RULES_RLOCK();
4750 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4751 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4752 		PF_RULES_RUNLOCK();
4753 		PF_TABLE_STATS_UNLOCK();
4754 		free(pfrts, M_TEMP);
4755 		break;
4756 	}
4757 
4758 	case DIOCRSETTFLAGS: {
4759 		struct pfioc_table *io = (struct pfioc_table *)addr;
4760 		struct pfr_table *pfrts;
4761 		size_t totlen;
4762 		int n;
4763 
4764 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4765 			error = ENODEV;
4766 			break;
4767 		}
4768 
4769 		PF_RULES_RLOCK();
4770 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4771 		if (n < 0) {
4772 			PF_RULES_RUNLOCK();
4773 			error = EINVAL;
4774 			break;
4775 		}
4776 
4777 		io->pfrio_size = min(io->pfrio_size, n);
4778 		PF_RULES_RUNLOCK();
4779 
4780 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4781 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4782 		    M_TEMP, M_WAITOK);
4783 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4784 		if (error) {
4785 			free(pfrts, M_TEMP);
4786 			break;
4787 		}
4788 		PF_RULES_WLOCK();
4789 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4790 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4791 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4792 		PF_RULES_WUNLOCK();
4793 		free(pfrts, M_TEMP);
4794 		break;
4795 	}
4796 
4797 	case DIOCRCLRADDRS: {
4798 		struct pfioc_table *io = (struct pfioc_table *)addr;
4799 
4800 		if (io->pfrio_esize != 0) {
4801 			error = ENODEV;
4802 			break;
4803 		}
4804 		PF_RULES_WLOCK();
4805 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4806 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4807 		PF_RULES_WUNLOCK();
4808 		break;
4809 	}
4810 
4811 	case DIOCRADDADDRS: {
4812 		struct pfioc_table *io = (struct pfioc_table *)addr;
4813 		struct pfr_addr *pfras;
4814 		size_t totlen;
4815 
4816 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4817 			error = ENODEV;
4818 			break;
4819 		}
4820 		if (io->pfrio_size < 0 ||
4821 		    io->pfrio_size > pf_ioctl_maxcount ||
4822 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4823 			error = EINVAL;
4824 			break;
4825 		}
4826 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4827 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4828 		    M_TEMP, M_WAITOK);
4829 		error = copyin(io->pfrio_buffer, pfras, totlen);
4830 		if (error) {
4831 			free(pfras, M_TEMP);
4832 			break;
4833 		}
4834 		PF_RULES_WLOCK();
4835 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4836 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4837 		    PFR_FLAG_USERIOCTL);
4838 		PF_RULES_WUNLOCK();
4839 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4840 			error = copyout(pfras, io->pfrio_buffer, totlen);
4841 		free(pfras, M_TEMP);
4842 		break;
4843 	}
4844 
4845 	case DIOCRDELADDRS: {
4846 		struct pfioc_table *io = (struct pfioc_table *)addr;
4847 		struct pfr_addr *pfras;
4848 		size_t totlen;
4849 
4850 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4851 			error = ENODEV;
4852 			break;
4853 		}
4854 		if (io->pfrio_size < 0 ||
4855 		    io->pfrio_size > pf_ioctl_maxcount ||
4856 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4857 			error = EINVAL;
4858 			break;
4859 		}
4860 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4861 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4862 		    M_TEMP, M_WAITOK);
4863 		error = copyin(io->pfrio_buffer, pfras, totlen);
4864 		if (error) {
4865 			free(pfras, M_TEMP);
4866 			break;
4867 		}
4868 		PF_RULES_WLOCK();
4869 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4870 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4871 		    PFR_FLAG_USERIOCTL);
4872 		PF_RULES_WUNLOCK();
4873 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4874 			error = copyout(pfras, io->pfrio_buffer, totlen);
4875 		free(pfras, M_TEMP);
4876 		break;
4877 	}
4878 
4879 	case DIOCRSETADDRS: {
4880 		struct pfioc_table *io = (struct pfioc_table *)addr;
4881 		struct pfr_addr *pfras;
4882 		size_t totlen, count;
4883 
4884 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4885 			error = ENODEV;
4886 			break;
4887 		}
4888 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4889 			error = EINVAL;
4890 			break;
4891 		}
4892 		count = max(io->pfrio_size, io->pfrio_size2);
4893 		if (count > pf_ioctl_maxcount ||
4894 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4895 			error = EINVAL;
4896 			break;
4897 		}
4898 		totlen = count * sizeof(struct pfr_addr);
4899 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4900 		    M_WAITOK);
4901 		error = copyin(io->pfrio_buffer, pfras, totlen);
4902 		if (error) {
4903 			free(pfras, M_TEMP);
4904 			break;
4905 		}
4906 		PF_RULES_WLOCK();
4907 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4908 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4909 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4910 		    PFR_FLAG_USERIOCTL, 0);
4911 		PF_RULES_WUNLOCK();
4912 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4913 			error = copyout(pfras, io->pfrio_buffer, totlen);
4914 		free(pfras, M_TEMP);
4915 		break;
4916 	}
4917 
4918 	case DIOCRGETADDRS: {
4919 		struct pfioc_table *io = (struct pfioc_table *)addr;
4920 		struct pfr_addr *pfras;
4921 		size_t totlen;
4922 
4923 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4924 			error = ENODEV;
4925 			break;
4926 		}
4927 		if (io->pfrio_size < 0 ||
4928 		    io->pfrio_size > pf_ioctl_maxcount ||
4929 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4930 			error = EINVAL;
4931 			break;
4932 		}
4933 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4934 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4935 		    M_TEMP, M_WAITOK | M_ZERO);
4936 		PF_RULES_RLOCK();
4937 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4938 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4939 		PF_RULES_RUNLOCK();
4940 		if (error == 0)
4941 			error = copyout(pfras, io->pfrio_buffer, totlen);
4942 		free(pfras, M_TEMP);
4943 		break;
4944 	}
4945 
4946 	case DIOCRGETASTATS: {
4947 		struct pfioc_table *io = (struct pfioc_table *)addr;
4948 		struct pfr_astats *pfrastats;
4949 		size_t totlen;
4950 
4951 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4952 			error = ENODEV;
4953 			break;
4954 		}
4955 		if (io->pfrio_size < 0 ||
4956 		    io->pfrio_size > pf_ioctl_maxcount ||
4957 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4958 			error = EINVAL;
4959 			break;
4960 		}
4961 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
4962 		pfrastats = mallocarray(io->pfrio_size,
4963 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
4964 		PF_RULES_RLOCK();
4965 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
4966 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4967 		PF_RULES_RUNLOCK();
4968 		if (error == 0)
4969 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
4970 		free(pfrastats, M_TEMP);
4971 		break;
4972 	}
4973 
4974 	case DIOCRCLRASTATS: {
4975 		struct pfioc_table *io = (struct pfioc_table *)addr;
4976 		struct pfr_addr *pfras;
4977 		size_t totlen;
4978 
4979 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4980 			error = ENODEV;
4981 			break;
4982 		}
4983 		if (io->pfrio_size < 0 ||
4984 		    io->pfrio_size > pf_ioctl_maxcount ||
4985 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4986 			error = EINVAL;
4987 			break;
4988 		}
4989 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4990 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4991 		    M_TEMP, M_WAITOK);
4992 		error = copyin(io->pfrio_buffer, pfras, totlen);
4993 		if (error) {
4994 			free(pfras, M_TEMP);
4995 			break;
4996 		}
4997 		PF_RULES_WLOCK();
4998 		error = pfr_clr_astats(&io->pfrio_table, pfras,
4999 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5000 		    PFR_FLAG_USERIOCTL);
5001 		PF_RULES_WUNLOCK();
5002 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5003 			error = copyout(pfras, io->pfrio_buffer, totlen);
5004 		free(pfras, M_TEMP);
5005 		break;
5006 	}
5007 
5008 	case DIOCRTSTADDRS: {
5009 		struct pfioc_table *io = (struct pfioc_table *)addr;
5010 		struct pfr_addr *pfras;
5011 		size_t totlen;
5012 
5013 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5014 			error = ENODEV;
5015 			break;
5016 		}
5017 		if (io->pfrio_size < 0 ||
5018 		    io->pfrio_size > pf_ioctl_maxcount ||
5019 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5020 			error = EINVAL;
5021 			break;
5022 		}
5023 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5024 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5025 		    M_TEMP, M_WAITOK);
5026 		error = copyin(io->pfrio_buffer, pfras, totlen);
5027 		if (error) {
5028 			free(pfras, M_TEMP);
5029 			break;
5030 		}
5031 		PF_RULES_RLOCK();
5032 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5033 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5034 		    PFR_FLAG_USERIOCTL);
5035 		PF_RULES_RUNLOCK();
5036 		if (error == 0)
5037 			error = copyout(pfras, io->pfrio_buffer, totlen);
5038 		free(pfras, M_TEMP);
5039 		break;
5040 	}
5041 
5042 	case DIOCRINADEFINE: {
5043 		struct pfioc_table *io = (struct pfioc_table *)addr;
5044 		struct pfr_addr *pfras;
5045 		size_t totlen;
5046 
5047 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5048 			error = ENODEV;
5049 			break;
5050 		}
5051 		if (io->pfrio_size < 0 ||
5052 		    io->pfrio_size > pf_ioctl_maxcount ||
5053 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5054 			error = EINVAL;
5055 			break;
5056 		}
5057 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5058 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5059 		    M_TEMP, M_WAITOK);
5060 		error = copyin(io->pfrio_buffer, pfras, totlen);
5061 		if (error) {
5062 			free(pfras, M_TEMP);
5063 			break;
5064 		}
5065 		PF_RULES_WLOCK();
5066 		error = pfr_ina_define(&io->pfrio_table, pfras,
5067 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5068 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5069 		PF_RULES_WUNLOCK();
5070 		free(pfras, M_TEMP);
5071 		break;
5072 	}
5073 
5074 	case DIOCOSFPADD: {
5075 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5076 		PF_RULES_WLOCK();
5077 		error = pf_osfp_add(io);
5078 		PF_RULES_WUNLOCK();
5079 		break;
5080 	}
5081 
5082 	case DIOCOSFPGET: {
5083 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5084 		PF_RULES_RLOCK();
5085 		error = pf_osfp_get(io);
5086 		PF_RULES_RUNLOCK();
5087 		break;
5088 	}
5089 
5090 	case DIOCXBEGIN: {
5091 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5092 		struct pfioc_trans_e	*ioes, *ioe;
5093 		size_t			 totlen;
5094 		int			 i;
5095 
5096 		if (io->esize != sizeof(*ioe)) {
5097 			error = ENODEV;
5098 			break;
5099 		}
5100 		if (io->size < 0 ||
5101 		    io->size > pf_ioctl_maxcount ||
5102 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5103 			error = EINVAL;
5104 			break;
5105 		}
5106 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5107 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5108 		    M_TEMP, M_WAITOK);
5109 		error = copyin(io->array, ioes, totlen);
5110 		if (error) {
5111 			free(ioes, M_TEMP);
5112 			break;
5113 		}
5114 		/* Ensure there are no more ethernet rules to clean up. */
5115 		NET_EPOCH_DRAIN_CALLBACKS();
5116 		PF_RULES_WLOCK();
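		/*
		 * Open an inactive copy of every requested ruleset and hand
		 * its ticket back to the caller.
		 */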
5117 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5118 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5119 			switch (ioe->rs_num) {
5120 			case PF_RULESET_ETH:
5121 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5122 					PF_RULES_WUNLOCK();
5123 					free(ioes, M_TEMP);
5124 					goto fail;
5125 				}
5126 				break;
5127 #ifdef ALTQ
5128 			case PF_RULESET_ALTQ:
5129 				if (ioe->anchor[0]) {
5130 					PF_RULES_WUNLOCK();
5131 					free(ioes, M_TEMP);
5132 					error = EINVAL;
5133 					goto fail;
5134 				}
5135 				if ((error = pf_begin_altq(&ioe->ticket))) {
5136 					PF_RULES_WUNLOCK();
5137 					free(ioes, M_TEMP);
5138 					goto fail;
5139 				}
5140 				break;
5141 #endif /* ALTQ */
5142 			case PF_RULESET_TABLE:
5143 			    {
5144 				struct pfr_table table;
5145 
5146 				bzero(&table, sizeof(table));
5147 				strlcpy(table.pfrt_anchor, ioe->anchor,
5148 				    sizeof(table.pfrt_anchor));
5149 				if ((error = pfr_ina_begin(&table,
5150 				    &ioe->ticket, NULL, 0))) {
5151 					PF_RULES_WUNLOCK();
5152 					free(ioes, M_TEMP);
5153 					goto fail;
5154 				}
5155 				break;
5156 			    }
5157 			default:
5158 				if ((error = pf_begin_rules(&ioe->ticket,
5159 				    ioe->rs_num, ioe->anchor))) {
5160 					PF_RULES_WUNLOCK();
5161 					free(ioes, M_TEMP);
5162 					goto fail;
5163 				}
5164 				break;
5165 			}
5166 		}
5167 		PF_RULES_WUNLOCK();
5168 		error = copyout(ioes, io->array, totlen);
5169 		free(ioes, M_TEMP);
5170 		break;
5171 	}
5172 
5173 	case DIOCXROLLBACK: {
5174 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5175 		struct pfioc_trans_e	*ioe, *ioes;
5176 		size_t			 totlen;
5177 		int			 i;
5178 
5179 		if (io->esize != sizeof(*ioe)) {
5180 			error = ENODEV;
5181 			break;
5182 		}
5183 		if (io->size < 0 ||
5184 		    io->size > pf_ioctl_maxcount ||
5185 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5186 			error = EINVAL;
5187 			break;
5188 		}
5189 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5190 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5191 		    M_TEMP, M_WAITOK);
5192 		error = copyin(io->array, ioes, totlen);
5193 		if (error) {
5194 			free(ioes, M_TEMP);
5195 			break;
5196 		}
5197 		PF_RULES_WLOCK();
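		/*
		 * Discard the inactive ruleset copies identified by the
		 * caller's tickets.
		 */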
5198 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5199 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5200 			switch (ioe->rs_num) {
5201 			case PF_RULESET_ETH:
5202 				if ((error = pf_rollback_eth(ioe->ticket,
5203 				    ioe->anchor))) {
5204 					PF_RULES_WUNLOCK();
5205 					free(ioes, M_TEMP);
5206 					goto fail; /* really bad */
5207 				}
5208 				break;
5209 #ifdef ALTQ
5210 			case PF_RULESET_ALTQ:
5211 				if (ioe->anchor[0]) {
5212 					PF_RULES_WUNLOCK();
5213 					free(ioes, M_TEMP);
5214 					error = EINVAL;
5215 					goto fail;
5216 				}
5217 				if ((error = pf_rollback_altq(ioe->ticket))) {
5218 					PF_RULES_WUNLOCK();
5219 					free(ioes, M_TEMP);
5220 					goto fail; /* really bad */
5221 				}
5222 				break;
5223 #endif /* ALTQ */
5224 			case PF_RULESET_TABLE:
5225 			    {
5226 				struct pfr_table table;
5227 
5228 				bzero(&table, sizeof(table));
5229 				strlcpy(table.pfrt_anchor, ioe->anchor,
5230 				    sizeof(table.pfrt_anchor));
5231 				if ((error = pfr_ina_rollback(&table,
5232 				    ioe->ticket, NULL, 0))) {
5233 					PF_RULES_WUNLOCK();
5234 					free(ioes, M_TEMP);
5235 					goto fail; /* really bad */
5236 				}
5237 				break;
5238 			    }
5239 			default:
5240 				if ((error = pf_rollback_rules(ioe->ticket,
5241 				    ioe->rs_num, ioe->anchor))) {
5242 					PF_RULES_WUNLOCK();
5243 					free(ioes, M_TEMP);
5244 					goto fail; /* really bad */
5245 				}
5246 				break;
5247 			}
5248 		}
5249 		PF_RULES_WUNLOCK();
5250 		free(ioes, M_TEMP);
5251 		break;
5252 	}
5253 
5254 	case DIOCXCOMMIT: {
5255 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5256 		struct pfioc_trans_e	*ioe, *ioes;
5257 		struct pf_kruleset	*rs;
5258 		struct pf_keth_ruleset	*ers;
5259 		size_t			 totlen;
5260 		int			 i;
5261 
5262 		if (io->esize != sizeof(*ioe)) {
5263 			error = ENODEV;
5264 			break;
5265 		}
5266 
5267 		if (io->size < 0 ||
5268 		    io->size > pf_ioctl_maxcount ||
5269 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5270 			error = EINVAL;
5271 			break;
5272 		}
5273 
5274 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5275 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5276 		    M_TEMP, M_WAITOK);
5277 		error = copyin(io->array, ioes, totlen);
5278 		if (error) {
5279 			free(ioes, M_TEMP);
5280 			break;
5281 		}
5282 		PF_RULES_WLOCK();
5283 		/* First, make sure everything will succeed. */
5284 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5285 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5286 			switch (ioe->rs_num) {
5287 			case PF_RULESET_ETH:
5288 				ers = pf_find_keth_ruleset(ioe->anchor);
5289 				if (ers == NULL || ioe->ticket == 0 ||
5290 				    ioe->ticket != ers->inactive.ticket) {
5291 					PF_RULES_WUNLOCK();
5292 					free(ioes, M_TEMP);
5293 					error = EINVAL;
5294 					goto fail;
5295 				}
5296 				break;
5297 #ifdef ALTQ
5298 			case PF_RULESET_ALTQ:
5299 				if (ioe->anchor[0]) {
5300 					PF_RULES_WUNLOCK();
5301 					free(ioes, M_TEMP);
5302 					error = EINVAL;
5303 					goto fail;
5304 				}
5305 				if (!V_altqs_inactive_open || ioe->ticket !=
5306 				    V_ticket_altqs_inactive) {
5307 					PF_RULES_WUNLOCK();
5308 					free(ioes, M_TEMP);
5309 					error = EBUSY;
5310 					goto fail;
5311 				}
5312 				break;
5313 #endif /* ALTQ */
5314 			case PF_RULESET_TABLE:
5315 				rs = pf_find_kruleset(ioe->anchor);
5316 				if (rs == NULL || !rs->topen || ioe->ticket !=
5317 				    rs->tticket) {
5318 					PF_RULES_WUNLOCK();
5319 					free(ioes, M_TEMP);
5320 					error = EBUSY;
5321 					goto fail;
5322 				}
5323 				break;
5324 			default:
5325 				if (ioe->rs_num < 0 || ioe->rs_num >=
5326 				    PF_RULESET_MAX) {
5327 					PF_RULES_WUNLOCK();
5328 					free(ioes, M_TEMP);
5329 					error = EINVAL;
5330 					goto fail;
5331 				}
5332 				rs = pf_find_kruleset(ioe->anchor);
5333 				if (rs == NULL ||
5334 				    !rs->rules[ioe->rs_num].inactive.open ||
5335 				    rs->rules[ioe->rs_num].inactive.ticket !=
5336 				    ioe->ticket) {
5337 					PF_RULES_WUNLOCK();
5338 					free(ioes, M_TEMP);
5339 					error = EBUSY;
5340 					goto fail;
5341 				}
5342 				break;
5343 			}
5344 		}
5345 		/* Now do the commit - no errors should happen here. */
5346 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5347 			switch (ioe->rs_num) {
5348 			case PF_RULESET_ETH:
5349 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5350 					PF_RULES_WUNLOCK();
5351 					free(ioes, M_TEMP);
5352 					goto fail; /* really bad */
5353 				}
5354 				break;
5355 #ifdef ALTQ
5356 			case PF_RULESET_ALTQ:
5357 				if ((error = pf_commit_altq(ioe->ticket))) {
5358 					PF_RULES_WUNLOCK();
5359 					free(ioes, M_TEMP);
5360 					goto fail; /* really bad */
5361 				}
5362 				break;
5363 #endif /* ALTQ */
5364 			case PF_RULESET_TABLE:
5365 			    {
5366 				struct pfr_table table;
5367 
5368 				bzero(&table, sizeof(table));
5369 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5370 				    sizeof(table.pfrt_anchor));
5371 				if ((error = pfr_ina_commit(&table,
5372 				    ioe->ticket, NULL, NULL, 0))) {
5373 					PF_RULES_WUNLOCK();
5374 					free(ioes, M_TEMP);
5375 					goto fail; /* really bad */
5376 				}
5377 				break;
5378 			    }
5379 			default:
5380 				if ((error = pf_commit_rules(ioe->ticket,
5381 				    ioe->rs_num, ioe->anchor))) {
5382 					PF_RULES_WUNLOCK();
5383 					free(ioes, M_TEMP);
5384 					goto fail; /* really bad */
5385 				}
5386 				break;
5387 			}
5388 		}
5389 		PF_RULES_WUNLOCK();
5390 
5391 		/* Only hook into EtherNet taffic if we've got rules for it. */
5392 		/* Only hook into Ethernet traffic if we've got rules for it. */
5393 			hook_pf_eth();
5394 		else
5395 			dehook_pf_eth();
5396 
5397 		free(ioes, M_TEMP);
5398 		break;
5399 	}
5400 
5401 	case DIOCGETSRCNODES: {
5402 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5403 		struct pf_srchash	*sh;
5404 		struct pf_ksrc_node	*n;
5405 		struct pf_src_node	*p, *pstore;
5406 		uint32_t		 i, nr = 0;
5407 
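		/*
		 * First pass: count all source nodes so the reply buffer
		 * can be sized.
		 */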
5408 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5409 				i++, sh++) {
5410 			PF_HASHROW_LOCK(sh);
5411 			LIST_FOREACH(n, &sh->nodes, entry)
5412 				nr++;
5413 			PF_HASHROW_UNLOCK(sh);
5414 		}
5415 
5416 		psn->psn_len = min(psn->psn_len,
5417 		    sizeof(struct pf_src_node) * nr);
5418 
5419 		if (psn->psn_len == 0) {
5420 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5421 			break;
5422 		}
5423 
5424 		nr = 0;
5425 
5426 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5427 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5428 		    i++, sh++) {
5429 		    PF_HASHROW_LOCK(sh);
5430 		    LIST_FOREACH(n, &sh->nodes, entry) {
5431 
5432 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5433 				break;
5434 
5435 			pf_src_node_copy(n, p);
5436 
5437 			p++;
5438 			nr++;
5439 		    }
5440 		    PF_HASHROW_UNLOCK(sh);
5441 		}
5442 		error = copyout(pstore, psn->psn_src_nodes,
5443 		    sizeof(struct pf_src_node) * nr);
5444 		if (error) {
5445 			free(pstore, M_TEMP);
5446 			break;
5447 		}
5448 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5449 		free(pstore, M_TEMP);
5450 		break;
5451 	}
5452 
5453 	case DIOCCLRSRCNODES: {
5454 		pf_kill_srcnodes(NULL);
5455 		break;
5456 	}
5457 
5458 	case DIOCKILLSRCNODES:
5459 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5460 		break;
5461 
5462 #ifdef COMPAT_FREEBSD13
5463 	case DIOCKEEPCOUNTERS_FREEBSD13:
5464 #endif
5465 	case DIOCKEEPCOUNTERS:
5466 		error = pf_keepcounters((struct pfioc_nv *)addr);
5467 		break;
5468 
5469 	case DIOCGETSYNCOOKIES:
5470 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5471 		break;
5472 
5473 	case DIOCSETSYNCOOKIES:
5474 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5475 		break;
5476 
5477 	case DIOCSETHOSTID: {
5478 		u_int32_t	*hostid = (u_int32_t *)addr;
5479 
5480 		PF_RULES_WLOCK();
5481 		if (*hostid == 0)
5482 			V_pf_status.hostid = arc4random();
5483 		else
5484 			V_pf_status.hostid = *hostid;
5485 		PF_RULES_WUNLOCK();
5486 		break;
5487 	}
5488 
5489 	case DIOCOSFPFLUSH:
5490 		PF_RULES_WLOCK();
5491 		pf_osfp_flush();
5492 		PF_RULES_WUNLOCK();
5493 		break;
5494 
5495 	case DIOCIGETIFACES: {
5496 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5497 		struct pfi_kif *ifstore;
5498 		size_t bufsiz;
5499 
5500 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5501 			error = ENODEV;
5502 			break;
5503 		}
5504 
5505 		if (io->pfiio_size < 0 ||
5506 		    io->pfiio_size > pf_ioctl_maxcount ||
5507 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5508 			error = EINVAL;
5509 			break;
5510 		}
5511 
5512 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5513 
5514 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5515 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5516 		    M_TEMP, M_WAITOK | M_ZERO);
5517 
5518 		PF_RULES_RLOCK();
5519 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5520 		PF_RULES_RUNLOCK();
5521 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5522 		free(ifstore, M_TEMP);
5523 		break;
5524 	}
5525 
5526 	case DIOCSETIFFLAG: {
5527 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5528 
5529 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5530 
5531 		PF_RULES_WLOCK();
5532 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5533 		PF_RULES_WUNLOCK();
5534 		break;
5535 	}
5536 
5537 	case DIOCCLRIFFLAG: {
5538 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5539 
5540 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5541 
5542 		PF_RULES_WLOCK();
5543 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5544 		PF_RULES_WUNLOCK();
5545 		break;
5546 	}
5547 
5548 	case DIOCSETREASS: {
5549 		u_int32_t	*reass = (u_int32_t *)addr;
5550 
5551 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5552 		/* Removal of the DF flag without reassembly enabled is not a
5553 		 * valid combination. Disable reassembly in that case. */
5554 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5555 			V_pf_status.reass = 0;
5556 		break;
5557 	}
5558 
5559 	default:
5560 		error = ENODEV;
5561 		break;
5562 	}
5563 fail:
5564 	CURVNET_RESTORE();
5565 
5566 #undef ERROUT_IOCTL
5567 
5568 	return (error);
5569 }
5570 
5571 void
5572 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5573 {
5574 	bzero(sp, sizeof(union pfsync_state_union));
5575 
5576 	/* copy from state key */
5577 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5578 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5579 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5580 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5581 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5582 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5583 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5584 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5585 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5586 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5587 
5588 	/* copy from state */
5589 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5590 	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5591 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5592 	sp->pfs_1301.expire = pf_state_expires(st);
5593 	if (sp->pfs_1301.expire <= time_uptime)
5594 		sp->pfs_1301.expire = htonl(0);
5595 	else
5596 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5597 
5598 	sp->pfs_1301.direction = st->direction;
5599 	sp->pfs_1301.log = st->act.log;
5600 	sp->pfs_1301.timeout = st->timeout;
5601 
5602 	switch (msg_version) {
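	/*
	 * Layout-specific fields: the 1400 format carries per-state
	 * action data that the 1301 format lacks.
	 */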
5603 		case PFSYNC_MSG_VERSION_1301:
5604 			sp->pfs_1301.state_flags = st->state_flags;
5605 			break;
5606 		case PFSYNC_MSG_VERSION_1400:
5607 			sp->pfs_1400.state_flags = htons(st->state_flags);
5608 			sp->pfs_1400.qid = htons(st->act.qid);
5609 			sp->pfs_1400.pqid = htons(st->act.pqid);
5610 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5611 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5612 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5613 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5614 			sp->pfs_1400.set_tos = st->act.set_tos;
5615 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5616 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5617 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5618 			sp->pfs_1400.rt = st->rt;
5619 			if (st->rt_kif)
5620 				strlcpy(sp->pfs_1400.rt_ifname,
5621 				    st->rt_kif->pfik_name,
5622 				    sizeof(sp->pfs_1400.rt_ifname));
5623 			break;
5624 		default:
5625 			panic("%s: Unsupported pfsync_msg_version %d",
5626 			    __func__, msg_version);
5627 	}
5628 
5629 	if (st->src_node)
5630 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5631 	if (st->nat_src_node)
5632 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5633 
5634 	sp->pfs_1301.id = st->id;
5635 	sp->pfs_1301.creatorid = st->creatorid;
5636 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5637 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5638 
5639 	if (st->rule == NULL)
5640 		sp->pfs_1301.rule = htonl(-1);
5641 	else
5642 		sp->pfs_1301.rule = htonl(st->rule->nr);
5643 	if (st->anchor == NULL)
5644 		sp->pfs_1301.anchor = htonl(-1);
5645 	else
5646 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5647 	if (st->nat_rule == NULL)
5648 		sp->pfs_1301.nat_rule = htonl(-1);
5649 	else
5650 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5651 
5652 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5653 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5654 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5655 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5656 }
5657 
5658 void
5659 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5660 {
5661 	bzero(sp, sizeof(*sp));
5662 
5663 	sp->version = PF_STATE_VERSION;
5664 
5665 	/* copy from state key */
5666 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5667 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5668 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5669 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5670 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5671 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5672 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5673 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5674 	sp->proto = st->key[PF_SK_WIRE]->proto;
5675 	sp->af = st->key[PF_SK_WIRE]->af;
5676 
5677 	/* copy from state */
5678 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5679 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5680 	    sizeof(sp->orig_ifname));
5681 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5682 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5683 	sp->expire = pf_state_expires(st);
5684 	if (sp->expire <= time_uptime)
5685 		sp->expire = htonl(0);
5686 	else
5687 		sp->expire = htonl(sp->expire - time_uptime);
5688 
5689 	sp->direction = st->direction;
5690 	sp->log = st->act.log;
5691 	sp->timeout = st->timeout;
5692 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5693 	sp->state_flags_compat = st->state_flags;
5694 	sp->state_flags = htons(st->state_flags);
5695 	if (st->src_node)
5696 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5697 	if (st->nat_src_node)
5698 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5699 
5700 	sp->id = st->id;
5701 	sp->creatorid = st->creatorid;
5702 	pf_state_peer_hton(&st->src, &sp->src);
5703 	pf_state_peer_hton(&st->dst, &sp->dst);
5704 
5705 	if (st->rule == NULL)
5706 		sp->rule = htonl(-1);
5707 	else
5708 		sp->rule = htonl(st->rule->nr);
5709 	if (st->anchor == NULL)
5710 		sp->anchor = htonl(-1);
5711 	else
5712 		sp->anchor = htonl(st->anchor->nr);
5713 	if (st->nat_rule == NULL)
5714 		sp->nat_rule = htonl(-1);
5715 	else
5716 		sp->nat_rule = htonl(st->nat_rule->nr);
5717 
5718 	sp->packets[0] = st->packets[0];
5719 	sp->packets[1] = st->packets[1];
5720 	sp->bytes[0] = st->bytes[0];
5721 	sp->bytes[1] = st->bytes[1];
5722 
5723 	sp->qid = htons(st->act.qid);
5724 	sp->pqid = htons(st->act.pqid);
5725 	sp->dnpipe = htons(st->act.dnpipe);
5726 	sp->dnrpipe = htons(st->act.dnrpipe);
5727 	sp->rtableid = htonl(st->act.rtableid);
5728 	sp->min_ttl = st->act.min_ttl;
5729 	sp->set_tos = st->act.set_tos;
5730 	sp->max_mss = htons(st->act.max_mss);
5731 	sp->rt = st->rt;
5732 	if (st->rt_kif)
5733 		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5734 		    sizeof(sp->rt_ifname));
5735 	sp->set_prio[0] = st->act.set_prio[0];
5736 	sp->set_prio[1] = st->act.set_prio[1];
5737 
5738 }
5739 
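/*
 * Prepare a table address wrapper for copyout: clear the kernel table
 * pointer and report the number of addresses in the active table
 * instead, or -1 if the table is not active.
 */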
5740 static void
5741 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5742 {
5743 	struct pfr_ktable *kt;
5744 
5745 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5746 
5747 	kt = aw->p.tbl;
5748 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5749 		kt = kt->pfrkt_root;
5750 	aw->p.tbl = NULL;
5751 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5752 		kt->pfrkt_cnt : -1;
5753 }
5754 
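/*
 * Pack an array of counters into a child nvlist holding parallel
 * "counters", "names" and "ids" arrays, and attach it to 'nvl' under
 * the given name.
 */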
5755 static int
5756 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5757     size_t number, char **names)
5758 {
5759 	nvlist_t        *nvc;
5760 
5761 	nvc = nvlist_create(0);
5762 	if (nvc == NULL)
5763 		return (ENOMEM);
5764 
5765 	for (int i = 0; i < number; i++) {
5766 		nvlist_append_number_array(nvc, "counters",
5767 		    counter_u64_fetch(counters[i]));
5768 		nvlist_append_string_array(nvc, "names",
5769 		    names[i]);
5770 		nvlist_append_number_array(nvc, "ids",
5771 		    i);
5772 	}
5773 	nvlist_add_nvlist(nvl, name, nvc);
5774 	nvlist_destroy(nvc);
5775 
5776 	return (0);
5777 }
5778 
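/*
 * Export a snapshot of V_pf_status (plus the per-interface pcounters and
 * bcounters) as a packed nvlist.  If nv->size is 0 only the required
 * length is reported back in nv->len; if the caller's buffer is smaller
 * than the packed nvlist, ENOSPC is returned.
 */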
5779 static int
5780 pf_getstatus(struct pfioc_nv *nv)
5781 {
5782 	nvlist_t        *nvl = NULL, *nvc = NULL;
5783 	void            *nvlpacked = NULL;
5784 	int              error;
5785 	struct pf_status s;
5786 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5787 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5788 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5789 	PF_RULES_RLOCK_TRACKER;
5790 
5791 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5792 
5793 	PF_RULES_RLOCK();
5794 
5795 	nvl = nvlist_create(0);
5796 	if (nvl == NULL)
5797 		ERROUT(ENOMEM);
5798 
5799 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5800 	nvlist_add_number(nvl, "since", V_pf_status.since);
5801 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5802 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5803 	nvlist_add_number(nvl, "states", V_pf_status.states);
5804 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5805 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5806 	nvlist_add_bool(nvl, "syncookies_active",
5807 	    V_pf_status.syncookies_active);
5808 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5809 
5810 	/* counters */
5811 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5812 	    PFRES_MAX, pf_reasons);
5813 	if (error != 0)
5814 		ERROUT(error);
5815 
5816 	/* lcounters */
5817 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5818 	    KLCNT_MAX, pf_lcounter);
5819 	if (error != 0)
5820 		ERROUT(error);
5821 
5822 	/* fcounters */
5823 	nvc = nvlist_create(0);
5824 	if (nvc == NULL)
5825 		ERROUT(ENOMEM);
5826 
5827 	for (int i = 0; i < FCNT_MAX; i++) {
5828 		nvlist_append_number_array(nvc, "counters",
5829 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5830 		nvlist_append_string_array(nvc, "names",
5831 		    pf_fcounter[i]);
5832 		nvlist_append_number_array(nvc, "ids",
5833 		    i);
5834 	}
5835 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5836 	nvlist_destroy(nvc);
5837 	nvc = NULL;
5838 
5839 	/* scounters */
5840 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5841 	    SCNT_MAX, pf_fcounter);
5842 	if (error != 0)
5843 		ERROUT(error);
5844 
5845 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5846 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5847 	    PF_MD5_DIGEST_LENGTH);
5848 
5849 	pfi_update_status(V_pf_status.ifname, &s);
5850 
5851 	/* pcounters / bcounters */
5852 	for (int i = 0; i < 2; i++) {
5853 		for (int j = 0; j < 2; j++) {
5854 			for (int k = 0; k < 2; k++) {
5855 				nvlist_append_number_array(nvl, "pcounters",
5856 				    s.pcounters[i][j][k]);
5857 			}
5858 			nvlist_append_number_array(nvl, "bcounters",
5859 			    s.bcounters[i][j]);
5860 		}
5861 	}
5862 
5863 	nvlpacked = nvlist_pack(nvl, &nv->len);
5864 	if (nvlpacked == NULL)
5865 		ERROUT(ENOMEM);
5866 
5867 	if (nv->size == 0)
5868 		ERROUT(0);
5869 	else if (nv->size < nv->len)
5870 		ERROUT(ENOSPC);
5871 
5872 	PF_RULES_RUNLOCK();
5873 	error = copyout(nvlpacked, nv->data, nv->len);
5874 	goto done;
5875 
5876 #undef ERROUT
5877 errout:
5878 	PF_RULES_RUNLOCK();
5879 done:
5880 	free(nvlpacked, M_NVLIST);
5881 	nvlist_destroy(nvc);
5882 	nvlist_destroy(nvl);
5883 
5884 	return (error);
5885 }
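
/*
 * Illustrative userland usage (a hedged sketch, not part of this file):
 * the handler above expects callers to probe for the required buffer
 * size first.  The DIOCGETSTATUSNV command, the pf_fd descriptor and the
 * libnv calls below are the assumed userland counterparts of this path;
 * a real caller would also retry on ENOSPC.
 *
 *	struct pfioc_nv nv = { .data = NULL, .size = 0 };
 *
 *	if (ioctl(pf_fd, DIOCGETSTATUSNV, &nv) == 0) {
 *		nv.size = nv.len;
 *		nv.data = malloc(nv.len);
 *		if (nv.data != NULL &&
 *		    ioctl(pf_fd, DIOCGETSTATUSNV, &nv) == 0) {
 *			nvlist_t *nvl = nvlist_unpack(nv.data, nv.len, 0);
 *			bool running = nvlist_get_bool(nvl, "running");
 *			nvlist_destroy(nvl);
 *		}
 *		free(nv.data);
 *	}
 */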
5886 
5887 /*
5888  * XXX - Check for version mismatch!!!
5889  */
5890 static void
5891 pf_clear_all_states(void)
5892 {
5893 	struct epoch_tracker	 et;
5894 	struct pf_kstate	*s;
5895 	u_int i;
5896 
5897 	NET_EPOCH_ENTER(et);
5898 	for (i = 0; i <= V_pf_hashmask; i++) {
5899 		struct pf_idhash *ih = &V_pf_idhash[i];
5900 relock:
5901 		PF_HASHROW_LOCK(ih);
5902 		LIST_FOREACH(s, &ih->states, entry) {
5903 			s->timeout = PFTM_PURGE;
5904 			/* Don't send out individual delete messages. */
5905 			s->state_flags |= PFSTATE_NOSYNC;
5906 			pf_unlink_state(s);
5907 			goto relock;
5908 		}
5909 		PF_HASHROW_UNLOCK(ih);
5910 	}
5911 	NET_EPOCH_EXIT(et);
5912 }
5913 
5914 static int
5915 pf_clear_tables(void)
5916 {
5917 	struct pfioc_table io;
5918 	int error;
5919 
5920 	bzero(&io, sizeof(io));
5921 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
5922 
5923 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5924 	    io.pfrio_flags);
5925 
5926 	return (error);
5927 }
5928 
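/*
 * Unlink source nodes matching the src/dst criteria in 'psnk' (all
 * source nodes if psnk is NULL), detach them from any states that still
 * reference them, free them, and report the number killed.
 */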
5929 static void
5930 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5931 {
5932 	struct pf_ksrc_node_list	 kill;
5933 	u_int 				 killed;
5934 
5935 	LIST_INIT(&kill);
5936 	for (int i = 0; i <= V_pf_srchashmask; i++) {
5937 		struct pf_srchash *sh = &V_pf_srchash[i];
5938 		struct pf_ksrc_node *sn, *tmp;
5939 
5940 		PF_HASHROW_LOCK(sh);
5941 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5942 			if (psnk == NULL ||
5943 			    (PF_MATCHA(psnk->psnk_src.neg,
5944 			      &psnk->psnk_src.addr.v.a.addr,
5945 			      &psnk->psnk_src.addr.v.a.mask,
5946 			      &sn->addr, sn->af) &&
5947 			    PF_MATCHA(psnk->psnk_dst.neg,
5948 			      &psnk->psnk_dst.addr.v.a.addr,
5949 			      &psnk->psnk_dst.addr.v.a.mask,
5950 			      &sn->raddr, sn->af))) {
5951 				pf_unlink_src_node(sn);
5952 				LIST_INSERT_HEAD(&kill, sn, entry);
5953 				sn->expire = 1;
5954 			}
5955 		PF_HASHROW_UNLOCK(sh);
5956 	}
5957 
5958 	for (int i = 0; i <= V_pf_hashmask; i++) {
5959 		struct pf_idhash *ih = &V_pf_idhash[i];
5960 		struct pf_kstate *s;
5961 
5962 		PF_HASHROW_LOCK(ih);
5963 		LIST_FOREACH(s, &ih->states, entry) {
5964 			if (s->src_node && s->src_node->expire == 1)
5965 				s->src_node = NULL;
5966 			if (s->nat_src_node && s->nat_src_node->expire == 1)
5967 				s->nat_src_node = NULL;
5968 		}
5969 		PF_HASHROW_UNLOCK(ih);
5970 	}
5971 
5972 	killed = pf_free_src_nodes(&kill);
5973 
5974 	if (psnk != NULL)
5975 		psnk->psnk_killed = killed;
5976 }
5977 
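/*
 * Set V_pf_status.keep_counters from the "keep_counters" boolean in the
 * caller-supplied packed nvlist.
 */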
5978 static int
5979 pf_keepcounters(struct pfioc_nv *nv)
5980 {
5981 	nvlist_t	*nvl = NULL;
5982 	void		*nvlpacked = NULL;
5983 	int		 error = 0;
5984 
5985 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5986 
5987 	if (nv->len > pf_ioctl_maxcount)
5988 		ERROUT(ENOMEM);
5989 
5990 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
5991 	error = copyin(nv->data, nvlpacked, nv->len);
5992 	if (error)
5993 		ERROUT(error);
5994 
5995 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5996 	if (nvl == NULL)
5997 		ERROUT(EBADMSG);
5998 
5999 	if (! nvlist_exists_bool(nvl, "keep_counters"))
6000 		ERROUT(EBADMSG);
6001 
6002 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6003 
6004 on_error:
6005 	nvlist_destroy(nvl);
6006 	free(nvlpacked, M_NVLIST);
6007 	return (error);
6008 }
6009 
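/*
 * Unlink states, optionally restricted to an interface and, with
 * psk_kill_match set, together with states matching the reversed state
 * key.  States are flagged PFSTATE_NOSYNC so that no individual pfsync
 * delete messages are sent; pfsync is notified once at the end instead.
 */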
6010 unsigned int
6011 pf_clear_states(const struct pf_kstate_kill *kill)
6012 {
6013 	struct pf_state_key_cmp	 match_key;
6014 	struct pf_kstate	*s;
6015 	struct pfi_kkif	*kif;
6016 	int		 idx;
6017 	unsigned int	 killed = 0, dir;
6018 
6019 	NET_EPOCH_ASSERT();
6020 
6021 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6022 		struct pf_idhash *ih = &V_pf_idhash[i];
6023 
6024 relock_DIOCCLRSTATES:
6025 		PF_HASHROW_LOCK(ih);
6026 		LIST_FOREACH(s, &ih->states, entry) {
6027 			/* For floating states look at the original kif. */
6028 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6029 
6030 			if (kill->psk_ifname[0] &&
6031 			    strcmp(kill->psk_ifname,
6032 			    kif->pfik_name))
6033 				continue;
6034 
6035 			if (kill->psk_kill_match) {
6036 				bzero(&match_key, sizeof(match_key));
6037 
6038 				if (s->direction == PF_OUT) {
6039 					dir = PF_IN;
6040 					idx = PF_SK_STACK;
6041 				} else {
6042 					dir = PF_OUT;
6043 					idx = PF_SK_WIRE;
6044 				}
6045 
6046 				match_key.af = s->key[idx]->af;
6047 				match_key.proto = s->key[idx]->proto;
6048 				PF_ACPY(&match_key.addr[0],
6049 				    &s->key[idx]->addr[1], match_key.af);
6050 				match_key.port[0] = s->key[idx]->port[1];
6051 				PF_ACPY(&match_key.addr[1],
6052 				    &s->key[idx]->addr[0], match_key.af);
6053 				match_key.port[1] = s->key[idx]->port[0];
6054 			}
6055 
6056 			/*
6057 			 * Don't send out individual
6058 			 * delete messages.
6059 			 */
6060 			s->state_flags |= PFSTATE_NOSYNC;
6061 			pf_unlink_state(s);
6062 			killed++;
6063 
6064 			if (kill->psk_kill_match)
6065 				killed += pf_kill_matching_state(&match_key,
6066 				    dir);
6067 
6068 			goto relock_DIOCCLRSTATES;
6069 		}
6070 		PF_HASHROW_UNLOCK(ih);
6071 	}
6072 
6073 	if (V_pfsync_clear_states_ptr != NULL)
6074 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6075 
6076 	return (killed);
6077 }
6078 
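/*
 * Kill the single state identified by psk_pfcmp (id/creatorid) if one
 * is given; otherwise walk every hash row and kill all states matching
 * the kill criteria.
 */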
6079 void
6080 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6081 {
6082 	struct pf_kstate	*s;
6083 
6084 	NET_EPOCH_ASSERT();
6085 	if (kill->psk_pfcmp.id) {
6086 		if (kill->psk_pfcmp.creatorid == 0)
6087 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6088 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6089 		    kill->psk_pfcmp.creatorid))) {
6090 			pf_unlink_state(s);
6091 			*killed = 1;
6092 		}
6093 		return;
6094 	}
6095 
6096 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6097 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6098 }
6099 
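/*
 * nvlist wrapper around pf_killstates(): unpack the kill request from
 * userland, run it inside a network epoch, and return the number of
 * killed states in a packed nvlist under "killed".
 */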
6100 static int
6101 pf_killstates_nv(struct pfioc_nv *nv)
6102 {
6103 	struct pf_kstate_kill	 kill;
6104 	struct epoch_tracker	 et;
6105 	nvlist_t		*nvl = NULL;
6106 	void			*nvlpacked = NULL;
6107 	int			 error = 0;
6108 	unsigned int		 killed = 0;
6109 
6110 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6111 
6112 	if (nv->len > pf_ioctl_maxcount)
6113 		ERROUT(ENOMEM);
6114 
6115 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6116 	error = copyin(nv->data, nvlpacked, nv->len);
6117 	if (error)
6118 		ERROUT(error);
6119 
6120 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6121 	if (nvl == NULL)
6122 		ERROUT(EBADMSG);
6123 
6124 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6125 	if (error)
6126 		ERROUT(error);
6127 
6128 	NET_EPOCH_ENTER(et);
6129 	pf_killstates(&kill, &killed);
6130 	NET_EPOCH_EXIT(et);
6131 
6132 	free(nvlpacked, M_NVLIST);
6133 	nvlpacked = NULL;
6134 	nvlist_destroy(nvl);
6135 	nvl = nvlist_create(0);
6136 	if (nvl == NULL)
6137 		ERROUT(ENOMEM);
6138 
6139 	nvlist_add_number(nvl, "killed", killed);
6140 
6141 	nvlpacked = nvlist_pack(nvl, &nv->len);
6142 	if (nvlpacked == NULL)
6143 		ERROUT(ENOMEM);
6144 
6145 	if (nv->size == 0)
6146 		ERROUT(0);
6147 	else if (nv->size < nv->len)
6148 		ERROUT(ENOSPC);
6149 
6150 	error = copyout(nvlpacked, nv->data, nv->len);
6151 
6152 on_error:
6153 	nvlist_destroy(nvl);
6154 	free(nvlpacked, M_NVLIST);
6155 	return (error);
6156 }
6157 
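/*
 * nvlist wrapper around pf_clear_states(); same request/response
 * convention as pf_killstates_nv() above.
 */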
6158 static int
6159 pf_clearstates_nv(struct pfioc_nv *nv)
6160 {
6161 	struct pf_kstate_kill	 kill;
6162 	struct epoch_tracker	 et;
6163 	nvlist_t		*nvl = NULL;
6164 	void			*nvlpacked = NULL;
6165 	int			 error = 0;
6166 	unsigned int		 killed;
6167 
6168 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6169 
6170 	if (nv->len > pf_ioctl_maxcount)
6171 		ERROUT(ENOMEM);
6172 
6173 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6174 	error = copyin(nv->data, nvlpacked, nv->len);
6175 	if (error)
6176 		ERROUT(error);
6177 
6178 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6179 	if (nvl == NULL)
6180 		ERROUT(EBADMSG);
6181 
6182 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6183 	if (error)
6184 		ERROUT(error);
6185 
6186 	NET_EPOCH_ENTER(et);
6187 	killed = pf_clear_states(&kill);
6188 	NET_EPOCH_EXIT(et);
6189 
6190 	free(nvlpacked, M_NVLIST);
6191 	nvlpacked = NULL;
6192 	nvlist_destroy(nvl);
6193 	nvl = nvlist_create(0);
6194 	if (nvl == NULL)
6195 		ERROUT(ENOMEM);
6196 
6197 	nvlist_add_number(nvl, "killed", killed);
6198 
6199 	nvlpacked = nvlist_pack(nvl, &nv->len);
6200 	if (nvlpacked == NULL)
6201 		ERROUT(ENOMEM);
6202 
6203 	if (nv->size == 0)
6204 		ERROUT(0);
6205 	else if (nv->size < nv->len)
6206 		ERROUT(ENOSPC);
6207 
6208 	error = copyout(nvlpacked, nv->data, nv->len);
6209 
6210 #undef ERROUT
6211 on_error:
6212 	nvlist_destroy(nvl);
6213 	free(nvlpacked, M_NVLIST);
6214 	return (error);
6215 }
6216 
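/*
 * Look up a single state by the "id"/"creatorid" pair from the request
 * nvlist and return it as a packed nvlist under "state", using the same
 * size-probe convention as the other nvlist handlers.
 */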
6217 static int
6218 pf_getstate(struct pfioc_nv *nv)
6219 {
6220 	nvlist_t		*nvl = NULL, *nvls;
6221 	void			*nvlpacked = NULL;
6222 	struct pf_kstate	*s = NULL;
6223 	int			 error = 0;
6224 	uint64_t		 id, creatorid;
6225 
6226 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6227 
6228 	if (nv->len > pf_ioctl_maxcount)
6229 		ERROUT(ENOMEM);
6230 
6231 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6232 	error = copyin(nv->data, nvlpacked, nv->len);
6233 	if (error)
6234 		ERROUT(error);
6235 
6236 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6237 	if (nvl == NULL)
6238 		ERROUT(EBADMSG);
6239 
6240 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6241 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6242 
6243 	s = pf_find_state_byid(id, creatorid);
6244 	if (s == NULL)
6245 		ERROUT(ENOENT);
6246 
6247 	free(nvlpacked, M_NVLIST);
6248 	nvlpacked = NULL;
6249 	nvlist_destroy(nvl);
6250 	nvl = nvlist_create(0);
6251 	if (nvl == NULL)
6252 		ERROUT(ENOMEM);
6253 
6254 	nvls = pf_state_to_nvstate(s);
6255 	if (nvls == NULL)
6256 		ERROUT(ENOMEM);
6257 
6258 	nvlist_add_nvlist(nvl, "state", nvls);
6259 	nvlist_destroy(nvls);
6260 
6261 	nvlpacked = nvlist_pack(nvl, &nv->len);
6262 	if (nvlpacked == NULL)
6263 		ERROUT(ENOMEM);
6264 
6265 	if (nv->size == 0)
6266 		ERROUT(0);
6267 	else if (nv->size < nv->len)
6268 		ERROUT(ENOSPC);
6269 
6270 	error = copyout(nvlpacked, nv->data, nv->len);
6271 
6272 #undef ERROUT
6273 errout:
6274 	if (s != NULL)
6275 		PF_STATE_UNLOCK(s);
6276 	free(nvlpacked, M_NVLIST);
6277 	nvlist_destroy(nvl);
6278 	return (error);
6279 }
6280 
6281 /*
6282  * XXX - Check for version mismatch!!!
6283  */
6284 
6285 /*
6286  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6287  */
6288 static int
6289 shutdown_pf(void)
6290 {
6291 	int error = 0;
6292 	u_int32_t t[5];
6293 	char nn = '\0';
6294 	struct pf_kanchor *anchor;
6295 	struct pf_keth_anchor *eth_anchor;
6296 	int rs_num;
6297 
6298 	do {
6299 		/* Unlink rules of all user defined anchors */
6300 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6301 			/* Wildcard based anchors may not have a respective
6302 			/* Wildcard-based anchors may not have a corresponding
6303 			 * explicit anchor rule, or they may have been left empty
6304 			 * (without rules).  Either way anchor->refcnt ends up 0,
6305 			 * which the rest of the logic does not expect. */
6306 				anchor->refcnt = 1;
6307 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6308 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6309 				    anchor->path)) != 0) {
6310 					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
6311 					    "anchor.path=%s rs_num=%d\n",
6312 					    anchor->path, rs_num));
6313 					goto error;	/* XXX: rollback? */
6314 				}
6315 			}
6316 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6317 				error = pf_commit_rules(t[rs_num], rs_num,
6318 				    anchor->path);
6319 				MPASS(error == 0);
6320 			}
6321 		}
6322 
6323 		/* Unlink rules of all user defined ether anchors */
6324 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6325 		    &V_pf_keth_anchors) {
6326 			/* Wildcard-based anchors may not have a corresponding
6327 			 * explicit anchor rule, or they may have been left empty
6328 			 * (without rules).  Either way eth_anchor->refcnt ends up 0,
6329 			 * which the rest of the logic does not expect. */
6330 			if (eth_anchor->refcnt == 0)
6331 				eth_anchor->refcnt = 1;
6332 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6333 			    != 0) {
6334 				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
6335 				    "anchor.path=%s\n", eth_anchor->path));
6336 				goto error;
6337 			}
6338 			error = pf_commit_eth(t[0], eth_anchor->path);
6339 			MPASS(error == 0);
6340 		}
6341 
6342 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6343 		    != 0) {
6344 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6345 			break;
6346 		}
6347 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6348 		    != 0) {
6349 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6350 			break;		/* XXX: rollback? */
6351 		}
6352 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6353 		    != 0) {
6354 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6355 			break;		/* XXX: rollback? */
6356 		}
6357 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6358 		    != 0) {
6359 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6360 			break;		/* XXX: rollback? */
6361 		}
6362 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6363 		    != 0) {
6364 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6365 			break;		/* XXX: rollback? */
6366 		}
6367 
6368 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6369 		MPASS(error == 0);
6370 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6371 		MPASS(error == 0);
6372 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6373 		MPASS(error == 0);
6374 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6375 		MPASS(error == 0);
6376 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6377 		MPASS(error == 0);
6378 
6379 		if ((error = pf_clear_tables()) != 0)
6380 			break;
6381 
6382 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6383 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6384 			break;
6385 		}
6386 		error = pf_commit_eth(t[0], &nn);
6387 		MPASS(error == 0);
6388 
6389 #ifdef ALTQ
6390 		if ((error = pf_begin_altq(&t[0])) != 0) {
6391 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6392 			break;
6393 		}
6394 		pf_commit_altq(t[0]);
6395 #endif
6396 
6397 		pf_clear_all_states();
6398 
6399 		pf_kill_srcnodes(NULL);
6400 
6401 		/* status does not use malloc'ed memory, so no cleanup is needed */
6402 		/* fingerprints and interfaces have their own cleanup code */
6403 	} while(0);
6404 
6405 error:
6406 	return (error);
6407 }
6408 
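/*
 * Translate a pf_test*() verdict into a pfil(9) return value: PF_PASS
 * maps to PFIL_PASS (or PFIL_CONSUMED when pf took ownership of the
 * mbuf); anything else frees the mbuf and maps to PFIL_DROPPED.
 */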
6409 static pfil_return_t
6410 pf_check_return(int chk, struct mbuf **m)
6411 {
6412 
6413 	switch (chk) {
6414 	case PF_PASS:
6415 		if (*m == NULL)
6416 			return (PFIL_CONSUMED);
6417 		else
6418 			return (PFIL_PASS);
6419 		break;
6420 	default:
6421 		if (*m != NULL) {
6422 			m_freem(*m);
6423 			*m = NULL;
6424 		}
6425 		return (PFIL_DROPPED);
6426 	}
6427 }
6428 
6429 static pfil_return_t
6430 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6431     void *ruleset __unused, struct inpcb *inp)
6432 {
6433 	int chk;
6434 
6435 	CURVNET_ASSERT_SET();
6436 
6437 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6438 
6439 	return (pf_check_return(chk, m));
6440 }
6441 
6442 static pfil_return_t
6443 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6444     void *ruleset __unused, struct inpcb *inp)
6445 {
6446 	int chk;
6447 
6448 	CURVNET_ASSERT_SET();
6449 
6450 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6451 
6452 	return (pf_check_return(chk, m));
6453 }
6454 
6455 #ifdef INET
6456 static pfil_return_t
6457 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6458     void *ruleset __unused, struct inpcb *inp)
6459 {
6460 	int chk;
6461 
6462 	CURVNET_ASSERT_SET();
6463 
6464 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6465 
6466 	return (pf_check_return(chk, m));
6467 }
6468 
6469 static pfil_return_t
6470 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6471     void *ruleset __unused,  struct inpcb *inp)
6472 {
6473 	int chk;
6474 
6475 	CURVNET_ASSERT_SET();
6476 
6477 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6478 
6479 	return (pf_check_return(chk, m));
6480 }
6481 #endif
6482 
6483 #ifdef INET6
6484 static pfil_return_t
6485 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6486     void *ruleset __unused,  struct inpcb *inp)
6487 {
6488 	int chk;
6489 
6490 	CURVNET_ASSERT_SET();
6491 
6492 	/*
6493 	 * For loopback traffic IPv6 uses the real interface in order to
6494 	 * support scoped addresses.  To support stateful filtering we
6495 	 * change this to lo0 here, as is already the case for IPv4.
6496 	 */
6497 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6498 	    m, inp, NULL);
6499 
6500 	return (pf_check_return(chk, m));
6501 }
6502 
6503 static pfil_return_t
6504 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6505     void *ruleset __unused,  struct inpcb *inp)
6506 {
6507 	int chk;
6508 
6509 	CURVNET_ASSERT_SET();
6510 
6511 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6512 
6513 	return (pf_check_return(chk, m));
6514 }
6515 #endif /* INET6 */
6516 
6517 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6518 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6519 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6520 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6521 
6522 #ifdef INET
6523 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6524 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6525 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6526 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6527 #endif
6528 #ifdef INET6
6529 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6530 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6531 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6532 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6533 #endif
6534 
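/*
 * Register the Ethernet-layer pf hooks and link them into the link-layer
 * pfil head, unless this vnet is already hooked.
 */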
6535 static void
6536 hook_pf_eth(void)
6537 {
6538 	struct pfil_hook_args pha = {
6539 		.pa_version = PFIL_VERSION,
6540 		.pa_modname = "pf",
6541 		.pa_type = PFIL_TYPE_ETHERNET,
6542 	};
6543 	struct pfil_link_args pla = {
6544 		.pa_version = PFIL_VERSION,
6545 	};
6546 	int ret __diagused;
6547 
6548 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6549 		return;
6550 
6551 	pha.pa_mbuf_chk = pf_eth_check_in;
6552 	pha.pa_flags = PFIL_IN;
6553 	pha.pa_rulname = "eth-in";
6554 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6555 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6556 	pla.pa_head = V_link_pfil_head;
6557 	pla.pa_hook = V_pf_eth_in_hook;
6558 	ret = pfil_link(&pla);
6559 	MPASS(ret == 0);
6560 	pha.pa_mbuf_chk = pf_eth_check_out;
6561 	pha.pa_flags = PFIL_OUT;
6562 	pha.pa_rulname = "eth-out";
6563 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6564 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6565 	pla.pa_head = V_link_pfil_head;
6566 	pla.pa_hook = V_pf_eth_out_hook;
6567 	ret = pfil_link(&pla);
6568 	MPASS(ret == 0);
6569 
6570 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6571 }
6572 
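/*
 * Register the IPv4/IPv6 pf hooks and link them into the inet/inet6
 * pfil heads.  When V_pf_filter_local is set, the output hooks are also
 * linked into the corresponding "local" pfil heads.
 */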
6573 static void
6574 hook_pf(void)
6575 {
6576 	struct pfil_hook_args pha = {
6577 		.pa_version = PFIL_VERSION,
6578 		.pa_modname = "pf",
6579 	};
6580 	struct pfil_link_args pla = {
6581 		.pa_version = PFIL_VERSION,
6582 	};
6583 	int ret __diagused;
6584 
6585 	if (atomic_load_bool(&V_pf_pfil_hooked))
6586 		return;
6587 
6588 #ifdef INET
6589 	pha.pa_type = PFIL_TYPE_IP4;
6590 	pha.pa_mbuf_chk = pf_check_in;
6591 	pha.pa_flags = PFIL_IN;
6592 	pha.pa_rulname = "default-in";
6593 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6594 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6595 	pla.pa_head = V_inet_pfil_head;
6596 	pla.pa_hook = V_pf_ip4_in_hook;
6597 	ret = pfil_link(&pla);
6598 	MPASS(ret == 0);
6599 	pha.pa_mbuf_chk = pf_check_out;
6600 	pha.pa_flags = PFIL_OUT;
6601 	pha.pa_rulname = "default-out";
6602 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6603 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6604 	pla.pa_head = V_inet_pfil_head;
6605 	pla.pa_hook = V_pf_ip4_out_hook;
6606 	ret = pfil_link(&pla);
6607 	MPASS(ret == 0);
6608 	if (V_pf_filter_local) {
6609 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6610 		pla.pa_head = V_inet_local_pfil_head;
6611 		pla.pa_hook = V_pf_ip4_out_hook;
6612 		ret = pfil_link(&pla);
6613 		MPASS(ret == 0);
6614 	}
6615 #endif
6616 #ifdef INET6
6617 	pha.pa_type = PFIL_TYPE_IP6;
6618 	pha.pa_mbuf_chk = pf_check6_in;
6619 	pha.pa_flags = PFIL_IN;
6620 	pha.pa_rulname = "default-in6";
6621 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6622 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6623 	pla.pa_head = V_inet6_pfil_head;
6624 	pla.pa_hook = V_pf_ip6_in_hook;
6625 	ret = pfil_link(&pla);
6626 	MPASS(ret == 0);
6627 	pha.pa_mbuf_chk = pf_check6_out;
6628 	pha.pa_rulname = "default-out6";
6629 	pha.pa_flags = PFIL_OUT;
6630 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6631 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6632 	pla.pa_head = V_inet6_pfil_head;
6633 	pla.pa_hook = V_pf_ip6_out_hook;
6634 	ret = pfil_link(&pla);
6635 	MPASS(ret == 0);
6636 	if (V_pf_filter_local) {
6637 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6638 		pla.pa_head = V_inet6_local_pfil_head;
6639 		pla.pa_hook = V_pf_ip6_out_hook;
6640 		ret = pfil_link(&pla);
6641 		MPASS(ret == 0);
6642 	}
6643 #endif
6644 
6645 	atomic_store_bool(&V_pf_pfil_hooked, true);
6646 }
6647 
6648 static void
6649 dehook_pf_eth(void)
6650 {
6651 
6652 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6653 		return;
6654 
6655 	pfil_remove_hook(V_pf_eth_in_hook);
6656 	pfil_remove_hook(V_pf_eth_out_hook);
6657 
6658 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6659 }
6660 
6661 static void
6662 dehook_pf(void)
6663 {
6664 
6665 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6666 		return;
6667 
6668 #ifdef INET
6669 	pfil_remove_hook(V_pf_ip4_in_hook);
6670 	pfil_remove_hook(V_pf_ip4_out_hook);
6671 #endif
6672 #ifdef INET6
6673 	pfil_remove_hook(V_pf_ip6_in_hook);
6674 	pfil_remove_hook(V_pf_ip6_out_hook);
6675 #endif
6676 
6677 	atomic_store_bool(&V_pf_pfil_hooked, false);
6678 }
6679 
6680 static void
6681 pf_load_vnet(void)
6682 {
6683 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6684 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6685 
6686 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6687 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6688 
6689 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6690 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6691 #ifdef ALTQ
6692 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6693 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6694 #endif
6695 
6696 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6697 
6698 	pfattach_vnet();
6699 	V_pf_vnet_active = 1;
6700 }
6701 
6702 static int
6703 pf_load(void)
6704 {
6705 	int error;
6706 
6707 	sx_init(&pf_end_lock, "pf end thread");
6708 
6709 	pf_mtag_initialize();
6710 
6711 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6712 	if (pf_dev == NULL)
6713 		return (ENOMEM);
6714 
6715 	pf_end_threads = 0;
6716 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6717 	if (error != 0)
6718 		return (error);
6719 
6720 	pfi_initialize();
6721 
6722 	return (0);
6723 }
6724 
6725 static void
6726 pf_unload_vnet(void)
6727 {
6728 	int ret __diagused;
6729 
6730 	V_pf_vnet_active = 0;
6731 	V_pf_status.running = 0;
6732 	dehook_pf();
6733 	dehook_pf_eth();
6734 
6735 	PF_RULES_WLOCK();
6736 	pf_syncookies_cleanup();
6737 	shutdown_pf();
6738 	PF_RULES_WUNLOCK();
6739 
6740 	/* Make sure we've cleaned up ethernet rules before we continue. */
6741 	NET_EPOCH_DRAIN_CALLBACKS();
6742 
6743 	ret = swi_remove(V_pf_swi_cookie);
6744 	MPASS(ret == 0);
6745 	ret = intr_event_destroy(V_pf_swi_ie);
6746 	MPASS(ret == 0);
6747 
6748 	pf_unload_vnet_purge();
6749 
6750 	pf_normalize_cleanup();
6751 	PF_RULES_WLOCK();
6752 	pfi_cleanup_vnet();
6753 	PF_RULES_WUNLOCK();
6754 	pfr_cleanup();
6755 	pf_osfp_flush();
6756 	pf_cleanup();
6757 	if (IS_DEFAULT_VNET(curvnet))
6758 		pf_mtag_cleanup();
6759 
6760 	pf_cleanup_tagset(&V_pf_tags);
6761 #ifdef ALTQ
6762 	pf_cleanup_tagset(&V_pf_qids);
6763 #endif
6764 	uma_zdestroy(V_pf_tag_z);
6765 
6766 #ifdef PF_WANT_32_TO_64_COUNTER
6767 	PF_RULES_WLOCK();
6768 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6769 
6770 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6771 	MPASS(V_pf_allkifcount == 0);
6772 
6773 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6774 	V_pf_allrulecount--;
6775 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6776 
6777 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6778 	MPASS(V_pf_allrulecount == 0);
6779 
6780 	PF_RULES_WUNLOCK();
6781 
6782 	free(V_pf_kifmarker, PFI_MTYPE);
6783 	free(V_pf_rulemarker, M_PFRULE);
6784 #endif
6785 
6786 	/* Free counters last as we updated them during shutdown. */
6787 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6788 	for (int i = 0; i < 2; i++) {
6789 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6790 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6791 	}
6792 	counter_u64_free(V_pf_default_rule.states_cur);
6793 	counter_u64_free(V_pf_default_rule.states_tot);
6794 	counter_u64_free(V_pf_default_rule.src_nodes);
6795 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6796 
6797 	for (int i = 0; i < PFRES_MAX; i++)
6798 		counter_u64_free(V_pf_status.counters[i]);
6799 	for (int i = 0; i < KLCNT_MAX; i++)
6800 		counter_u64_free(V_pf_status.lcounters[i]);
6801 	for (int i = 0; i < FCNT_MAX; i++)
6802 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6803 	for (int i = 0; i < SCNT_MAX; i++)
6804 		counter_u64_free(V_pf_status.scounters[i]);
6805 
6806 	rm_destroy(&V_pf_rules_lock);
6807 	sx_destroy(&V_pf_ioctl_lock);
6808 }
6809 
6810 static void
6811 pf_unload(void)
6812 {
6813 
6814 	sx_xlock(&pf_end_lock);
6815 	pf_end_threads = 1;
6816 	while (pf_end_threads < 2) {
6817 		wakeup_one(pf_purge_thread);
6818 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6819 	}
6820 	sx_xunlock(&pf_end_lock);
6821 
6822 	pf_nl_unregister();
6823 
6824 	if (pf_dev != NULL)
6825 		destroy_dev(pf_dev);
6826 
6827 	pfi_cleanup();
6828 
6829 	sx_destroy(&pf_end_lock);
6830 }
6831 
6832 static void
6833 vnet_pf_init(void *unused __unused)
6834 {
6835 
6836 	pf_load_vnet();
6837 }
6838 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6839     vnet_pf_init, NULL);
6840 
6841 static void
6842 vnet_pf_uninit(const void *unused __unused)
6843 {
6844 
6845 	pf_unload_vnet();
6846 }
6847 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6848 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6849     vnet_pf_uninit, NULL);
6850 
6851 static int
6852 pf_modevent(module_t mod, int type, void *data)
6853 {
6854 	int error = 0;
6855 
6856 	switch(type) {
6857 	case MOD_LOAD:
6858 		error = pf_load();
6859 		pf_nl_register();
6860 		break;
6861 	case MOD_UNLOAD:
6862 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6863 		 * the vnet_pf_uninit()s */
6864 		break;
6865 	default:
6866 		error = EINVAL;
6867 		break;
6868 	}
6869 
6870 	return (error);
6871 }
6872 
6873 static moduledata_t pf_mod = {
6874 	"pf",
6875 	pf_modevent,
6876 	0
6877 };
6878 
6879 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6880 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6881 MODULE_VERSION(pf, PF_MODVER);
6882