1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32  */
33 
34 #include <sys/cdefs.h>
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/mutex.h>
44 #include <sys/refcount.h>
45 #include <sys/socket.h>
46 #include <vm/uma.h>
47 
48 #include <net/if.h>
49 #include <net/vnet.h>
50 #include <net/pfvar.h>
51 
52 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
53 
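/*
 * Note that ACCEPT_FLAGS() makes the calling function return EINVAL when it
 * is passed any flag outside 'oklist' (restricted to PFR_FLAG_ALLMASK).
 */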
54 #define	ACCEPT_FLAGS(flags, oklist)		\
55 	do {					\
56 		if ((flags & ~(oklist)) &	\
57 		    PFR_FLAG_ALLMASK)		\
58 			return (EINVAL);	\
59 	} while (0)
60 
61 #define	FILLIN_SIN(sin, addr)			\
62 	do {					\
63 		(sin).sin_len = sizeof(sin);	\
64 		(sin).sin_family = AF_INET;	\
65 		(sin).sin_addr = (addr);	\
66 	} while (0)
67 
68 #define	FILLIN_SIN6(sin6, addr)			\
69 	do {					\
70 		(sin6).sin6_len = sizeof(sin6);	\
71 		(sin6).sin6_family = AF_INET6;	\
72 		(sin6).sin6_addr = (addr);	\
73 	} while (0)
74 
75 #define	SWAP(type, a1, a2)			\
76 	do {					\
77 		type tmp = a1;			\
78 		a1 = a2;			\
79 		a2 = tmp;			\
80 	} while (0)
81 
82 #define	AF_BITS(af)		(((af)==AF_INET)?32:128)
83 #define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
84 #define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
85 #define	KENTRY_RNF_ROOT(ke) \
86 		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
87 
88 #define	NO_ADDRESSES		(-1)
89 #define	ENQUEUE_UNMARKED_ONLY	(1)
90 #define	INVERT_NEG_FLAG		(1)
91 
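/*
 * Walker argument for pfr_walktree(): pfrw_op selects the operation and the
 * anonymous union carries the matching in/out pointer; pfrw_free counts
 * remaining room (or collected entries) and pfrw_flags holds table flags.
 */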
92 struct pfr_walktree {
93 	enum pfrw_op {
94 		PFRW_MARK,
95 		PFRW_SWEEP,
96 		PFRW_ENQUEUE,
97 		PFRW_GET_ADDRS,
98 		PFRW_GET_ASTATS,
99 		PFRW_POOL_GET,
100 		PFRW_DYNADDR_UPDATE,
101 		PFRW_COUNTERS
102 	}	 pfrw_op;
103 	union {
104 		struct pfr_addr		*pfrw_addr;
105 		struct pfr_astats	*pfrw_astats;
106 		struct pfr_kentryworkq	*pfrw_workq;
107 		struct pfr_kentry	*pfrw_kentry;
108 		struct pfi_dynaddr	*pfrw_dyn;
109 	};
110 	int	 pfrw_free;
111 	int	 pfrw_flags;
112 };
113 
114 #define	senderr(e)	do { rv = (e); goto _bad; } while (0)
115 
116 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
117 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
118 #define	V_pfr_kentry_z		VNET(pfr_kentry_z)
119 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
120 #define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)
121 
122 static struct pf_addr	 pfr_ffaddr = {
123 	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
124 };
125 
126 static void		 pfr_copyout_astats(struct pfr_astats *,
127 			    const struct pfr_kentry *,
128 			    const struct pfr_walktree *);
129 static void		 pfr_copyout_addr(struct pfr_addr *,
130 			    const struct pfr_kentry *ke);
131 static int		 pfr_validate_addr(struct pfr_addr *);
132 static void		 pfr_enqueue_addrs(struct pfr_ktable *,
133 			    struct pfr_kentryworkq *, int *, int);
134 static void		 pfr_mark_addrs(struct pfr_ktable *);
135 static struct pfr_kentry
136 			*pfr_lookup_addr(struct pfr_ktable *,
137 			    struct pfr_addr *, int);
138 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
139 static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
140 static void		 pfr_destroy_kentry(struct pfr_kentry *);
141 static void		 pfr_insert_kentries(struct pfr_ktable *,
142 			    struct pfr_kentryworkq *, long);
143 static void		 pfr_remove_kentries(struct pfr_ktable *,
144 			    struct pfr_kentryworkq *);
145 static void		 pfr_clstats_kentries(struct pfr_ktable *,
146 			    struct pfr_kentryworkq *, long, int);
147 static void		 pfr_reset_feedback(struct pfr_addr *, int);
148 static void		 pfr_prepare_network(union sockaddr_union *, int, int);
149 static int		 pfr_route_kentry(struct pfr_ktable *,
150 			    struct pfr_kentry *);
151 static int		 pfr_unroute_kentry(struct pfr_ktable *,
152 			    struct pfr_kentry *);
153 static int		 pfr_walktree(struct radix_node *, void *);
154 static int		 pfr_validate_table(struct pfr_table *, int, int);
155 static int		 pfr_fix_anchor(char *);
156 static void		 pfr_commit_ktable(struct pfr_ktable *, long);
157 static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
158 static void		 pfr_insert_ktable(struct pfr_ktable *);
159 static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
160 static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
161 static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
162 			    int);
163 static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
164 static struct pfr_ktable
165 			*pfr_create_ktable(struct pfr_table *, long, int);
166 static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
167 static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
168 static int		 pfr_ktable_compare(struct pfr_ktable *,
169 			    struct pfr_ktable *);
170 static struct pfr_ktable
171 			*pfr_lookup_table(struct pfr_table *);
172 static void		 pfr_clean_node_mask(struct pfr_ktable *,
173 			    struct pfr_kentryworkq *);
174 static int		 pfr_skip_table(struct pfr_table *,
175 			    struct pfr_ktable *, int);
176 static struct pfr_kentry
177 			*pfr_kentry_byidx(struct pfr_ktable *, int, int);
178 
179 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
180 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
181 
182 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
183 #define	V_pfr_ktables	VNET(pfr_ktables)
184 
185 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
186 #define	V_pfr_nulltable	VNET(pfr_nulltable)
187 
188 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
189 #define V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)
190 
191 void
192 pfr_initialize(void)
193 {
194 
195 	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
196 	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
197 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
198 	V_pfr_kentry_z = uma_zcreate("pf table entries",
199 	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
200 	    0);
201 	uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
202 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
203 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
204 }
205 
206 void
207 pfr_cleanup(void)
208 {
209 
210 	uma_zdestroy(V_pfr_kentry_z);
211 	uma_zdestroy(V_pfr_kentry_counter_z);
212 }
213 
214 int
215 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
216 {
217 	struct pfr_ktable	*kt;
218 	struct pfr_kentryworkq	 workq;
219 
220 	PF_RULES_WASSERT();
221 
222 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
223 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
224 		return (EINVAL);
225 	kt = pfr_lookup_table(tbl);
226 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
227 		return (ESRCH);
228 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
229 		return (EPERM);
230 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
231 
232 	if (!(flags & PFR_FLAG_DUMMY)) {
233 		pfr_remove_kentries(kt, &workq);
234 		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
235 	}
236 	return (0);
237 }
238 
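/*
 * Add addresses to an active table.  A throwaway table (tmpkt) is used to
 * detect duplicates within the request itself; PFR_FLAG_FEEDBACK returns a
 * per-address result code and PFR_FLAG_DUMMY makes this a dry run.  This is
 * typically reached via the DIOCRADDADDRS table ioctl.
 */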
239 int
240 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
241     int *nadd, int flags)
242 {
243 	struct pfr_ktable	*kt, *tmpkt;
244 	struct pfr_kentryworkq	 workq;
245 	struct pfr_kentry	*p, *q;
246 	struct pfr_addr		*ad;
247 	int			 i, rv, xadd = 0;
248 	long			 tzero = time_second;
249 
250 	PF_RULES_WASSERT();
251 
252 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
253 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
254 		return (EINVAL);
255 	kt = pfr_lookup_table(tbl);
256 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
257 		return (ESRCH);
258 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
259 		return (EPERM);
260 	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
261 	if (tmpkt == NULL)
262 		return (ENOMEM);
263 	SLIST_INIT(&workq);
264 	for (i = 0, ad = addr; i < size; i++, ad++) {
265 		if (pfr_validate_addr(ad))
266 			senderr(EINVAL);
267 		p = pfr_lookup_addr(kt, ad, 1);
268 		q = pfr_lookup_addr(tmpkt, ad, 1);
269 		if (flags & PFR_FLAG_FEEDBACK) {
270 			if (q != NULL)
271 				ad->pfra_fback = PFR_FB_DUPLICATE;
272 			else if (p == NULL)
273 				ad->pfra_fback = PFR_FB_ADDED;
274 			else if (p->pfrke_not != ad->pfra_not)
275 				ad->pfra_fback = PFR_FB_CONFLICT;
276 			else
277 				ad->pfra_fback = PFR_FB_NONE;
278 		}
279 		if (p == NULL && q == NULL) {
280 			p = pfr_create_kentry(ad,
281 			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
282 			if (p == NULL)
283 				senderr(ENOMEM);
284 			if (pfr_route_kentry(tmpkt, p)) {
285 				pfr_destroy_kentry(p);
286 				ad->pfra_fback = PFR_FB_NONE;
287 			} else {
288 				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
289 				xadd++;
290 			}
291 		}
292 	}
293 	pfr_clean_node_mask(tmpkt, &workq);
294 	if (!(flags & PFR_FLAG_DUMMY))
295 		pfr_insert_kentries(kt, &workq, tzero);
296 	else
297 		pfr_destroy_kentries(&workq);
298 	if (nadd != NULL)
299 		*nadd = xadd;
300 	pfr_destroy_ktable(tmpkt, 0);
301 	return (0);
302 _bad:
303 	pfr_clean_node_mask(tmpkt, &workq);
304 	pfr_destroy_kentries(&workq);
305 	if (flags & PFR_FLAG_FEEDBACK)
306 		pfr_reset_feedback(addr, size);
307 	pfr_destroy_ktable(tmpkt, 0);
308 	return (rv);
309 }
310 
311 int
312 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
313     int *ndel, int flags)
314 {
315 	struct pfr_ktable	*kt;
316 	struct pfr_kentryworkq	 workq;
317 	struct pfr_kentry	*p;
318 	struct pfr_addr		*ad;
319 	int			 i, rv, xdel = 0, log = 1;
320 
321 	PF_RULES_WASSERT();
322 
323 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
324 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
325 		return (EINVAL);
326 	kt = pfr_lookup_table(tbl);
327 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
328 		return (ESRCH);
329 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
330 		return (EPERM);
331 	/*
332 	 * There are two algorithms to choose from here.
333 	 * With:
334 	 *   n: number of addresses to delete
335 	 *   N: number of addresses in the table
336 	 *
337 	 * one is O(N) and is better for large 'n';
338 	 * the other is O(n*log(N)) and is better for small 'n'.
339 	 *
340 	 * The following code tries to decide which one is best.
341 	 */
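	/*
	 * For example, with 1024 entries in the table the loop below leaves
	 * log at 12, so deleting more than 1024/12 = 85 addresses in a
	 * single call switches to the full-table mark-and-sweep.
	 */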
342 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
343 		log++;
344 	if (size > kt->pfrkt_cnt/log) {
345 		/* full table scan */
346 		pfr_mark_addrs(kt);
347 	} else {
348 		/* iterate over addresses to delete */
349 		for (i = 0, ad = addr; i < size; i++, ad++) {
350 			if (pfr_validate_addr(ad))
351 				return (EINVAL);
352 			p = pfr_lookup_addr(kt, ad, 1);
353 			if (p != NULL)
354 				p->pfrke_mark = 0;
355 		}
356 	}
357 	SLIST_INIT(&workq);
358 	for (i = 0, ad = addr; i < size; i++, ad++) {
359 		if (pfr_validate_addr(ad))
360 			senderr(EINVAL);
361 		p = pfr_lookup_addr(kt, ad, 1);
362 		if (flags & PFR_FLAG_FEEDBACK) {
363 			if (p == NULL)
364 				ad->pfra_fback = PFR_FB_NONE;
365 			else if (p->pfrke_not != ad->pfra_not)
366 				ad->pfra_fback = PFR_FB_CONFLICT;
367 			else if (p->pfrke_mark)
368 				ad->pfra_fback = PFR_FB_DUPLICATE;
369 			else
370 				ad->pfra_fback = PFR_FB_DELETED;
371 		}
372 		if (p != NULL && p->pfrke_not == ad->pfra_not &&
373 		    !p->pfrke_mark) {
374 			p->pfrke_mark = 1;
375 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
376 			xdel++;
377 		}
378 	}
379 	if (!(flags & PFR_FLAG_DUMMY))
380 		pfr_remove_kentries(kt, &workq);
381 	if (ndel != NULL)
382 		*ndel = xdel;
383 	return (0);
384 _bad:
385 	if (flags & PFR_FLAG_FEEDBACK)
386 		pfr_reset_feedback(addr, size);
387 	return (rv);
388 }
389 
390 int
391 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
392     int *size2, int *nadd, int *ndel, int *nchange, int flags,
393     u_int32_t ignore_pfrt_flags)
394 {
395 	struct pfr_ktable	*kt, *tmpkt;
396 	struct pfr_kentryworkq	 addq, delq, changeq;
397 	struct pfr_kentry	*p, *q;
398 	struct pfr_addr		 ad;
399 	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
400 	long			 tzero = time_second;
401 
402 	PF_RULES_WASSERT();
403 
404 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
405 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
406 	    PFR_FLAG_USERIOCTL))
407 		return (EINVAL);
408 	kt = pfr_lookup_table(tbl);
409 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
410 		return (ESRCH);
411 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
412 		return (EPERM);
413 	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
414 	if (tmpkt == NULL)
415 		return (ENOMEM);
416 	pfr_mark_addrs(kt);
417 	SLIST_INIT(&addq);
418 	SLIST_INIT(&delq);
419 	SLIST_INIT(&changeq);
420 	for (i = 0; i < size; i++) {
421 		/*
422 		 * XXXGL: understand pf_if usage of this function
423 		 * and make ad a moving pointer
424 		 */
425 		bcopy(addr + i, &ad, sizeof(ad));
426 		if (pfr_validate_addr(&ad))
427 			senderr(EINVAL);
428 		ad.pfra_fback = PFR_FB_NONE;
429 		p = pfr_lookup_addr(kt, &ad, 1);
430 		if (p != NULL) {
431 			if (p->pfrke_mark) {
432 				ad.pfra_fback = PFR_FB_DUPLICATE;
433 				goto _skip;
434 			}
435 			p->pfrke_mark = 1;
436 			if (p->pfrke_not != ad.pfra_not) {
437 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
438 				ad.pfra_fback = PFR_FB_CHANGED;
439 				xchange++;
440 			}
441 		} else {
442 			q = pfr_lookup_addr(tmpkt, &ad, 1);
443 			if (q != NULL) {
444 				ad.pfra_fback = PFR_FB_DUPLICATE;
445 				goto _skip;
446 			}
447 			p = pfr_create_kentry(&ad,
448 			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
449 			if (p == NULL)
450 				senderr(ENOMEM);
451 			if (pfr_route_kentry(tmpkt, p)) {
452 				pfr_destroy_kentry(p);
453 				ad.pfra_fback = PFR_FB_NONE;
454 			} else {
455 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
456 				ad.pfra_fback = PFR_FB_ADDED;
457 				xadd++;
458 			}
459 		}
460 _skip:
461 		if (flags & PFR_FLAG_FEEDBACK)
462 			bcopy(&ad, addr + i, sizeof(ad));
463 	}
464 	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
465 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
466 		if (*size2 < size+xdel) {
467 			*size2 = size+xdel;
468 			senderr(0);
469 		}
470 		i = 0;
471 		SLIST_FOREACH(p, &delq, pfrke_workq) {
472 			pfr_copyout_addr(&ad, p);
473 			ad.pfra_fback = PFR_FB_DELETED;
474 			bcopy(&ad, addr + size + i, sizeof(ad));
475 			i++;
476 		}
477 	}
478 	pfr_clean_node_mask(tmpkt, &addq);
479 	if (!(flags & PFR_FLAG_DUMMY)) {
480 		pfr_insert_kentries(kt, &addq, tzero);
481 		pfr_remove_kentries(kt, &delq);
482 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
483 	} else
484 		pfr_destroy_kentries(&addq);
485 	if (nadd != NULL)
486 		*nadd = xadd;
487 	if (ndel != NULL)
488 		*ndel = xdel;
489 	if (nchange != NULL)
490 		*nchange = xchange;
491 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
492 		*size2 = size+xdel;
493 	pfr_destroy_ktable(tmpkt, 0);
494 	return (0);
495 _bad:
496 	pfr_clean_node_mask(tmpkt, &addq);
497 	pfr_destroy_kentries(&addq);
498 	if (flags & PFR_FLAG_FEEDBACK)
499 		pfr_reset_feedback(addr, size);
500 	pfr_destroy_ktable(tmpkt, 0);
501 	return (rv);
502 }
503 
504 int
505 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
506 	int *nmatch, int flags)
507 {
508 	struct pfr_ktable	*kt;
509 	struct pfr_kentry	*p;
510 	struct pfr_addr		*ad;
511 	int			 i, xmatch = 0;
512 
513 	PF_RULES_RASSERT();
514 
515 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
516 	if (pfr_validate_table(tbl, 0, 0))
517 		return (EINVAL);
518 	kt = pfr_lookup_table(tbl);
519 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
520 		return (ESRCH);
521 
522 	for (i = 0, ad = addr; i < size; i++, ad++) {
523 		if (pfr_validate_addr(ad))
524 			return (EINVAL);
525 		if (ADDR_NETWORK(ad))
526 			return (EINVAL);
527 		p = pfr_lookup_addr(kt, ad, 0);
528 		if (flags & PFR_FLAG_REPLACE)
529 			pfr_copyout_addr(ad, p);
530 		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
531 		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
532 		if (p != NULL && !p->pfrke_not)
533 			xmatch++;
534 	}
535 	if (nmatch != NULL)
536 		*nmatch = xmatch;
537 	return (0);
538 }
539 
540 int
541 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
542 	int flags)
543 {
544 	struct pfr_ktable	*kt;
545 	struct pfr_walktree	 w;
546 	int			 rv;
547 
548 	PF_RULES_RASSERT();
549 
550 	ACCEPT_FLAGS(flags, 0);
551 	if (pfr_validate_table(tbl, 0, 0))
552 		return (EINVAL);
553 	kt = pfr_lookup_table(tbl);
554 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
555 		return (ESRCH);
556 	if (kt->pfrkt_cnt > *size) {
557 		*size = kt->pfrkt_cnt;
558 		return (0);
559 	}
560 
561 	bzero(&w, sizeof(w));
562 	w.pfrw_op = PFRW_GET_ADDRS;
563 	w.pfrw_addr = addr;
564 	w.pfrw_free = kt->pfrkt_cnt;
565 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
566 	if (!rv)
567 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
568 		    pfr_walktree, &w);
569 	if (rv)
570 		return (rv);
571 
572 	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
573 	    w.pfrw_free));
574 
575 	*size = kt->pfrkt_cnt;
576 	return (0);
577 }
578 
579 int
580 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
581 	int flags)
582 {
583 	struct pfr_ktable	*kt;
584 	struct pfr_walktree	 w;
585 	struct pfr_kentryworkq	 workq;
586 	int			 rv;
587 	long			 tzero = time_second;
588 
589 	PF_RULES_RASSERT();
590 
591 	/* XXX PFR_FLAG_CLSTATS disabled */
592 	ACCEPT_FLAGS(flags, 0);
593 	if (pfr_validate_table(tbl, 0, 0))
594 		return (EINVAL);
595 	kt = pfr_lookup_table(tbl);
596 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
597 		return (ESRCH);
598 	if (kt->pfrkt_cnt > *size) {
599 		*size = kt->pfrkt_cnt;
600 		return (0);
601 	}
602 
603 	bzero(&w, sizeof(w));
604 	w.pfrw_op = PFRW_GET_ASTATS;
605 	w.pfrw_astats = addr;
606 	w.pfrw_free = kt->pfrkt_cnt;
607 	/*
608 	 * Flags below are for backward compatibility. It used to be possible
609 	 * to have a table without per-entry counters. Now they are always
610 	 * allocated; we just discard the data when reading it if the table is
611 	 * not configured to have counters.
612 	 */
613 	w.pfrw_flags = kt->pfrkt_flags;
614 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
615 	if (!rv)
616 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
617 		    pfr_walktree, &w);
618 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
619 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
620 		pfr_clstats_kentries(kt, &workq, tzero, 0);
621 	}
622 	if (rv)
623 		return (rv);
624 
625 	if (w.pfrw_free) {
626 		printf("pfr_get_astats: corruption detected (%d).\n",
627 		    w.pfrw_free);
628 		return (ENOTTY);
629 	}
630 	*size = kt->pfrkt_cnt;
631 	return (0);
632 }
633 
634 int
635 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
636     int *nzero, int flags)
637 {
638 	struct pfr_ktable	*kt;
639 	struct pfr_kentryworkq	 workq;
640 	struct pfr_kentry	*p;
641 	struct pfr_addr		*ad;
642 	int			 i, rv, xzero = 0;
643 
644 	PF_RULES_WASSERT();
645 
646 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
647 	if (pfr_validate_table(tbl, 0, 0))
648 		return (EINVAL);
649 	kt = pfr_lookup_table(tbl);
650 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
651 		return (ESRCH);
652 	SLIST_INIT(&workq);
653 	for (i = 0, ad = addr; i < size; i++, ad++) {
654 		if (pfr_validate_addr(ad))
655 			senderr(EINVAL);
656 		p = pfr_lookup_addr(kt, ad, 1);
657 		if (flags & PFR_FLAG_FEEDBACK) {
658 			ad->pfra_fback = (p != NULL) ?
659 			    PFR_FB_CLEARED : PFR_FB_NONE;
660 		}
661 		if (p != NULL) {
662 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
663 			xzero++;
664 		}
665 	}
666 
667 	if (!(flags & PFR_FLAG_DUMMY))
668 		pfr_clstats_kentries(kt, &workq, time_second, 0);
669 	if (nzero != NULL)
670 		*nzero = xzero;
671 	return (0);
672 _bad:
673 	if (flags & PFR_FLAG_FEEDBACK)
674 		pfr_reset_feedback(addr, size);
675 	return (rv);
676 }
677 
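/*
 * Sanity-check an address coming from userland: the prefix length must fit
 * the address family, all address bits beyond the prefix must be zero
 * (e.g. 10.0.0.1/24 is rejected, 10.0.0.0/24 is accepted), and the
 * pfra_not/pfra_fback fields must not carry stray values.
 */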
678 static int
679 pfr_validate_addr(struct pfr_addr *ad)
680 {
681 	int i;
682 
683 	switch (ad->pfra_af) {
684 #ifdef INET
685 	case AF_INET:
686 		if (ad->pfra_net > 32)
687 			return (-1);
688 		break;
689 #endif /* INET */
690 #ifdef INET6
691 	case AF_INET6:
692 		if (ad->pfra_net > 128)
693 			return (-1);
694 		break;
695 #endif /* INET6 */
696 	default:
697 		return (-1);
698 	}
699 	if (ad->pfra_net < 128 &&
700 		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
701 			return (-1);
702 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
703 		if (((caddr_t)ad)[i])
704 			return (-1);
705 	if (ad->pfra_not && ad->pfra_not != 1)
706 		return (-1);
707 	if (ad->pfra_fback)
708 		return (-1);
709 	return (0);
710 }
711 
712 static void
713 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
714 	int *naddr, int sweep)
715 {
716 	struct pfr_walktree	w;
717 
718 	SLIST_INIT(workq);
719 	bzero(&w, sizeof(w));
720 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
721 	w.pfrw_workq = workq;
722 	if (kt->pfrkt_ip4 != NULL)
723 		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
724 		    pfr_walktree, &w))
725 			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
726 	if (kt->pfrkt_ip6 != NULL)
727 		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
728 		    pfr_walktree, &w))
729 			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
730 	if (naddr != NULL)
731 		*naddr = w.pfrw_free;
732 }
733 
734 static void
735 pfr_mark_addrs(struct pfr_ktable *kt)
736 {
737 	struct pfr_walktree	w;
738 
739 	bzero(&w, sizeof(w));
740 	w.pfrw_op = PFRW_MARK;
741 	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
742 		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
743 	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
744 		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
745 }
746 
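/*
 * Look up 'ad' in the table's radix tree for its address family.  Network
 * entries are matched with their mask; host addresses use best match unless
 * 'exact' is set, in which case a covering network entry does not count.
 */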
747 static struct pfr_kentry *
748 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
749 {
750 	union sockaddr_union	 sa, mask;
751 	struct radix_head	*head = NULL;
752 	struct pfr_kentry	*ke;
753 
754 	PF_RULES_ASSERT();
755 
756 	bzero(&sa, sizeof(sa));
757 	if (ad->pfra_af == AF_INET) {
758 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
759 		head = &kt->pfrkt_ip4->rh;
760 	} else if (ad->pfra_af == AF_INET6) {
761 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
762 		head = &kt->pfrkt_ip6->rh;
763 	}
764 	if (ADDR_NETWORK(ad)) {
765 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
766 		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
767 		if (ke && KENTRY_RNF_ROOT(ke))
768 			ke = NULL;
769 	} else {
770 		ke = (struct pfr_kentry *)rn_match(&sa, head);
771 		if (ke && KENTRY_RNF_ROOT(ke))
772 			ke = NULL;
773 		if (exact && ke && KENTRY_NETWORK(ke))
774 			ke = NULL;
775 	}
776 	return (ke);
777 }
778 
779 static struct pfr_kentry *
780 pfr_create_kentry(struct pfr_addr *ad, bool counters)
781 {
782 	struct pfr_kentry	*ke;
783 	counter_u64_t		 c;
784 
785 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
786 	if (ke == NULL)
787 		return (NULL);
788 
789 	if (ad->pfra_af == AF_INET)
790 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
791 	else if (ad->pfra_af == AF_INET6)
792 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
793 	ke->pfrke_af = ad->pfra_af;
794 	ke->pfrke_net = ad->pfra_net;
795 	ke->pfrke_not = ad->pfra_not;
796 	ke->pfrke_counters.pfrkc_tzero = 0;
797 	if (counters) {
798 		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
799 		if (c == NULL) {
800 			pfr_destroy_kentry(ke);
801 			return (NULL);
802 		}
803 		ke->pfrke_counters.pfrkc_counters = c;
804 	}
805 	return (ke);
806 }
807 
808 static void
809 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
810 {
811 	struct pfr_kentry	*p, *q;
812 
813 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
814 		q = SLIST_NEXT(p, pfrke_workq);
815 		pfr_destroy_kentry(p);
816 	}
817 }
818 
819 static void
820 pfr_destroy_kentry(struct pfr_kentry *ke)
821 {
822 	counter_u64_t c;
823 
824 	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
825 		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
826 	uma_zfree(V_pfr_kentry_z, ke);
827 }
828 
829 static void
830 pfr_insert_kentries(struct pfr_ktable *kt,
831     struct pfr_kentryworkq *workq, long tzero)
832 {
833 	struct pfr_kentry	*p;
834 	int			 rv, n = 0;
835 
836 	SLIST_FOREACH(p, workq, pfrke_workq) {
837 		rv = pfr_route_kentry(kt, p);
838 		if (rv) {
839 			printf("pfr_insert_kentries: cannot route entry "
840 			    "(code=%d).\n", rv);
841 			break;
842 		}
843 		p->pfrke_counters.pfrkc_tzero = tzero;
844 		n++;
845 	}
846 	kt->pfrkt_cnt += n;
847 }
848 
849 int
850 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
851 {
852 	struct pfr_kentry	*p;
853 	int			 rv;
854 
855 	p = pfr_lookup_addr(kt, ad, 1);
856 	if (p != NULL)
857 		return (0);
858 	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
859 	if (p == NULL)
860 		return (ENOMEM);
861 
862 	rv = pfr_route_kentry(kt, p);
863 	if (rv)
864 		return (rv);
865 
866 	p->pfrke_counters.pfrkc_tzero = tzero;
867 	kt->pfrkt_cnt++;
868 
869 	return (0);
870 }
871 
872 static void
873 pfr_remove_kentries(struct pfr_ktable *kt,
874     struct pfr_kentryworkq *workq)
875 {
876 	struct pfr_kentry	*p;
877 	int			 n = 0;
878 
879 	SLIST_FOREACH(p, workq, pfrke_workq) {
880 		pfr_unroute_kentry(kt, p);
881 		n++;
882 	}
883 	kt->pfrkt_cnt -= n;
884 	pfr_destroy_kentries(workq);
885 }
886 
887 static void
888 pfr_clean_node_mask(struct pfr_ktable *kt,
889     struct pfr_kentryworkq *workq)
890 {
891 	struct pfr_kentry	*p;
892 
893 	SLIST_FOREACH(p, workq, pfrke_workq)
894 		pfr_unroute_kentry(kt, p);
895 }
896 
897 static void
898 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
899     long tzero, int negchange)
900 {
901 	struct pfr_kentry	*p;
902 	int			 i;
903 
904 	SLIST_FOREACH(p, workq, pfrke_workq) {
905 		if (negchange)
906 			p->pfrke_not = !p->pfrke_not;
907 		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
908 			for (i = 0; i < PFR_NUM_COUNTERS; i++)
909 				counter_u64_zero(
910 				    p->pfrke_counters.pfrkc_counters + i);
911 		p->pfrke_counters.pfrkc_tzero = tzero;
912 	}
913 }
914 
915 static void
916 pfr_reset_feedback(struct pfr_addr *addr, int size)
917 {
918 	struct pfr_addr	*ad;
919 	int		i;
920 
921 	for (i = 0, ad = addr; i < size; i++, ad++)
922 		ad->pfra_fback = PFR_FB_NONE;
923 }
924 
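/*
 * Build a sockaddr-formatted netmask for the given prefix length, e.g. an
 * AF_INET 'net' of 25 yields 255.255.255.128; a 'net' of 0 yields the
 * all-zero (match-all) mask.
 */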
925 static void
926 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
927 {
928 	int	i;
929 
930 	bzero(sa, sizeof(*sa));
931 	if (af == AF_INET) {
932 		sa->sin.sin_len = sizeof(sa->sin);
933 		sa->sin.sin_family = AF_INET;
934 		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
935 	} else if (af == AF_INET6) {
936 		sa->sin6.sin6_len = sizeof(sa->sin6);
937 		sa->sin6.sin6_family = AF_INET6;
938 		for (i = 0; i < 4; i++) {
939 			if (net <= 32) {
940 				sa->sin6.sin6_addr.s6_addr32[i] =
941 				    net ? htonl(-1 << (32-net)) : 0;
942 				break;
943 			}
944 			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
945 			net -= 32;
946 		}
947 	}
948 }
949 
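/*
 * Insert the entry into the table's per-AF radix tree; host entries are
 * added without a mask, network entries with a mask built by
 * pfr_prepare_network().  Returns -1 if rn_addroute() refuses the entry
 * (e.g. a duplicate key).
 */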
950 static int
951 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
952 {
953 	union sockaddr_union	 mask;
954 	struct radix_node	*rn;
955 	struct radix_head	*head = NULL;
956 
957 	PF_RULES_WASSERT();
958 
959 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
960 	if (ke->pfrke_af == AF_INET)
961 		head = &kt->pfrkt_ip4->rh;
962 	else if (ke->pfrke_af == AF_INET6)
963 		head = &kt->pfrkt_ip6->rh;
964 
965 	if (KENTRY_NETWORK(ke)) {
966 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
967 		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
968 	} else
969 		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
970 
971 	return (rn == NULL ? -1 : 0);
972 }
973 
974 static int
975 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
976 {
977 	union sockaddr_union	 mask;
978 	struct radix_node	*rn;
979 	struct radix_head	*head = NULL;
980 
981 	if (ke->pfrke_af == AF_INET)
982 		head = &kt->pfrkt_ip4->rh;
983 	else if (ke->pfrke_af == AF_INET6)
984 		head = &kt->pfrkt_ip6->rh;
985 
986 	if (KENTRY_NETWORK(ke)) {
987 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
988 		rn = rn_delete(&ke->pfrke_sa, &mask, head);
989 	} else
990 		rn = rn_delete(&ke->pfrke_sa, NULL, head);
991 
992 	if (rn == NULL) {
993 		printf("pfr_unroute_kentry: delete failed.\n");
994 		return (-1);
995 	}
996 	return (0);
997 }
998 
999 static void
1000 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1001 {
1002 	bzero(ad, sizeof(*ad));
1003 	if (ke == NULL)
1004 		return;
1005 	ad->pfra_af = ke->pfrke_af;
1006 	ad->pfra_net = ke->pfrke_net;
1007 	ad->pfra_not = ke->pfrke_not;
1008 	if (ad->pfra_af == AF_INET)
1009 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1010 	else if (ad->pfra_af == AF_INET6)
1011 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1012 }
1013 
1014 static void
1015 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1016     const struct pfr_walktree *w)
1017 {
1018 	int dir, op;
1019 	const struct pfr_kcounters *kc = &ke->pfrke_counters;
1020 
1021 	bzero(as, sizeof(*as));
1022 	pfr_copyout_addr(&as->pfras_a, ke);
1023 	as->pfras_tzero = kc->pfrkc_tzero;
1024 
1025 	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
1026 	    kc->pfrkc_counters == NULL) {
1027 		bzero(as->pfras_packets, sizeof(as->pfras_packets));
1028 		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1029 		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1030 		return;
1031 	}
1032 
1033 	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
1034 		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1035 			as->pfras_packets[dir][op] = counter_u64_fetch(
1036 			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
1037 			as->pfras_bytes[dir][op] = counter_u64_fetch(
1038 			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
1039 		}
1040 	}
1041 }
1042 
1043 static void
1044 pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
1045 {
1046 	switch (sa->sa.sa_family) {
1047 	case AF_INET:
1048 		memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
1049 		break;
1050 	case AF_INET6:
1051 		memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
1052 		break;
1053 	default:
1054 		panic("Unknown AF");
1055 	}
1056 }
1057 
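/*
 * Radix tree walker callback, dispatching on pfrw_op: mark/sweep entries,
 * copy addresses or stats out for userland, pick the n-th entry for a pool,
 * update dynamic interface addresses, or (de)allocate per-entry counters.
 */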
1058 static int
1059 pfr_walktree(struct radix_node *rn, void *arg)
1060 {
1061 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1062 	struct pfr_walktree	*w = arg;
1063 
1064 	switch (w->pfrw_op) {
1065 	case PFRW_MARK:
1066 		ke->pfrke_mark = 0;
1067 		break;
1068 	case PFRW_SWEEP:
1069 		if (ke->pfrke_mark)
1070 			break;
1071 		/* FALLTHROUGH */
1072 	case PFRW_ENQUEUE:
1073 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1074 		w->pfrw_free++;
1075 		break;
1076 	case PFRW_GET_ADDRS:
1077 		if (w->pfrw_free-- > 0) {
1078 			pfr_copyout_addr(w->pfrw_addr, ke);
1079 			w->pfrw_addr++;
1080 		}
1081 		break;
1082 	case PFRW_GET_ASTATS:
1083 		if (w->pfrw_free-- > 0) {
1084 			struct pfr_astats as;
1085 
1086 			pfr_copyout_astats(&as, ke, w);
1087 
1088 			bcopy(&as, w->pfrw_astats, sizeof(as));
1089 			w->pfrw_astats++;
1090 		}
1091 		break;
1092 	case PFRW_POOL_GET:
1093 		if (ke->pfrke_not)
1094 			break; /* negative entries are ignored */
1095 		if (!w->pfrw_free--) {
1096 			w->pfrw_kentry = ke;
1097 			return (1); /* finish search */
1098 		}
1099 		break;
1100 	case PFRW_DYNADDR_UPDATE:
1101 	    {
1102 		union sockaddr_union	pfr_mask;
1103 
1104 		if (ke->pfrke_af == AF_INET) {
1105 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1106 				break;
1107 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1108 			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr4);
1109 			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask4);
1110 		} else if (ke->pfrke_af == AF_INET6){
1111 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1112 				break;
1113 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1114 			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr6);
1115 			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask6);
1116 		}
1117 		break;
1118 	    }
1119 	case PFRW_COUNTERS:
1120 	    {
1121 		if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
1122 			if (ke->pfrke_counters.pfrkc_counters != NULL)
1123 				break;
1124 			ke->pfrke_counters.pfrkc_counters =
1125 			    uma_zalloc_pcpu(V_pfr_kentry_counter_z,
1126 			    M_NOWAIT | M_ZERO);
1127 		} else {
1128 			uma_zfree_pcpu(V_pfr_kentry_counter_z,
1129 			    ke->pfrke_counters.pfrkc_counters);
1130 			ke->pfrke_counters.pfrkc_counters = NULL;
1131 		}
1132 		break;
1133 	    }
1134 	}
1135 	return (0);
1136 }
1137 
1138 int
1139 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1140 {
1141 	struct pfr_ktableworkq	 workq;
1142 	struct pfr_ktable	*p;
1143 	int			 xdel = 0;
1144 
1145 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1146 	if (pfr_fix_anchor(filter->pfrt_anchor))
1147 		return (EINVAL);
1148 	if (pfr_table_count(filter, flags) < 0)
1149 		return (ENOENT);
1150 
1151 	SLIST_INIT(&workq);
1152 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1153 		if (pfr_skip_table(filter, p, flags))
1154 			continue;
1155 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1156 			continue;
1157 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1158 			continue;
1159 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1160 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1161 		xdel++;
1162 	}
1163 	if (!(flags & PFR_FLAG_DUMMY))
1164 		pfr_setflags_ktables(&workq);
1165 	if (ndel != NULL)
1166 		*ndel = xdel;
1167 	return (0);
1168 }
1169 
1170 int
1171 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1172 {
1173 	struct pfr_ktableworkq	 addq, changeq;
1174 	struct pfr_ktable	*p, *q, *r, key;
1175 	int			 i, rv, xadd = 0;
1176 	long			 tzero = time_second;
1177 
1178 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1179 	SLIST_INIT(&addq);
1180 	SLIST_INIT(&changeq);
1181 	for (i = 0; i < size; i++) {
1182 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1183 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1184 		    flags & PFR_FLAG_USERIOCTL))
1185 			senderr(EINVAL);
1186 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1187 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1188 		if (p == NULL) {
1189 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1190 			if (p == NULL)
1191 				senderr(ENOMEM);
1192 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1193 				if (!pfr_ktable_compare(p, q)) {
1194 					pfr_destroy_ktable(p, 0);
1195 					goto _skip;
1196 				}
1197 			}
1198 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1199 			xadd++;
1200 			if (!key.pfrkt_anchor[0])
1201 				goto _skip;
1202 
1203 			/* find or create root table */
1204 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1205 			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1206 			if (r != NULL) {
1207 				p->pfrkt_root = r;
1208 				goto _skip;
1209 			}
1210 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1211 				if (!pfr_ktable_compare(&key, q)) {
1212 					p->pfrkt_root = q;
1213 					goto _skip;
1214 				}
1215 			}
1216 			key.pfrkt_flags = 0;
1217 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1218 			if (r == NULL)
1219 				senderr(ENOMEM);
1220 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1221 			p->pfrkt_root = r;
1222 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1223 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1224 				if (!pfr_ktable_compare(&key, q))
1225 					goto _skip;
1226 			p->pfrkt_nflags = (p->pfrkt_flags &
1227 			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1228 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1229 			xadd++;
1230 		}
1231 _skip:
1232 	;
1233 	}
1234 	if (!(flags & PFR_FLAG_DUMMY)) {
1235 		pfr_insert_ktables(&addq);
1236 		pfr_setflags_ktables(&changeq);
1237 	} else
1238 		 pfr_destroy_ktables(&addq, 0);
1239 	if (nadd != NULL)
1240 		*nadd = xadd;
1241 	return (0);
1242 _bad:
1243 	pfr_destroy_ktables(&addq, 0);
1244 	return (rv);
1245 }
1246 
1247 int
1248 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1249 {
1250 	struct pfr_ktableworkq	 workq;
1251 	struct pfr_ktable	*p, *q, key;
1252 	int			 i, xdel = 0;
1253 
1254 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1255 	SLIST_INIT(&workq);
1256 	for (i = 0; i < size; i++) {
1257 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1258 		if (pfr_validate_table(&key.pfrkt_t, 0,
1259 		    flags & PFR_FLAG_USERIOCTL))
1260 			return (EINVAL);
1261 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1262 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1263 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1264 				if (!pfr_ktable_compare(p, q))
1265 					goto _skip;
1266 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1267 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1268 			xdel++;
1269 		}
1270 _skip:
1271 	;
1272 	}
1273 
1274 	if (!(flags & PFR_FLAG_DUMMY))
1275 		pfr_setflags_ktables(&workq);
1276 	if (ndel != NULL)
1277 		*ndel = xdel;
1278 	return (0);
1279 }
1280 
1281 int
1282 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1283 	int flags)
1284 {
1285 	struct pfr_ktable	*p;
1286 	int			 n, nn;
1287 
1288 	PF_RULES_RASSERT();
1289 
1290 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1291 	if (pfr_fix_anchor(filter->pfrt_anchor))
1292 		return (EINVAL);
1293 	n = nn = pfr_table_count(filter, flags);
1294 	if (n < 0)
1295 		return (ENOENT);
1296 	if (n > *size) {
1297 		*size = n;
1298 		return (0);
1299 	}
1300 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1301 		if (pfr_skip_table(filter, p, flags))
1302 			continue;
1303 		if (n-- <= 0)
1304 			continue;
1305 		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1306 	}
1307 
1308 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1309 
1310 	*size = nn;
1311 	return (0);
1312 }
1313 
1314 int
1315 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1316 	int flags)
1317 {
1318 	struct pfr_ktable	*p;
1319 	struct pfr_ktableworkq	 workq;
1320 	int			 n, nn;
1321 	long			 tzero = time_second;
1322 	int			 pfr_dir, pfr_op;
1323 
1324 	/* XXX PFR_FLAG_CLSTATS disabled */
1325 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1326 	if (pfr_fix_anchor(filter->pfrt_anchor))
1327 		return (EINVAL);
1328 	n = nn = pfr_table_count(filter, flags);
1329 	if (n < 0)
1330 		return (ENOENT);
1331 	if (n > *size) {
1332 		*size = n;
1333 		return (0);
1334 	}
1335 	SLIST_INIT(&workq);
1336 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1337 		if (pfr_skip_table(filter, p, flags))
1338 			continue;
1339 		if (n-- <= 0)
1340 			continue;
1341 		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1342 		    sizeof(struct pfr_table));
1343 		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1344 			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1345 				tbl->pfrts_packets[pfr_dir][pfr_op] =
1346 				    pfr_kstate_counter_fetch(
1347 					&p->pfrkt_packets[pfr_dir][pfr_op]);
1348 				tbl->pfrts_bytes[pfr_dir][pfr_op] =
1349 				    pfr_kstate_counter_fetch(
1350 					&p->pfrkt_bytes[pfr_dir][pfr_op]);
1351 			}
1352 		}
1353 		tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
1354 		tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
1355 		tbl->pfrts_tzero = p->pfrkt_tzero;
1356 		tbl->pfrts_cnt = p->pfrkt_cnt;
1357 		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1358 			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1359 		tbl++;
1360 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1361 	}
1362 	if (flags & PFR_FLAG_CLSTATS)
1363 		pfr_clstats_ktables(&workq, tzero,
1364 		    flags & PFR_FLAG_ADDRSTOO);
1365 
1366 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1367 
1368 	*size = nn;
1369 	return (0);
1370 }
1371 
1372 int
1373 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1374 {
1375 	struct pfr_ktableworkq	 workq;
1376 	struct pfr_ktable	*p, key;
1377 	int			 i, xzero = 0;
1378 	long			 tzero = time_second;
1379 
1380 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1381 	SLIST_INIT(&workq);
1382 	for (i = 0; i < size; i++) {
1383 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1384 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1385 			return (EINVAL);
1386 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1387 		if (p != NULL) {
1388 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1389 			xzero++;
1390 		}
1391 	}
1392 	if (!(flags & PFR_FLAG_DUMMY))
1393 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1394 	if (nzero != NULL)
1395 		*nzero = xzero;
1396 	return (0);
1397 }
1398 
1399 int
1400 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1401 	int *nchange, int *ndel, int flags)
1402 {
1403 	struct pfr_ktableworkq	 workq;
1404 	struct pfr_ktable	*p, *q, key;
1405 	int			 i, xchange = 0, xdel = 0;
1406 
1407 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1408 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1409 	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1410 	    (setflag & clrflag))
1411 		return (EINVAL);
1412 	SLIST_INIT(&workq);
1413 	for (i = 0; i < size; i++) {
1414 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1415 		if (pfr_validate_table(&key.pfrkt_t, 0,
1416 		    flags & PFR_FLAG_USERIOCTL))
1417 			return (EINVAL);
1418 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1419 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1420 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1421 			    ~clrflag;
1422 			if (p->pfrkt_nflags == p->pfrkt_flags)
1423 				goto _skip;
1424 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1425 				if (!pfr_ktable_compare(p, q))
1426 					goto _skip;
1427 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1428 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1429 			    (clrflag & PFR_TFLAG_PERSIST) &&
1430 			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1431 				xdel++;
1432 			else
1433 				xchange++;
1434 		}
1435 _skip:
1436 	;
1437 	}
1438 	if (!(flags & PFR_FLAG_DUMMY))
1439 		pfr_setflags_ktables(&workq);
1440 	if (nchange != NULL)
1441 		*nchange = xchange;
1442 	if (ndel != NULL)
1443 		*ndel = xdel;
1444 	return (0);
1445 }
1446 
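/*
 * Table transaction used while loading a ruleset: pfr_ina_begin() clears any
 * stale inactive tables for the anchor and hands out a ticket,
 * pfr_ina_define() builds shadow tables against that ticket, and
 * pfr_ina_commit()/pfr_ina_rollback() swap them in or throw them away.
 */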
1447 int
1448 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1449 {
1450 	struct pfr_ktableworkq	 workq;
1451 	struct pfr_ktable	*p;
1452 	struct pf_kruleset	*rs;
1453 	int			 xdel = 0;
1454 
1455 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1456 	rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
1457 	if (rs == NULL)
1458 		return (ENOMEM);
1459 	SLIST_INIT(&workq);
1460 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1461 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1462 		    pfr_skip_table(trs, p, 0))
1463 			continue;
1464 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1465 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1466 		xdel++;
1467 	}
1468 	if (!(flags & PFR_FLAG_DUMMY)) {
1469 		pfr_setflags_ktables(&workq);
1470 		if (ticket != NULL)
1471 			*ticket = ++rs->tticket;
1472 		rs->topen = 1;
1473 	} else
1474 		pf_remove_if_empty_kruleset(rs);
1475 	if (ndel != NULL)
1476 		*ndel = xdel;
1477 	return (0);
1478 }
1479 
1480 int
1481 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1482     int *nadd, int *naddr, u_int32_t ticket, int flags)
1483 {
1484 	struct pfr_ktableworkq	 tableq;
1485 	struct pfr_kentryworkq	 addrq;
1486 	struct pfr_ktable	*kt, *rt, *shadow, key;
1487 	struct pfr_kentry	*p;
1488 	struct pfr_addr		*ad;
1489 	struct pf_kruleset	*rs;
1490 	int			 i, rv, xadd = 0, xaddr = 0;
1491 
1492 	PF_RULES_WASSERT();
1493 
1494 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1495 	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1496 		return (EINVAL);
1497 	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1498 	    flags & PFR_FLAG_USERIOCTL))
1499 		return (EINVAL);
1500 	rs = pf_find_kruleset(tbl->pfrt_anchor);
1501 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1502 		return (EBUSY);
1503 	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1504 	SLIST_INIT(&tableq);
1505 	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1506 	if (kt == NULL) {
1507 		kt = pfr_create_ktable(tbl, 0, 1);
1508 		if (kt == NULL)
1509 			return (ENOMEM);
1510 		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1511 		xadd++;
1512 		if (!tbl->pfrt_anchor[0])
1513 			goto _skip;
1514 
1515 		/* find or create root table */
1516 		bzero(&key, sizeof(key));
1517 		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1518 		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1519 		if (rt != NULL) {
1520 			kt->pfrkt_root = rt;
1521 			goto _skip;
1522 		}
1523 		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1524 		if (rt == NULL) {
1525 			pfr_destroy_ktables(&tableq, 0);
1526 			return (ENOMEM);
1527 		}
1528 		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1529 		kt->pfrkt_root = rt;
1530 	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1531 		xadd++;
1532 _skip:
1533 	shadow = pfr_create_ktable(tbl, 0, 0);
1534 	if (shadow == NULL) {
1535 		pfr_destroy_ktables(&tableq, 0);
1536 		return (ENOMEM);
1537 	}
1538 	SLIST_INIT(&addrq);
1539 	for (i = 0, ad = addr; i < size; i++, ad++) {
1540 		if (pfr_validate_addr(ad))
1541 			senderr(EINVAL);
1542 		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1543 			continue;
1544 		p = pfr_create_kentry(ad,
1545 		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1546 		if (p == NULL)
1547 			senderr(ENOMEM);
1548 		if (pfr_route_kentry(shadow, p)) {
1549 			pfr_destroy_kentry(p);
1550 			continue;
1551 		}
1552 		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1553 		xaddr++;
1554 	}
1555 	if (!(flags & PFR_FLAG_DUMMY)) {
1556 		if (kt->pfrkt_shadow != NULL)
1557 			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1558 		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1559 		pfr_insert_ktables(&tableq);
1560 		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1561 		    xaddr : NO_ADDRESSES;
1562 		kt->pfrkt_shadow = shadow;
1563 	} else {
1564 		pfr_clean_node_mask(shadow, &addrq);
1565 		pfr_destroy_ktable(shadow, 0);
1566 		pfr_destroy_ktables(&tableq, 0);
1567 		pfr_destroy_kentries(&addrq);
1568 	}
1569 	if (nadd != NULL)
1570 		*nadd = xadd;
1571 	if (naddr != NULL)
1572 		*naddr = xaddr;
1573 	return (0);
1574 _bad:
1575 	pfr_destroy_ktable(shadow, 0);
1576 	pfr_destroy_ktables(&tableq, 0);
1577 	pfr_destroy_kentries(&addrq);
1578 	return (rv);
1579 }
1580 
1581 int
1582 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1583 {
1584 	struct pfr_ktableworkq	 workq;
1585 	struct pfr_ktable	*p;
1586 	struct pf_kruleset	*rs;
1587 	int			 xdel = 0;
1588 
1589 	PF_RULES_WASSERT();
1590 
1591 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1592 	rs = pf_find_kruleset(trs->pfrt_anchor);
1593 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1594 		return (0);
1595 	SLIST_INIT(&workq);
1596 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1597 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1598 		    pfr_skip_table(trs, p, 0))
1599 			continue;
1600 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1601 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1602 		xdel++;
1603 	}
1604 	if (!(flags & PFR_FLAG_DUMMY)) {
1605 		pfr_setflags_ktables(&workq);
1606 		rs->topen = 0;
1607 		pf_remove_if_empty_kruleset(rs);
1608 	}
1609 	if (ndel != NULL)
1610 		*ndel = xdel;
1611 	return (0);
1612 }
1613 
1614 int
1615 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1616     int *nchange, int flags)
1617 {
1618 	struct pfr_ktable	*p, *q;
1619 	struct pfr_ktableworkq	 workq;
1620 	struct pf_kruleset	*rs;
1621 	int			 xadd = 0, xchange = 0;
1622 	long			 tzero = time_second;
1623 
1624 	PF_RULES_WASSERT();
1625 
1626 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1627 	rs = pf_find_kruleset(trs->pfrt_anchor);
1628 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1629 		return (EBUSY);
1630 
1631 	SLIST_INIT(&workq);
1632 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1633 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1634 		    pfr_skip_table(trs, p, 0))
1635 			continue;
1636 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1637 		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1638 			xchange++;
1639 		else
1640 			xadd++;
1641 	}
1642 
1643 	if (!(flags & PFR_FLAG_DUMMY)) {
1644 		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1645 			q = SLIST_NEXT(p, pfrkt_workq);
1646 			pfr_commit_ktable(p, tzero);
1647 		}
1648 		rs->topen = 0;
1649 		pf_remove_if_empty_kruleset(rs);
1650 	}
1651 	if (nadd != NULL)
1652 		*nadd = xadd;
1653 	if (nchange != NULL)
1654 		*nchange = xchange;
1655 
1656 	return (0);
1657 }
1658 
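/*
 * Merge the shadow table built during the transaction into 'kt'.  Three
 * cases: the shadow carries no addresses (only clear stats), kt is already
 * active (compute add/delete/change sets entry by entry), or kt holds no
 * addresses yet (simply swap the radix trees and counters with the shadow).
 */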
1659 static void
1660 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1661 {
1662 	counter_u64_t		*pkc, *qkc;
1663 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1664 	int			 nflags;
1665 
1666 	PF_RULES_WASSERT();
1667 
1668 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1669 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1670 			pfr_clstats_ktable(kt, tzero, 1);
1671 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1672 		/* kt might contain addresses */
1673 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1674 		struct pfr_kentry	*p, *q, *next;
1675 		struct pfr_addr		 ad;
1676 
1677 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1678 		pfr_mark_addrs(kt);
1679 		SLIST_INIT(&addq);
1680 		SLIST_INIT(&changeq);
1681 		SLIST_INIT(&delq);
1682 		SLIST_INIT(&garbageq);
1683 		pfr_clean_node_mask(shadow, &addrq);
1684 		SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) {
1685 			pfr_copyout_addr(&ad, p);
1686 			q = pfr_lookup_addr(kt, &ad, 1);
1687 			if (q != NULL) {
1688 				if (q->pfrke_not != p->pfrke_not)
1689 					SLIST_INSERT_HEAD(&changeq, q,
1690 					    pfrke_workq);
1691 				pkc = &p->pfrke_counters.pfrkc_counters;
1692 				qkc = &q->pfrke_counters.pfrkc_counters;
1693 				if ((*pkc == NULL) != (*qkc == NULL))
1694 					SWAP(counter_u64_t, *pkc, *qkc);
1695 				q->pfrke_mark = 1;
1696 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1697 			} else {
1698 				p->pfrke_counters.pfrkc_tzero = tzero;
1699 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1700 			}
1701 		}
1702 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1703 		pfr_insert_kentries(kt, &addq, tzero);
1704 		pfr_remove_kentries(kt, &delq);
1705 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1706 		pfr_destroy_kentries(&garbageq);
1707 	} else {
1708 		/* kt cannot contain addresses */
1709 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1710 		    shadow->pfrkt_ip4);
1711 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1712 		    shadow->pfrkt_ip6);
1713 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1714 		pfr_clstats_ktable(kt, tzero, 1);
1715 	}
1716 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1717 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1718 		& ~PFR_TFLAG_INACTIVE;
1719 	pfr_destroy_ktable(shadow, 0);
1720 	kt->pfrkt_shadow = NULL;
1721 	pfr_setflags_ktable(kt, nflags);
1722 }
1723 
1724 static int
1725 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1726 {
1727 	int i;
1728 
1729 	if (!tbl->pfrt_name[0])
1730 		return (-1);
1731 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1732 		 return (-1);
1733 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1734 		return (-1);
1735 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1736 		if (tbl->pfrt_name[i])
1737 			return (-1);
1738 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1739 		return (-1);
1740 	if (tbl->pfrt_flags & ~allowedflags)
1741 		return (-1);
1742 	return (0);
1743 }
1744 
1745 /*
1746  * Rewrite anchors referenced by tables to strip leading slashes
1747  * and check for validity.
1748  */
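/* For example, an anchor of "/foo/bar" is rewritten in place to "foo/bar". */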
1749 static int
1750 pfr_fix_anchor(char *anchor)
1751 {
1752 	size_t siz = MAXPATHLEN;
1753 	int i;
1754 
1755 	if (anchor[0] == '/') {
1756 		char *path;
1757 		int off;
1758 
1759 		path = anchor;
1760 		off = 1;
1761 		while (*++path == '/')
1762 			off++;
1763 		bcopy(path, anchor, siz - off);
1764 		memset(anchor + siz - off, 0, off);
1765 	}
1766 	if (anchor[siz - 1])
1767 		return (-1);
1768 	for (i = strlen(anchor); i < siz; i++)
1769 		if (anchor[i])
1770 			return (-1);
1771 	return (0);
1772 }
1773 
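/*
 * Editorial note: pfr_table_count() returns the number of tables visible to
 * the given filter: all rulesets, a single anchor (or -1 if that anchor does
 * not exist), or the main ruleset.
 */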
1774 int
1775 pfr_table_count(struct pfr_table *filter, int flags)
1776 {
1777 	struct pf_kruleset *rs;
1778 
1779 	PF_RULES_ASSERT();
1780 
1781 	if (flags & PFR_FLAG_ALLRSETS)
1782 		return (V_pfr_ktable_cnt);
1783 	if (filter->pfrt_anchor[0]) {
1784 		rs = pf_find_kruleset(filter->pfrt_anchor);
1785 		return ((rs != NULL) ? rs->tables : -1);
1786 	}
1787 	return (pf_main_ruleset.tables);
1788 }
1789 
1790 static int
1791 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1792 {
1793 	if (flags & PFR_FLAG_ALLRSETS)
1794 		return (0);
1795 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1796 		return (1);
1797 	return (0);
1798 }
1799 
1800 static void
1801 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1802 {
1803 	struct pfr_ktable	*p;
1804 
1805 	SLIST_FOREACH(p, workq, pfrkt_workq)
1806 		pfr_insert_ktable(p);
1807 }
1808 
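/*
 * Editorial note: pfr_insert_ktable() links a new kernel table into the
 * global RB tree and, if the table lives under an anchor, bumps the root
 * table's anchor refcount, marking the root PFR_TFLAG_REFDANCHOR on the
 * first reference.
 */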
1809 static void
1810 pfr_insert_ktable(struct pfr_ktable *kt)
1811 {
1812 
1813 	PF_RULES_WASSERT();
1814 
1815 	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1816 	V_pfr_ktable_cnt++;
1817 	if (kt->pfrkt_root != NULL)
1818 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1819 			pfr_setflags_ktable(kt->pfrkt_root,
1820 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1821 }
1822 
1823 static void
1824 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1825 {
1826 	struct pfr_ktable	*p, *q;
1827 
1828 	for (p = SLIST_FIRST(workq); p; p = q) {
1829 		q = SLIST_NEXT(p, pfrkt_workq);
1830 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1831 	}
1832 }
1833 
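/*
 * Editorial note: pfr_setflags_ktable() is where a table's life cycle is
 * decided.  A table no longer referenced by rules, anchors or the PERSIST
 * flag loses ACTIVE; losing all SETMASK flags removes it from the RB tree
 * and destroys it outright.  Toggling PFR_TFLAG_COUNTERS walks both radix
 * trees with a PFRW_COUNTERS walker, which (in pfr_walktree(), not shown in
 * this excerpt) allocates or releases per-entry counters depending on
 * pfrw_flags.  Losing ACTIVE also drops all addresses, and losing INACTIVE
 * destroys a pending shadow table.
 */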
1834 static void
1835 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1836 {
1837 	struct pfr_kentryworkq	addrq;
1838 	struct pfr_walktree	w;
1839 
1840 	PF_RULES_WASSERT();
1841 
1842 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1843 	    !(newf & PFR_TFLAG_REFDANCHOR) &&
1844 	    !(newf & PFR_TFLAG_PERSIST))
1845 		newf &= ~PFR_TFLAG_ACTIVE;
1846 	if (!(newf & PFR_TFLAG_ACTIVE))
1847 		newf &= ~PFR_TFLAG_USRMASK;
1848 	if (!(newf & PFR_TFLAG_SETMASK)) {
1849 		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1850 		if (kt->pfrkt_root != NULL)
1851 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1852 				pfr_setflags_ktable(kt->pfrkt_root,
1853 				    kt->pfrkt_root->pfrkt_flags &
1854 					~PFR_TFLAG_REFDANCHOR);
1855 		pfr_destroy_ktable(kt, 1);
1856 		V_pfr_ktable_cnt--;
1857 		return;
1858 	}
1859 	if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1860 		bzero(&w, sizeof(w));
1861 		w.pfrw_op = PFRW_COUNTERS;
1862 		w.pfrw_flags |= PFR_TFLAG_COUNTERS;
1863 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1864 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1865 	}
1866 	if (! (newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1867 		bzero(&w, sizeof(w));
1868 		w.pfrw_op = PFRW_COUNTERS;
1869 		w.pfrw_flags |= 0;
1870 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1871 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1872 	}
1873 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1874 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1875 		pfr_remove_kentries(kt, &addrq);
1876 	}
1877 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1878 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1879 		kt->pfrkt_shadow = NULL;
1880 	}
1881 	kt->pfrkt_flags = newf;
1882 }
1883 
1884 static void
1885 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1886 {
1887 	struct pfr_ktable	*p;
1888 
1889 	SLIST_FOREACH(p, workq, pfrkt_workq)
1890 		pfr_clstats_ktable(p, tzero, recurse);
1891 }
1892 
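/*
 * Editorial note: pfr_clstats_ktable() zeroes the table-level packet, byte,
 * match and nomatch counters and resets pfrkt_tzero; with 'recurse' set it
 * also clears the per-address statistics.
 */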
1893 static void
1894 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1895 {
1896 	struct pfr_kentryworkq	 addrq;
1897 	int			 pfr_dir, pfr_op;
1898 
1899 	MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());
1900 
1901 	if (recurse) {
1902 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1903 		pfr_clstats_kentries(kt, &addrq, tzero, 0);
1904 	}
1905 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1906 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1907 			pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
1908 			pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
1909 		}
1910 	}
1911 	pfr_kstate_counter_zero(&kt->pfrkt_match);
1912 	pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
1913 	kt->pfrkt_tzero = tzero;
1914 }
1915 
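/*
 * Editorial note: pfr_create_ktable() allocates a zeroed table, optionally
 * attaches it to its ruleset, initializes the per-direction/per-op packet
 * and byte counters and the match/nomatch counters, and creates the IPv4
 * and IPv6 radix heads.  Every failure path frees whatever has been set up
 * so far via pfr_destroy_ktable().
 */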
1916 static struct pfr_ktable *
1917 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1918 {
1919 	struct pfr_ktable	*kt;
1920 	struct pf_kruleset	*rs;
1921 	int			 pfr_dir, pfr_op;
1922 
1923 	PF_RULES_WASSERT();
1924 
1925 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1926 	if (kt == NULL)
1927 		return (NULL);
1928 	kt->pfrkt_t = *tbl;
1929 
1930 	if (attachruleset) {
1931 		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
1932 		if (!rs) {
1933 			pfr_destroy_ktable(kt, 0);
1934 			return (NULL);
1935 		}
1936 		kt->pfrkt_rs = rs;
1937 		rs->tables++;
1938 	}
1939 
1940 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1941 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1942 			if (pfr_kstate_counter_init(
1943 			    &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1944 				pfr_destroy_ktable(kt, 0);
1945 				return (NULL);
1946 			}
1947 			if (pfr_kstate_counter_init(
1948 			    &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1949 				pfr_destroy_ktable(kt, 0);
1950 				return (NULL);
1951 			}
1952 		}
1953 	}
1954 	if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
1955 		pfr_destroy_ktable(kt, 0);
1956 		return (NULL);
1957 	}
1958 
1959 	if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
1960 		pfr_destroy_ktable(kt, 0);
1961 		return (NULL);
1962 	}
1963 
1964 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
1965 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
1966 	    !rn_inithead((void **)&kt->pfrkt_ip6,
1967 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1968 		pfr_destroy_ktable(kt, 0);
1969 		return (NULL);
1970 	}
1971 	kt->pfrkt_tzero = tzero;
1972 
1973 	return (kt);
1974 }
1975 
1976 static void
1977 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1978 {
1979 	struct pfr_ktable	*p, *q;
1980 
1981 	for (p = SLIST_FIRST(workq); p; p = q) {
1982 		q = SLIST_NEXT(p, pfrkt_workq);
1983 		pfr_destroy_ktable(p, flushaddr);
1984 	}
1985 }
1986 
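/*
 * Editorial note: pfr_destroy_ktable() is the single teardown path: it
 * optionally flushes all addresses, detaches the radix heads, recurses into
 * a pending shadow table, drops the ruleset reference, releases the
 * statistics counters and frees the table itself.
 */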
1987 static void
1988 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1989 {
1990 	struct pfr_kentryworkq	 addrq;
1991 	int			 pfr_dir, pfr_op;
1992 
1993 	if (flushaddr) {
1994 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1995 		pfr_clean_node_mask(kt, &addrq);
1996 		pfr_destroy_kentries(&addrq);
1997 	}
1998 	if (kt->pfrkt_ip4 != NULL)
1999 		rn_detachhead((void **)&kt->pfrkt_ip4);
2000 	if (kt->pfrkt_ip6 != NULL)
2001 		rn_detachhead((void **)&kt->pfrkt_ip6);
2002 	if (kt->pfrkt_shadow != NULL)
2003 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2004 	if (kt->pfrkt_rs != NULL) {
2005 		kt->pfrkt_rs->tables--;
2006 		pf_remove_if_empty_kruleset(kt->pfrkt_rs);
2007 	}
2008 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2009 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2010 			pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
2011 			pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
2012 		}
2013 	}
2014 	pfr_kstate_counter_deinit(&kt->pfrkt_match);
2015 	pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);
2016 
2017 	free(kt, M_PFTABLE);
2018 }
2019 
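/*
 * Editorial note: tables are kept in an RB tree ordered by (name, anchor).
 * pfr_lookup_table() below relies on a struct pfr_ktable beginning with a
 * struct pfr_table, so a userland table spec can be used directly as the
 * search key.
 */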
2020 static int
2021 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2022 {
2023 	int d;
2024 
2025 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2026 		return (d);
2027 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2028 }
2029 
2030 static struct pfr_ktable *
2031 pfr_lookup_table(struct pfr_table *tbl)
2032 {
2033 	/* struct pfr_ktable starts like a struct pfr_table */
2034 	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2035 	    (struct pfr_ktable *)tbl));
2036 }
2037 
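/*
 * Editorial note: pfr_match_addr() is the hot-path lookup used while
 * evaluating rules.  Inactive tables defer to their root table; the address
 * is wrapped in a sockaddr of the right family, looked up with rn_match(),
 * and a hit only counts if the best-matching entry is not a negated ("!")
 * entry.  A minimal caller sketch (illustrative variable names, not taken
 * from this file):
 *
 *	if (pfr_match_addr(kt, &src_addr, af))
 *		... the address is covered by the table ...
 *
 * where 'src_addr' is a struct pf_addr and 'af' the address family of the
 * packet being evaluated.
 */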
2038 int
2039 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2040 {
2041 	struct pfr_kentry	*ke = NULL;
2042 	int			 match;
2043 
2044 	PF_RULES_RASSERT();
2045 
2046 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2047 		kt = kt->pfrkt_root;
2048 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2049 		return (0);
2050 
2051 	switch (af) {
2052 #ifdef INET
2053 	case AF_INET:
2054 	    {
2055 		struct sockaddr_in sin;
2056 
2057 		bzero(&sin, sizeof(sin));
2058 		sin.sin_len = sizeof(sin);
2059 		sin.sin_family = AF_INET;
2060 		sin.sin_addr.s_addr = a->addr32[0];
2061 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2062 		if (ke && KENTRY_RNF_ROOT(ke))
2063 			ke = NULL;
2064 		break;
2065 	    }
2066 #endif /* INET */
2067 #ifdef INET6
2068 	case AF_INET6:
2069 	    {
2070 		struct sockaddr_in6 sin6;
2071 
2072 		bzero(&sin6, sizeof(sin6));
2073 		sin6.sin6_len = sizeof(sin6);
2074 		sin6.sin6_family = AF_INET6;
2075 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2076 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2077 		if (ke && KENTRY_RNF_ROOT(ke))
2078 			ke = NULL;
2079 		break;
2080 	    }
2081 #endif /* INET6 */
2082 	}
2083 	match = (ke && !ke->pfrke_not);
2084 	if (match)
2085 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2086 	else
2087 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2088 	return (match);
2089 }
2090 
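/*
 * Editorial note: pfr_update_stats() charges a packet to the table (and,
 * when PFR_TFLAG_COUNTERS is set, to the matching entry).  If the lookup
 * result disagrees with what the rule matched earlier ('notrule'), the
 * traffic is accounted under PFR_OP_XPASS instead, and a debug message is
 * emitted when the rule was not a pass rule.
 */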
2091 void
2092 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2093     u_int64_t len, int dir_out, int op_pass, int notrule)
2094 {
2095 	struct pfr_kentry	*ke = NULL;
2096 
2097 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2098 		kt = kt->pfrkt_root;
2099 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2100 		return;
2101 
2102 	switch (af) {
2103 #ifdef INET
2104 	case AF_INET:
2105 	    {
2106 		struct sockaddr_in sin;
2107 
2108 		bzero(&sin, sizeof(sin));
2109 		sin.sin_len = sizeof(sin);
2110 		sin.sin_family = AF_INET;
2111 		sin.sin_addr.s_addr = a->addr32[0];
2112 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2113 		if (ke && KENTRY_RNF_ROOT(ke))
2114 			ke = NULL;
2115 		break;
2116 	    }
2117 #endif /* INET */
2118 #ifdef INET6
2119 	case AF_INET6:
2120 	    {
2121 		struct sockaddr_in6 sin6;
2122 
2123 		bzero(&sin6, sizeof(sin6));
2124 		sin6.sin6_len = sizeof(sin6);
2125 		sin6.sin6_family = AF_INET6;
2126 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2127 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2128 		if (ke && KENTRY_RNF_ROOT(ke))
2129 			ke = NULL;
2130 		break;
2131 	    }
2132 #endif /* INET6 */
2133 	default:
2134 		panic("%s: unknown address family %u", __func__, af);
2135 	}
2136 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2137 		if (op_pass != PFR_OP_PASS)
2138 			DPFPRINTF(PF_DEBUG_URGENT,
2139 			    ("pfr_update_stats: assertion failed.\n"));
2140 		op_pass = PFR_OP_XPASS;
2141 	}
2142 	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
2143 	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
2144 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2145 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2146 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2147 		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
2148 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2149 		    dir_out, op_pass, PFR_TYPE_BYTES), len);
2150 	}
2151 }
2152 
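/*
 * Editorial note: pfr_eth_attach_table() and pfr_attach_table() below share
 * the same logic and differ only in the ruleset type: look up (or create and
 * insert) the named table in the rule's anchor, wire up the root table for
 * anchored tables, and take a rule reference, setting PFR_TFLAG_REFERENCED
 * on the first one.  pfr_detach_table() further below undoes the reference.
 * A hedged sketch of how a rule load might bind a table name (caller and
 * variable names are illustrative, not taken from this file):
 *
 *	kt = pfr_attach_table(ruleset, tblname);
 *	if (kt == NULL)
 *		... fail the rule load, the table could not be created ...
 */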
2153 struct pfr_ktable *
2154 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
2155 {
2156 	struct pfr_ktable	*kt, *rt;
2157 	struct pfr_table	 tbl;
2158 	struct pf_keth_anchor	*ac = rs->anchor;
2159 
2160 	PF_RULES_WASSERT();
2161 
2162 	bzero(&tbl, sizeof(tbl));
2163 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2164 	if (ac != NULL)
2165 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2166 	kt = pfr_lookup_table(&tbl);
2167 	if (kt == NULL) {
2168 		kt = pfr_create_ktable(&tbl, time_second, 1);
2169 		if (kt == NULL)
2170 			return (NULL);
2171 		if (ac != NULL) {
2172 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2173 			rt = pfr_lookup_table(&tbl);
2174 			if (rt == NULL) {
2175 				rt = pfr_create_ktable(&tbl, 0, 1);
2176 				if (rt == NULL) {
2177 					pfr_destroy_ktable(kt, 0);
2178 					return (NULL);
2179 				}
2180 				pfr_insert_ktable(rt);
2181 			}
2182 			kt->pfrkt_root = rt;
2183 		}
2184 		pfr_insert_ktable(kt);
2185 	}
2186 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2187 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2188 	return (kt);
2189 }
2190 
2191 struct pfr_ktable *
2192 pfr_attach_table(struct pf_kruleset *rs, char *name)
2193 {
2194 	struct pfr_ktable	*kt, *rt;
2195 	struct pfr_table	 tbl;
2196 	struct pf_kanchor	*ac = rs->anchor;
2197 
2198 	PF_RULES_WASSERT();
2199 
2200 	bzero(&tbl, sizeof(tbl));
2201 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2202 	if (ac != NULL)
2203 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2204 	kt = pfr_lookup_table(&tbl);
2205 	if (kt == NULL) {
2206 		kt = pfr_create_ktable(&tbl, time_second, 1);
2207 		if (kt == NULL)
2208 			return (NULL);
2209 		if (ac != NULL) {
2210 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2211 			rt = pfr_lookup_table(&tbl);
2212 			if (rt == NULL) {
2213 				rt = pfr_create_ktable(&tbl, 0, 1);
2214 				if (rt == NULL) {
2215 					pfr_destroy_ktable(kt, 0);
2216 					return (NULL);
2217 				}
2218 				pfr_insert_ktable(rt);
2219 			}
2220 			kt->pfrkt_root = rt;
2221 		}
2222 		pfr_insert_ktable(kt);
2223 	}
2224 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2225 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2226 	return (kt);
2227 }
2228 
2229 void
2230 pfr_detach_table(struct pfr_ktable *kt)
2231 {
2232 
2233 	PF_RULES_WASSERT();
2234 	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2235 	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2236 
2237 	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2238 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2239 }
2240 
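/*
 * Editorial note: pfr_pool_get() picks the next address from a table used as
 * a round-robin address pool.  '*pidx' remembers the current block (entry
 * index) and 'counter' the last address handed out; the loop advances within
 * a network block, skips nested (more specific) blocks by consulting
 * rn_match(), honours an optional filter callback, and wraps around to index
 * 0 at most once before giving up with a nomatch.
 */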
2241 int
2242 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2243     sa_family_t af, pf_addr_filter_func_t filter)
2244 {
2245 	struct pf_addr		*addr, cur, mask, umask_addr;
2246 	union sockaddr_union	 uaddr, umask;
2247 	struct pfr_kentry	*ke, *ke2 = NULL;
2248 	int			 startidx, idx = -1, loop = 0, use_counter = 0;
2249 
2250 	MPASS(pidx != NULL);
2251 	MPASS(counter != NULL);
2252 
2253 	switch (af) {
2254 	case AF_INET:
2255 		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2256 		uaddr.sin.sin_family = AF_INET;
2257 		addr = (struct pf_addr *)&uaddr.sin.sin_addr;
2258 		break;
2259 	case AF_INET6:
2260 		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2261 		uaddr.sin6.sin6_family = AF_INET6;
2262 		addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
2263 		break;
2264 	}
2265 
2266 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2267 		kt = kt->pfrkt_root;
2268 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2269 		return (-1);
2270 
2271 	idx = *pidx;
2272 	if (idx >= 0)
2273 		use_counter = 1;
2274 	if (idx < 0)
2275 		idx = 0;
2276 	startidx = idx;
2277 
2278 _next_block:
2279 	if (loop && startidx == idx) {
2280 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2281 		return (1);
2282 	}
2283 
2284 	ke = pfr_kentry_byidx(kt, idx, af);
2285 	if (ke == NULL) {
2286 		/* we don't have this idx, try looping */
2287 		if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2288 			pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2289 			return (1);
2290 		}
2291 		idx = 0;
2292 		loop++;
2293 	}
2294 	pfr_prepare_network(&umask, af, ke->pfrke_net);
2295 	pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
2296 	pfr_sockaddr_to_pf_addr(&umask, &mask);
2297 
2298 	if (use_counter && !PF_AZERO(counter, af)) {
2299 		/* is supplied address within block? */
2300 		if (!PF_MATCHA(0, &cur, &mask, counter, af)) {
2301 			/* no, go to next block in table */
2302 			idx++;
2303 			use_counter = 0;
2304 			goto _next_block;
2305 		}
2306 		PF_ACPY(addr, counter, af);
2307 	} else {
2308 		/* use first address of block */
2309 		PF_ACPY(addr, &cur, af);
2310 	}
2311 
2312 	if (!KENTRY_NETWORK(ke)) {
2313 		/* this is a single IP address - no possible nested block */
2314 		if (filter && filter(af, addr)) {
2315 			idx++;
2316 			goto _next_block;
2317 		}
2318 		PF_ACPY(counter, addr, af);
2319 		*pidx = idx;
2320 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2321 		return (0);
2322 	}
2323 	for (;;) {
2324 		/* we don't want to use a nested block */
2325 		switch (af) {
2326 		case AF_INET:
2327 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2328 			    &kt->pfrkt_ip4->rh);
2329 			break;
2330 		case AF_INET6:
2331 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2332 			    &kt->pfrkt_ip6->rh);
2333 			break;
2334 		}
2335 		/* no need to check KENTRY_RNF_ROOT() here */
2336 		if (ke2 == ke) {
2337 			/* lookup returned the same block - perfect */
2338 			if (filter && filter(af, addr))
2339 				goto _next_entry;
2340 			PF_ACPY(counter, addr, af);
2341 			*pidx = idx;
2342 			pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2343 			return (0);
2344 		}
2345 
2346 _next_entry:
2347 		/* we need to increase the counter past the nested block */
2348 		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2349 		pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
2350 		PF_POOLMASK(addr, addr, &umask_addr, &pfr_ffaddr, af);
2351 		PF_AINC(addr, af);
2352 		if (!PF_MATCHA(0, &cur, &mask, addr, af)) {
2353 			/* ok, we reached the end of our main block */
2354 			/* go to next block in table */
2355 			idx++;
2356 			use_counter = 0;
2357 			goto _next_block;
2358 		}
2359 	}
2360 }
2361 
2362 static struct pfr_kentry *
2363 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2364 {
2365 	struct pfr_walktree	w;
2366 
2367 	bzero(&w, sizeof(w));
2368 	w.pfrw_op = PFRW_POOL_GET;
2369 	w.pfrw_free = idx;
2370 
2371 	switch (af) {
2372 #ifdef INET
2373 	case AF_INET:
2374 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2375 		return (w.pfrw_kentry);
2376 #endif /* INET */
2377 #ifdef INET6
2378 	case AF_INET6:
2379 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2380 		return (w.pfrw_kentry);
2381 #endif /* INET6 */
2382 	default:
2383 		return (NULL);
2384 	}
2385 }
2386 
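/*
 * Editorial note: pfr_dynaddr_update() refreshes a struct pfi_dynaddr from
 * the table contents: it resets the IPv4/IPv6 address counts and walks both
 * radix trees with a PFRW_DYNADDR_UPDATE walker (implemented in
 * pfr_walktree(), not shown in this excerpt), which recounts the entries of
 * each family.
 */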
2387 void
2388 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2389 {
2390 	struct pfr_walktree	w;
2391 
2392 	bzero(&w, sizeof(w));
2393 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2394 	w.pfrw_dyn = dyn;
2395 
2396 	dyn->pfid_acnt4 = 0;
2397 	dyn->pfid_acnt6 = 0;
2398 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2399 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2400 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2401 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2402 }
2403