xref: /freebsd/sys/netpfil/pf/pf_table.c (revision 4616481212302b5d875cfc7a00766af017318f7f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32  */
33 
34 #include <sys/cdefs.h>
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/mutex.h>
44 #include <sys/refcount.h>
45 #include <sys/socket.h>
46 #include <vm/uma.h>
47 
48 #include <net/if.h>
49 #include <net/vnet.h>
50 #include <net/pfvar.h>
51 
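/*
 * Fail an ioctl handler with EINVAL when it is invoked with any flag
 * outside of 'oklist', e.g. ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY) rejects
 * every other PFR_FLAG_* bit.
 */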
52 #define	ACCEPT_FLAGS(flags, oklist)		\
53 	do {					\
54 		if ((flags & ~(oklist)) &	\
55 		    PFR_FLAG_ALLMASK)		\
56 			return (EINVAL);	\
57 	} while (0)
58 
59 #define	FILLIN_SIN(sin, addr)			\
60 	do {					\
61 		(sin).sin_len = sizeof(sin);	\
62 		(sin).sin_family = AF_INET;	\
63 		(sin).sin_addr = (addr);	\
64 	} while (0)
65 
66 #define	FILLIN_SIN6(sin6, addr)			\
67 	do {					\
68 		(sin6).sin6_len = sizeof(sin6);	\
69 		(sin6).sin6_family = AF_INET6;	\
70 		(sin6).sin6_addr = (addr);	\
71 	} while (0)
72 
73 #define	SWAP(type, a1, a2)			\
74 	do {					\
75 		type tmp = a1;			\
76 		a1 = a2;			\
77 		a2 = tmp;			\
78 	} while (0)
79 
80 #define	AF_BITS(af)		(((af)==AF_INET)?32:128)
81 #define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
82 #define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
83 #define	KENTRY_RNF_ROOT(ke) \
84 		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
85 
86 #define	NO_ADDRESSES		(-1)
87 #define	ENQUEUE_UNMARKED_ONLY	(1)
88 #define	INVERT_NEG_FLAG		(1)
89 
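/*
 * Context for walks over the per-table radix trees.  pfrw_op selects
 * what pfr_walktree() does with each entry it visits:
 *
 *   PFRW_MARK		  clear the mark on every entry
 *   PFRW_SWEEP		  collect unmarked entries onto pfrw_workq
 *   PFRW_ENQUEUE	  collect every entry onto pfrw_workq
 *   PFRW_GET_ADDRS	  copy addresses out to the pfrw_addr array
 *   PFRW_GET_ASTATS	  copy address statistics out to pfrw_astats
 *   PFRW_POOL_GET	  return the pfrw_free'th non-negated entry
 *   PFRW_DYNADDR_UPDATE  refresh the dynamic address state in pfrw_dyn
 *   PFRW_COUNTERS	  allocate or free per-entry counters to match
 *			  the table's PFR_TFLAG_COUNTERS flag
 */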
90 struct pfr_walktree {
91 	enum pfrw_op {
92 		PFRW_MARK,
93 		PFRW_SWEEP,
94 		PFRW_ENQUEUE,
95 		PFRW_GET_ADDRS,
96 		PFRW_GET_ASTATS,
97 		PFRW_POOL_GET,
98 		PFRW_DYNADDR_UPDATE,
99 		PFRW_COUNTERS
100 	}	 pfrw_op;
101 	union {
102 		struct pfr_addr		*pfrw_addr;
103 		struct pfr_astats	*pfrw_astats;
104 		struct pfr_kentryworkq	*pfrw_workq;
105 		struct pfr_kentry	*pfrw_kentry;
106 		struct pfi_dynaddr	*pfrw_dyn;
107 	};
108 	int	 pfrw_free;
109 	int	 pfrw_flags;
110 };
111 
112 #define	senderr(e)	do { rv = (e); goto _bad; } while (0)
113 
114 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
115 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
116 #define	V_pfr_kentry_z		VNET(pfr_kentry_z)
117 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
118 #define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)
119 
120 static struct pf_addr	 pfr_ffaddr = {
121 	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
122 };
123 
124 static void		 pfr_copyout_astats(struct pfr_astats *,
125 			    const struct pfr_kentry *,
126 			    const struct pfr_walktree *);
127 static void		 pfr_copyout_addr(struct pfr_addr *,
128 			    const struct pfr_kentry *ke);
129 static int		 pfr_validate_addr(struct pfr_addr *);
130 static void		 pfr_enqueue_addrs(struct pfr_ktable *,
131 			    struct pfr_kentryworkq *, int *, int);
132 static void		 pfr_mark_addrs(struct pfr_ktable *);
133 static struct pfr_kentry
134 			*pfr_lookup_addr(struct pfr_ktable *,
135 			    struct pfr_addr *, int);
136 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
137 static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
138 static void		 pfr_destroy_kentry(struct pfr_kentry *);
139 static void		 pfr_insert_kentries(struct pfr_ktable *,
140 			    struct pfr_kentryworkq *, time_t);
141 static void		 pfr_remove_kentries(struct pfr_ktable *,
142 			    struct pfr_kentryworkq *);
143 static void		 pfr_clstats_kentries(struct pfr_ktable *,
144 			    struct pfr_kentryworkq *, time_t, int);
145 static void		 pfr_reset_feedback(struct pfr_addr *, int);
146 static void		 pfr_prepare_network(union sockaddr_union *, int, int);
147 static int		 pfr_route_kentry(struct pfr_ktable *,
148 			    struct pfr_kentry *);
149 static int		 pfr_unroute_kentry(struct pfr_ktable *,
150 			    struct pfr_kentry *);
151 static int		 pfr_walktree(struct radix_node *, void *);
152 static int		 pfr_validate_table(struct pfr_table *, int, int);
153 static int		 pfr_fix_anchor(char *);
154 static void		 pfr_commit_ktable(struct pfr_ktable *, time_t);
155 static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
156 static void		 pfr_insert_ktable(struct pfr_ktable *);
157 static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
158 static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
159 static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
160 			    int);
161 static void		 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
162 static struct pfr_ktable
163 			*pfr_create_ktable(struct pfr_table *, time_t, int);
164 static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
165 static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
166 static int		 pfr_ktable_compare(struct pfr_ktable *,
167 			    struct pfr_ktable *);
168 static struct pfr_ktable
169 			*pfr_lookup_table(struct pfr_table *);
170 static void		 pfr_clean_node_mask(struct pfr_ktable *,
171 			    struct pfr_kentryworkq *);
172 static int		 pfr_skip_table(struct pfr_table *,
173 			    struct pfr_ktable *, int);
174 static struct pfr_kentry
175 			*pfr_kentry_byidx(struct pfr_ktable *, int, int);
176 
177 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
178 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
179 
180 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
181 #define	V_pfr_ktables	VNET(pfr_ktables)
182 
183 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
184 #define	V_pfr_nulltable	VNET(pfr_nulltable)
185 
186 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
187 #define V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)
188 
189 void
190 pfr_initialize(void)
191 {
192 
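	/*
	 * Two per-VNET UMA zones: a per-CPU zone backing the per-entry
	 * counter arrays, and a regular zone for the entries themselves,
	 * capped at PFR_KENTRY_HIWAT and exported as the
	 * PF_LIMIT_TABLE_ENTRIES resource limit.
	 */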
193 	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
194 	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
195 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
196 	V_pfr_kentry_z = uma_zcreate("pf table entries",
197 	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
198 	    0);
199 	uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
200 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
201 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
202 }
203 
204 void
205 pfr_cleanup(void)
206 {
207 
208 	uma_zdestroy(V_pfr_kentry_z);
209 	uma_zdestroy(V_pfr_kentry_counter_z);
210 }
211 
212 int
213 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
214 {
215 	struct pfr_ktable	*kt;
216 	struct pfr_kentryworkq	 workq;
217 
218 	PF_RULES_WASSERT();
219 
220 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
221 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
222 		return (EINVAL);
223 	kt = pfr_lookup_table(tbl);
224 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
225 		return (ESRCH);
226 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
227 		return (EPERM);
228 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
229 
230 	if (!(flags & PFR_FLAG_DUMMY)) {
231 		pfr_remove_kentries(kt, &workq);
232 		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-zero pfrkt_cnt", __func__));
233 	}
234 	return (0);
235 }
236 
237 int
238 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
239     int *nadd, int flags)
240 {
241 	struct pfr_ktable	*kt, *tmpkt;
242 	struct pfr_kentryworkq	 workq;
243 	struct pfr_kentry	*p, *q;
244 	struct pfr_addr		*ad;
245 	int			 i, rv, xadd = 0;
246 	time_t			 tzero = time_second;
247 
248 	PF_RULES_WASSERT();
249 
250 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
251 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
252 		return (EINVAL);
253 	kt = pfr_lookup_table(tbl);
254 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
255 		return (ESRCH);
256 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
257 		return (EPERM);
258 	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
259 	if (tmpkt == NULL)
260 		return (ENOMEM);
261 	SLIST_INIT(&workq);
262 	for (i = 0, ad = addr; i < size; i++, ad++) {
263 		if (pfr_validate_addr(ad))
264 			senderr(EINVAL);
265 		p = pfr_lookup_addr(kt, ad, 1);
266 		q = pfr_lookup_addr(tmpkt, ad, 1);
267 		if (flags & PFR_FLAG_FEEDBACK) {
268 			if (q != NULL)
269 				ad->pfra_fback = PFR_FB_DUPLICATE;
270 			else if (p == NULL)
271 				ad->pfra_fback = PFR_FB_ADDED;
272 			else if (p->pfrke_not != ad->pfra_not)
273 				ad->pfra_fback = PFR_FB_CONFLICT;
274 			else
275 				ad->pfra_fback = PFR_FB_NONE;
276 		}
277 		if (p == NULL && q == NULL) {
278 			p = pfr_create_kentry(ad,
279 			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
280 			if (p == NULL)
281 				senderr(ENOMEM);
282 			if (pfr_route_kentry(tmpkt, p)) {
283 				pfr_destroy_kentry(p);
284 				ad->pfra_fback = PFR_FB_NONE;
285 			} else {
286 				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
287 				xadd++;
288 			}
289 		}
290 	}
291 	pfr_clean_node_mask(tmpkt, &workq);
292 	if (!(flags & PFR_FLAG_DUMMY))
293 		pfr_insert_kentries(kt, &workq, tzero);
294 	else
295 		pfr_destroy_kentries(&workq);
296 	if (nadd != NULL)
297 		*nadd += xadd;
298 	pfr_destroy_ktable(tmpkt, 0);
299 	return (0);
300 _bad:
301 	pfr_clean_node_mask(tmpkt, &workq);
302 	pfr_destroy_kentries(&workq);
303 	if (flags & PFR_FLAG_FEEDBACK)
304 		pfr_reset_feedback(addr, size);
305 	pfr_destroy_ktable(tmpkt, 0);
306 	return (rv);
307 }
308 
309 int
310 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
311     int *ndel, int flags)
312 {
313 	struct pfr_ktable	*kt;
314 	struct pfr_kentryworkq	 workq;
315 	struct pfr_kentry	*p;
316 	struct pfr_addr		*ad;
317 	int			 i, rv, xdel = 0, log = 1;
318 
319 	PF_RULES_WASSERT();
320 
321 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
322 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
323 		return (EINVAL);
324 	kt = pfr_lookup_table(tbl);
325 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
326 		return (ESRCH);
327 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
328 		return (EPERM);
329 	/*
330 	 * There are two algorithms to choose from here,
331 	 * with:
332 	 *   n: number of addresses to delete
333 	 *   N: number of addresses in the table
334 	 *
335 	 * one is O(N) and is better for large 'n',
336 	 * one is O(n*LOG(N)) and is better for small 'n'.
337 	 *
338 	 * The following code tries to decide which one is best.
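	 *
	 * Example: with N = 65536 entries, 'log' ends up as 18 (it starts
	 * at 1 and is bumped once per halving), so the full table scan is
	 * chosen when more than 65536 / 18 = 3640 addresses are deleted
	 * in a single call.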
339 	 */
340 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
341 		log++;
342 	if (size > kt->pfrkt_cnt/log) {
343 		/* full table scan */
344 		pfr_mark_addrs(kt);
345 	} else {
346 		/* iterate over addresses to delete */
347 		for (i = 0, ad = addr; i < size; i++, ad++) {
348 			if (pfr_validate_addr(ad))
349 				return (EINVAL);
350 			p = pfr_lookup_addr(kt, ad, 1);
351 			if (p != NULL)
352 				p->pfrke_mark = 0;
353 		}
354 	}
355 	SLIST_INIT(&workq);
356 	for (i = 0, ad = addr; i < size; i++, ad++) {
357 		if (pfr_validate_addr(ad))
358 			senderr(EINVAL);
359 		p = pfr_lookup_addr(kt, ad, 1);
360 		if (flags & PFR_FLAG_FEEDBACK) {
361 			if (p == NULL)
362 				ad->pfra_fback = PFR_FB_NONE;
363 			else if (p->pfrke_not != ad->pfra_not)
364 				ad->pfra_fback = PFR_FB_CONFLICT;
365 			else if (p->pfrke_mark)
366 				ad->pfra_fback = PFR_FB_DUPLICATE;
367 			else
368 				ad->pfra_fback = PFR_FB_DELETED;
369 		}
370 		if (p != NULL && p->pfrke_not == ad->pfra_not &&
371 		    !p->pfrke_mark) {
372 			p->pfrke_mark = 1;
373 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
374 			xdel++;
375 		}
376 	}
377 	if (!(flags & PFR_FLAG_DUMMY))
378 		pfr_remove_kentries(kt, &workq);
379 	if (ndel != NULL)
380 		*ndel = xdel;
381 	return (0);
382 _bad:
383 	if (flags & PFR_FLAG_FEEDBACK)
384 		pfr_reset_feedback(addr, size);
385 	return (rv);
386 }
387 
388 int
389 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
390     int *size2, int *nadd, int *ndel, int *nchange, int flags,
391     u_int32_t ignore_pfrt_flags)
392 {
393 	struct pfr_ktable	*kt, *tmpkt;
394 	struct pfr_kentryworkq	 addq, delq, changeq;
395 	struct pfr_kentry	*p, *q;
396 	struct pfr_addr		 ad;
397 	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
398 	time_t			 tzero = time_second;
399 
400 	PF_RULES_WASSERT();
401 
402 	ACCEPT_FLAGS(flags, PFR_FLAG_START | PFR_FLAG_DONE |
403 	    PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
404 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
405 	    PFR_FLAG_USERIOCTL))
406 		return (EINVAL);
407 	kt = pfr_lookup_table(tbl);
408 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
409 		return (ESRCH);
410 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
411 		return (EPERM);
412 	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
413 	if (tmpkt == NULL)
414 		return (ENOMEM);
415 	if (flags & PFR_FLAG_START)
416 		pfr_mark_addrs(kt);
417 	SLIST_INIT(&addq);
418 	SLIST_INIT(&delq);
419 	SLIST_INIT(&changeq);
420 	for (i = 0; i < size; i++) {
421 		/*
422 		 * XXXGL: understand pf_if usage of this function
423 		 * and make ad a moving pointer
424 		 */
425 		bcopy(addr + i, &ad, sizeof(ad));
426 		if (pfr_validate_addr(&ad))
427 			senderr(EINVAL);
428 		ad.pfra_fback = PFR_FB_NONE;
429 		p = pfr_lookup_addr(kt, &ad, 1);
430 		if (p != NULL) {
431 			if (p->pfrke_mark) {
432 				ad.pfra_fback = PFR_FB_DUPLICATE;
433 				goto _skip;
434 			}
435 			p->pfrke_mark = 1;
436 			if (p->pfrke_not != ad.pfra_not) {
437 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
438 				ad.pfra_fback = PFR_FB_CHANGED;
439 				xchange++;
440 			}
441 		} else {
442 			q = pfr_lookup_addr(tmpkt, &ad, 1);
443 			if (q != NULL) {
444 				ad.pfra_fback = PFR_FB_DUPLICATE;
445 				goto _skip;
446 			}
447 			p = pfr_create_kentry(&ad,
448 			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
449 			if (p == NULL)
450 				senderr(ENOMEM);
451 			p->pfrke_mark = PFR_FB_ADDED;
452 			if (pfr_route_kentry(tmpkt, p)) {
453 				pfr_destroy_kentry(p);
454 				ad.pfra_fback = PFR_FB_NONE;
455 			} else {
456 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
457 				ad.pfra_fback = PFR_FB_ADDED;
458 				xadd++;
459 			}
460 		}
461 _skip:
462 		if (flags & PFR_FLAG_FEEDBACK)
463 			bcopy(&ad, addr + i, sizeof(ad));
464 	}
465 	if (flags & PFR_FLAG_DONE)
466 		pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
467 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
468 		if (*size2 < size+xdel) {
469 			*size2 = size+xdel;
470 			senderr(0);
471 		}
472 		i = 0;
473 		SLIST_FOREACH(p, &delq, pfrke_workq) {
474 			pfr_copyout_addr(&ad, p);
475 			ad.pfra_fback = PFR_FB_DELETED;
476 			bcopy(&ad, addr + size + i, sizeof(ad));
477 			i++;
478 		}
479 	}
480 	pfr_clean_node_mask(tmpkt, &addq);
481 	if (!(flags & PFR_FLAG_DUMMY)) {
482 		pfr_insert_kentries(kt, &addq, tzero);
483 		pfr_remove_kentries(kt, &delq);
484 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
485 	} else
486 		pfr_destroy_kentries(&addq);
487 	if (nadd != NULL)
488 		*nadd = xadd;
489 	if (ndel != NULL)
490 		*ndel = xdel;
491 	if (nchange != NULL)
492 		*nchange = xchange;
493 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
494 		*size2 = size+xdel;
495 	pfr_destroy_ktable(tmpkt, 0);
496 	return (0);
497 _bad:
498 	pfr_clean_node_mask(tmpkt, &addq);
499 	pfr_destroy_kentries(&addq);
500 	if (flags & PFR_FLAG_FEEDBACK)
501 		pfr_reset_feedback(addr, size);
502 	pfr_destroy_ktable(tmpkt, 0);
503 	return (rv);
504 }
505 
506 int
507 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
508 	int *nmatch, int flags)
509 {
510 	struct pfr_ktable	*kt;
511 	struct pfr_kentry	*p;
512 	struct pfr_addr		*ad;
513 	int			 i, xmatch = 0;
514 
515 	PF_RULES_RASSERT();
516 
517 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
518 	if (pfr_validate_table(tbl, 0, 0))
519 		return (EINVAL);
520 	kt = pfr_lookup_table(tbl);
521 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
522 		return (ESRCH);
523 
524 	for (i = 0, ad = addr; i < size; i++, ad++) {
525 		if (pfr_validate_addr(ad))
526 			return (EINVAL);
527 		if (ADDR_NETWORK(ad))
528 			return (EINVAL);
529 		p = pfr_lookup_addr(kt, ad, 0);
530 		if (flags & PFR_FLAG_REPLACE)
531 			pfr_copyout_addr(ad, p);
532 		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
533 		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
534 		if (p != NULL && !p->pfrke_not)
535 			xmatch++;
536 	}
537 	if (nmatch != NULL)
538 		*nmatch = xmatch;
539 	return (0);
540 }
541 
542 int
543 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
544 	int flags)
545 {
546 	struct pfr_ktable	*kt;
547 	struct pfr_walktree	 w;
548 	int			 rv;
549 
550 	PF_RULES_RASSERT();
551 
552 	ACCEPT_FLAGS(flags, 0);
553 	if (pfr_validate_table(tbl, 0, 0))
554 		return (EINVAL);
555 	kt = pfr_lookup_table(tbl);
556 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
557 		return (ESRCH);
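	/*
	 * Two-pass ioctl convention: if the caller's buffer is too small,
	 * report the required element count in *size and copy nothing out.
	 */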
558 	if (kt->pfrkt_cnt > *size) {
559 		*size = kt->pfrkt_cnt;
560 		return (0);
561 	}
562 
563 	bzero(&w, sizeof(w));
564 	w.pfrw_op = PFRW_GET_ADDRS;
565 	w.pfrw_addr = addr;
566 	w.pfrw_free = kt->pfrkt_cnt;
567 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
568 	if (!rv)
569 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
570 		    pfr_walktree, &w);
571 	if (rv)
572 		return (rv);
573 
574 	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
575 	    w.pfrw_free));
576 
577 	*size = kt->pfrkt_cnt;
578 	return (0);
579 }
580 
581 int
582 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
583 	int flags)
584 {
585 	struct pfr_ktable	*kt;
586 	struct pfr_walktree	 w;
587 	struct pfr_kentryworkq	 workq;
588 	int			 rv;
589 	time_t			 tzero = time_second;
590 
591 	PF_RULES_RASSERT();
592 
593 	/* XXX PFR_FLAG_CLSTATS disabled */
594 	ACCEPT_FLAGS(flags, 0);
595 	if (pfr_validate_table(tbl, 0, 0))
596 		return (EINVAL);
597 	kt = pfr_lookup_table(tbl);
598 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
599 		return (ESRCH);
600 	if (kt->pfrkt_cnt > *size) {
601 		*size = kt->pfrkt_cnt;
602 		return (0);
603 	}
604 
605 	bzero(&w, sizeof(w));
606 	w.pfrw_op = PFRW_GET_ASTATS;
607 	w.pfrw_astats = addr;
608 	w.pfrw_free = kt->pfrkt_cnt;
609 	/*
610 	 * Flags below are for backward compatibility. It was possible to have
611 	 * a table without per-entry counters. Now they are always allocated,
612 	 * and we just discard the data when reading it if the table is not
613 	 * configured to have counters.
614 	 */
615 	w.pfrw_flags = kt->pfrkt_flags;
616 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
617 	if (!rv)
618 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
619 		    pfr_walktree, &w);
620 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
621 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
622 		pfr_clstats_kentries(kt, &workq, tzero, 0);
623 	}
624 	if (rv)
625 		return (rv);
626 
627 	if (w.pfrw_free) {
628 		printf("pfr_get_astats: corruption detected (%d).\n",
629 		    w.pfrw_free);
630 		return (ENOTTY);
631 	}
632 	*size = kt->pfrkt_cnt;
633 	return (0);
634 }
635 
636 int
637 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
638     int *nzero, int flags)
639 {
640 	struct pfr_ktable	*kt;
641 	struct pfr_kentryworkq	 workq;
642 	struct pfr_kentry	*p;
643 	struct pfr_addr		*ad;
644 	int			 i, rv, xzero = 0;
645 
646 	PF_RULES_WASSERT();
647 
648 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
649 	if (pfr_validate_table(tbl, 0, 0))
650 		return (EINVAL);
651 	kt = pfr_lookup_table(tbl);
652 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
653 		return (ESRCH);
654 	SLIST_INIT(&workq);
655 	for (i = 0, ad = addr; i < size; i++, ad++) {
656 		if (pfr_validate_addr(ad))
657 			senderr(EINVAL);
658 		p = pfr_lookup_addr(kt, ad, 1);
659 		if (flags & PFR_FLAG_FEEDBACK) {
660 			ad->pfra_fback = (p != NULL) ?
661 			    PFR_FB_CLEARED : PFR_FB_NONE;
662 		}
663 		if (p != NULL) {
664 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
665 			xzero++;
666 		}
667 	}
668 
669 	if (!(flags & PFR_FLAG_DUMMY))
670 		pfr_clstats_kentries(kt, &workq, time_second, 0);
671 	if (nzero != NULL)
672 		*nzero = xzero;
673 	return (0);
674 _bad:
675 	if (flags & PFR_FLAG_FEEDBACK)
676 		pfr_reset_feedback(addr, size);
677 	return (rv);
678 }
679 
680 static int
681 pfr_validate_addr(struct pfr_addr *ad)
682 {
683 	int i;
684 
685 	switch (ad->pfra_af) {
686 #ifdef INET
687 	case AF_INET:
688 		if (ad->pfra_net > 32)
689 			return (-1);
690 		break;
691 #endif /* INET */
692 #ifdef INET6
693 	case AF_INET6:
694 		if (ad->pfra_net > 128)
695 			return (-1);
696 		break;
697 #endif /* INET6 */
698 	default:
699 		return (-1);
700 	}
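	/*
	 * pfra_u is the first member of struct pfr_addr, so indexing the
	 * structure as a byte array below inspects the address itself:
	 * reject addresses with host bits set beyond the prefix length.
	 */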
701 	if (ad->pfra_net < 128 &&
702 		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
703 			return (-1);
704 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
705 		if (((caddr_t)ad)[i])
706 			return (-1);
707 	if (ad->pfra_not && ad->pfra_not != 1)
708 		return (-1);
709 	if (ad->pfra_fback != PFR_FB_NONE)
710 		return (-1);
711 	return (0);
712 }
713 
714 static void
715 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
716 	int *naddr, int sweep)
717 {
718 	struct pfr_walktree	w;
719 
720 	SLIST_INIT(workq);
721 	bzero(&w, sizeof(w));
722 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
723 	w.pfrw_workq = workq;
724 	if (kt->pfrkt_ip4 != NULL)
725 		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
726 		    pfr_walktree, &w))
727 			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
728 	if (kt->pfrkt_ip6 != NULL)
729 		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
730 		    pfr_walktree, &w))
731 			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
732 	if (naddr != NULL)
733 		*naddr = w.pfrw_free;
734 }
735 
736 static void
737 pfr_mark_addrs(struct pfr_ktable *kt)
738 {
739 	struct pfr_walktree	w;
740 
741 	bzero(&w, sizeof(w));
742 	w.pfrw_op = PFRW_MARK;
743 	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
744 		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
745 	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
746 		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
747 }
748 
749 static struct pfr_kentry *
750 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
751 {
752 	union sockaddr_union	 sa, mask;
753 	struct radix_head	*head = NULL;
754 	struct pfr_kentry	*ke;
755 
756 	PF_RULES_ASSERT();
757 
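	/*
	 * Network entries use an exact prefix lookup (rn_lookup); host
	 * entries use best-match (rn_match), optionally rejecting network
	 * entries when an exact host match is required.  The radix tree's
	 * internal RNF_ROOT nodes never count as matches.
	 */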
758 	bzero(&sa, sizeof(sa));
759 	switch (ad->pfra_af) {
760 	case AF_INET:
761 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
762 		head = &kt->pfrkt_ip4->rh;
763 		break;
764 	case AF_INET6:
765 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
766 		head = &kt->pfrkt_ip6->rh;
767 		break;
768 	default:
769 		unhandled_af(ad->pfra_af);
770 	}
771 	if (ADDR_NETWORK(ad)) {
772 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
773 		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
774 		if (ke && KENTRY_RNF_ROOT(ke))
775 			ke = NULL;
776 	} else {
777 		ke = (struct pfr_kentry *)rn_match(&sa, head);
778 		if (ke && KENTRY_RNF_ROOT(ke))
779 			ke = NULL;
780 		if (exact && ke && KENTRY_NETWORK(ke))
781 			ke = NULL;
782 	}
783 	return (ke);
784 }
785 
786 static struct pfr_kentry *
787 pfr_create_kentry(struct pfr_addr *ad, bool counters)
788 {
789 	struct pfr_kentry	*ke;
790 	counter_u64_t		 c;
791 
792 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
793 	if (ke == NULL)
794 		return (NULL);
795 
796 	switch (ad->pfra_af) {
797 	case AF_INET:
798 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
799 		break;
800 	case AF_INET6:
801 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
802 		break;
803 	default:
804 		unhandled_af(ad->pfra_af);
805 	}
806 	ke->pfrke_af = ad->pfra_af;
807 	ke->pfrke_net = ad->pfra_net;
808 	ke->pfrke_not = ad->pfra_not;
809 	ke->pfrke_counters.pfrkc_tzero = 0;
810 	if (counters) {
811 		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
812 		if (c == NULL) {
813 			pfr_destroy_kentry(ke);
814 			return (NULL);
815 		}
816 		ke->pfrke_counters.pfrkc_counters = c;
817 	}
818 	return (ke);
819 }
820 
821 static void
822 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
823 {
824 	struct pfr_kentry	*p;
825 
826 	while ((p = SLIST_FIRST(workq)) != NULL) {
827 		SLIST_REMOVE_HEAD(workq, pfrke_workq);
828 		pfr_destroy_kentry(p);
829 	}
830 }
831 
832 static void
833 pfr_destroy_kentry(struct pfr_kentry *ke)
834 {
835 	counter_u64_t c;
836 
837 	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
838 		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
839 	uma_zfree(V_pfr_kentry_z, ke);
840 }
841 
842 static void
843 pfr_insert_kentries(struct pfr_ktable *kt,
844     struct pfr_kentryworkq *workq, time_t tzero)
845 {
846 	struct pfr_kentry	*p;
847 	int			 rv, n = 0;
848 
849 	SLIST_FOREACH(p, workq, pfrke_workq) {
850 		rv = pfr_route_kentry(kt, p);
851 		if (rv) {
852 			printf("pfr_insert_kentries: cannot route entry "
853 			    "(code=%d).\n", rv);
854 			break;
855 		}
856 		p->pfrke_counters.pfrkc_tzero = tzero;
857 		n++;
858 	}
859 	kt->pfrkt_cnt += n;
860 }
861 
862 int
863 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
864 {
865 	struct pfr_kentry	*p;
866 	int			 rv;
867 
868 	p = pfr_lookup_addr(kt, ad, 1);
869 	if (p != NULL)
870 		return (0);
871 	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
872 	if (p == NULL)
873 		return (ENOMEM);
874 
875 	rv = pfr_route_kentry(kt, p);
876 	if (rv)
877 		return (rv);
878 
879 	p->pfrke_counters.pfrkc_tzero = tzero;
880 	kt->pfrkt_cnt++;
881 
882 	return (0);
883 }
884 
885 int
886 pfr_remove_kentry(struct pfr_ktable *kt, struct pfr_addr *ad)
887 {
888 	struct pfr_kentryworkq	 workq = SLIST_HEAD_INITIALIZER(workq);
889 	struct pfr_kentry	*p;
890 
891 	p = pfr_lookup_addr(kt, ad, 1);
892 	if (p == NULL || p->pfrke_not)
893 		return (ESRCH);
894 
895 	if (p->pfrke_mark)
896 		return (0);
897 
898 	p->pfrke_mark = 1;
899 	SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
900 	pfr_remove_kentries(kt, &workq);
901 
902 	return (0);
903 }
904 
905 static void
906 pfr_remove_kentries(struct pfr_ktable *kt,
907     struct pfr_kentryworkq *workq)
908 {
909 	struct pfr_kentry	*p;
910 	int			 n = 0;
911 
912 	SLIST_FOREACH(p, workq, pfrke_workq) {
913 		pfr_unroute_kentry(kt, p);
914 		n++;
915 	}
916 	kt->pfrkt_cnt -= n;
917 	pfr_destroy_kentries(workq);
918 }
919 
920 static void
921 pfr_clean_node_mask(struct pfr_ktable *kt,
922     struct pfr_kentryworkq *workq)
923 {
924 	struct pfr_kentry	*p;
925 
926 	SLIST_FOREACH(p, workq, pfrke_workq)
927 		pfr_unroute_kentry(kt, p);
928 }
929 
930 static void
931 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
932     time_t tzero, int negchange)
933 {
934 	struct pfr_kentry	*p;
935 	int			 i;
936 
937 	SLIST_FOREACH(p, workq, pfrke_workq) {
938 		if (negchange)
939 			p->pfrke_not = !p->pfrke_not;
940 		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
941 			for (i = 0; i < PFR_NUM_COUNTERS; i++)
942 				counter_u64_zero(
943 				    p->pfrke_counters.pfrkc_counters + i);
944 		p->pfrke_counters.pfrkc_tzero = tzero;
945 	}
946 }
947 
948 static void
949 pfr_reset_feedback(struct pfr_addr *addr, int size)
950 {
951 	struct pfr_addr	*ad;
952 	int		i;
953 
954 	for (i = 0, ad = addr; i < size; i++, ad++)
955 		ad->pfra_fback = PFR_FB_NONE;
956 }
957 
958 static void
959 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
960 {
961 	int	i;
962 
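	/*
	 * Build a netmask sockaddr from a prefix length; e.g. for AF_INET
	 * and net = 24 this produces htonl(-1 << 8) == 0xffffff00.
	 */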
963 	bzero(sa, sizeof(*sa));
964 	switch (af) {
965 	case AF_INET:
966 		sa->sin.sin_len = sizeof(sa->sin);
967 		sa->sin.sin_family = AF_INET;
968 		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
969 		break;
970 	case AF_INET6:
971 		sa->sin6.sin6_len = sizeof(sa->sin6);
972 		sa->sin6.sin6_family = AF_INET6;
973 		for (i = 0; i < 4; i++) {
974 			if (net <= 32) {
975 				sa->sin6.sin6_addr.s6_addr32[i] =
976 				    net ? htonl(-1 << (32-net)) : 0;
977 				break;
978 			}
979 			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
980 			net -= 32;
981 		}
982 		break;
983 	default:
984 		unhandled_af(af);
985 	}
986 }
987 
988 static int
989 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
990 {
991 	union sockaddr_union	 mask;
992 	struct radix_node	*rn;
993 	struct radix_head	*head = NULL;
994 
995 	PF_RULES_WASSERT();
996 
997 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
998 	switch (ke->pfrke_af) {
999 	case AF_INET:
1000 		head = &kt->pfrkt_ip4->rh;
1001 		break;
1002 	case AF_INET6:
1003 		head = &kt->pfrkt_ip6->rh;
1004 		break;
1005 	default:
1006 		unhandled_af(ke->pfrke_af);
1007 	}
1008 
1009 	if (KENTRY_NETWORK(ke)) {
1010 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1011 		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
1012 	} else
1013 		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
1014 
1015 	return (rn == NULL ? -1 : 0);
1016 }
1017 
1018 static int
1019 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1020 {
1021 	union sockaddr_union	 mask;
1022 	struct radix_node	*rn;
1023 	struct radix_head	*head = NULL;
1024 
1025 	switch (ke->pfrke_af) {
1026 	case AF_INET:
1027 		head = &kt->pfrkt_ip4->rh;
1028 		break;
1029 	case AF_INET6:
1030 		head = &kt->pfrkt_ip6->rh;
1031 		break;
1032 	default:
1033 		unhandled_af(ke->pfrke_af);
1034 	}
1035 
1036 	if (KENTRY_NETWORK(ke)) {
1037 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1038 		rn = rn_delete(&ke->pfrke_sa, &mask, head);
1039 	} else
1040 		rn = rn_delete(&ke->pfrke_sa, NULL, head);
1041 
1042 	if (rn == NULL) {
1043 		printf("pfr_unroute_kentry: delete failed.\n");
1044 		return (-1);
1045 	}
1046 	return (0);
1047 }
1048 
1049 static void
1050 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1051 {
1052 	bzero(ad, sizeof(*ad));
1053 	if (ke == NULL)
1054 		return;
1055 	ad->pfra_af = ke->pfrke_af;
1056 	ad->pfra_net = ke->pfrke_net;
1057 	ad->pfra_not = ke->pfrke_not;
1058 	switch (ad->pfra_af) {
1059 	case AF_INET:
1060 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1061 		break;
1062 	case AF_INET6:
1063 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1064 		break;
1065 	default:
1066 		unhandled_af(ad->pfra_af);
1067 	}
1068 }
1069 
1070 static void
1071 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1072     const struct pfr_walktree *w)
1073 {
1074 	int dir, op;
1075 	const struct pfr_kcounters *kc = &ke->pfrke_counters;
1076 
1077 	bzero(as, sizeof(*as));
1078 	pfr_copyout_addr(&as->pfras_a, ke);
1079 	as->pfras_tzero = kc->pfrkc_tzero;
1080 
1081 	if (!(w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
1082 	    kc->pfrkc_counters == NULL) {
1083 		bzero(as->pfras_packets, sizeof(as->pfras_packets));
1084 		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1085 		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1086 		return;
1087 	}
1088 
1089 	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
1090 		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1091 			as->pfras_packets[dir][op] = counter_u64_fetch(
1092 			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
1093 			as->pfras_bytes[dir][op] = counter_u64_fetch(
1094 			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
1095 		}
1096 	}
1097 }
1098 
1099 static void
1100 pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
1101 {
1102 	switch (sa->sa.sa_family) {
1103 	case AF_INET:
1104 		memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
1105 		break;
1106 	case AF_INET6:
1107 		memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
1108 		break;
1109 	default:
1110 		unhandled_af(sa->sa.sa_family);
1111 	}
1112 }
1113 
1114 static int
1115 pfr_walktree(struct radix_node *rn, void *arg)
1116 {
1117 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1118 	struct pfr_walktree	*w = arg;
1119 
1120 	switch (w->pfrw_op) {
1121 	case PFRW_MARK:
1122 		ke->pfrke_mark = 0;
1123 		break;
1124 	case PFRW_SWEEP:
1125 		if (ke->pfrke_mark)
1126 			break;
1127 		/* FALLTHROUGH */
1128 	case PFRW_ENQUEUE:
1129 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1130 		w->pfrw_free++;
1131 		break;
1132 	case PFRW_GET_ADDRS:
1133 		if (w->pfrw_free-- > 0) {
1134 			pfr_copyout_addr(w->pfrw_addr, ke);
1135 			w->pfrw_addr++;
1136 		}
1137 		break;
1138 	case PFRW_GET_ASTATS:
1139 		if (w->pfrw_free-- > 0) {
1140 			struct pfr_astats as;
1141 
1142 			pfr_copyout_astats(&as, ke, w);
1143 
1144 			bcopy(&as, w->pfrw_astats, sizeof(as));
1145 			w->pfrw_astats++;
1146 		}
1147 		break;
1148 	case PFRW_POOL_GET:
1149 		if (ke->pfrke_not)
1150 			break; /* negative entries are ignored */
1151 		if (!w->pfrw_free--) {
1152 			w->pfrw_kentry = ke;
1153 			return (1); /* finish search */
1154 		}
1155 		break;
1156 	case PFRW_DYNADDR_UPDATE:
1157 	    {
1158 		union sockaddr_union	pfr_mask;
1159 
1160 		switch (ke->pfrke_af) {
1161 		case AF_INET:
1162 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1163 				break;
1164 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1165 			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr4);
1166 			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask4);
1167 			break;
1168 		case AF_INET6:
1169 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1170 				break;
1171 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1172 			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr6);
1173 			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask6);
1174 			break;
1175 		default:
1176 			unhandled_af(ke->pfrke_af);
1177 		}
1178 		break;
1179 	    }
1180 	case PFRW_COUNTERS:
1181 	    {
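		/*
		 * Bring per-entry counter allocation in sync with the
		 * table's PFR_TFLAG_COUNTERS flag: allocate on demand,
		 * free when the flag has been cleared.
		 */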
1182 		if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
1183 			if (ke->pfrke_counters.pfrkc_counters != NULL)
1184 				break;
1185 			ke->pfrke_counters.pfrkc_counters =
1186 			    uma_zalloc_pcpu(V_pfr_kentry_counter_z,
1187 			    M_NOWAIT | M_ZERO);
1188 		} else {
1189 			uma_zfree_pcpu(V_pfr_kentry_counter_z,
1190 			    ke->pfrke_counters.pfrkc_counters);
1191 			ke->pfrke_counters.pfrkc_counters = NULL;
1192 		}
1193 		break;
1194 	    }
1195 	}
1196 	return (0);
1197 }
1198 
1199 int
1200 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1201 {
1202 	struct pfr_ktableworkq	 workq;
1203 	struct pfr_ktable	*p;
1204 	int			 xdel = 0;
1205 
1206 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1207 	if (pfr_fix_anchor(filter->pfrt_anchor))
1208 		return (EINVAL);
1209 	if (pfr_table_count(filter, flags) < 0)
1210 		return (ENOENT);
1211 
1212 	SLIST_INIT(&workq);
1213 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1214 		if (pfr_skip_table(filter, p, flags))
1215 			continue;
1216 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1217 			continue;
1218 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1219 			continue;
1220 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1221 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1222 		xdel++;
1223 	}
1224 	if (!(flags & PFR_FLAG_DUMMY))
1225 		pfr_setflags_ktables(&workq);
1226 	if (ndel != NULL)
1227 		*ndel = xdel;
1228 	return (0);
1229 }
1230 
1231 int
1232 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1233 {
1234 	struct pfr_ktableworkq	 addq, changeq;
1235 	struct pfr_ktable	*p, *q, *r, key;
1236 	int			 i, rv, xadd = 0;
1237 	time_t			 tzero = time_second;
1238 
1239 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1240 	SLIST_INIT(&addq);
1241 	SLIST_INIT(&changeq);
1242 	for (i = 0; i < size; i++) {
1243 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1244 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1245 		    flags & PFR_FLAG_USERIOCTL))
1246 			senderr(EINVAL);
1247 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1248 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1249 		if (p == NULL) {
1250 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1251 			if (p == NULL)
1252 				senderr(ENOMEM);
1253 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1254 				if (!pfr_ktable_compare(p, q)) {
1255 					pfr_destroy_ktable(p, 0);
1256 					goto _skip;
1257 				}
1258 			}
1259 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1260 			xadd++;
1261 			if (!key.pfrkt_anchor[0])
1262 				goto _skip;
1263 
1264 			/* find or create root table */
1265 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1266 			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1267 			if (r != NULL) {
1268 				p->pfrkt_root = r;
1269 				goto _skip;
1270 			}
1271 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1272 				if (!pfr_ktable_compare(&key, q)) {
1273 					p->pfrkt_root = q;
1274 					goto _skip;
1275 				}
1276 			}
1277 			key.pfrkt_flags = 0;
1278 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1279 			if (r == NULL)
1280 				senderr(ENOMEM);
1281 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1282 			p->pfrkt_root = r;
1283 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1284 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1285 				if (!pfr_ktable_compare(&key, q))
1286 					goto _skip;
1287 			p->pfrkt_nflags = (p->pfrkt_flags &
1288 			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1289 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1290 			xadd++;
1291 		}
1292 _skip:
1293 	;
1294 	}
1295 	if (!(flags & PFR_FLAG_DUMMY)) {
1296 		pfr_insert_ktables(&addq);
1297 		pfr_setflags_ktables(&changeq);
1298 	} else
1299 		 pfr_destroy_ktables(&addq, 0);
1300 	if (nadd != NULL)
1301 		*nadd = xadd;
1302 	return (0);
1303 _bad:
1304 	pfr_destroy_ktables(&addq, 0);
1305 	return (rv);
1306 }
1307 
1308 int
1309 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1310 {
1311 	struct pfr_ktableworkq	 workq;
1312 	struct pfr_ktable	*p, *q, key;
1313 	int			 i, xdel = 0;
1314 
1315 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1316 	SLIST_INIT(&workq);
1317 	for (i = 0; i < size; i++) {
1318 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1319 		if (pfr_validate_table(&key.pfrkt_t, 0,
1320 		    flags & PFR_FLAG_USERIOCTL))
1321 			return (EINVAL);
1322 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1323 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1324 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1325 				if (!pfr_ktable_compare(p, q))
1326 					goto _skip;
1327 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1328 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1329 			xdel++;
1330 		}
1331 _skip:
1332 	;
1333 	}
1334 
1335 	if (!(flags & PFR_FLAG_DUMMY))
1336 		pfr_setflags_ktables(&workq);
1337 	if (ndel != NULL)
1338 		*ndel = xdel;
1339 	return (0);
1340 }
1341 
1342 int
1343 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1344 	int flags)
1345 {
1346 	struct pfr_ktable	*p;
1347 	int			 n, nn;
1348 
1349 	PF_RULES_RASSERT();
1350 
1351 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1352 	if (pfr_fix_anchor(filter->pfrt_anchor))
1353 		return (EINVAL);
1354 	n = nn = pfr_table_count(filter, flags);
1355 	if (n < 0)
1356 		return (ENOENT);
1357 	if (n > *size) {
1358 		*size = n;
1359 		return (0);
1360 	}
1361 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1362 		if (pfr_skip_table(filter, p, flags))
1363 			continue;
1364 		if (n-- <= 0)
1365 			continue;
1366 		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1367 	}
1368 
1369 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1370 
1371 	*size = nn;
1372 	return (0);
1373 }
1374 
1375 int
1376 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1377 	int flags)
1378 {
1379 	struct pfr_ktable	*p;
1380 	struct pfr_ktableworkq	 workq;
1381 	int			 n, nn;
1382 	time_t			 tzero = time_second;
1383 	int			 pfr_dir, pfr_op;
1384 
1385 	/* XXX PFR_FLAG_CLSTATS disabled */
1386 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1387 	if (pfr_fix_anchor(filter->pfrt_anchor))
1388 		return (EINVAL);
1389 	n = nn = pfr_table_count(filter, flags);
1390 	if (n < 0)
1391 		return (ENOENT);
1392 	if (n > *size) {
1393 		*size = n;
1394 		return (0);
1395 	}
1396 	SLIST_INIT(&workq);
1397 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1398 		if (pfr_skip_table(filter, p, flags))
1399 			continue;
1400 		if (n-- <= 0)
1401 			continue;
1402 		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1403 		    sizeof(struct pfr_table));
1404 		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1405 			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1406 				tbl->pfrts_packets[pfr_dir][pfr_op] =
1407 				    pfr_kstate_counter_fetch(
1408 					&p->pfrkt_packets[pfr_dir][pfr_op]);
1409 				tbl->pfrts_bytes[pfr_dir][pfr_op] =
1410 				    pfr_kstate_counter_fetch(
1411 					&p->pfrkt_bytes[pfr_dir][pfr_op]);
1412 			}
1413 		}
1414 		tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
1415 		tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
1416 		tbl->pfrts_tzero = p->pfrkt_tzero;
1417 		tbl->pfrts_cnt = p->pfrkt_cnt;
1418 		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1419 			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1420 		tbl++;
1421 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1422 	}
1423 	if (flags & PFR_FLAG_CLSTATS)
1424 		pfr_clstats_ktables(&workq, tzero,
1425 		    flags & PFR_FLAG_ADDRSTOO);
1426 
1427 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1428 
1429 	*size = nn;
1430 	return (0);
1431 }
1432 
1433 int
1434 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1435 {
1436 	struct pfr_ktableworkq	 workq;
1437 	struct pfr_ktable	*p, key;
1438 	int			 i, xzero = 0;
1439 	time_t			 tzero = time_second;
1440 
1441 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1442 	SLIST_INIT(&workq);
1443 	for (i = 0; i < size; i++) {
1444 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1445 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1446 			return (EINVAL);
1447 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1448 		if (p != NULL) {
1449 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1450 			xzero++;
1451 		}
1452 	}
1453 	if (!(flags & PFR_FLAG_DUMMY))
1454 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1455 	if (nzero != NULL)
1456 		*nzero = xzero;
1457 	return (0);
1458 }
1459 
1460 int
1461 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1462 	int *nchange, int *ndel, int flags)
1463 {
1464 	struct pfr_ktableworkq	 workq;
1465 	struct pfr_ktable	*p, *q, key;
1466 	int			 i, xchange = 0, xdel = 0;
1467 
1468 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1469 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1470 	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1471 	    (setflag & clrflag))
1472 		return (EINVAL);
1473 	SLIST_INIT(&workq);
1474 	for (i = 0; i < size; i++) {
1475 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1476 		if (pfr_validate_table(&key.pfrkt_t, 0,
1477 		    flags & PFR_FLAG_USERIOCTL))
1478 			return (EINVAL);
1479 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1480 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1481 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1482 			    ~clrflag;
1483 			if (p->pfrkt_nflags == p->pfrkt_flags)
1484 				goto _skip;
1485 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1486 				if (!pfr_ktable_compare(p, q))
1487 					goto _skip;
1488 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1489 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1490 			    (clrflag & PFR_TFLAG_PERSIST) &&
1491 			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1492 				xdel++;
1493 			else
1494 				xchange++;
1495 		}
1496 _skip:
1497 	;
1498 	}
1499 	if (!(flags & PFR_FLAG_DUMMY))
1500 		pfr_setflags_ktables(&workq);
1501 	if (nchange != NULL)
1502 		*nchange = xchange;
1503 	if (ndel != NULL)
1504 		*ndel = xdel;
1505 	return (0);
1506 }
1507 
1508 int
1509 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1510 {
1511 	struct pfr_ktableworkq	 workq;
1512 	struct pfr_ktable	*p;
1513 	struct pf_kruleset	*rs;
1514 	int			 xdel = 0;
1515 
1516 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1517 	rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
1518 	if (rs == NULL)
1519 		return (ENOMEM);
1520 	SLIST_INIT(&workq);
1521 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1522 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1523 		    pfr_skip_table(trs, p, 0))
1524 			continue;
1525 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1526 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1527 		xdel++;
1528 	}
1529 	if (!(flags & PFR_FLAG_DUMMY)) {
1530 		pfr_setflags_ktables(&workq);
1531 		if (ticket != NULL)
1532 			*ticket = ++rs->tticket;
1533 		rs->topen = 1;
1534 	} else
1535 		pf_remove_if_empty_kruleset(rs);
1536 	if (ndel != NULL)
1537 		*ndel = xdel;
1538 	return (0);
1539 }
1540 
1541 int
1542 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1543     int *nadd, int *naddr, u_int32_t ticket, int flags)
1544 {
1545 	struct pfr_ktableworkq	 tableq;
1546 	struct pfr_kentryworkq	 addrq;
1547 	struct pfr_ktable	*kt, *rt, *shadow, key;
1548 	struct pfr_kentry	*p;
1549 	struct pfr_addr		*ad;
1550 	struct pf_kruleset	*rs;
1551 	int			 i, rv, xadd = 0, xaddr = 0;
1552 
1553 	PF_RULES_WASSERT();
1554 
1555 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1556 	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1557 		return (EINVAL);
1558 	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1559 	    flags & PFR_FLAG_USERIOCTL))
1560 		return (EINVAL);
1561 	rs = pf_find_kruleset(tbl->pfrt_anchor);
1562 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1563 		return (EBUSY);
1564 	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1565 	SLIST_INIT(&tableq);
1566 	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1567 	if (kt == NULL) {
1568 		kt = pfr_create_ktable(tbl, 0, 1);
1569 		if (kt == NULL)
1570 			return (ENOMEM);
1571 		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1572 		xadd++;
1573 		if (!tbl->pfrt_anchor[0])
1574 			goto _skip;
1575 
1576 		/* find or create root table */
1577 		bzero(&key, sizeof(key));
1578 		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1579 		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1580 		if (rt != NULL) {
1581 			kt->pfrkt_root = rt;
1582 			goto _skip;
1583 		}
1584 		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1585 		if (rt == NULL) {
1586 			pfr_destroy_ktables(&tableq, 0);
1587 			return (ENOMEM);
1588 		}
1589 		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1590 		kt->pfrkt_root = rt;
1591 	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1592 		xadd++;
1593 _skip:
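	/*
	 * Build a detached shadow table holding the inactive address set;
	 * pfr_commit_ktable() later merges or swaps it into the active
	 * table when the transaction is committed.
	 */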
1594 	shadow = pfr_create_ktable(tbl, 0, 0);
1595 	if (shadow == NULL) {
1596 		pfr_destroy_ktables(&tableq, 0);
1597 		return (ENOMEM);
1598 	}
1599 	SLIST_INIT(&addrq);
1600 	for (i = 0, ad = addr; i < size; i++, ad++) {
1601 		if (pfr_validate_addr(ad))
1602 			senderr(EINVAL);
1603 		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1604 			continue;
1605 		p = pfr_create_kentry(ad,
1606 		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1607 		if (p == NULL)
1608 			senderr(ENOMEM);
1609 		if (pfr_route_kentry(shadow, p)) {
1610 			pfr_destroy_kentry(p);
1611 			continue;
1612 		}
1613 		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1614 		xaddr++;
1615 	}
1616 	if (!(flags & PFR_FLAG_DUMMY)) {
1617 		if (kt->pfrkt_shadow != NULL)
1618 			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1619 		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1620 		pfr_insert_ktables(&tableq);
1621 		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1622 		    xaddr : NO_ADDRESSES;
1623 		kt->pfrkt_shadow = shadow;
1624 	} else {
1625 		pfr_clean_node_mask(shadow, &addrq);
1626 		pfr_destroy_ktable(shadow, 0);
1627 		pfr_destroy_ktables(&tableq, 0);
1628 		pfr_destroy_kentries(&addrq);
1629 	}
1630 	if (nadd != NULL)
1631 		*nadd = xadd;
1632 	if (naddr != NULL)
1633 		*naddr = xaddr;
1634 	return (0);
1635 _bad:
1636 	pfr_destroy_ktable(shadow, 0);
1637 	pfr_destroy_ktables(&tableq, 0);
1638 	pfr_destroy_kentries(&addrq);
1639 	return (rv);
1640 }
1641 
1642 int
1643 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1644 {
1645 	struct pfr_ktableworkq	 workq;
1646 	struct pfr_ktable	*p;
1647 	struct pf_kruleset	*rs;
1648 	int			 xdel = 0;
1649 
1650 	PF_RULES_WASSERT();
1651 
1652 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1653 	rs = pf_find_kruleset(trs->pfrt_anchor);
1654 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1655 		return (0);
1656 	SLIST_INIT(&workq);
1657 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1658 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1659 		    pfr_skip_table(trs, p, 0))
1660 			continue;
1661 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1662 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1663 		xdel++;
1664 	}
1665 	if (!(flags & PFR_FLAG_DUMMY)) {
1666 		pfr_setflags_ktables(&workq);
1667 		rs->topen = 0;
1668 		pf_remove_if_empty_kruleset(rs);
1669 	}
1670 	if (ndel != NULL)
1671 		*ndel = xdel;
1672 	return (0);
1673 }
1674 
1675 int
1676 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1677     int *nchange, int flags)
1678 {
1679 	struct pfr_ktable	*p, *q;
1680 	struct pfr_ktableworkq	 workq;
1681 	struct pf_kruleset	*rs;
1682 	int			 xadd = 0, xchange = 0;
1683 	time_t			 tzero = time_second;
1684 
1685 	PF_RULES_WASSERT();
1686 
1687 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1688 	rs = pf_find_kruleset(trs->pfrt_anchor);
1689 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1690 		return (EBUSY);
1691 
1692 	SLIST_INIT(&workq);
1693 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1694 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1695 		    pfr_skip_table(trs, p, 0))
1696 			continue;
1697 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1698 		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1699 			xchange++;
1700 		else
1701 			xadd++;
1702 	}
1703 
1704 	if (!(flags & PFR_FLAG_DUMMY)) {
1705 		SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
1706 			pfr_commit_ktable(p, tzero);
1707 		}
1708 		rs->topen = 0;
1709 		pf_remove_if_empty_kruleset(rs);
1710 	}
1711 	if (nadd != NULL)
1712 		*nadd = xadd;
1713 	if (nchange != NULL)
1714 		*nchange = xchange;
1715 
1716 	return (0);
1717 }
1718 
1719 static void
1720 pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
1721 {
1722 	counter_u64_t		*pkc, *qkc;
1723 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1724 	int			 nflags;
1725 
1726 	PF_RULES_WASSERT();
1727 
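	/*
	 * Three cases: the shadow carries no addresses (flags-only
	 * change), kt is active and must be merged entry by entry, or
	 * kt is inactive (cannot contain addresses) and the radix heads
	 * can simply be swapped.
	 */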
1728 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1729 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1730 			pfr_clstats_ktable(kt, tzero, 1);
1731 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1732 		/* kt might contain addresses */
1733 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1734 		struct pfr_kentry	*p, *q;
1735 		struct pfr_addr		 ad;
1736 
1737 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1738 		pfr_mark_addrs(kt);
1739 		SLIST_INIT(&addq);
1740 		SLIST_INIT(&changeq);
1741 		SLIST_INIT(&delq);
1742 		SLIST_INIT(&garbageq);
1743 		pfr_clean_node_mask(shadow, &addrq);
1744 		while ((p = SLIST_FIRST(&addrq)) != NULL) {
1745 			SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
1746 			pfr_copyout_addr(&ad, p);
1747 			q = pfr_lookup_addr(kt, &ad, 1);
1748 			if (q != NULL) {
1749 				if (q->pfrke_not != p->pfrke_not)
1750 					SLIST_INSERT_HEAD(&changeq, q,
1751 					    pfrke_workq);
1752 				pkc = &p->pfrke_counters.pfrkc_counters;
1753 				qkc = &q->pfrke_counters.pfrkc_counters;
1754 				if ((*pkc == NULL) != (*qkc == NULL))
1755 					SWAP(counter_u64_t, *pkc, *qkc);
1756 				q->pfrke_mark = 1;
1757 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1758 			} else {
1759 				p->pfrke_counters.pfrkc_tzero = tzero;
1760 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1761 			}
1762 		}
1763 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1764 		pfr_insert_kentries(kt, &addq, tzero);
1765 		pfr_remove_kentries(kt, &delq);
1766 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1767 		pfr_destroy_kentries(&garbageq);
1768 	} else {
1769 		/* kt cannot contain addresses */
1770 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1771 		    shadow->pfrkt_ip4);
1772 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1773 		    shadow->pfrkt_ip6);
1774 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1775 		pfr_clstats_ktable(kt, tzero, 1);
1776 	}
1777 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1778 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1779 		& ~PFR_TFLAG_INACTIVE;
1780 	pfr_destroy_ktable(shadow, 0);
1781 	kt->pfrkt_shadow = NULL;
1782 	pfr_setflags_ktable(kt, nflags);
1783 }
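/*
 * Editor's summary: pfr_commit_ktable() distinguishes three cases. A shadow
 * holding NO_ADDRESSES changes flags only (clearing stats if the table was
 * not active before); an already-active table is merged entry by entry so
 * existing addresses keep their counters; an inactive table simply has the
 * shadow's radix heads and address count swapped in wholesale.
 */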
1784 
1785 static int
1786 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1787 {
1788 	int i;
1789 
1790 	if (!tbl->pfrt_name[0])
1791 		return (-1);
1792 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1793 		return (-1);
1794 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1795 		return (-1);
1796 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1797 		if (tbl->pfrt_name[i])
1798 			return (-1);
1799 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1800 		return (-1);
1801 	if (tbl->pfrt_flags & ~allowedflags)
1802 		return (-1);
1803 	return (0);
1804 }
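/*
 * Editor's note: an illustrative, non-source sketch of preparing a header
 * that passes the validation above; the bzero() matters because every byte
 * after the name's terminating NUL must be zero as well. The table name
 * "badhosts" is hypothetical:
 *
 *	struct pfr_table tbl;
 *
 *	bzero(&tbl, sizeof(tbl));
 *	strlcpy(tbl.pfrt_name, "badhosts", sizeof(tbl.pfrt_name));
 *	if (pfr_validate_table(&tbl, PFR_TFLAG_USRMASK, 1))
 *		return (EINVAL);
 */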
1805 
1806 /*
1807  * Rewrite anchors referenced by tables to remove slashes
1808  * and check for validity.
1809  */
1810 static int
1811 pfr_fix_anchor(char *anchor)
1812 {
1813 	size_t siz = MAXPATHLEN;
1814 	int i;
1815 
1816 	if (anchor[0] == '/') {
1817 		char *path;
1818 		int off;
1819 
1820 		path = anchor;
1821 		off = 1;
1822 		while (*++path == '/')
1823 			off++;
1824 		bcopy(path, anchor, siz - off);
1825 		memset(anchor + siz - off, 0, off);
1826 	}
1827 	if (anchor[siz - 1])
1828 		return (-1);
1829 	for (i = strlen(anchor); i < siz; i++)
1830 		if (anchor[i])
1831 			return (-1);
1832 	return (0);
1833 }
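/*
 * Editor's note: the rewrite above happens in place; an anchor supplied as
 * "//foo/bar" becomes "foo/bar", and any path that is not NUL-terminated
 * and NUL-padded within MAXPATHLEN is rejected with -1.
 */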
1834 
1835 int
1836 pfr_table_count(struct pfr_table *filter, int flags)
1837 {
1838 	struct pf_kruleset *rs;
1839 
1840 	PF_RULES_ASSERT();
1841 
1842 	if (flags & PFR_FLAG_ALLRSETS)
1843 		return (V_pfr_ktable_cnt);
1844 	if (filter->pfrt_anchor[0]) {
1845 		rs = pf_find_kruleset(filter->pfrt_anchor);
1846 		return ((rs != NULL) ? rs->tables : -1);
1847 	}
1848 	return (pf_main_ruleset.tables);
1849 }
1850 
1851 static int
1852 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1853 {
1854 	if (flags & PFR_FLAG_ALLRSETS)
1855 		return (0);
1856 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1857 		return (1);
1858 	return (0);
1859 }
1860 
1861 static void
1862 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1863 {
1864 	struct pfr_ktable	*p;
1865 
1866 	SLIST_FOREACH(p, workq, pfrkt_workq)
1867 		pfr_insert_ktable(p);
1868 }
1869 
1870 static void
1871 pfr_insert_ktable(struct pfr_ktable *kt)
1872 {
1873 
1874 	PF_RULES_WASSERT();
1875 
1876 	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1877 	V_pfr_ktable_cnt++;
1878 	if (kt->pfrkt_root != NULL)
1879 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1880 			pfr_setflags_ktable(kt->pfrkt_root,
1881 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1882 }
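/*
 * Editor's note: the first anchor reference taken on a root table here
 * (PFR_REFCNT_ANCHOR going 0 -> 1) also raises PFR_TFLAG_REFDANCHOR, which
 * pfr_setflags_ktable() counts as a reason to keep the root table active
 * while any anchored table still points at it.
 */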
1883 
1884 static void
1885 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1886 {
1887 	struct pfr_ktable	*p, *q;
1888 
1889 	SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
1890 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1891 	}
1892 }
1893 
1894 static void
1895 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1896 {
1897 	struct pfr_kentryworkq	addrq;
1898 	struct pfr_walktree	w;
1899 
1900 	PF_RULES_WASSERT();
1901 
1902 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1903 	    !(newf & PFR_TFLAG_REFDANCHOR) &&
1904 	    !(newf & PFR_TFLAG_PERSIST))
1905 		newf &= ~PFR_TFLAG_ACTIVE;
1906 	if (!(newf & PFR_TFLAG_ACTIVE))
1907 		newf &= ~PFR_TFLAG_USRMASK;
1908 	if (!(newf & PFR_TFLAG_SETMASK)) {
1909 		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1910 		if (kt->pfrkt_root != NULL)
1911 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1912 				pfr_setflags_ktable(kt->pfrkt_root,
1913 				    kt->pfrkt_root->pfrkt_flags &
1914 					~PFR_TFLAG_REFDANCHOR);
1915 		pfr_destroy_ktable(kt, 1);
1916 		V_pfr_ktable_cnt--;
1917 		return;
1918 	}
1919 	if ((newf & PFR_TFLAG_COUNTERS) && !(kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1920 		bzero(&w, sizeof(w));
1921 		w.pfrw_op = PFRW_COUNTERS;
1922 		w.pfrw_flags |= PFR_TFLAG_COUNTERS;
1923 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1924 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1925 	}
1926 	if (!(newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1927 		bzero(&w, sizeof(w));
1928 		w.pfrw_op = PFRW_COUNTERS;
1929 		w.pfrw_flags |= 0;	/* leave PFR_TFLAG_COUNTERS clear so the walker frees counters */
1930 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1931 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1932 	}
1933 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1934 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1935 		pfr_remove_kentries(kt, &addrq);
1936 	}
1937 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1938 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1939 		kt->pfrkt_shadow = NULL;
1940 	}
1941 	kt->pfrkt_flags = newf;
1942 }
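/*
 * Editor's summary: this function decides a table's fate from its new flag
 * set: without REFERENCED, REFDANCHOR or PERSIST the table loses ACTIVE;
 * without any SETMASK flag it is removed from the tree and destroyed
 * outright; and toggling PFR_TFLAG_COUNTERS walks both radix trees so that
 * per-entry counters are allocated or freed immediately.
 */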
1943 
1944 static void
1945 pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
1946 {
1947 	struct pfr_ktable	*p;
1948 
1949 	SLIST_FOREACH(p, workq, pfrkt_workq)
1950 		pfr_clstats_ktable(p, tzero, recurse);
1951 }
1952 
1953 static void
1954 pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
1955 {
1956 	struct pfr_kentryworkq	 addrq;
1957 	int			 pfr_dir, pfr_op;
1958 
1959 	MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());
1960 
1961 	if (recurse) {
1962 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1963 		pfr_clstats_kentries(kt, &addrq, tzero, 0);
1964 	}
1965 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1966 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1967 			pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
1968 			pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
1969 		}
1970 	}
1971 	pfr_kstate_counter_zero(&kt->pfrkt_match);
1972 	pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
1973 	kt->pfrkt_tzero = tzero;
1974 }
1975 
1976 static struct pfr_ktable *
1977 pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
1978 {
1979 	struct pfr_ktable	*kt;
1980 	struct pf_kruleset	*rs;
1981 	int			 pfr_dir, pfr_op;
1982 
1983 	PF_RULES_WASSERT();
1984 
1985 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1986 	if (kt == NULL)
1987 		return (NULL);
1988 	kt->pfrkt_t = *tbl;
1989 
1990 	if (attachruleset) {
1991 		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
1992 		if (!rs) {
1993 			pfr_destroy_ktable(kt, 0);
1994 			return (NULL);
1995 		}
1996 		kt->pfrkt_rs = rs;
1997 		rs->tables++;
1998 	}
1999 
2000 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2001 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2002 			if (pfr_kstate_counter_init(
2003 			    &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
2004 				pfr_destroy_ktable(kt, 0);
2005 				return (NULL);
2006 			}
2007 			if (pfr_kstate_counter_init(
2008 			    &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
2009 				pfr_destroy_ktable(kt, 0);
2010 				return (NULL);
2011 			}
2012 		}
2013 	}
2014 	if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
2015 		pfr_destroy_ktable(kt, 0);
2016 		return (NULL);
2017 	}
2018 
2019 	if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
2020 		pfr_destroy_ktable(kt, 0);
2021 		return (NULL);
2022 	}
2023 
2024 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
2025 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
2026 	    !rn_inithead((void **)&kt->pfrkt_ip6,
2027 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
2028 		pfr_destroy_ktable(kt, 0);
2029 		return (NULL);
2030 	}
2031 	kt->pfrkt_tzero = tzero;
2032 
2033 	return (kt);
2034 }
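/*
 * Editor's note: all allocations above use M_NOWAIT because the rules write
 * lock is held. Every failure path funnels into pfr_destroy_ktable(kt, 0),
 * which copes with a half-built table; the radix heads, for instance, are
 * NULL-checked before rn_detachhead() during teardown.
 */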
2035 
2036 static void
2037 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2038 {
2039 	struct pfr_ktable	*p;
2040 
2041 	while ((p = SLIST_FIRST(workq)) != NULL) {
2042 		SLIST_REMOVE_HEAD(workq, pfrkt_workq);
2043 		pfr_destroy_ktable(p, flushaddr);
2044 	}
2045 }
2046 
2047 static void
2048 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2049 {
2050 	struct pfr_kentryworkq	 addrq;
2051 	int			 pfr_dir, pfr_op;
2052 
2053 	if (flushaddr) {
2054 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2055 		pfr_clean_node_mask(kt, &addrq);
2056 		pfr_destroy_kentries(&addrq);
2057 	}
2058 	if (kt->pfrkt_ip4 != NULL)
2059 		rn_detachhead((void **)&kt->pfrkt_ip4);
2060 	if (kt->pfrkt_ip6 != NULL)
2061 		rn_detachhead((void **)&kt->pfrkt_ip6);
2062 	if (kt->pfrkt_shadow != NULL)
2063 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2064 	if (kt->pfrkt_rs != NULL) {
2065 		kt->pfrkt_rs->tables--;
2066 		pf_remove_if_empty_kruleset(kt->pfrkt_rs);
2067 	}
2068 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2069 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2070 			pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
2071 			pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
2072 		}
2073 	}
2074 	pfr_kstate_counter_deinit(&kt->pfrkt_match);
2075 	pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);
2076 
2077 	free(kt, M_PFTABLE);
2078 }
2079 
2080 static int
2081 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2082 {
2083 	int d;
2084 
2085 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2086 		return (d);
2087 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2088 }
2089 
2090 static struct pfr_ktable *
2091 pfr_lookup_table(struct pfr_table *tbl)
2092 {
2093 	/* struct pfr_ktable starts like a struct pfr_table */
2094 	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2095 	    (struct pfr_ktable *)tbl));
2096 }
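/*
 * Editor's note: the cast above is valid only because struct pfr_ktable
 * embeds its struct pfr_table as the first member, so pfr_ktable_compare()
 * reads the name and anchor at the same offsets in both structures.
 */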
2097 
2098 struct pfr_kentry *
2099 pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2100     int exact)
2101 {
2102 	struct pfr_kentry	*ke = NULL;
2103 
2104 	PF_RULES_RASSERT();
2105 
2106 	kt = pfr_ktable_select_active(kt);
2107 	if (kt == NULL)
2108 		return (NULL);
2109 
2110 	switch (af) {
2111 #ifdef INET
2112 	case AF_INET:
2113 	    {
2114 		struct sockaddr_in sin;
2115 
2116 		bzero(&sin, sizeof(sin));
2117 		sin.sin_len = sizeof(sin);
2118 		sin.sin_family = AF_INET;
2119 		sin.sin_addr.s_addr = a->addr32[0];
2120 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2121 		if (ke && KENTRY_RNF_ROOT(ke))
2122 			ke = NULL;
2123 		break;
2124 	    }
2125 #endif /* INET */
2126 #ifdef INET6
2127 	case AF_INET6:
2128 	    {
2129 		struct sockaddr_in6 sin6;
2130 
2131 		bzero(&sin6, sizeof(sin6));
2132 		sin6.sin6_len = sizeof(sin6);
2133 		sin6.sin6_family = AF_INET6;
2134 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2135 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2136 		if (ke && KENTRY_RNF_ROOT(ke))
2137 			ke = NULL;
2138 		break;
2139 	    }
2140 #endif /* INET6 */
2141 	default:
2142 		unhandled_af(af);
2143 	}
2144 	if (exact && ke && KENTRY_NETWORK(ke))
2145 		ke = NULL;
2146 
2147 	return (ke);
2148 }
2149 
2150 int
2151 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2152 {
2153 	struct pfr_kentry	*ke = NULL;
2154 	int match;
2155 
2156 	ke = pfr_kentry_byaddr(kt, a, af, 0);
2157 
2158 	match = (ke && !ke->pfrke_not);
2159 	if (match)
2160 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2161 	else
2162 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2163 
2164 	return (match);
2165 }
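/*
 * Editor's note: an illustrative, non-source sketch of the typical caller
 * pattern, where "pd" stands for a hypothetical packet descriptor and
 * "action" is likewise hypothetical:
 *
 *	if (pfr_match_addr(kt, &pd->src, pd->af))
 *		action = PF_DROP;
 *
 * The table's match/nomatch statistics are updated as a side effect of the
 * call.
 */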
2166 
2167 void
2168 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2169     u_int64_t len, int dir_out, int op_pass, int notrule)
2170 {
2171 	struct pfr_kentry	*ke = NULL;
2172 
2173 	kt = pfr_ktable_select_active(kt);
2174 	if (kt == NULL)
2175 		return;
2176 
2177 	switch (af) {
2178 #ifdef INET
2179 	case AF_INET:
2180 	    {
2181 		struct sockaddr_in sin;
2182 
2183 		bzero(&sin, sizeof(sin));
2184 		sin.sin_len = sizeof(sin);
2185 		sin.sin_family = AF_INET;
2186 		sin.sin_addr.s_addr = a->addr32[0];
2187 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2188 		if (ke && KENTRY_RNF_ROOT(ke))
2189 			ke = NULL;
2190 		break;
2191 	    }
2192 #endif /* INET */
2193 #ifdef INET6
2194 	case AF_INET6:
2195 	    {
2196 		struct sockaddr_in6 sin6;
2197 
2198 		bzero(&sin6, sizeof(sin6));
2199 		sin6.sin6_len = sizeof(sin6);
2200 		sin6.sin6_family = AF_INET6;
2201 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2202 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2203 		if (ke && KENTRY_RNF_ROOT(ke))
2204 			ke = NULL;
2205 		break;
2206 	    }
2207 #endif /* INET6 */
2208 	default:
2209 		unhandled_af(af);
2210 	}
2211 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2212 		if (op_pass != PFR_OP_PASS)
2213 			DPFPRINTF(PF_DEBUG_URGENT,
2214 			    "pfr_update_stats: assertion failed.");
2215 		op_pass = PFR_OP_XPASS;
2216 	}
2217 	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
2218 	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
2219 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2220 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2221 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2222 		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
2223 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2224 		    dir_out, op_pass, PFR_TYPE_BYTES), len);
2225 	}
2226 }
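/*
 * Editor's summary: when the lookup outcome contradicts what the rule
 * expected ("notrule"), the packet and byte counts are booked under
 * PFR_OP_XPASS instead of PFR_OP_PASS, and the per-entry counters are
 * deliberately left untouched.
 */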
2227 
2228 struct pfr_ktable *
2229 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
2230 {
2231 	struct pfr_ktable	*kt, *rt;
2232 	struct pfr_table	 tbl;
2233 	struct pf_keth_anchor	*ac = rs->anchor;
2234 
2235 	PF_RULES_WASSERT();
2236 
2237 	bzero(&tbl, sizeof(tbl));
2238 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2239 	if (ac != NULL)
2240 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2241 	kt = pfr_lookup_table(&tbl);
2242 	if (kt == NULL) {
2243 		kt = pfr_create_ktable(&tbl, time_second, 1);
2244 		if (kt == NULL)
2245 			return (NULL);
2246 		if (ac != NULL) {
2247 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2248 			rt = pfr_lookup_table(&tbl);
2249 			if (rt == NULL) {
2250 				rt = pfr_create_ktable(&tbl, 0, 1);
2251 				if (rt == NULL) {
2252 					pfr_destroy_ktable(kt, 0);
2253 					return (NULL);
2254 				}
2255 				pfr_insert_ktable(rt);
2256 			}
2257 			kt->pfrkt_root = rt;
2258 		}
2259 		pfr_insert_ktable(kt);
2260 	}
2261 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2262 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2263 	return (kt);
2264 }
2265 
2266 struct pfr_ktable *
2267 pfr_attach_table(struct pf_kruleset *rs, char *name)
2268 {
2269 	struct pfr_ktable	*kt, *rt;
2270 	struct pfr_table	 tbl;
2271 	struct pf_kanchor	*ac = rs->anchor;
2272 
2273 	PF_RULES_WASSERT();
2274 
2275 	bzero(&tbl, sizeof(tbl));
2276 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2277 	if (ac != NULL)
2278 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2279 	kt = pfr_lookup_table(&tbl);
2280 	if (kt == NULL) {
2281 		kt = pfr_create_ktable(&tbl, time_second, 1);
2282 		if (kt == NULL)
2283 			return (NULL);
2284 		if (ac != NULL) {
2285 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2286 			rt = pfr_lookup_table(&tbl);
2287 			if (rt == NULL) {
2288 				rt = pfr_create_ktable(&tbl, 0, 1);
2289 				if (rt == NULL) {
2290 					pfr_destroy_ktable(kt, 0);
2291 					return (NULL);
2292 				}
2293 				pfr_insert_ktable(rt);
2294 			}
2295 			kt->pfrkt_root = rt;
2296 		}
2297 		pfr_insert_ktable(kt);
2298 	}
2299 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2300 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2301 	return (kt);
2302 }
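/*
 * Editor's note: an illustrative, non-source sketch of a rule taking a
 * table reference at load time; "rule", "ruleset" and the table name are
 * hypothetical:
 *
 *	struct pfr_ktable *kt;
 *
 *	kt = pfr_attach_table(ruleset, "spammers");
 *	if (kt == NULL)
 *		return (ENOMEM);
 *	rule->src.addr.p.tbl = kt;
 *
 * The reference is dropped again through pfr_detach_table().
 */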
2303 
2304 void
2305 pfr_detach_table(struct pfr_ktable *kt)
2306 {
2307 
2308 	PF_RULES_WASSERT();
2309 	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2310 	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2311 
2312 	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2313 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2314 }
2315 
2316 int
2317 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2318     sa_family_t af, pf_addr_filter_func_t filter, bool loop_once)
2319 {
2320 	struct pf_addr		*addr, cur, mask, umask_addr;
2321 	union sockaddr_union	 uaddr, umask;
2322 	struct pfr_kentry	*ke, *ke2 = NULL;
2323 	int			 startidx, idx = -1, loop = 0, use_counter = 0;
2324 
2325 	MPASS(pidx != NULL);
2326 	MPASS(counter != NULL);
2327 
2328 	switch (af) {
2329 	case AF_INET:
2330 		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2331 		uaddr.sin.sin_family = AF_INET;
2332 		addr = (struct pf_addr *)&uaddr.sin.sin_addr;
2333 		break;
2334 	case AF_INET6:
2335 		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2336 		uaddr.sin6.sin6_family = AF_INET6;
2337 		addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
2338 		break;
2339 	default:
2340 		unhandled_af(af);
2341 	}
2342 
2343 	kt = pfr_ktable_select_active(kt);
2344 	if (kt == NULL)
2345 		return (-1);
2346 
2347 	idx = *pidx;
2348 	if (idx < 0 || idx >= kt->pfrkt_cnt)
2349 		idx = 0;
2350 	else if (counter != NULL)
2351 		use_counter = 1;
2352 	startidx = idx;
2353 
2354 _next_block:
2355 	if (loop && startidx == idx) {
2356 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2357 		return (1);
2358 	}
2359 
2360 	ke = pfr_kentry_byidx(kt, idx, af);
2361 	if (ke == NULL) {
2362 		/* we don't have this idx, try looping */
2363 		if ((loop || loop_once) || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2364 			pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2365 			return (1);
2366 		}
2367 		idx = 0;
2368 		loop++;
2369 	}
2370 	pfr_prepare_network(&umask, af, ke->pfrke_net);
2371 	pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
2372 	pfr_sockaddr_to_pf_addr(&umask, &mask);
2373 
2374 	if (use_counter && !PF_AZERO(counter, af)) {
2375 		/* is supplied address within block? */
2376 		if (!pf_match_addr(0, &cur, &mask, counter, af)) {
2377 			/* no, go to next block in table */
2378 			idx++;
2379 			use_counter = 0;
2380 			goto _next_block;
2381 		}
2382 		pf_addrcpy(addr, counter, af);
2383 	} else {
2384 		/* use first address of block */
2385 		pf_addrcpy(addr, &cur, af);
2386 	}
2387 
2388 	if (!KENTRY_NETWORK(ke)) {
2389 		/* this is a single IP address - no possible nested block */
2390 		if (filter && filter(af, addr)) {
2391 			idx++;
2392 			goto _next_block;
2393 		}
2394 		pf_addrcpy(counter, addr, af);
2395 		*pidx = idx;
2396 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2397 		return (0);
2398 	}
2399 	for (;;) {
2400 		/* we don't want to use a nested block */
2401 		switch (af) {
2402 		case AF_INET:
2403 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2404 			    &kt->pfrkt_ip4->rh);
2405 			break;
2406 		case AF_INET6:
2407 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2408 			    &kt->pfrkt_ip6->rh);
2409 			break;
2410 		default:
2411 			unhandled_af(af);
2412 		}
2413 		/* no need to check KENTRY_RNF_ROOT() here */
2414 		if (ke2 == ke) {
2415 			/* lookup returned the same block - perfect */
2416 			if (filter && filter(af, addr))
2417 				goto _next_entry;
2418 			pf_addrcpy(counter, addr, af);
2419 			*pidx = idx;
2420 			pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2421 			return (0);
2422 		}
2423 
2424 _next_entry:
2425 		/* we need to increase the counter past the nested block */
2426 		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2427 		pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
2428 		pf_poolmask(addr, addr, &umask_addr, &pfr_ffaddr, af);
2429 		pf_addr_inc(addr, af);
2430 		if (!pf_match_addr(0, &cur, &mask, addr, af)) {
2431 			/* ok, we reached the end of our main block */
2432 			/* go to next block in table */
2433 			idx++;
2434 			use_counter = 0;
2435 			goto _next_block;
2436 		}
2437 	}
2438 }
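/*
 * Editor's summary: pfr_pool_get() performs round-robin address selection.
 * *pidx selects the current block and "counter" the current address within
 * it; nested, more-specific blocks are detected by re-probing rn_match()
 * and skipped by advancing the address just past them. It returns 0 on
 * success, 1 once the table wraps without a usable address, and -1 when no
 * active table exists.
 */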
2439 
2440 static struct pfr_kentry *
2441 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2442 {
2443 	struct pfr_walktree	w;
2444 
2445 	bzero(&w, sizeof(w));
2446 	w.pfrw_op = PFRW_POOL_GET;
2447 	w.pfrw_free = idx;
2448 
2449 	switch (af) {
2450 #ifdef INET
2451 	case AF_INET:
2452 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2453 		return (w.pfrw_kentry);
2454 #endif /* INET */
2455 #ifdef INET6
2456 	case AF_INET6:
2457 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2458 		return (w.pfrw_kentry);
2459 #endif /* INET6 */
2460 	default:
2461 		return (NULL);
2462 	}
2463 }
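/*
 * Editor's note: the PFRW_POOL_GET walker appears to count w.pfrw_free down
 * by one per eligible entry and latch the entry where the count runs out,
 * making this a linear-time lookup of the idx'th address block in the tree.
 */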
2464 
2465 void
2466 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2467 {
2468 	struct pfr_walktree	w;
2469 
2470 	bzero(&w, sizeof(w));
2471 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2472 	w.pfrw_dyn = dyn;
2473 
2474 	dyn->pfid_acnt4 = 0;
2475 	dyn->pfid_acnt6 = 0;
2476 	switch (dyn->pfid_af) {
2477 	case AF_UNSPEC: /* look up addresses for both IPv4 and IPv6 */
2478 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2479 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2480 		break;
2481 	case AF_INET:
2482 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2483 		break;
2484 	case AF_INET6:
2485 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2486 		break;
2487 	default:
2488 		unhandled_af(dyn->pfid_af);
2489 	}
2490 }
2491 
2492 struct pfr_ktable *
2493 pfr_ktable_select_active(struct pfr_ktable *kt)
2494 {
2495 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2496 		kt = kt->pfrkt_root;
2497 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2498 		return (NULL);
2499 
2500 	return (kt);
2501 }
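/*
 * Editor's note: tables attached inside anchors may not be active yet; this
 * helper falls back from such a table to its root and returns NULL only if
 * neither one carries PFR_TFLAG_ACTIVE.
 */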
2502