xref: /freebsd/sys/netpfil/pf/pf_table.c (revision 2e5b60079b7d8c3ca68f1390cd90f305e651f8d3)
1 /*-
2  * Copyright (c) 2002 Cedric Berger
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  *    - Redistributions of source code must retain the above copyright
10  *      notice, this list of conditions and the following disclaimer.
11  *    - Redistributions in binary form must reproduce the above
12  *      copyright notice, this list of conditions and the following
13  *      disclaimer in the documentation and/or other materials provided
14  *      with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
19  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
20  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  *
29  *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/mutex.h>
44 #include <sys/refcount.h>
45 #include <sys/rwlock.h>
46 #include <sys/socket.h>
47 #include <vm/uma.h>
48 
49 #include <net/if.h>
50 #include <net/vnet.h>
51 #include <net/pfvar.h>
52 
53 #define	ACCEPT_FLAGS(flags, oklist)		\
54 	do {					\
55 		if ((flags & ~(oklist)) &	\
56 		    PFR_FLAG_ALLMASK)		\
57 			return (EINVAL);	\
58 	} while (0)
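
/*
 * Note: ACCEPT_FLAGS() fails a request with EINVAL when the caller set
 * any public flag (i.e. within PFR_FLAG_ALLMASK) that the handler does
 * not list in 'oklist'; bits outside PFR_FLAG_ALLMASK pass through
 * unchecked.  For example, ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY) rejects
 * a request carrying PFR_FLAG_FEEDBACK.
 */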
59 
60 #define	FILLIN_SIN(sin, addr)			\
61 	do {					\
62 		(sin).sin_len = sizeof(sin);	\
63 		(sin).sin_family = AF_INET;	\
64 		(sin).sin_addr = (addr);	\
65 	} while (0)
66 
67 #define	FILLIN_SIN6(sin6, addr)			\
68 	do {					\
69 		(sin6).sin6_len = sizeof(sin6);	\
70 		(sin6).sin6_family = AF_INET6;	\
71 		(sin6).sin6_addr = (addr);	\
72 	} while (0)
73 
74 #define	SWAP(type, a1, a2)			\
75 	do {					\
76 		type tmp = a1;			\
77 		a1 = a2;			\
78 		a2 = tmp;			\
79 	} while (0)
80 
81 #define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
82     (struct pf_addr *)&(su)->sin.sin_addr :	\
83     (struct pf_addr *)&(su)->sin6.sin6_addr)
84 
85 #define	AF_BITS(af)		(((af)==AF_INET)?32:128)
86 #define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
87 #define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
88 #define	KENTRY_RNF_ROOT(ke) \
89 		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
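
/*
 * Note: an address or entry counts as a "network" when its prefix is
 * shorter than the address-family maximum, e.g. AF_INET with
 * pfra_net < 32.  KENTRY_RNF_ROOT() identifies the sentinel nodes that
 * every radix head carries; lookups below must discard those, since
 * they are not real table entries.
 */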
90 
91 #define	NO_ADDRESSES		(-1)
92 #define	ENQUEUE_UNMARKED_ONLY	(1)
93 #define	INVERT_NEG_FLAG		(1)
94 
95 struct pfr_walktree {
96 	enum pfrw_op {
97 		PFRW_MARK,
98 		PFRW_SWEEP,
99 		PFRW_ENQUEUE,
100 		PFRW_GET_ADDRS,
101 		PFRW_GET_ASTATS,
102 		PFRW_POOL_GET,
103 		PFRW_DYNADDR_UPDATE
104 	}	 pfrw_op;
105 	union {
106 		struct pfr_addr		*pfrw1_addr;
107 		struct pfr_astats	*pfrw1_astats;
108 		struct pfr_kentryworkq	*pfrw1_workq;
109 		struct pfr_kentry	*pfrw1_kentry;
110 		struct pfi_dynaddr	*pfrw1_dyn;
111 	}	 pfrw_1;
112 	int	 pfrw_free;
113 };
114 #define	pfrw_addr	pfrw_1.pfrw1_addr
115 #define	pfrw_astats	pfrw_1.pfrw1_astats
116 #define	pfrw_workq	pfrw_1.pfrw1_workq
117 #define	pfrw_kentry	pfrw_1.pfrw1_kentry
118 #define	pfrw_dyn	pfrw_1.pfrw1_dyn
119 #define	pfrw_cnt	pfrw_free
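
/*
 * Note: a struct pfr_walktree describes one pass over a table's radix
 * trees, and pfr_walktree() below dispatches on pfrw_op.  A minimal
 * sketch of how the helpers in this file drive a walk ('kt' and
 * 'workq' standing for a table and an initialized work queue):
 */
#if 0
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
#endif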
120 
121 #define	senderr(e)	do { rv = (e); goto _bad; } while (0)
122 
123 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
124 static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
125 #define	V_pfr_kentry_z		VNET(pfr_kentry_z)
126 static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
127 #define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)
128 
129 static struct pf_addr	 pfr_ffaddr = {
130 	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
131 };
132 
133 static void		 pfr_copyout_addr(struct pfr_addr *,
134 			    struct pfr_kentry *ke);
135 static int		 pfr_validate_addr(struct pfr_addr *);
136 static void		 pfr_enqueue_addrs(struct pfr_ktable *,
137 			    struct pfr_kentryworkq *, int *, int);
138 static void		 pfr_mark_addrs(struct pfr_ktable *);
139 static struct pfr_kentry
140 			*pfr_lookup_addr(struct pfr_ktable *,
141 			    struct pfr_addr *, int);
142 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
143 static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
144 static void		 pfr_destroy_kentry(struct pfr_kentry *);
145 static void		 pfr_insert_kentries(struct pfr_ktable *,
146 			    struct pfr_kentryworkq *, long);
147 static void		 pfr_remove_kentries(struct pfr_ktable *,
148 			    struct pfr_kentryworkq *);
149 static void		 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
150 			    int);
151 static void		 pfr_reset_feedback(struct pfr_addr *, int);
152 static void		 pfr_prepare_network(union sockaddr_union *, int, int);
153 static int		 pfr_route_kentry(struct pfr_ktable *,
154 			    struct pfr_kentry *);
155 static int		 pfr_unroute_kentry(struct pfr_ktable *,
156 			    struct pfr_kentry *);
157 static int		 pfr_walktree(struct radix_node *, void *);
158 static int		 pfr_validate_table(struct pfr_table *, int, int);
159 static int		 pfr_fix_anchor(char *);
160 static void		 pfr_commit_ktable(struct pfr_ktable *, long);
161 static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
162 static void		 pfr_insert_ktable(struct pfr_ktable *);
163 static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
164 static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
165 static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
166 			    int);
167 static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
168 static struct pfr_ktable
169 			*pfr_create_ktable(struct pfr_table *, long, int);
170 static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
171 static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
172 static int		 pfr_ktable_compare(struct pfr_ktable *,
173 			    struct pfr_ktable *);
174 static struct pfr_ktable
175 			*pfr_lookup_table(struct pfr_table *);
176 static void		 pfr_clean_node_mask(struct pfr_ktable *,
177 			    struct pfr_kentryworkq *);
178 static int		 pfr_table_count(struct pfr_table *, int);
179 static int		 pfr_skip_table(struct pfr_table *,
180 			    struct pfr_ktable *, int);
181 static struct pfr_kentry
182 			*pfr_kentry_byidx(struct pfr_ktable *, int, int);
183 
184 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
185 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
186 
187 VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
188 #define V_pfr_ktables		VNET(pfr_ktables)
189 
190 struct pfr_table	 pfr_nulltable;
191 
192 VNET_DEFINE(int, pfr_ktable_cnt);
193 #define V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)
194 
195 void
196 pfr_initialize(void)
197 {
198 
199 	V_pfr_kentry_z = uma_zcreate("pf table entries",
200 	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
201 	    0);
202 	V_pfr_kcounters_z = uma_zcreate("pf table counters",
203 	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
204 	    UMA_ALIGN_PTR, 0);
205 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
206 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
207 }
208 
209 void
210 pfr_cleanup(void)
211 {
212 
213 	uma_zdestroy(V_pfr_kentry_z);
214 	uma_zdestroy(V_pfr_kcounters_z);
215 }
216 
217 int
218 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
219 {
220 	struct pfr_ktable	*kt;
221 	struct pfr_kentryworkq	 workq;
222 
223 	PF_RULES_WASSERT();
224 
225 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
226 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
227 		return (EINVAL);
228 	kt = pfr_lookup_table(tbl);
229 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
230 		return (ESRCH);
231 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
232 		return (EPERM);
233 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
234 
235 	if (!(flags & PFR_FLAG_DUMMY)) {
236 		pfr_remove_kentries(kt, &workq);
237 		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-zero pfrkt_cnt", __func__));
238 	}
239 	return (0);
240 }
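
/*
 * Note: the pfr_*_addrs() and pfr_*_tables() entry points in this file
 * back pfctl(8)'s DIOCR* table ioctls.  pfr_add_addrs() and
 * pfr_set_addrs() below route candidate entries into a scratch table
 * (tmpkt) first, which both catches duplicates within a single request
 * and leaves the live table untouched until the request has validated.
 */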
241 
242 int
243 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
244     int *nadd, int flags)
245 {
246 	struct pfr_ktable	*kt, *tmpkt;
247 	struct pfr_kentryworkq	 workq;
248 	struct pfr_kentry	*p, *q;
249 	struct pfr_addr		*ad;
250 	int			 i, rv, xadd = 0;
251 	long			 tzero = time_second;
252 
253 	PF_RULES_WASSERT();
254 
255 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
256 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
257 		return (EINVAL);
258 	kt = pfr_lookup_table(tbl);
259 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
260 		return (ESRCH);
261 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
262 		return (EPERM);
263 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
264 	if (tmpkt == NULL)
265 		return (ENOMEM);
266 	SLIST_INIT(&workq);
267 	for (i = 0, ad = addr; i < size; i++, ad++) {
268 		if (pfr_validate_addr(ad))
269 			senderr(EINVAL);
270 		p = pfr_lookup_addr(kt, ad, 1);
271 		q = pfr_lookup_addr(tmpkt, ad, 1);
272 		if (flags & PFR_FLAG_FEEDBACK) {
273 			if (q != NULL)
274 				ad->pfra_fback = PFR_FB_DUPLICATE;
275 			else if (p == NULL)
276 				ad->pfra_fback = PFR_FB_ADDED;
277 			else if (p->pfrke_not != ad->pfra_not)
278 				ad->pfra_fback = PFR_FB_CONFLICT;
279 			else
280 				ad->pfra_fback = PFR_FB_NONE;
281 		}
282 		if (p == NULL && q == NULL) {
283 			p = pfr_create_kentry(ad);
284 			if (p == NULL)
285 				senderr(ENOMEM);
286 			if (pfr_route_kentry(tmpkt, p)) {
287 				pfr_destroy_kentry(p);
288 				ad->pfra_fback = PFR_FB_NONE;
289 			} else {
290 				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
291 				xadd++;
292 			}
293 		}
294 	}
295 	pfr_clean_node_mask(tmpkt, &workq);
296 	if (!(flags & PFR_FLAG_DUMMY))
297 		pfr_insert_kentries(kt, &workq, tzero);
298 	else
299 		pfr_destroy_kentries(&workq);
300 	if (nadd != NULL)
301 		*nadd = xadd;
302 	pfr_destroy_ktable(tmpkt, 0);
303 	return (0);
304 _bad:
305 	pfr_clean_node_mask(tmpkt, &workq);
306 	pfr_destroy_kentries(&workq);
307 	if (flags & PFR_FLAG_FEEDBACK)
308 		pfr_reset_feedback(addr, size);
309 	pfr_destroy_ktable(tmpkt, 0);
310 	return (rv);
311 }
312 
313 int
314 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
315     int *ndel, int flags)
316 {
317 	struct pfr_ktable	*kt;
318 	struct pfr_kentryworkq	 workq;
319 	struct pfr_kentry	*p;
320 	struct pfr_addr		*ad;
321 	int			 i, rv, xdel = 0, log = 1;
322 
323 	PF_RULES_WASSERT();
324 
325 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
326 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
327 		return (EINVAL);
328 	kt = pfr_lookup_table(tbl);
329 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
330 		return (ESRCH);
331 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
332 		return (EPERM);
333 	/*
334 	 * There are two algorithms to choose from here,
335 	 * with:
336 	 *   n: number of addresses to delete
337 	 *   N: number of addresses in the table
338 	 *
339 	 * One is O(N) and is better for large 'n';
340 	 * the other is O(n*log(N)) and is better for small 'n'.
341 	 *
342 	 * The code below estimates log2(N) to decide which one is best.
343 	 */
344 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
345 		log++;
346 	if (size > kt->pfrkt_cnt/log) {
347 		/* full table scan */
348 		pfr_mark_addrs(kt);
349 	} else {
350 		/* iterate over addresses to delete */
351 		for (i = 0, ad = addr; i < size; i++, ad++) {
352 			if (pfr_validate_addr(ad))
353 				return (EINVAL);
354 			p = pfr_lookup_addr(kt, ad, 1);
355 			if (p != NULL)
356 				p->pfrke_mark = 0;
357 		}
358 	}
359 	SLIST_INIT(&workq);
360 	for (i = 0, ad = addr; i < size; i++, ad++) {
361 		if (pfr_validate_addr(ad))
362 			senderr(EINVAL);
363 		p = pfr_lookup_addr(kt, ad, 1);
364 		if (flags & PFR_FLAG_FEEDBACK) {
365 			if (p == NULL)
366 				ad->pfra_fback = PFR_FB_NONE;
367 			else if (p->pfrke_not != ad->pfra_not)
368 				ad->pfra_fback = PFR_FB_CONFLICT;
369 			else if (p->pfrke_mark)
370 				ad->pfra_fback = PFR_FB_DUPLICATE;
371 			else
372 				ad->pfra_fback = PFR_FB_DELETED;
373 		}
374 		if (p != NULL && p->pfrke_not == ad->pfra_not &&
375 		    !p->pfrke_mark) {
376 			p->pfrke_mark = 1;
377 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
378 			xdel++;
379 		}
380 	}
381 	if (!(flags & PFR_FLAG_DUMMY))
382 		pfr_remove_kentries(kt, &workq);
383 	if (ndel != NULL)
384 		*ndel = xdel;
385 	return (0);
386 _bad:
387 	if (flags & PFR_FLAG_FEEDBACK)
388 		pfr_reset_feedback(addr, size);
389 	return (rv);
390 }
391 
392 int
393 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
394     int *size2, int *nadd, int *ndel, int *nchange, int flags,
395     u_int32_t ignore_pfrt_flags)
396 {
397 	struct pfr_ktable	*kt, *tmpkt;
398 	struct pfr_kentryworkq	 addq, delq, changeq;
399 	struct pfr_kentry	*p, *q;
400 	struct pfr_addr		 ad;
401 	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
402 	long			 tzero = time_second;
403 
404 	PF_RULES_WASSERT();
405 
406 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
407 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
408 	    PFR_FLAG_USERIOCTL))
409 		return (EINVAL);
410 	kt = pfr_lookup_table(tbl);
411 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
412 		return (ESRCH);
413 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
414 		return (EPERM);
415 	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
416 	if (tmpkt == NULL)
417 		return (ENOMEM);
418 	pfr_mark_addrs(kt);
419 	SLIST_INIT(&addq);
420 	SLIST_INIT(&delq);
421 	SLIST_INIT(&changeq);
422 	for (i = 0; i < size; i++) {
423 		/*
424 		 * XXXGL: understand pf_if usage of this function
425 		 * and make ad a moving pointer
426 		 */
427 		bcopy(addr + i, &ad, sizeof(ad));
428 		if (pfr_validate_addr(&ad))
429 			senderr(EINVAL);
430 		ad.pfra_fback = PFR_FB_NONE;
431 		p = pfr_lookup_addr(kt, &ad, 1);
432 		if (p != NULL) {
433 			if (p->pfrke_mark) {
434 				ad.pfra_fback = PFR_FB_DUPLICATE;
435 				goto _skip;
436 			}
437 			p->pfrke_mark = 1;
438 			if (p->pfrke_not != ad.pfra_not) {
439 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
440 				ad.pfra_fback = PFR_FB_CHANGED;
441 				xchange++;
442 			}
443 		} else {
444 			q = pfr_lookup_addr(tmpkt, &ad, 1);
445 			if (q != NULL) {
446 				ad.pfra_fback = PFR_FB_DUPLICATE;
447 				goto _skip;
448 			}
449 			p = pfr_create_kentry(&ad);
450 			if (p == NULL)
451 				senderr(ENOMEM);
452 			if (pfr_route_kentry(tmpkt, p)) {
453 				pfr_destroy_kentry(p);
454 				ad.pfra_fback = PFR_FB_NONE;
455 			} else {
456 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
457 				ad.pfra_fback = PFR_FB_ADDED;
458 				xadd++;
459 			}
460 		}
461 _skip:
462 		if (flags & PFR_FLAG_FEEDBACK)
463 			bcopy(&ad, addr + i, sizeof(ad));
464 	}
465 	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
466 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
467 		if (*size2 < size+xdel) {
468 			*size2 = size+xdel;
469 			senderr(0);
470 		}
471 		i = 0;
472 		SLIST_FOREACH(p, &delq, pfrke_workq) {
473 			pfr_copyout_addr(&ad, p);
474 			ad.pfra_fback = PFR_FB_DELETED;
475 			bcopy(&ad, addr + size + i, sizeof(ad));
476 			i++;
477 		}
478 	}
479 	pfr_clean_node_mask(tmpkt, &addq);
480 	if (!(flags & PFR_FLAG_DUMMY)) {
481 		pfr_insert_kentries(kt, &addq, tzero);
482 		pfr_remove_kentries(kt, &delq);
483 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
484 	} else
485 		pfr_destroy_kentries(&addq);
486 	if (nadd != NULL)
487 		*nadd = xadd;
488 	if (ndel != NULL)
489 		*ndel = xdel;
490 	if (nchange != NULL)
491 		*nchange = xchange;
492 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
493 		*size2 = size+xdel;
494 	pfr_destroy_ktable(tmpkt, 0);
495 	return (0);
496 _bad:
497 	pfr_clean_node_mask(tmpkt, &addq);
498 	pfr_destroy_kentries(&addq);
499 	if (flags & PFR_FLAG_FEEDBACK)
500 		pfr_reset_feedback(addr, size);
501 	pfr_destroy_ktable(tmpkt, 0);
502 	return (rv);
503 }
504 
505 int
506 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
507 	int *nmatch, int flags)
508 {
509 	struct pfr_ktable	*kt;
510 	struct pfr_kentry	*p;
511 	struct pfr_addr		*ad;
512 	int			 i, xmatch = 0;
513 
514 	PF_RULES_RASSERT();
515 
516 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
517 	if (pfr_validate_table(tbl, 0, 0))
518 		return (EINVAL);
519 	kt = pfr_lookup_table(tbl);
520 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
521 		return (ESRCH);
522 
523 	for (i = 0, ad = addr; i < size; i++, ad++) {
524 		if (pfr_validate_addr(ad))
525 			return (EINVAL);
526 		if (ADDR_NETWORK(ad))
527 			return (EINVAL);
528 		p = pfr_lookup_addr(kt, ad, 0);
529 		if (flags & PFR_FLAG_REPLACE)
530 			pfr_copyout_addr(ad, p);
531 		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
532 		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
533 		if (p != NULL && !p->pfrke_not)
534 			xmatch++;
535 	}
536 	if (nmatch != NULL)
537 		*nmatch = xmatch;
538 	return (0);
539 }
540 
541 int
542 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
543 	int flags)
544 {
545 	struct pfr_ktable	*kt;
546 	struct pfr_walktree	 w;
547 	int			 rv;
548 
549 	PF_RULES_RASSERT();
550 
551 	ACCEPT_FLAGS(flags, 0);
552 	if (pfr_validate_table(tbl, 0, 0))
553 		return (EINVAL);
554 	kt = pfr_lookup_table(tbl);
555 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
556 		return (ESRCH);
557 	if (kt->pfrkt_cnt > *size) {
558 		*size = kt->pfrkt_cnt;
559 		return (0);
560 	}
561 
562 	bzero(&w, sizeof(w));
563 	w.pfrw_op = PFRW_GET_ADDRS;
564 	w.pfrw_addr = addr;
565 	w.pfrw_free = kt->pfrkt_cnt;
566 	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
567 	if (!rv)
568 		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
569 		    &w);
570 	if (rv)
571 		return (rv);
572 
573 	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
574 	    w.pfrw_free));
575 
576 	*size = kt->pfrkt_cnt;
577 	return (0);
578 }
579 
580 int
581 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
582 	int flags)
583 {
584 	struct pfr_ktable	*kt;
585 	struct pfr_walktree	 w;
586 	struct pfr_kentryworkq	 workq;
587 	int			 rv;
588 	long			 tzero = time_second;
589 
590 	PF_RULES_RASSERT();
591 
592 	/* XXX PFR_FLAG_CLSTATS disabled */
593 	ACCEPT_FLAGS(flags, 0);
594 	if (pfr_validate_table(tbl, 0, 0))
595 		return (EINVAL);
596 	kt = pfr_lookup_table(tbl);
597 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
598 		return (ESRCH);
599 	if (kt->pfrkt_cnt > *size) {
600 		*size = kt->pfrkt_cnt;
601 		return (0);
602 	}
603 
604 	bzero(&w, sizeof(w));
605 	w.pfrw_op = PFRW_GET_ASTATS;
606 	w.pfrw_astats = addr;
607 	w.pfrw_free = kt->pfrkt_cnt;
608 	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
609 	if (!rv)
610 		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
611 		    &w);
612 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
613 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
614 		pfr_clstats_kentries(&workq, tzero, 0);
615 	}
616 	if (rv)
617 		return (rv);
618 
619 	if (w.pfrw_free) {
620 		printf("pfr_get_astats: corruption detected (%d).\n",
621 		    w.pfrw_free);
622 		return (ENOTTY);
623 	}
624 	*size = kt->pfrkt_cnt;
625 	return (0);
626 }
627 
628 int
629 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
630     int *nzero, int flags)
631 {
632 	struct pfr_ktable	*kt;
633 	struct pfr_kentryworkq	 workq;
634 	struct pfr_kentry	*p;
635 	struct pfr_addr		*ad;
636 	int			 i, rv, xzero = 0;
637 
638 	PF_RULES_WASSERT();
639 
640 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
641 	if (pfr_validate_table(tbl, 0, 0))
642 		return (EINVAL);
643 	kt = pfr_lookup_table(tbl);
644 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
645 		return (ESRCH);
646 	SLIST_INIT(&workq);
647 	for (i = 0, ad = addr; i < size; i++, ad++) {
648 		if (pfr_validate_addr(ad))
649 			senderr(EINVAL);
650 		p = pfr_lookup_addr(kt, ad, 1);
651 		if (flags & PFR_FLAG_FEEDBACK) {
652 			ad->pfra_fback = (p != NULL) ?
653 			    PFR_FB_CLEARED : PFR_FB_NONE;
654 		}
655 		if (p != NULL) {
656 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
657 			xzero++;
658 		}
659 	}
660 
661 	if (!(flags & PFR_FLAG_DUMMY))
662 		pfr_clstats_kentries(&workq, 0, 0);
663 	if (nzero != NULL)
664 		*nzero = xzero;
665 	return (0);
666 _bad:
667 	if (flags & PFR_FLAG_FEEDBACK)
668 		pfr_reset_feedback(addr, size);
669 	return (rv);
670 }
671 
672 static int
673 pfr_validate_addr(struct pfr_addr *ad)
674 {
675 	int i;
676 
677 	switch (ad->pfra_af) {
678 #ifdef INET
679 	case AF_INET:
680 		if (ad->pfra_net > 32)
681 			return (-1);
682 		break;
683 #endif /* INET */
684 #ifdef INET6
685 	case AF_INET6:
686 		if (ad->pfra_net > 128)
687 			return (-1);
688 		break;
689 #endif /* INET6 */
690 	default:
691 		return (-1);
692 	}
693 	if (ad->pfra_net < 128 &&
694 	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
695 		return (-1);
696 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
697 		if (((caddr_t)ad)[i])
698 			return (-1);
699 	if (ad->pfra_not && ad->pfra_not != 1)
700 		return (-1);
701 	if (ad->pfra_fback)
702 		return (-1);
703 	return (0);
704 }
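
/*
 * Note: pfra_u is the first member of struct pfr_addr, so the byte
 * indexing above reads the stored address directly.  Worked example:
 * for 10.0.0.0/12 (pfra_net == 12) the partial-byte check masks byte 1
 * with 0xFF >> 4 == 0x0F and the loop requires bytes 2..15 to be zero,
 * so 10.1.0.0/12 (byte 1 == 0x01) is rejected for having host bits set
 * beyond the /12 prefix.
 */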
705 
706 static void
707 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
708 	int *naddr, int sweep)
709 {
710 	struct pfr_walktree	w;
711 
712 	SLIST_INIT(workq);
713 	bzero(&w, sizeof(w));
714 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
715 	w.pfrw_workq = workq;
716 	if (kt->pfrkt_ip4 != NULL)
717 		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
718 		    &w))
719 			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
720 	if (kt->pfrkt_ip6 != NULL)
721 		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
722 		    &w))
723 			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
724 	if (naddr != NULL)
725 		*naddr = w.pfrw_cnt;
726 }
727 
728 static void
729 pfr_mark_addrs(struct pfr_ktable *kt)
730 {
731 	struct pfr_walktree	w;
732 
733 	bzero(&w, sizeof(w));
734 	w.pfrw_op = PFRW_MARK;
735 	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
736 		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
737 	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
738 		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
739 }
740 
741 
742 static struct pfr_kentry *
743 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
744 {
745 	union sockaddr_union	 sa, mask;
746 	struct radix_node_head	*head = NULL;
747 	struct pfr_kentry	*ke;
748 
749 	PF_RULES_ASSERT();
750 
751 	bzero(&sa, sizeof(sa));
752 	if (ad->pfra_af == AF_INET) {
753 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
754 		head = kt->pfrkt_ip4;
755 	} else if (ad->pfra_af == AF_INET6) {
756 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
757 		head = kt->pfrkt_ip6;
758 	}
759 	if (ADDR_NETWORK(ad)) {
760 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
761 		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
762 		if (ke && KENTRY_RNF_ROOT(ke))
763 			ke = NULL;
764 	} else {
765 		ke = (struct pfr_kentry *)rn_match(&sa, head);
766 		if (ke && KENTRY_RNF_ROOT(ke))
767 			ke = NULL;
768 		if (exact && ke && KENTRY_NETWORK(ke))
769 			ke = NULL;
770 	}
771 	return (ke);
772 }
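
/*
 * Note: rn_match() performs a longest-prefix match, so a /24 entry
 * would also answer for a host contained in it.  The 'exact' argument
 * rejects such network matches, which is how the update paths above
 * tell "this exact entry exists" apart from "some entry covers it".
 */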
773 
774 static struct pfr_kentry *
775 pfr_create_kentry(struct pfr_addr *ad)
776 {
777 	struct pfr_kentry	*ke;
778 
779 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
780 	if (ke == NULL)
781 		return (NULL);
782 
783 	if (ad->pfra_af == AF_INET)
784 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
785 	else if (ad->pfra_af == AF_INET6)
786 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
787 	ke->pfrke_af = ad->pfra_af;
788 	ke->pfrke_net = ad->pfra_net;
789 	ke->pfrke_not = ad->pfra_not;
790 	return (ke);
791 }
792 
793 static void
794 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
795 {
796 	struct pfr_kentry	*p, *q;
797 
798 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
799 		q = SLIST_NEXT(p, pfrke_workq);
800 		pfr_destroy_kentry(p);
801 	}
802 }
803 
804 static void
805 pfr_destroy_kentry(struct pfr_kentry *ke)
806 {
807 	if (ke->pfrke_counters)
808 		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
809 	uma_zfree(V_pfr_kentry_z, ke);
810 }
811 
812 static void
813 pfr_insert_kentries(struct pfr_ktable *kt,
814     struct pfr_kentryworkq *workq, long tzero)
815 {
816 	struct pfr_kentry	*p;
817 	int			 rv, n = 0;
818 
819 	SLIST_FOREACH(p, workq, pfrke_workq) {
820 		rv = pfr_route_kentry(kt, p);
821 		if (rv) {
822 			printf("pfr_insert_kentries: cannot route entry "
823 			    "(code=%d).\n", rv);
824 			break;
825 		}
826 		p->pfrke_tzero = tzero;
827 		n++;
828 	}
829 	kt->pfrkt_cnt += n;
830 }
831 
832 int
833 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
834 {
835 	struct pfr_kentry	*p;
836 	int			 rv;
837 
838 	p = pfr_lookup_addr(kt, ad, 1);
839 	if (p != NULL)
840 		return (0);
841 	p = pfr_create_kentry(ad);
842 	if (p == NULL)
843 		return (ENOMEM);
844 
845 	rv = pfr_route_kentry(kt, p);
846 	if (rv)
847 		return (rv);
848 
849 	p->pfrke_tzero = tzero;
850 	kt->pfrkt_cnt++;
851 
852 	return (0);
853 }
854 
855 static void
856 pfr_remove_kentries(struct pfr_ktable *kt,
857     struct pfr_kentryworkq *workq)
858 {
859 	struct pfr_kentry	*p;
860 	int			 n = 0;
861 
862 	SLIST_FOREACH(p, workq, pfrke_workq) {
863 		pfr_unroute_kentry(kt, p);
864 		n++;
865 	}
866 	kt->pfrkt_cnt -= n;
867 	pfr_destroy_kentries(workq);
868 }
869 
870 static void
871 pfr_clean_node_mask(struct pfr_ktable *kt,
872     struct pfr_kentryworkq *workq)
873 {
874 	struct pfr_kentry	*p;
875 
876 	SLIST_FOREACH(p, workq, pfrke_workq)
877 		pfr_unroute_kentry(kt, p);
878 }
879 
880 static void
881 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
882 {
883 	struct pfr_kentry	*p;
884 
885 	SLIST_FOREACH(p, workq, pfrke_workq) {
886 		if (negchange)
887 			p->pfrke_not = !p->pfrke_not;
888 		if (p->pfrke_counters) {
889 			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
890 			p->pfrke_counters = NULL;
891 		}
892 		p->pfrke_tzero = tzero;
893 	}
894 }
895 
896 static void
897 pfr_reset_feedback(struct pfr_addr *addr, int size)
898 {
899 	struct pfr_addr	*ad;
900 	int		i;
901 
902 	for (i = 0, ad = addr; i < size; i++, ad++)
903 		ad->pfra_fback = PFR_FB_NONE;
904 }
905 
906 static void
907 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
908 {
909 	int	i;
910 
911 	bzero(sa, sizeof(*sa));
912 	if (af == AF_INET) {
913 		sa->sin.sin_len = sizeof(sa->sin);
914 		sa->sin.sin_family = AF_INET;
915 		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
916 	} else if (af == AF_INET6) {
917 		sa->sin6.sin6_len = sizeof(sa->sin6);
918 		sa->sin6.sin6_family = AF_INET6;
919 		for (i = 0; i < 4; i++) {
920 			if (net <= 32) {
921 				sa->sin6.sin6_addr.s6_addr32[i] =
922 				    net ? htonl(-1 << (32-net)) : 0;
923 				break;
924 			}
925 			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
926 			net -= 32;
927 		}
928 	}
929 }
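
/*
 * Worked example: net == 20 with AF_INET yields
 * htonl(-1 << 12) == 0xfffff000, i.e. a 255.255.240.0 mask.  With
 * AF_INET6 and net == 68, the first two 32-bit words become
 * 0xffffffff, the third htonl(-1 << 28) (the top four bits) and the
 * fourth stays zero.
 */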
930 
931 static int
932 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
933 {
934 	union sockaddr_union	 mask;
935 	struct radix_node	*rn;
936 	struct radix_node_head	*head = NULL;
937 
938 	PF_RULES_WASSERT();
939 
940 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
941 	if (ke->pfrke_af == AF_INET)
942 		head = kt->pfrkt_ip4;
943 	else if (ke->pfrke_af == AF_INET6)
944 		head = kt->pfrkt_ip6;
945 
946 	if (KENTRY_NETWORK(ke)) {
947 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
948 		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
949 	} else
950 		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
951 
952 	return (rn == NULL ? -1 : 0);
953 }
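
/*
 * Note: every pfr_kentry embeds its own pair of radix nodes
 * (pfrke_node), so routing an entry allocates nothing; rn_addroute()
 * returns NULL, and the insert fails, notably when an equal key
 * already sits in the tree.
 */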
954 
955 static int
956 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
957 {
958 	union sockaddr_union	 mask;
959 	struct radix_node	*rn;
960 	struct radix_node_head	*head = NULL;
961 
962 	if (ke->pfrke_af == AF_INET)
963 		head = kt->pfrkt_ip4;
964 	else if (ke->pfrke_af == AF_INET6)
965 		head = kt->pfrkt_ip6;
966 
967 	if (KENTRY_NETWORK(ke)) {
968 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
969 		rn = rn_delete(&ke->pfrke_sa, &mask, head);
970 	} else
971 		rn = rn_delete(&ke->pfrke_sa, NULL, head);
972 
973 	if (rn == NULL) {
974 		printf("pfr_unroute_kentry: delete failed.\n");
975 		return (-1);
976 	}
977 	return (0);
978 }
979 
980 static void
981 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
982 {
983 	bzero(ad, sizeof(*ad));
984 	if (ke == NULL)
985 		return;
986 	ad->pfra_af = ke->pfrke_af;
987 	ad->pfra_net = ke->pfrke_net;
988 	ad->pfra_not = ke->pfrke_not;
989 	if (ad->pfra_af == AF_INET)
990 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
991 	else if (ad->pfra_af == AF_INET6)
992 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
993 }
994 
995 static int
996 pfr_walktree(struct radix_node *rn, void *arg)
997 {
998 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
999 	struct pfr_walktree	*w = arg;
1000 
1001 	switch (w->pfrw_op) {
1002 	case PFRW_MARK:
1003 		ke->pfrke_mark = 0;
1004 		break;
1005 	case PFRW_SWEEP:
1006 		if (ke->pfrke_mark)
1007 			break;
1008 		/* FALLTHROUGH */
1009 	case PFRW_ENQUEUE:
1010 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1011 		w->pfrw_cnt++;
1012 		break;
1013 	case PFRW_GET_ADDRS:
1014 		if (w->pfrw_free-- > 0) {
1015 			pfr_copyout_addr(w->pfrw_addr, ke);
1016 			w->pfrw_addr++;
1017 		}
1018 		break;
1019 	case PFRW_GET_ASTATS:
1020 		if (w->pfrw_free-- > 0) {
1021 			struct pfr_astats as;
1022 
1023 			pfr_copyout_addr(&as.pfras_a, ke);
1024 
1025 			if (ke->pfrke_counters) {
1026 				bcopy(ke->pfrke_counters->pfrkc_packets,
1027 				    as.pfras_packets, sizeof(as.pfras_packets));
1028 				bcopy(ke->pfrke_counters->pfrkc_bytes,
1029 				    as.pfras_bytes, sizeof(as.pfras_bytes));
1030 			} else {
1031 				bzero(as.pfras_packets, sizeof(as.pfras_packets));
1032 				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
1033 				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1034 			}
1035 			as.pfras_tzero = ke->pfrke_tzero;
1036 
1037 			bcopy(&as, w->pfrw_astats, sizeof(as));
1038 			w->pfrw_astats++;
1039 		}
1040 		break;
1041 	case PFRW_POOL_GET:
1042 		if (ke->pfrke_not)
1043 			break; /* negative entries are ignored */
1044 		if (!w->pfrw_cnt--) {
1045 			w->pfrw_kentry = ke;
1046 			return (1); /* finish search */
1047 		}
1048 		break;
1049 	case PFRW_DYNADDR_UPDATE:
1050 	    {
1051 		union sockaddr_union	pfr_mask;
1052 
1053 		if (ke->pfrke_af == AF_INET) {
1054 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1055 				break;
1056 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1057 			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
1058 			    AF_INET);
1059 			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
1060 			    AF_INET);
1061 		} else if (ke->pfrke_af == AF_INET6) {
1062 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1063 				break;
1064 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1065 			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
1066 			    AF_INET6);
1067 			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
1068 			    AF_INET6);
1069 		}
1070 		break;
1071 	    }
1072 	}
1073 	return (0);
1074 }
1075 
1076 int
1077 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1078 {
1079 	struct pfr_ktableworkq	 workq;
1080 	struct pfr_ktable	*p;
1081 	int			 xdel = 0;
1082 
1083 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1084 	if (pfr_fix_anchor(filter->pfrt_anchor))
1085 		return (EINVAL);
1086 	if (pfr_table_count(filter, flags) < 0)
1087 		return (ENOENT);
1088 
1089 	SLIST_INIT(&workq);
1090 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1091 		if (pfr_skip_table(filter, p, flags))
1092 			continue;
1093 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1094 			continue;
1095 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1096 			continue;
1097 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1098 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1099 		xdel++;
1100 	}
1101 	if (!(flags & PFR_FLAG_DUMMY))
1102 		pfr_setflags_ktables(&workq);
1103 	if (ndel != NULL)
1104 		*ndel = xdel;
1105 	return (0);
1106 }
1107 
1108 int
1109 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1110 {
1111 	struct pfr_ktableworkq	 addq, changeq;
1112 	struct pfr_ktable	*p, *q, *r, key;
1113 	int			 i, rv, xadd = 0;
1114 	long			 tzero = time_second;
1115 
1116 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1117 	SLIST_INIT(&addq);
1118 	SLIST_INIT(&changeq);
1119 	for (i = 0; i < size; i++) {
1120 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1121 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1122 		    flags & PFR_FLAG_USERIOCTL))
1123 			senderr(EINVAL);
1124 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1125 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1126 		if (p == NULL) {
1127 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1128 			if (p == NULL)
1129 				senderr(ENOMEM);
1130 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1131 				if (!pfr_ktable_compare(p, q))
1132 					goto _skip;
1133 			}
1134 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1135 			xadd++;
1136 			if (!key.pfrkt_anchor[0])
1137 				goto _skip;
1138 
1139 			/* find or create root table */
1140 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1141 			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1142 			if (r != NULL) {
1143 				p->pfrkt_root = r;
1144 				goto _skip;
1145 			}
1146 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1147 				if (!pfr_ktable_compare(&key, q)) {
1148 					p->pfrkt_root = q;
1149 					goto _skip;
1150 				}
1151 			}
1152 			key.pfrkt_flags = 0;
1153 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1154 			if (r == NULL)
1155 				senderr(ENOMEM);
1156 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1157 			p->pfrkt_root = r;
1158 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1159 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1160 				if (!pfr_ktable_compare(&key, q))
1161 					goto _skip;
1162 			p->pfrkt_nflags = (p->pfrkt_flags &
1163 			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1164 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1165 			xadd++;
1166 		}
1167 _skip:
1168 	;
1169 	}
1170 	if (!(flags & PFR_FLAG_DUMMY)) {
1171 		pfr_insert_ktables(&addq);
1172 		pfr_setflags_ktables(&changeq);
1173 	} else
1174 		pfr_destroy_ktables(&addq, 0);
1175 	if (nadd != NULL)
1176 		*nadd = xadd;
1177 	return (0);
1178 _bad:
1179 	pfr_destroy_ktables(&addq, 0);
1180 	return (rv);
1181 }
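
/*
 * Note: a table defined inside an anchor also gets a like-named "root"
 * table in the main ruleset (pfrkt_root), created on demand above;
 * pfr_match_addr() and pfr_update_stats() fall back to that root when
 * the anchored table itself is not active.
 */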
1182 
1183 int
1184 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1185 {
1186 	struct pfr_ktableworkq	 workq;
1187 	struct pfr_ktable	*p, *q, key;
1188 	int			 i, xdel = 0;
1189 
1190 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1191 	SLIST_INIT(&workq);
1192 	for (i = 0; i < size; i++) {
1193 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1194 		if (pfr_validate_table(&key.pfrkt_t, 0,
1195 		    flags & PFR_FLAG_USERIOCTL))
1196 			return (EINVAL);
1197 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1198 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1199 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1200 				if (!pfr_ktable_compare(p, q))
1201 					goto _skip;
1202 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1203 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1204 			xdel++;
1205 		}
1206 _skip:
1207 	;
1208 	}
1209 
1210 	if (!(flags & PFR_FLAG_DUMMY))
1211 		pfr_setflags_ktables(&workq);
1212 	if (ndel != NULL)
1213 		*ndel = xdel;
1214 	return (0);
1215 }
1216 
1217 int
1218 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1219 	int flags)
1220 {
1221 	struct pfr_ktable	*p;
1222 	int			 n, nn;
1223 
1224 	PF_RULES_RASSERT();
1225 
1226 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1227 	if (pfr_fix_anchor(filter->pfrt_anchor))
1228 		return (EINVAL);
1229 	n = nn = pfr_table_count(filter, flags);
1230 	if (n < 0)
1231 		return (ENOENT);
1232 	if (n > *size) {
1233 		*size = n;
1234 		return (0);
1235 	}
1236 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1237 		if (pfr_skip_table(filter, p, flags))
1238 			continue;
1239 		if (n-- <= 0)
1240 			continue;
1241 		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1242 	}
1243 
1244 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1245 
1246 	*size = nn;
1247 	return (0);
1248 }
1249 
1250 int
1251 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1252 	int flags)
1253 {
1254 	struct pfr_ktable	*p;
1255 	struct pfr_ktableworkq	 workq;
1256 	int			 n, nn;
1257 	long			 tzero = time_second;
1258 
1259 	/* XXX PFR_FLAG_CLSTATS disabled */
1260 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1261 	if (pfr_fix_anchor(filter->pfrt_anchor))
1262 		return (EINVAL);
1263 	n = nn = pfr_table_count(filter, flags);
1264 	if (n < 0)
1265 		return (ENOENT);
1266 	if (n > *size) {
1267 		*size = n;
1268 		return (0);
1269 	}
1270 	SLIST_INIT(&workq);
1271 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1272 		if (pfr_skip_table(filter, p, flags))
1273 			continue;
1274 		if (n-- <= 0)
1275 			continue;
1276 		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
1277 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1278 	}
1279 	if (flags & PFR_FLAG_CLSTATS)
1280 		pfr_clstats_ktables(&workq, tzero,
1281 		    flags & PFR_FLAG_ADDRSTOO);
1282 
1283 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1284 
1285 	*size = nn;
1286 	return (0);
1287 }
1288 
1289 int
1290 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1291 {
1292 	struct pfr_ktableworkq	 workq;
1293 	struct pfr_ktable	*p, key;
1294 	int			 i, xzero = 0;
1295 	long			 tzero = time_second;
1296 
1297 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1298 	SLIST_INIT(&workq);
1299 	for (i = 0; i < size; i++) {
1300 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1301 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1302 			return (EINVAL);
1303 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1304 		if (p != NULL) {
1305 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1306 			xzero++;
1307 		}
1308 	}
1309 	if (!(flags & PFR_FLAG_DUMMY))
1310 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1311 	if (nzero != NULL)
1312 		*nzero = xzero;
1313 	return (0);
1314 }
1315 
1316 int
1317 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1318 	int *nchange, int *ndel, int flags)
1319 {
1320 	struct pfr_ktableworkq	 workq;
1321 	struct pfr_ktable	*p, *q, key;
1322 	int			 i, xchange = 0, xdel = 0;
1323 
1324 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1325 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1326 	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1327 	    (setflag & clrflag))
1328 		return (EINVAL);
1329 	SLIST_INIT(&workq);
1330 	for (i = 0; i < size; i++) {
1331 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1332 		if (pfr_validate_table(&key.pfrkt_t, 0,
1333 		    flags & PFR_FLAG_USERIOCTL))
1334 			return (EINVAL);
1335 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1336 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1337 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1338 			    ~clrflag;
1339 			if (p->pfrkt_nflags == p->pfrkt_flags)
1340 				goto _skip;
1341 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1342 				if (!pfr_ktable_compare(p, q))
1343 					goto _skip;
1344 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1345 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1346 			    (clrflag & PFR_TFLAG_PERSIST) &&
1347 			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1348 				xdel++;
1349 			else
1350 				xchange++;
1351 		}
1352 _skip:
1353 	;
1354 	}
1355 	if (!(flags & PFR_FLAG_DUMMY))
1356 		pfr_setflags_ktables(&workq);
1357 	if (nchange != NULL)
1358 		*nchange = xchange;
1359 	if (ndel != NULL)
1360 		*ndel = xdel;
1361 	return (0);
1362 }
1363 
1364 int
1365 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1366 {
1367 	struct pfr_ktableworkq	 workq;
1368 	struct pfr_ktable	*p;
1369 	struct pf_ruleset	*rs;
1370 	int			 xdel = 0;
1371 
1372 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1373 	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
1374 	if (rs == NULL)
1375 		return (ENOMEM);
1376 	SLIST_INIT(&workq);
1377 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1378 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1379 		    pfr_skip_table(trs, p, 0))
1380 			continue;
1381 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1382 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1383 		xdel++;
1384 	}
1385 	if (!(flags & PFR_FLAG_DUMMY)) {
1386 		pfr_setflags_ktables(&workq);
1387 		if (ticket != NULL)
1388 			*ticket = ++rs->tticket;
1389 		rs->topen = 1;
1390 	} else
1391 		pf_remove_if_empty_ruleset(rs);
1392 	if (ndel != NULL)
1393 		*ndel = xdel;
1394 	return (0);
1395 }
1396 
1397 int
1398 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1399     int *nadd, int *naddr, u_int32_t ticket, int flags)
1400 {
1401 	struct pfr_ktableworkq	 tableq;
1402 	struct pfr_kentryworkq	 addrq;
1403 	struct pfr_ktable	*kt, *rt, *shadow, key;
1404 	struct pfr_kentry	*p;
1405 	struct pfr_addr		*ad;
1406 	struct pf_ruleset	*rs;
1407 	int			 i, rv, xadd = 0, xaddr = 0;
1408 
1409 	PF_RULES_WASSERT();
1410 
1411 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1412 	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1413 		return (EINVAL);
1414 	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1415 	    flags & PFR_FLAG_USERIOCTL))
1416 		return (EINVAL);
1417 	rs = pf_find_ruleset(tbl->pfrt_anchor);
1418 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1419 		return (EBUSY);
1420 	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1421 	SLIST_INIT(&tableq);
1422 	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1423 	if (kt == NULL) {
1424 		kt = pfr_create_ktable(tbl, 0, 1);
1425 		if (kt == NULL)
1426 			return (ENOMEM);
1427 		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1428 		xadd++;
1429 		if (!tbl->pfrt_anchor[0])
1430 			goto _skip;
1431 
1432 		/* find or create root table */
1433 		bzero(&key, sizeof(key));
1434 		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1435 		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1436 		if (rt != NULL) {
1437 			kt->pfrkt_root = rt;
1438 			goto _skip;
1439 		}
1440 		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1441 		if (rt == NULL) {
1442 			pfr_destroy_ktables(&tableq, 0);
1443 			return (ENOMEM);
1444 		}
1445 		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1446 		kt->pfrkt_root = rt;
1447 	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1448 		xadd++;
1449 _skip:
1450 	shadow = pfr_create_ktable(tbl, 0, 0);
1451 	if (shadow == NULL) {
1452 		pfr_destroy_ktables(&tableq, 0);
1453 		return (ENOMEM);
1454 	}
1455 	SLIST_INIT(&addrq);
1456 	for (i = 0, ad = addr; i < size; i++, ad++) {
1457 		if (pfr_validate_addr(ad))
1458 			senderr(EINVAL);
1459 		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1460 			continue;
1461 		p = pfr_create_kentry(ad);
1462 		if (p == NULL)
1463 			senderr(ENOMEM);
1464 		if (pfr_route_kentry(shadow, p)) {
1465 			pfr_destroy_kentry(p);
1466 			continue;
1467 		}
1468 		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1469 		xaddr++;
1470 	}
1471 	if (!(flags & PFR_FLAG_DUMMY)) {
1472 		if (kt->pfrkt_shadow != NULL)
1473 			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1474 		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1475 		pfr_insert_ktables(&tableq);
1476 		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1477 		    xaddr : NO_ADDRESSES;
1478 		kt->pfrkt_shadow = shadow;
1479 	} else {
1480 		pfr_clean_node_mask(shadow, &addrq);
1481 		pfr_destroy_ktable(shadow, 0);
1482 		pfr_destroy_ktables(&tableq, 0);
1483 		pfr_destroy_kentries(&addrq);
1484 	}
1485 	if (nadd != NULL)
1486 		*nadd = xadd;
1487 	if (naddr != NULL)
1488 		*naddr = xaddr;
1489 	return (0);
1490 _bad:
1491 	pfr_destroy_ktable(shadow, 0);
1492 	pfr_destroy_ktables(&tableq, 0);
1493 	pfr_destroy_kentries(&addrq);
1494 	return (rv);
1495 }
1496 
1497 int
1498 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1499 {
1500 	struct pfr_ktableworkq	 workq;
1501 	struct pfr_ktable	*p;
1502 	struct pf_ruleset	*rs;
1503 	int			 xdel = 0;
1504 
1505 	PF_RULES_WASSERT();
1506 
1507 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1508 	rs = pf_find_ruleset(trs->pfrt_anchor);
1509 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1510 		return (0);
1511 	SLIST_INIT(&workq);
1512 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1513 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1514 		    pfr_skip_table(trs, p, 0))
1515 			continue;
1516 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1517 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1518 		xdel++;
1519 	}
1520 	if (!(flags & PFR_FLAG_DUMMY)) {
1521 		pfr_setflags_ktables(&workq);
1522 		rs->topen = 0;
1523 		pf_remove_if_empty_ruleset(rs);
1524 	}
1525 	if (ndel != NULL)
1526 		*ndel = xdel;
1527 	return (0);
1528 }
1529 
1530 int
1531 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1532     int *nchange, int flags)
1533 {
1534 	struct pfr_ktable	*p, *q;
1535 	struct pfr_ktableworkq	 workq;
1536 	struct pf_ruleset	*rs;
1537 	int			 xadd = 0, xchange = 0;
1538 	long			 tzero = time_second;
1539 
1540 	PF_RULES_WASSERT();
1541 
1542 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1543 	rs = pf_find_ruleset(trs->pfrt_anchor);
1544 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1545 		return (EBUSY);
1546 
1547 	SLIST_INIT(&workq);
1548 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1549 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1550 		    pfr_skip_table(trs, p, 0))
1551 			continue;
1552 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1553 		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1554 			xchange++;
1555 		else
1556 			xadd++;
1557 	}
1558 
1559 	if (!(flags & PFR_FLAG_DUMMY)) {
1560 		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1561 			q = SLIST_NEXT(p, pfrkt_workq);
1562 			pfr_commit_ktable(p, tzero);
1563 		}
1564 		rs->topen = 0;
1565 		pf_remove_if_empty_ruleset(rs);
1566 	}
1567 	if (nadd != NULL)
1568 		*nadd = xadd;
1569 	if (nchange != NULL)
1570 		*nchange = xchange;
1571 
1572 	return (0);
1573 }
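
/*
 * Note: pfr_ina_begin(), pfr_ina_define(), pfr_ina_rollback() and
 * pfr_ina_commit() form the table half of a pf ruleset transaction:
 * begin issues a ticket and clears stale inactive tables, define
 * stages addresses into a per-table shadow, rollback discards the
 * shadows, and commit folds each shadow into its active table via
 * pfr_commit_ktable() below.
 */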
1574 
1575 static void
1576 pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
1577 {
1578 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1579 	int			 nflags;
1580 
1581 	PF_RULES_WASSERT();
1582 
1583 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1584 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1585 			pfr_clstats_ktable(kt, tzero, 1);
1586 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1587 		/* kt might contain addresses */
1588 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1589 		struct pfr_kentry	*p, *q, *next;
1590 		struct pfr_addr		 ad;
1591 
1592 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1593 		pfr_mark_addrs(kt);
1594 		SLIST_INIT(&addq);
1595 		SLIST_INIT(&changeq);
1596 		SLIST_INIT(&delq);
1597 		SLIST_INIT(&garbageq);
1598 		pfr_clean_node_mask(shadow, &addrq);
1599 		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1600 			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
1601 			pfr_copyout_addr(&ad, p);
1602 			q = pfr_lookup_addr(kt, &ad, 1);
1603 			if (q != NULL) {
1604 				if (q->pfrke_not != p->pfrke_not)
1605 					SLIST_INSERT_HEAD(&changeq, q,
1606 					    pfrke_workq);
1607 				q->pfrke_mark = 1;
1608 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1609 			} else {
1610 				p->pfrke_tzero = tzero;
1611 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1612 			}
1613 		}
1614 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1615 		pfr_insert_kentries(kt, &addq, tzero);
1616 		pfr_remove_kentries(kt, &delq);
1617 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1618 		pfr_destroy_kentries(&garbageq);
1619 	} else {
1620 		/* kt cannot contain addresses */
1621 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1622 		    shadow->pfrkt_ip4);
1623 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1624 		    shadow->pfrkt_ip6);
1625 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1626 		pfr_clstats_ktable(kt, tzero, 1);
1627 	}
1628 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1629 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1630 		& ~PFR_TFLAG_INACTIVE;
1631 	pfr_destroy_ktable(shadow, 0);
1632 	kt->pfrkt_shadow = NULL;
1633 	pfr_setflags_ktable(kt, nflags);
1634 }
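
/*
 * Note: the three commit cases above are: no addresses staged
 * (NO_ADDRESSES, so only stats are reset), a live table (a delta of
 * insertions, deletions and negation flips is applied in place), and
 * an inactive table, where simply swapping the radix heads with the
 * shadow is cheapest.
 */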
1635 
1636 static int
1637 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1638 {
1639 	int i;
1640 
1641 	if (!tbl->pfrt_name[0])
1642 		return (-1);
1643 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1644 		return (-1);
1645 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1646 		return (-1);
1647 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1648 		if (tbl->pfrt_name[i])
1649 			return (-1);
1650 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1651 		return (-1);
1652 	if (tbl->pfrt_flags & ~allowedflags)
1653 		return (-1);
1654 	return (0);
1655 }
1656 
1657 /*
1658  * Rewrite anchors referenced by tables to remove slashes
1659  * and check for validity.
1660  */
1661 static int
1662 pfr_fix_anchor(char *anchor)
1663 {
1664 	size_t siz = MAXPATHLEN;
1665 	int i;
1666 
1667 	if (anchor[0] == '/') {
1668 		char *path;
1669 		int off;
1670 
1671 		path = anchor;
1672 		off = 1;
1673 		while (*++path == '/')
1674 			off++;
1675 		bcopy(path, anchor, siz - off);
1676 		memset(anchor + siz - off, 0, off);
1677 	}
1678 	if (anchor[siz - 1])
1679 		return (-1);
1680 	for (i = strlen(anchor); i < siz; i++)
1681 		if (anchor[i])
1682 			return (-1);
1683 	return (0);
1684 }
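
/*
 * Worked example: an anchor given as "/foo/bar" is shifted left to
 * "foo/bar" (all leading slashes are counted into 'off' and stripped)
 * and the vacated tail is zeroed; a name that still has non-zero bytes
 * past its terminator, or that fills all MAXPATHLEN bytes, is
 * rejected.
 */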
1685 
1686 static int
1687 pfr_table_count(struct pfr_table *filter, int flags)
1688 {
1689 	struct pf_ruleset *rs;
1690 
1691 	PF_RULES_ASSERT();
1692 
1693 	if (flags & PFR_FLAG_ALLRSETS)
1694 		return (V_pfr_ktable_cnt);
1695 	if (filter->pfrt_anchor[0]) {
1696 		rs = pf_find_ruleset(filter->pfrt_anchor);
1697 		return ((rs != NULL) ? rs->tables : -1);
1698 	}
1699 	return (pf_main_ruleset.tables);
1700 }
1701 
1702 static int
1703 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1704 {
1705 	if (flags & PFR_FLAG_ALLRSETS)
1706 		return (0);
1707 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1708 		return (1);
1709 	return (0);
1710 }
1711 
1712 static void
1713 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1714 {
1715 	struct pfr_ktable	*p;
1716 
1717 	SLIST_FOREACH(p, workq, pfrkt_workq)
1718 		pfr_insert_ktable(p);
1719 }
1720 
1721 static void
1722 pfr_insert_ktable(struct pfr_ktable *kt)
1723 {
1724 
1725 	PF_RULES_WASSERT();
1726 
1727 	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1728 	V_pfr_ktable_cnt++;
1729 	if (kt->pfrkt_root != NULL)
1730 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1731 			pfr_setflags_ktable(kt->pfrkt_root,
1732 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1733 }
1734 
1735 static void
1736 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1737 {
1738 	struct pfr_ktable	*p, *q;
1739 
1740 	for (p = SLIST_FIRST(workq); p; p = q) {
1741 		q = SLIST_NEXT(p, pfrkt_workq);
1742 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1743 	}
1744 }
1745 
1746 static void
1747 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1748 {
1749 	struct pfr_kentryworkq	addrq;
1750 
1751 	PF_RULES_WASSERT();
1752 
1753 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1754 	    !(newf & PFR_TFLAG_PERSIST))
1755 		newf &= ~PFR_TFLAG_ACTIVE;
1756 	if (!(newf & PFR_TFLAG_ACTIVE))
1757 		newf &= ~PFR_TFLAG_USRMASK;
1758 	if (!(newf & PFR_TFLAG_SETMASK)) {
1759 		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1760 		if (kt->pfrkt_root != NULL)
1761 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1762 				pfr_setflags_ktable(kt->pfrkt_root,
1763 				    kt->pfrkt_root->pfrkt_flags &
1764 					~PFR_TFLAG_REFDANCHOR);
1765 		pfr_destroy_ktable(kt, 1);
1766 		V_pfr_ktable_cnt--;
1767 		return;
1768 	}
1769 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1770 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1771 		pfr_remove_kentries(kt, &addrq);
1772 	}
1773 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1774 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1775 		kt->pfrkt_shadow = NULL;
1776 	}
1777 	kt->pfrkt_flags = newf;
1778 }
1779 
1780 static void
1781 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
1782 {
1783 	struct pfr_ktable	*p;
1784 
1785 	SLIST_FOREACH(p, workq, pfrkt_workq)
1786 		pfr_clstats_ktable(p, tzero, recurse);
1787 }
1788 
1789 static void
1790 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
1791 {
1792 	struct pfr_kentryworkq	 addrq;
1793 
1794 	if (recurse) {
1795 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1796 		pfr_clstats_kentries(&addrq, tzero, 0);
1797 	}
1798 	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
1799 	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
1800 	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1801 	kt->pfrkt_tzero = tzero;
1802 }
1803 
1804 static struct pfr_ktable *
1805 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
1806 {
1807 	struct pfr_ktable	*kt;
1808 	struct pf_ruleset	*rs;
1809 
1810 	PF_RULES_WASSERT();
1811 
1812 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1813 	if (kt == NULL)
1814 		return (NULL);
1815 	kt->pfrkt_t = *tbl;
1816 
1817 	if (attachruleset) {
1818 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
1819 		if (!rs) {
1820 			pfr_destroy_ktable(kt, 0);
1821 			return (NULL);
1822 		}
1823 		kt->pfrkt_rs = rs;
1824 		rs->tables++;
1825 	}
1826 
1827 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
1828 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
1829 	    !rn_inithead((void **)&kt->pfrkt_ip6,
1830 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1831 		pfr_destroy_ktable(kt, 0);
1832 		return (NULL);
1833 	}
1834 	kt->pfrkt_tzero = tzero;
1835 
1836 	return (kt);
1837 }
1838 
1839 static void
1840 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1841 {
1842 	struct pfr_ktable	*p, *q;
1843 
1844 	for (p = SLIST_FIRST(workq); p; p = q) {
1845 		q = SLIST_NEXT(p, pfrkt_workq);
1846 		pfr_destroy_ktable(p, flushaddr);
1847 	}
1848 }
1849 
1850 static void
1851 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1852 {
1853 	struct pfr_kentryworkq	 addrq;
1854 
1855 	if (flushaddr) {
1856 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1857 		pfr_clean_node_mask(kt, &addrq);
1858 		pfr_destroy_kentries(&addrq);
1859 	}
1860 	if (kt->pfrkt_ip4 != NULL)
1861 		rn_detachhead((void **)&kt->pfrkt_ip4);
1862 	if (kt->pfrkt_ip6 != NULL)
1863 		rn_detachhead((void **)&kt->pfrkt_ip6);
1864 	if (kt->pfrkt_shadow != NULL)
1865 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1866 	if (kt->pfrkt_rs != NULL) {
1867 		kt->pfrkt_rs->tables--;
1868 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1869 	}
1870 	free(kt, M_PFTABLE);
1871 }
1872 
1873 static int
1874 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
1875 {
1876 	int d;
1877 
1878 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
1879 		return (d);
1880 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
1881 }
1882 
1883 static struct pfr_ktable *
1884 pfr_lookup_table(struct pfr_table *tbl)
1885 {
1886 	/* a struct pfr_ktable starts like a struct pfr_table */
1887 	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
1888 	    (struct pfr_ktable *)tbl));
1889 }
1890 
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

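/*
 * Account one packet of len bytes against table kt.  The address is
 * looked up again so that per-entry counters can be updated when the
 * table has PFR_TFLAG_COUNTERS set; those counters are allocated
 * lazily from V_pfr_kcounters_z.  A lookup result that disagrees with
 * the rule's view of the match (notrule) is recorded under
 * PFR_OP_XPASS.
 */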
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
			    M_NOWAIT | M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}

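/*
 * Return a rule-referenced table, creating it on first use.  For an
 * anchored table the same-named table at the main ruleset level is
 * created as well and linked as the root table.  The first rule
 * reference also raises PFR_TFLAG_REFERENCED via
 * pfr_setflags_ktable().
 */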
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

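/*
 * Drop a rule's reference on a table.  When the last rule reference
 * goes away, PFR_TFLAG_REFERENCED is cleared so that
 * pfr_setflags_ktable() can dispose of a table that is otherwise
 * unused.
 */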
void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

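/*
 * Round-robin address selection from a table, for table-backed pf
 * pools.  *pidx is the index of the current block (table entry) and
 * *counter the address last handed out; the next address is written
 * back to *counter.  Nested blocks are stepped over by advancing the
 * counter past them.  Returns 0 on success, 1 once the table is
 * exhausted, and -1 if the table is not active.
 */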
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr		 *addr, *cur, *mask;
	union sockaddr_union	 uaddr, umask;
	struct pfr_kentry	*ke, *ke2 = NULL;
	int			 idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    kt->pfrkt_ip4);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    kt->pfrkt_ip6);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

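/*
 * Fetch the idx'th entry of the given address family by walking the
 * corresponding radix tree with a PFRW_POOL_GET walker; NULL is
 * returned for an unsupported family or an index beyond the end of
 * the table.
 */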
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

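/*
 * Refresh a dynamic address from the table contents: the per-family
 * address counts are reset here and recomputed by the
 * PFRW_DYNADDR_UPDATE walker over the relevant radix tree(s).
 */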
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
}
