xref: /freebsd/sys/netpfil/pf/pf_table.c (revision 595e514d0df2bac5b813d35f83e32875dbf16a83)
/*-
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

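/* Reject any ioctl flags outside the per-request whitelist. */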
#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

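/* Context handed to pfr_walktree() when iterating a table's radix trees. */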
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, long);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, long, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_table_count(struct pfr_table *, int);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

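/*
 * Create the UMA zones backing table entries and per-entry counters,
 * and hook the entry zone into the PF_LIMIT_TABLE_ENTRIES limit.
 */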
void
pfr_initialize(void)
{

	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pfr_kcounters_z = uma_zcreate("pf table counters",
	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kcounters_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n'.
	 * One is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
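	/*
	 * Example: with kt->pfrkt_cnt == 1024 the loop below leaves
	 * log == 12, so requests deleting more than 1024/12 == 85
	 * addresses take the full-scan branch.
	 */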
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

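/*
 * Replace the table's contents with the given address list: entries not
 * present in the list are deleted, new ones are added, and entries whose
 * negation flag differs are changed.  With PFR_FLAG_FEEDBACK, deleted
 * addresses are reported back in the slots following the input array
 * (bounded by *size2).
 */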
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(&workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
		    &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
		    &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

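/*
 * Look up an address in the table's radix trees.  Prefix entries use a
 * masked rn_lookup(); host addresses use rn_match(), and with 'exact'
 * set a covering network entry does not count as a match.
 */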
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

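/*
 * Build a network mask sockaddr for the given af/prefix length; e.g.
 * af == AF_INET, net == 24 produces an IPv4 mask of 255.255.255.0.
 */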
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

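/*
 * Move a committed shadow table into place.  Three cases: the ticket
 * defined no addresses (NO_ADDRESSES), the target table is active and
 * must be merged entry by entry, or the target is inactive and the
 * radix heads can simply be swapped wholesale.
 */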
static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
		& ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
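/* For example, an anchor of "/foo" is rewritten in place to "foo". */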
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
		free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	}
	if (kt->pfrkt_ip6 != NULL) {
		RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
		free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	}
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

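/*
 * Data-path lookup: returns nonzero when the address matches a
 * non-negated entry.  An inactive table defers to its root
 * (anchor-less) table when one exists.
 */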
1890 int
1891 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
1892 {
1893 	struct pfr_kentry	*ke = NULL;
1894 	int			 match;
1895 
1896 	PF_RULES_RASSERT();
1897 
1898 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1899 		kt = kt->pfrkt_root;
1900 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1901 		return (0);
1902 
1903 	switch (af) {
1904 #ifdef INET
1905 	case AF_INET:
1906 	    {
1907 		struct sockaddr_in sin;
1908 
1909 		bzero(&sin, sizeof(sin));
1910 		sin.sin_len = sizeof(sin);
1911 		sin.sin_family = AF_INET;
1912 		sin.sin_addr.s_addr = a->addr32[0];
1913 		ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
1914 		if (ke && KENTRY_RNF_ROOT(ke))
1915 			ke = NULL;
1916 		break;
1917 	    }
1918 #endif /* INET */
1919 #ifdef INET6
1920 	case AF_INET6:
1921 	    {
1922 		struct sockaddr_in6 sin6;
1923 
1924 		bzero(&sin6, sizeof(sin6));
1925 		sin6.sin6_len = sizeof(sin6);
1926 		sin6.sin6_family = AF_INET6;
1927 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1928 		ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
1929 		if (ke && KENTRY_RNF_ROOT(ke))
1930 			ke = NULL;
1931 		break;
1932 	    }
1933 #endif /* INET6 */
1934 	}
1935 	match = (ke && !ke->pfrke_not);
1936 	if (match)
1937 		kt->pfrkt_match++;
1938 	else
1939 		kt->pfrkt_nomatch++;
1940 	return (match);
1941 }
1942 
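/*
 * Account a packet of len bytes against the table's packet/byte
 * counters and, if PFR_TFLAG_COUNTERS is set, against the matching
 * entry as well.  When the lookup result contradicts the rule's
 * expectation (notrule), the traffic is counted as PFR_OP_XPASS.
 */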
1943 void
1944 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
1945     u_int64_t len, int dir_out, int op_pass, int notrule)
1946 {
1947 	struct pfr_kentry	*ke = NULL;
1948 
1949 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1950 		kt = kt->pfrkt_root;
1951 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1952 		return;
1953 
1954 	switch (af) {
1955 #ifdef INET
1956 	case AF_INET:
1957 	    {
1958 		struct sockaddr_in sin;
1959 
1960 		bzero(&sin, sizeof(sin));
1961 		sin.sin_len = sizeof(sin);
1962 		sin.sin_family = AF_INET;
1963 		sin.sin_addr.s_addr = a->addr32[0];
1964 		ke = (struct pfr_kentry *)rn_match(&sin, kt->pfrkt_ip4);
1965 		if (ke && KENTRY_RNF_ROOT(ke))
1966 			ke = NULL;
1967 		break;
1968 	    }
1969 #endif /* INET */
1970 #ifdef INET6
1971 	case AF_INET6:
1972 	    {
1973 		struct sockaddr_in6 sin6;
1974 
1975 		bzero(&sin6, sizeof(sin6));
1976 		sin6.sin6_len = sizeof(sin6);
1977 		sin6.sin6_family = AF_INET6;
1978 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
1979 		ke = (struct pfr_kentry *)rn_match(&sin6, kt->pfrkt_ip6);
1980 		if (ke && KENTRY_RNF_ROOT(ke))
1981 			ke = NULL;
1982 		break;
1983 	    }
1984 #endif /* INET6 */
1985 	default:
1986 		panic("%s: unknown address family %u", __func__, af);
1987 	}
1988 	if ((ke == NULL || ke->pfrke_not) != notrule) {
1989 		if (op_pass != PFR_OP_PASS)
1990 			printf("pfr_update_stats: assertion failed.\n");
1991 		op_pass = PFR_OP_XPASS;
1992 	}
1993 	kt->pfrkt_packets[dir_out][op_pass]++;
1994 	kt->pfrkt_bytes[dir_out][op_pass] += len;
1995 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
1996 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
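		/* allocate per-entry counters lazily; skip the update on failure */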
1997 		if (ke->pfrke_counters == NULL)
1998 			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
1999 			    M_NOWAIT | M_ZERO);
2000 		if (ke->pfrke_counters != NULL) {
2001 			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
2002 			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
2003 		}
2004 	}
2005 }
2006 
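/*
 * Find or create the table referenced by a rule and acquire a rule
 * reference on it.  A table inside an anchor is also linked to a root
 * table of the same name in the main ruleset, which is created if it
 * does not exist yet.
 */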
2007 struct pfr_ktable *
2008 pfr_attach_table(struct pf_ruleset *rs, char *name)
2009 {
2010 	struct pfr_ktable	*kt, *rt;
2011 	struct pfr_table	 tbl;
2012 	struct pf_anchor	*ac = rs->anchor;
2013 
2014 	PF_RULES_WASSERT();
2015 
2016 	bzero(&tbl, sizeof(tbl));
2017 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2018 	if (ac != NULL)
2019 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2020 	kt = pfr_lookup_table(&tbl);
2021 	if (kt == NULL) {
2022 		kt = pfr_create_ktable(&tbl, time_second, 1);
2023 		if (kt == NULL)
2024 			return (NULL);
2025 		if (ac != NULL) {
2026 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2027 			rt = pfr_lookup_table(&tbl);
2028 			if (rt == NULL) {
2029 				rt = pfr_create_ktable(&tbl, 0, 1);
2030 				if (rt == NULL) {
2031 					pfr_destroy_ktable(kt, 0);
2032 					return (NULL);
2033 				}
2034 				pfr_insert_ktable(rt);
2035 			}
2036 			kt->pfrkt_root = rt;
2037 		}
2038 		pfr_insert_ktable(kt);
2039 	}
2040 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2041 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2042 	return (kt);
2043 }
2044 
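/*
 * Release a rule reference on the table; the last release clears the
 * REFERENCED flag.
 */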
2045 void
2046 pfr_detach_table(struct pfr_ktable *kt)
2047 {
2048 
2049 	PF_RULES_WASSERT();
2050 	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2051 	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2052 
2053 	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2054 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2055 }
2056 
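/*
 * Select an address from the table for a round-robin address pool.
 * Iteration state is carried in *pidx (index of the current block in
 * the table) and *counter (current address within that block); nested
 * blocks are stepped over so each address is handed out at most once
 * per cycle.  Returns 0 on success, 1 when the table has no further
 * suitable entry and -1 when the table is not active.
 *
 * Sketch of a typical caller (along the lines of pf_map_addr() with a
 * round-robin pool; the variable names here are illustrative):
 *
 *	if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter, af) != 0)
 *		return (1);
 *	PF_ACPY(naddr, &rpool->counter, af);
 */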
2057 int
2058 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2059     sa_family_t af)
2060 {
2061 	struct pf_addr		 *addr, *cur, *mask;
2062 	union sockaddr_union	 uaddr, umask;
2063 	struct pfr_kentry	*ke, *ke2 = NULL;
2064 	int			 idx = -1, use_counter = 0;
2065 
2066 	switch (af) {
2067 	case AF_INET:
2068 		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2069 		uaddr.sin.sin_family = AF_INET;
2070 		break;
2071 	case AF_INET6:
2072 		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2073 		uaddr.sin6.sin6_family = AF_INET6;
2074 		break;
2075 	}
2076 	addr = SUNION2PF(&uaddr, af);
2077 
2078 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2079 		kt = kt->pfrkt_root;
2080 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2081 		return (-1);
2082 
2083 	if (pidx != NULL)
2084 		idx = *pidx;
2085 	if (counter != NULL && idx >= 0)
2086 		use_counter = 1;
2087 	if (idx < 0)
2088 		idx = 0;
2089 
2090 _next_block:
2091 	ke = pfr_kentry_byidx(kt, idx, af);
2092 	if (ke == NULL) {
2093 		kt->pfrkt_nomatch++;
2094 		return (1);
2095 	}
2096 	pfr_prepare_network(&umask, af, ke->pfrke_net);
2097 	cur = SUNION2PF(&ke->pfrke_sa, af);
2098 	mask = SUNION2PF(&umask, af);
2099 
2100 	if (use_counter) {
2101 		/* is the supplied address within the block? */
2102 		if (!PF_MATCHA(0, cur, mask, counter, af)) {
2103 			/* no, go to next block in table */
2104 			idx++;
2105 			use_counter = 0;
2106 			goto _next_block;
2107 		}
2108 		PF_ACPY(addr, counter, af);
2109 	} else {
2110 		/* use first address of block */
2111 		PF_ACPY(addr, cur, af);
2112 	}
2113 
2114 	if (!KENTRY_NETWORK(ke)) {
2115 		/* this is a single IP address - no possible nested block */
2116 		PF_ACPY(counter, addr, af);
2117 		*pidx = idx;
2118 		kt->pfrkt_match++;
2119 		return (0);
2120 	}
2121 	for (;;) {
2122 		/* we don't want to use a nested block */
2123 		switch (af) {
2124 		case AF_INET:
2125 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2126 			    kt->pfrkt_ip4);
2127 			break;
2128 		case AF_INET6:
2129 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2130 			    kt->pfrkt_ip6);
2131 			break;
2132 		}
2133 		/* no need to check KENTRY_RNF_ROOT() here */
2134 		if (ke2 == ke) {
2135 			/* lookup returned the same block - perfect */
2136 			PF_ACPY(counter, addr, af);
2137 			*pidx = idx;
2138 			kt->pfrkt_match++;
2139 			return (0);
2140 		}
2141 
2142 		/* we need to increase the counter past the nested block */
2143 		pfr_prepare_network(&umask, af, ke2->pfrke_net);
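		/* go to the last address of the nested block, then one past it */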
2144 		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
2145 		PF_AINC(addr, af);
2146 		if (!PF_MATCHA(0, cur, mask, addr, af)) {
2147 			/* ok, we reached the end of our main block */
2148 			/* go to next block in table */
2149 			idx++;
2150 			use_counter = 0;
2151 			goto _next_block;
2152 		}
2153 	}
2154 }
2155 
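/*
 * Return the idx-th pool-eligible entry of the given address family by
 * walking the corresponding radix tree (negated entries are skipped by
 * the walker), or NULL if no such entry exists.
 */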
2156 static struct pfr_kentry *
2157 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2158 {
2159 	struct pfr_walktree	w;
2160 
2161 	bzero(&w, sizeof(w));
2162 	w.pfrw_op = PFRW_POOL_GET;
2163 	w.pfrw_cnt = idx;
2164 
2165 	switch (af) {
2166 #ifdef INET
2167 	case AF_INET:
2168 		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2169 		return (w.pfrw_kentry);
2170 #endif /* INET */
2171 #ifdef INET6
2172 	case AF_INET6:
2173 		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2174 		return (w.pfrw_kentry);
2175 #endif /* INET6 */
2176 	default:
2177 		return (NULL);
2178 	}
2179 }
2180 
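/*
 * Refresh a dynamic address backed by this table: reset the per-family
 * address counts and recompute them by walking the matching radix
 * tree(s).
 */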
2181 void
2182 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2183 {
2184 	struct pfr_walktree	w;
2185 
2186 	bzero(&w, sizeof(w));
2187 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2188 	w.pfrw_dyn = dyn;
2189 
2190 	dyn->pfid_acnt4 = 0;
2191 	dyn->pfid_acnt6 = 0;
2192 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2193 		kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2194 	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2195 		kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2196 }
2197