xref: /freebsd/sys/netpfil/pf/pf_table.c (revision 08ed87a4a2769cf6294efdd908b0ed4d29ab49b4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002 Cedric Berger
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  *    - Redistributions of source code must retain the above copyright
12  *      notice, this list of conditions and the following disclaimer.
13  *    - Redistributions in binary form must reproduce the above
14  *      copyright notice, this list of conditions and the following
15  *      disclaimer in the documentation and/or other materials provided
16  *      with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32  */
33 
34 #include <sys/cdefs.h>
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/mutex.h>
44 #include <sys/refcount.h>
45 #include <sys/socket.h>
46 #include <vm/uma.h>
47 
48 #include <net/if.h>
49 #include <net/vnet.h>
50 #include <net/pfvar.h>
51 
52 #define	ACCEPT_FLAGS(flags, oklist)		\
53 	do {					\
54 		if ((flags & ~(oklist)) &	\
55 		    PFR_FLAG_ALLMASK)		\
56 			return (EINVAL);	\
57 	} while (0)
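/*
 * For illustration (derived from the callers below, not a new API):
 * pfr_clr_addrs() passes ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY), so invoking
 * it with PFR_FLAG_FEEDBACK set fails early with EINVAL.
 */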
58 
59 #define	FILLIN_SIN(sin, addr)			\
60 	do {					\
61 		(sin).sin_len = sizeof(sin);	\
62 		(sin).sin_family = AF_INET;	\
63 		(sin).sin_addr = (addr);	\
64 	} while (0)
65 
66 #define	FILLIN_SIN6(sin6, addr)			\
67 	do {					\
68 		(sin6).sin6_len = sizeof(sin6);	\
69 		(sin6).sin6_family = AF_INET6;	\
70 		(sin6).sin6_addr = (addr);	\
71 	} while (0)
72 
73 #define	SWAP(type, a1, a2)			\
74 	do {					\
75 		type tmp = a1;			\
76 		a1 = a2;			\
77 		a2 = tmp;			\
78 	} while (0)
79 
80 #define	AF_BITS(af)		(((af)==AF_INET)?32:128)
81 #define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
82 #define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
83 #define	KENTRY_RNF_ROOT(ke) \
84 		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
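/*
 * For illustration: an IPv4 entry for 10.0.0.0/24 has pfra_net = 24 <
 * AF_BITS(AF_INET) = 32, so ADDR_NETWORK() is true; a /32 (or IPv6 /128)
 * host entry is not a network and is looked up exactly.
 */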
85 
86 #define	NO_ADDRESSES		(-1)
87 #define	ENQUEUE_UNMARKED_ONLY	(1)
88 #define	INVERT_NEG_FLAG		(1)
89 
90 struct pfr_walktree {
91 	enum pfrw_op {
92 		PFRW_MARK,
93 		PFRW_SWEEP,
94 		PFRW_ENQUEUE,
95 		PFRW_GET_ADDRS,
96 		PFRW_GET_ASTATS,
97 		PFRW_POOL_GET,
98 		PFRW_DYNADDR_UPDATE,
99 		PFRW_COUNTERS
100 	}	 pfrw_op;
101 	union {
102 		struct pfr_addr		*pfrw_addr;
103 		struct pfr_astats	*pfrw_astats;
104 		struct pfr_kentryworkq	*pfrw_workq;
105 		struct pfr_kentry	*pfrw_kentry;
106 		struct pfi_dynaddr	*pfrw_dyn;
107 	};
108 	int	 pfrw_free;
109 	int	 pfrw_flags;
110 };
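/*
 * Which union member is valid depends on pfrw_op: pfrw_workq for
 * PFRW_SWEEP/PFRW_ENQUEUE, pfrw_addr for PFRW_GET_ADDRS, pfrw_astats for
 * PFRW_GET_ASTATS, pfrw_kentry for PFRW_POOL_GET and pfrw_dyn for
 * PFRW_DYNADDR_UPDATE; see pfr_walktree().
 */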
111 
112 #define	senderr(e)	do { rv = (e); goto _bad; } while (0)
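/*
 * senderr() assumes the enclosing function declares 'int rv' and provides
 * a '_bad:' label that releases temporary tables and work queues before
 * returning the error.
 */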
113 
114 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
115 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
116 #define	V_pfr_kentry_z		VNET(pfr_kentry_z)
117 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
118 #define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)
119 
120 static struct pf_addr	 pfr_ffaddr = {
121 	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
122 };
123 
124 static void		 pfr_copyout_astats(struct pfr_astats *,
125 			    const struct pfr_kentry *,
126 			    const struct pfr_walktree *);
127 static void		 pfr_copyout_addr(struct pfr_addr *,
128 			    const struct pfr_kentry *ke);
129 static int		 pfr_validate_addr(struct pfr_addr *);
130 static void		 pfr_enqueue_addrs(struct pfr_ktable *,
131 			    struct pfr_kentryworkq *, int *, int);
132 static void		 pfr_mark_addrs(struct pfr_ktable *);
133 static struct pfr_kentry
134 			*pfr_lookup_addr(struct pfr_ktable *,
135 			    struct pfr_addr *, int);
136 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
137 static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
138 static void		 pfr_destroy_kentry(struct pfr_kentry *);
139 static void		 pfr_insert_kentries(struct pfr_ktable *,
140 			    struct pfr_kentryworkq *, time_t);
141 static void		 pfr_remove_kentries(struct pfr_ktable *,
142 			    struct pfr_kentryworkq *);
143 static void		 pfr_clstats_kentries(struct pfr_ktable *,
144 			    struct pfr_kentryworkq *, time_t, int);
145 static void		 pfr_reset_feedback(struct pfr_addr *, int);
146 static void		 pfr_prepare_network(union sockaddr_union *, int, int);
147 static int		 pfr_route_kentry(struct pfr_ktable *,
148 			    struct pfr_kentry *);
149 static int		 pfr_unroute_kentry(struct pfr_ktable *,
150 			    struct pfr_kentry *);
151 static int		 pfr_walktree(struct radix_node *, void *);
152 static int		 pfr_validate_table(struct pfr_table *, int, int);
153 static int		 pfr_fix_anchor(char *);
154 static void		 pfr_commit_ktable(struct pfr_ktable *, time_t);
155 static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
156 static void		 pfr_insert_ktable(struct pfr_ktable *);
157 static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
158 static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
159 static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
160 			    int);
161 static void		 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
162 static struct pfr_ktable
163 			*pfr_create_ktable(struct pfr_table *, time_t, int);
164 static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
165 static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
166 static int		 pfr_ktable_compare(struct pfr_ktable *,
167 			    struct pfr_ktable *);
168 static struct pfr_ktable
169 			*pfr_lookup_table(struct pfr_table *);
170 static void		 pfr_clean_node_mask(struct pfr_ktable *,
171 			    struct pfr_kentryworkq *);
172 static int		 pfr_skip_table(struct pfr_table *,
173 			    struct pfr_ktable *, int);
174 static struct pfr_kentry
175 			*pfr_kentry_byidx(struct pfr_ktable *, int, int);
176 
177 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
178 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
179 
180 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
181 #define	V_pfr_ktables	VNET(pfr_ktables)
182 
183 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
184 #define	V_pfr_nulltable	VNET(pfr_nulltable)
185 
186 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
187 #define V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)
188 
189 void
190 pfr_initialize(void)
191 {
192 
193 	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
194 	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
195 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
196 	V_pfr_kentry_z = uma_zcreate("pf table entries",
197 	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
198 	    0);
199 	uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
200 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
201 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
202 }
203 
204 void
205 pfr_cleanup(void)
206 {
207 
208 	uma_zdestroy(V_pfr_kentry_z);
209 	uma_zdestroy(V_pfr_kentry_counter_z);
210 }
211 
212 int
213 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
214 {
215 	struct pfr_ktable	*kt;
216 	struct pfr_kentryworkq	 workq;
217 
218 	PF_RULES_WASSERT();
219 
220 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
221 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
222 		return (EINVAL);
223 	kt = pfr_lookup_table(tbl);
224 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
225 		return (ESRCH);
226 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
227 		return (EPERM);
228 	pfr_enqueue_addrs(kt, &workq, ndel, 0);
229 
230 	if (!(flags & PFR_FLAG_DUMMY)) {
231 		pfr_remove_kentries(kt, &workq);
232 		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-zero pfrkt_cnt", __func__));
233 	}
234 	return (0);
235 }
236 
237 int
238 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
239     int *nadd, int flags)
240 {
241 	struct pfr_ktable	*kt, *tmpkt;
242 	struct pfr_kentryworkq	 workq;
243 	struct pfr_kentry	*p, *q;
244 	struct pfr_addr		*ad;
245 	int			 i, rv, xadd = 0;
246 	time_t			 tzero = time_second;
247 
248 	PF_RULES_WASSERT();
249 
250 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
251 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
252 		return (EINVAL);
253 	kt = pfr_lookup_table(tbl);
254 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
255 		return (ESRCH);
256 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
257 		return (EPERM);
258 	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
259 	if (tmpkt == NULL)
260 		return (ENOMEM);
261 	SLIST_INIT(&workq);
262 	for (i = 0, ad = addr; i < size; i++, ad++) {
263 		if (pfr_validate_addr(ad))
264 			senderr(EINVAL);
265 		p = pfr_lookup_addr(kt, ad, 1);
266 		q = pfr_lookup_addr(tmpkt, ad, 1);
267 		if (flags & PFR_FLAG_FEEDBACK) {
268 			if (q != NULL)
269 				ad->pfra_fback = PFR_FB_DUPLICATE;
270 			else if (p == NULL)
271 				ad->pfra_fback = PFR_FB_ADDED;
272 			else if (p->pfrke_not != ad->pfra_not)
273 				ad->pfra_fback = PFR_FB_CONFLICT;
274 			else
275 				ad->pfra_fback = PFR_FB_NONE;
276 		}
277 		if (p == NULL && q == NULL) {
278 			p = pfr_create_kentry(ad,
279 			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
280 			if (p == NULL)
281 				senderr(ENOMEM);
282 			if (pfr_route_kentry(tmpkt, p)) {
283 				pfr_destroy_kentry(p);
284 				ad->pfra_fback = PFR_FB_NONE;
285 			} else {
286 				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
287 				xadd++;
288 			}
289 		}
290 	}
291 	pfr_clean_node_mask(tmpkt, &workq);
292 	if (!(flags & PFR_FLAG_DUMMY))
293 		pfr_insert_kentries(kt, &workq, tzero);
294 	else
295 		pfr_destroy_kentries(&workq);
296 	if (nadd != NULL)
297 		*nadd += xadd;
298 	pfr_destroy_ktable(tmpkt, 0);
299 	return (0);
300 _bad:
301 	pfr_clean_node_mask(tmpkt, &workq);
302 	pfr_destroy_kentries(&workq);
303 	if (flags & PFR_FLAG_FEEDBACK)
304 		pfr_reset_feedback(addr, size);
305 	pfr_destroy_ktable(tmpkt, 0);
306 	return (rv);
307 }
308 
309 int
310 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
311     int *ndel, int flags)
312 {
313 	struct pfr_ktable	*kt;
314 	struct pfr_kentryworkq	 workq;
315 	struct pfr_kentry	*p;
316 	struct pfr_addr		*ad;
317 	int			 i, rv, xdel = 0, log = 1;
318 
319 	PF_RULES_WASSERT();
320 
321 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
322 	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
323 		return (EINVAL);
324 	kt = pfr_lookup_table(tbl);
325 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
326 		return (ESRCH);
327 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
328 		return (EPERM);
329 	/*
330 	 * there are two algorithms to choose from here.
331 	 * with:
332 	 *   n: number of addresses to delete
333 	 *   N: number of addresses in the table
334 	 *
335 	 * one is O(N) and is better for large 'n'
336 	 * one is O(n*LOG(N)) and is better for small 'n'
337 	 *
338 	 * the following code tries to decide which one is best.
339 	 */
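	/*
	 * For illustration: log ends up as floor(log2(N)) + 2, so for a
	 * table with N = 65536 entries log is 18, and any request deleting
	 * more than 65536/18 = 3640 (integer division) addresses is served
	 * by the O(N) full-table scan instead of per-address lookups.
	 */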
340 	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
341 		log++;
342 	if (size > kt->pfrkt_cnt/log) {
343 		/* full table scan */
344 		pfr_mark_addrs(kt);
345 	} else {
346 		/* iterate over addresses to delete */
347 		for (i = 0, ad = addr; i < size; i++, ad++) {
348 			if (pfr_validate_addr(ad))
349 				return (EINVAL);
350 			p = pfr_lookup_addr(kt, ad, 1);
351 			if (p != NULL)
352 				p->pfrke_mark = 0;
353 		}
354 	}
355 	SLIST_INIT(&workq);
356 	for (i = 0, ad = addr; i < size; i++, ad++) {
357 		if (pfr_validate_addr(ad))
358 			senderr(EINVAL);
359 		p = pfr_lookup_addr(kt, ad, 1);
360 		if (flags & PFR_FLAG_FEEDBACK) {
361 			if (p == NULL)
362 				ad->pfra_fback = PFR_FB_NONE;
363 			else if (p->pfrke_not != ad->pfra_not)
364 				ad->pfra_fback = PFR_FB_CONFLICT;
365 			else if (p->pfrke_mark)
366 				ad->pfra_fback = PFR_FB_DUPLICATE;
367 			else
368 				ad->pfra_fback = PFR_FB_DELETED;
369 		}
370 		if (p != NULL && p->pfrke_not == ad->pfra_not &&
371 		    !p->pfrke_mark) {
372 			p->pfrke_mark = 1;
373 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
374 			xdel++;
375 		}
376 	}
377 	if (!(flags & PFR_FLAG_DUMMY))
378 		pfr_remove_kentries(kt, &workq);
379 	if (ndel != NULL)
380 		*ndel = xdel;
381 	return (0);
382 _bad:
383 	if (flags & PFR_FLAG_FEEDBACK)
384 		pfr_reset_feedback(addr, size);
385 	return (rv);
386 }
387 
388 int
389 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
390     int *size2, int *nadd, int *ndel, int *nchange, int flags,
391     u_int32_t ignore_pfrt_flags)
392 {
393 	struct pfr_ktable	*kt, *tmpkt;
394 	struct pfr_kentryworkq	 addq, delq, changeq;
395 	struct pfr_kentry	*p, *q;
396 	struct pfr_addr		 ad;
397 	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
398 	time_t			 tzero = time_second;
399 
400 	PF_RULES_WASSERT();
401 
402 	ACCEPT_FLAGS(flags, PFR_FLAG_START | PFR_FLAG_DONE |
403 	    PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
404 	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
405 	    PFR_FLAG_USERIOCTL))
406 		return (EINVAL);
407 	kt = pfr_lookup_table(tbl);
408 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
409 		return (ESRCH);
410 	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
411 		return (EPERM);
412 	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
413 	if (tmpkt == NULL)
414 		return (ENOMEM);
415 	if (flags & PFR_FLAG_START)
416 		pfr_mark_addrs(kt);
417 	SLIST_INIT(&addq);
418 	SLIST_INIT(&delq);
419 	SLIST_INIT(&changeq);
420 	for (i = 0; i < size; i++) {
421 		/*
422 		 * XXXGL: understand pf_if usage of this function
423 		 * and make ad a moving pointer
424 		 */
425 		bcopy(addr + i, &ad, sizeof(ad));
426 		if (pfr_validate_addr(&ad))
427 			senderr(EINVAL);
428 		ad.pfra_fback = PFR_FB_NONE;
429 		p = pfr_lookup_addr(kt, &ad, 1);
430 		if (p != NULL) {
431 			if (p->pfrke_mark) {
432 				ad.pfra_fback = PFR_FB_DUPLICATE;
433 				goto _skip;
434 			}
435 			p->pfrke_mark = 1;
436 			if (p->pfrke_not != ad.pfra_not) {
437 				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
438 				ad.pfra_fback = PFR_FB_CHANGED;
439 				xchange++;
440 			}
441 		} else {
442 			q = pfr_lookup_addr(tmpkt, &ad, 1);
443 			if (q != NULL) {
444 				ad.pfra_fback = PFR_FB_DUPLICATE;
445 				goto _skip;
446 			}
447 			p = pfr_create_kentry(&ad,
448 			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
449 			if (p == NULL)
450 				senderr(ENOMEM);
451 			p->pfrke_mark = PFR_FB_ADDED;
452 			if (pfr_route_kentry(tmpkt, p)) {
453 				pfr_destroy_kentry(p);
454 				ad.pfra_fback = PFR_FB_NONE;
455 			} else {
456 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
457 				ad.pfra_fback = PFR_FB_ADDED;
458 				xadd++;
459 			}
460 		}
461 _skip:
462 		if (flags & PFR_FLAG_FEEDBACK)
463 			bcopy(&ad, addr + i, sizeof(ad));
464 	}
465 	if (flags & PFR_FLAG_DONE)
466 		pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
467 	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
468 		if (*size2 < size+xdel) {
469 			*size2 = size+xdel;
470 			senderr(0);
471 		}
472 		i = 0;
473 		SLIST_FOREACH(p, &delq, pfrke_workq) {
474 			pfr_copyout_addr(&ad, p);
475 			ad.pfra_fback = PFR_FB_DELETED;
476 			bcopy(&ad, addr + size + i, sizeof(ad));
477 			i++;
478 		}
479 	}
480 	pfr_clean_node_mask(tmpkt, &addq);
481 	if (!(flags & PFR_FLAG_DUMMY)) {
482 		pfr_insert_kentries(kt, &addq, tzero);
483 		pfr_remove_kentries(kt, &delq);
484 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
485 	} else
486 		pfr_destroy_kentries(&addq);
487 	if (nadd != NULL)
488 		*nadd = xadd;
489 	if (ndel != NULL)
490 		*ndel = xdel;
491 	if (nchange != NULL)
492 		*nchange = xchange;
493 	if ((flags & PFR_FLAG_FEEDBACK) && size2)
494 		*size2 = size+xdel;
495 	pfr_destroy_ktable(tmpkt, 0);
496 	return (0);
497 _bad:
498 	pfr_clean_node_mask(tmpkt, &addq);
499 	pfr_destroy_kentries(&addq);
500 	if (flags & PFR_FLAG_FEEDBACK)
501 		pfr_reset_feedback(addr, size);
502 	pfr_destroy_ktable(tmpkt, 0);
503 	return (rv);
504 }
505 
506 int
507 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
508 	int *nmatch, int flags)
509 {
510 	struct pfr_ktable	*kt;
511 	struct pfr_kentry	*p;
512 	struct pfr_addr		*ad;
513 	int			 i, xmatch = 0;
514 
515 	PF_RULES_RASSERT();
516 
517 	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
518 	if (pfr_validate_table(tbl, 0, 0))
519 		return (EINVAL);
520 	kt = pfr_lookup_table(tbl);
521 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
522 		return (ESRCH);
523 
524 	for (i = 0, ad = addr; i < size; i++, ad++) {
525 		if (pfr_validate_addr(ad))
526 			return (EINVAL);
527 		if (ADDR_NETWORK(ad))
528 			return (EINVAL);
529 		p = pfr_lookup_addr(kt, ad, 0);
530 		if (flags & PFR_FLAG_REPLACE)
531 			pfr_copyout_addr(ad, p);
532 		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
533 		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
534 		if (p != NULL && !p->pfrke_not)
535 			xmatch++;
536 	}
537 	if (nmatch != NULL)
538 		*nmatch = xmatch;
539 	return (0);
540 }
541 
542 int
543 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
544 	int flags)
545 {
546 	struct pfr_ktable	*kt;
547 	struct pfr_walktree	 w;
548 	int			 rv;
549 
550 	PF_RULES_RASSERT();
551 
552 	ACCEPT_FLAGS(flags, 0);
553 	if (pfr_validate_table(tbl, 0, 0))
554 		return (EINVAL);
555 	kt = pfr_lookup_table(tbl);
556 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
557 		return (ESRCH);
558 	if (kt->pfrkt_cnt > *size) {
559 		*size = kt->pfrkt_cnt;
560 		return (0);
561 	}
562 
563 	bzero(&w, sizeof(w));
564 	w.pfrw_op = PFRW_GET_ADDRS;
565 	w.pfrw_addr = addr;
566 	w.pfrw_free = kt->pfrkt_cnt;
567 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
568 	if (!rv)
569 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
570 		    pfr_walktree, &w);
571 	if (rv)
572 		return (rv);
573 
574 	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
575 	    w.pfrw_free));
576 
577 	*size = kt->pfrkt_cnt;
578 	return (0);
579 }
580 
581 int
582 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
583 	int flags)
584 {
585 	struct pfr_ktable	*kt;
586 	struct pfr_walktree	 w;
587 	struct pfr_kentryworkq	 workq;
588 	int			 rv;
589 	time_t			 tzero = time_second;
590 
591 	PF_RULES_RASSERT();
592 
593 	/* XXX PFR_FLAG_CLSTATS disabled */
594 	ACCEPT_FLAGS(flags, 0);
595 	if (pfr_validate_table(tbl, 0, 0))
596 		return (EINVAL);
597 	kt = pfr_lookup_table(tbl);
598 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
599 		return (ESRCH);
600 	if (kt->pfrkt_cnt > *size) {
601 		*size = kt->pfrkt_cnt;
602 		return (0);
603 	}
604 
605 	bzero(&w, sizeof(w));
606 	w.pfrw_op = PFRW_GET_ASTATS;
607 	w.pfrw_astats = addr;
608 	w.pfrw_free = kt->pfrkt_cnt;
609 	/*
610 	 * The flags below are for backward compatibility: it used to be
611 	 * possible to have a table without per-entry counters. Now they are
612 	 * always allocated; we just discard the data when reading it if the
613 	 * table is not configured to have counters.
614 	 */
615 	w.pfrw_flags = kt->pfrkt_flags;
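	/*
	 * See pfr_copyout_astats(): when PFR_TFLAG_COUNTERS is absent from
	 * pfrw_flags, each entry is copied out with zeroed counters and
	 * pfra_fback set to PFR_FB_NOCOUNT.
	 */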
616 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
617 	if (!rv)
618 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
619 		    pfr_walktree, &w);
620 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
621 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
622 		pfr_clstats_kentries(kt, &workq, tzero, 0);
623 	}
624 	if (rv)
625 		return (rv);
626 
627 	if (w.pfrw_free) {
628 		printf("pfr_get_astats: corruption detected (%d).\n",
629 		    w.pfrw_free);
630 		return (ENOTTY);
631 	}
632 	*size = kt->pfrkt_cnt;
633 	return (0);
634 }
635 
636 int
637 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
638     int *nzero, int flags)
639 {
640 	struct pfr_ktable	*kt;
641 	struct pfr_kentryworkq	 workq;
642 	struct pfr_kentry	*p;
643 	struct pfr_addr		*ad;
644 	int			 i, rv, xzero = 0;
645 
646 	PF_RULES_WASSERT();
647 
648 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
649 	if (pfr_validate_table(tbl, 0, 0))
650 		return (EINVAL);
651 	kt = pfr_lookup_table(tbl);
652 	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
653 		return (ESRCH);
654 	SLIST_INIT(&workq);
655 	for (i = 0, ad = addr; i < size; i++, ad++) {
656 		if (pfr_validate_addr(ad))
657 			senderr(EINVAL);
658 		p = pfr_lookup_addr(kt, ad, 1);
659 		if (flags & PFR_FLAG_FEEDBACK) {
660 			ad->pfra_fback = (p != NULL) ?
661 			    PFR_FB_CLEARED : PFR_FB_NONE;
662 		}
663 		if (p != NULL) {
664 			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
665 			xzero++;
666 		}
667 	}
668 
669 	if (!(flags & PFR_FLAG_DUMMY))
670 		pfr_clstats_kentries(kt, &workq, time_second, 0);
671 	if (nzero != NULL)
672 		*nzero = xzero;
673 	return (0);
674 _bad:
675 	if (flags & PFR_FLAG_FEEDBACK)
676 		pfr_reset_feedback(addr, size);
677 	return (rv);
678 }
679 
680 static int
681 pfr_validate_addr(struct pfr_addr *ad)
682 {
683 	int i;
684 
685 	switch (ad->pfra_af) {
686 #ifdef INET
687 	case AF_INET:
688 		if (ad->pfra_net > 32)
689 			return (-1);
690 		break;
691 #endif /* INET */
692 #ifdef INET6
693 	case AF_INET6:
694 		if (ad->pfra_net > 128)
695 			return (-1);
696 		break;
697 #endif /* INET6 */
698 	default:
699 		return (-1);
700 	}
701 	if (ad->pfra_net < 128 &&
702 		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
703 			return (-1);
704 	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
705 		if (((caddr_t)ad)[i])
706 			return (-1);
707 	if (ad->pfra_not && ad->pfra_not != 1)
708 		return (-1);
709 	if (ad->pfra_fback != PFR_FB_NONE)
710 		return (-1);
711 	return (0);
712 }
713 
714 static void
715 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
716 	int *naddr, int sweep)
717 {
718 	struct pfr_walktree	w;
719 
720 	SLIST_INIT(workq);
721 	bzero(&w, sizeof(w));
722 	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
723 	w.pfrw_workq = workq;
724 	if (kt->pfrkt_ip4 != NULL)
725 		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
726 		    pfr_walktree, &w))
727 			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
728 	if (kt->pfrkt_ip6 != NULL)
729 		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
730 		    pfr_walktree, &w))
731 			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
732 	if (naddr != NULL)
733 		*naddr = w.pfrw_free;
734 }
735 
736 static void
737 pfr_mark_addrs(struct pfr_ktable *kt)
738 {
739 	struct pfr_walktree	w;
740 
741 	bzero(&w, sizeof(w));
742 	w.pfrw_op = PFRW_MARK;
743 	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
744 		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
745 	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
746 		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
747 }
748 
749 static struct pfr_kentry *
750 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
751 {
752 	union sockaddr_union	 sa, mask;
753 	struct radix_head	*head = NULL;
754 	struct pfr_kentry	*ke;
755 
756 	PF_RULES_ASSERT();
757 
758 	bzero(&sa, sizeof(sa));
759 	switch (ad->pfra_af) {
760 	case AF_INET:
761 		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
762 		head = &kt->pfrkt_ip4->rh;
763 		break;
764 	case AF_INET6:
765 		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
766 		head = &kt->pfrkt_ip6->rh;
767 		break;
768 	default:
769 		unhandled_af(ad->pfra_af);
770 	}
771 	if (ADDR_NETWORK(ad)) {
772 		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
773 		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
774 		if (ke && KENTRY_RNF_ROOT(ke))
775 			ke = NULL;
776 	} else {
777 		ke = (struct pfr_kentry *)rn_match(&sa, head);
778 		if (ke && KENTRY_RNF_ROOT(ke))
779 			ke = NULL;
780 		if (exact && ke && KENTRY_NETWORK(ke))
781 			ke = NULL;
782 	}
783 	return (ke);
784 }
785 
786 static struct pfr_kentry *
787 pfr_create_kentry(struct pfr_addr *ad, bool counters)
788 {
789 	struct pfr_kentry	*ke;
790 	counter_u64_t		 c;
791 
792 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
793 	if (ke == NULL)
794 		return (NULL);
795 
796 	switch (ad->pfra_af) {
797 	case AF_INET:
798 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
799 		break;
800 	case AF_INET6:
801 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
802 		break;
803 	default:
804 		unhandled_af(ad->pfra_af);
805 	}
806 	ke->pfrke_af = ad->pfra_af;
807 	ke->pfrke_net = ad->pfra_net;
808 	ke->pfrke_not = ad->pfra_not;
809 	ke->pfrke_counters.pfrkc_tzero = 0;
810 	if (counters) {
811 		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
812 		if (c == NULL) {
813 			pfr_destroy_kentry(ke);
814 			return (NULL);
815 		}
816 		ke->pfrke_counters.pfrkc_counters = c;
817 	}
818 	return (ke);
819 }
820 
821 static void
822 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
823 {
824 	struct pfr_kentry	*p;
825 
826 	while ((p = SLIST_FIRST(workq)) != NULL) {
827 		SLIST_REMOVE_HEAD(workq, pfrke_workq);
828 		pfr_destroy_kentry(p);
829 	}
830 }
831 
832 static void
833 pfr_destroy_kentry(struct pfr_kentry *ke)
834 {
835 	counter_u64_t c;
836 
837 	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
838 		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
839 	uma_zfree(V_pfr_kentry_z, ke);
840 }
841 
842 static void
843 pfr_insert_kentries(struct pfr_ktable *kt,
844     struct pfr_kentryworkq *workq, time_t tzero)
845 {
846 	struct pfr_kentry	*p;
847 	int			 rv, n = 0;
848 
849 	SLIST_FOREACH(p, workq, pfrke_workq) {
850 		rv = pfr_route_kentry(kt, p);
851 		if (rv) {
852 			printf("pfr_insert_kentries: cannot route entry "
853 			    "(code=%d).\n", rv);
854 			break;
855 		}
856 		p->pfrke_counters.pfrkc_tzero = tzero;
857 		n++;
858 	}
859 	kt->pfrkt_cnt += n;
860 }
861 
862 int
863 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
864 {
865 	struct pfr_kentry	*p;
866 	int			 rv;
867 
868 	p = pfr_lookup_addr(kt, ad, 1);
869 	if (p != NULL)
870 		return (0);
871 	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
872 	if (p == NULL)
873 		return (ENOMEM);
874 
875 	rv = pfr_route_kentry(kt, p);
876 	if (rv)
877 		return (rv);
878 
879 	p->pfrke_counters.pfrkc_tzero = tzero;
880 	kt->pfrkt_cnt++;
881 
882 	return (0);
883 }
884 
885 static void
886 pfr_remove_kentries(struct pfr_ktable *kt,
887     struct pfr_kentryworkq *workq)
888 {
889 	struct pfr_kentry	*p;
890 	int			 n = 0;
891 
892 	SLIST_FOREACH(p, workq, pfrke_workq) {
893 		pfr_unroute_kentry(kt, p);
894 		n++;
895 	}
896 	kt->pfrkt_cnt -= n;
897 	pfr_destroy_kentries(workq);
898 }
899 
900 static void
901 pfr_clean_node_mask(struct pfr_ktable *kt,
902     struct pfr_kentryworkq *workq)
903 {
904 	struct pfr_kentry	*p;
905 
906 	SLIST_FOREACH(p, workq, pfrke_workq)
907 		pfr_unroute_kentry(kt, p);
908 }
909 
910 static void
911 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
912     time_t tzero, int negchange)
913 {
914 	struct pfr_kentry	*p;
915 	int			 i;
916 
917 	SLIST_FOREACH(p, workq, pfrke_workq) {
918 		if (negchange)
919 			p->pfrke_not = !p->pfrke_not;
920 		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
921 			for (i = 0; i < PFR_NUM_COUNTERS; i++)
922 				counter_u64_zero(
923 				    p->pfrke_counters.pfrkc_counters + i);
924 		p->pfrke_counters.pfrkc_tzero = tzero;
925 	}
926 }
927 
928 static void
929 pfr_reset_feedback(struct pfr_addr *addr, int size)
930 {
931 	struct pfr_addr	*ad;
932 	int		i;
933 
934 	for (i = 0, ad = addr; i < size; i++, ad++)
935 		ad->pfra_fback = PFR_FB_NONE;
936 }
937 
938 static void
939 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
940 {
941 	int	i;
942 
943 	bzero(sa, sizeof(*sa));
944 	switch (af) {
945 	case AF_INET:
946 		sa->sin.sin_len = sizeof(sa->sin);
947 		sa->sin.sin_family = AF_INET;
948 		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
949 		break;
950 	case AF_INET6:
951 		sa->sin6.sin6_len = sizeof(sa->sin6);
952 		sa->sin6.sin6_family = AF_INET6;
953 		for (i = 0; i < 4; i++) {
954 			if (net <= 32) {
955 				sa->sin6.sin6_addr.s6_addr32[i] =
956 				    net ? htonl(-1 << (32-net)) : 0;
957 				break;
958 			}
959 			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
960 			net -= 32;
961 		}
962 		break;
963 	default:
964 		unhandled_af(af);
965 	}
966 }
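/*
 * For illustration: pfr_prepare_network(&sa, AF_INET, 24) yields the mask
 * 255.255.255.0, (AF_INET6, 48) yields ffff:ffff:ffff::, and a prefix
 * length of 0 yields an all-zero mask.
 */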
967 
968 static int
969 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
970 {
971 	union sockaddr_union	 mask;
972 	struct radix_node	*rn;
973 	struct radix_head	*head = NULL;
974 
975 	PF_RULES_WASSERT();
976 
977 	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
978 	switch (ke->pfrke_af) {
979 	case AF_INET:
980 		head = &kt->pfrkt_ip4->rh;
981 		break;
982 	case AF_INET6:
983 		head = &kt->pfrkt_ip6->rh;
984 		break;
985 	default:
986 		unhandled_af(ke->pfrke_af);
987 	}
988 
989 	if (KENTRY_NETWORK(ke)) {
990 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
991 		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
992 	} else
993 		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
994 
995 	return (rn == NULL ? -1 : 0);
996 }
997 
998 static int
999 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1000 {
1001 	union sockaddr_union	 mask;
1002 	struct radix_node	*rn;
1003 	struct radix_head	*head = NULL;
1004 
1005 	switch (ke->pfrke_af) {
1006 	case AF_INET:
1007 		head = &kt->pfrkt_ip4->rh;
1008 		break;
1009 	case AF_INET6:
1010 		head = &kt->pfrkt_ip6->rh;
1011 		break;
1012 	default:
1013 		unhandled_af(ke->pfrke_af);
1014 	}
1015 
1016 	if (KENTRY_NETWORK(ke)) {
1017 		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1018 		rn = rn_delete(&ke->pfrke_sa, &mask, head);
1019 	} else
1020 		rn = rn_delete(&ke->pfrke_sa, NULL, head);
1021 
1022 	if (rn == NULL) {
1023 		printf("pfr_unroute_kentry: delete failed.\n");
1024 		return (-1);
1025 	}
1026 	return (0);
1027 }
1028 
1029 static void
1030 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1031 {
1032 	bzero(ad, sizeof(*ad));
1033 	if (ke == NULL)
1034 		return;
1035 	ad->pfra_af = ke->pfrke_af;
1036 	ad->pfra_net = ke->pfrke_net;
1037 	ad->pfra_not = ke->pfrke_not;
1038 	switch (ad->pfra_af) {
1039 	case AF_INET:
1040 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1041 		break;
1042 	case AF_INET6:
1043 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1044 		break;
1045 	default:
1046 		unhandled_af(ad->pfra_af);
1047 	}
1048 }
1049 
1050 static void
1051 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1052     const struct pfr_walktree *w)
1053 {
1054 	int dir, op;
1055 	const struct pfr_kcounters *kc = &ke->pfrke_counters;
1056 
1057 	bzero(as, sizeof(*as));
1058 	pfr_copyout_addr(&as->pfras_a, ke);
1059 	as->pfras_tzero = kc->pfrkc_tzero;
1060 
1061 	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
1062 	    kc->pfrkc_counters == NULL) {
1063 		bzero(as->pfras_packets, sizeof(as->pfras_packets));
1064 		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1065 		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1066 		return;
1067 	}
1068 
1069 	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
1070 		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1071 			as->pfras_packets[dir][op] = counter_u64_fetch(
1072 			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
1073 			as->pfras_bytes[dir][op] = counter_u64_fetch(
1074 			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
1075 		}
1076 	}
1077 }
1078 
1079 static void
1080 pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
1081 {
1082 	switch (sa->sa.sa_family) {
1083 	case AF_INET:
1084 		memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
1085 		break;
1086 	case AF_INET6:
1087 		memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
1088 		break;
1089 	default:
1090 		unhandled_af(sa->sa.sa_family);
1091 	}
1092 }
1093 
1094 static int
1095 pfr_walktree(struct radix_node *rn, void *arg)
1096 {
1097 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
1098 	struct pfr_walktree	*w = arg;
1099 
1100 	switch (w->pfrw_op) {
1101 	case PFRW_MARK:
1102 		ke->pfrke_mark = 0;
1103 		break;
1104 	case PFRW_SWEEP:
1105 		if (ke->pfrke_mark)
1106 			break;
1107 		/* FALLTHROUGH */
1108 	case PFRW_ENQUEUE:
1109 		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1110 		w->pfrw_free++;
1111 		break;
1112 	case PFRW_GET_ADDRS:
1113 		if (w->pfrw_free-- > 0) {
1114 			pfr_copyout_addr(w->pfrw_addr, ke);
1115 			w->pfrw_addr++;
1116 		}
1117 		break;
1118 	case PFRW_GET_ASTATS:
1119 		if (w->pfrw_free-- > 0) {
1120 			struct pfr_astats as;
1121 
1122 			pfr_copyout_astats(&as, ke, w);
1123 
1124 			bcopy(&as, w->pfrw_astats, sizeof(as));
1125 			w->pfrw_astats++;
1126 		}
1127 		break;
1128 	case PFRW_POOL_GET:
1129 		if (ke->pfrke_not)
1130 			break; /* negative entries are ignored */
1131 		if (!w->pfrw_free--) {
1132 			w->pfrw_kentry = ke;
1133 			return (1); /* finish search */
1134 		}
1135 		break;
1136 	case PFRW_DYNADDR_UPDATE:
1137 	    {
1138 		union sockaddr_union	pfr_mask;
1139 
1140 		switch (ke->pfrke_af) {
1141 		case AF_INET:
1142 			if (w->pfrw_dyn->pfid_acnt4++ > 0)
1143 				break;
1144 			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1145 			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr4);
1146 			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask4);
1147 			break;
1148 		case AF_INET6:
1149 			if (w->pfrw_dyn->pfid_acnt6++ > 0)
1150 				break;
1151 			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1152 			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr6);
1153 			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask6);
1154 			break;
1155 		default:
1156 			unhandled_af(ke->pfrke_af);
1157 		}
1158 		break;
1159 	    }
1160 	case PFRW_COUNTERS:
1161 	    {
1162 		if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
1163 			if (ke->pfrke_counters.pfrkc_counters != NULL)
1164 				break;
1165 			ke->pfrke_counters.pfrkc_counters =
1166 			    uma_zalloc_pcpu(V_pfr_kentry_counter_z,
1167 			    M_NOWAIT | M_ZERO);
1168 		} else {
1169 			uma_zfree_pcpu(V_pfr_kentry_counter_z,
1170 			    ke->pfrke_counters.pfrkc_counters);
1171 			ke->pfrke_counters.pfrkc_counters = NULL;
1172 		}
1173 		break;
1174 	    }
1175 	}
1176 	return (0);
1177 }
1178 
1179 int
1180 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1181 {
1182 	struct pfr_ktableworkq	 workq;
1183 	struct pfr_ktable	*p;
1184 	int			 xdel = 0;
1185 
1186 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1187 	if (pfr_fix_anchor(filter->pfrt_anchor))
1188 		return (EINVAL);
1189 	if (pfr_table_count(filter, flags) < 0)
1190 		return (ENOENT);
1191 
1192 	SLIST_INIT(&workq);
1193 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1194 		if (pfr_skip_table(filter, p, flags))
1195 			continue;
1196 		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1197 			continue;
1198 		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1199 			continue;
1200 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1201 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1202 		xdel++;
1203 	}
1204 	if (!(flags & PFR_FLAG_DUMMY))
1205 		pfr_setflags_ktables(&workq);
1206 	if (ndel != NULL)
1207 		*ndel = xdel;
1208 	return (0);
1209 }
1210 
1211 int
1212 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1213 {
1214 	struct pfr_ktableworkq	 addq, changeq;
1215 	struct pfr_ktable	*p, *q, *r, key;
1216 	int			 i, rv, xadd = 0;
1217 	time_t			 tzero = time_second;
1218 
1219 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1220 	SLIST_INIT(&addq);
1221 	SLIST_INIT(&changeq);
1222 	for (i = 0; i < size; i++) {
1223 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1224 		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1225 		    flags & PFR_FLAG_USERIOCTL))
1226 			senderr(EINVAL);
1227 		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1228 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1229 		if (p == NULL) {
1230 			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1231 			if (p == NULL)
1232 				senderr(ENOMEM);
1233 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1234 				if (!pfr_ktable_compare(p, q)) {
1235 					pfr_destroy_ktable(p, 0);
1236 					goto _skip;
1237 				}
1238 			}
1239 			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1240 			xadd++;
1241 			if (!key.pfrkt_anchor[0])
1242 				goto _skip;
1243 
1244 			/* find or create root table */
1245 			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1246 			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1247 			if (r != NULL) {
1248 				p->pfrkt_root = r;
1249 				goto _skip;
1250 			}
1251 			SLIST_FOREACH(q, &addq, pfrkt_workq) {
1252 				if (!pfr_ktable_compare(&key, q)) {
1253 					p->pfrkt_root = q;
1254 					goto _skip;
1255 				}
1256 			}
1257 			key.pfrkt_flags = 0;
1258 			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1259 			if (r == NULL)
1260 				senderr(ENOMEM);
1261 			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1262 			p->pfrkt_root = r;
1263 		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1264 			SLIST_FOREACH(q, &changeq, pfrkt_workq)
1265 				if (!pfr_ktable_compare(&key, q))
1266 					goto _skip;
1267 			p->pfrkt_nflags = (p->pfrkt_flags &
1268 			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1269 			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1270 			xadd++;
1271 		}
1272 _skip:
1273 	;
1274 	}
1275 	if (!(flags & PFR_FLAG_DUMMY)) {
1276 		pfr_insert_ktables(&addq);
1277 		pfr_setflags_ktables(&changeq);
1278 	} else
1279 		 pfr_destroy_ktables(&addq, 0);
1280 	if (nadd != NULL)
1281 		*nadd = xadd;
1282 	return (0);
1283 _bad:
1284 	pfr_destroy_ktables(&addq, 0);
1285 	return (rv);
1286 }
1287 
1288 int
1289 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1290 {
1291 	struct pfr_ktableworkq	 workq;
1292 	struct pfr_ktable	*p, *q, key;
1293 	int			 i, xdel = 0;
1294 
1295 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1296 	SLIST_INIT(&workq);
1297 	for (i = 0; i < size; i++) {
1298 		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1299 		if (pfr_validate_table(&key.pfrkt_t, 0,
1300 		    flags & PFR_FLAG_USERIOCTL))
1301 			return (EINVAL);
1302 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1303 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1304 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1305 				if (!pfr_ktable_compare(p, q))
1306 					goto _skip;
1307 			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1308 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1309 			xdel++;
1310 		}
1311 _skip:
1312 	;
1313 	}
1314 
1315 	if (!(flags & PFR_FLAG_DUMMY))
1316 		pfr_setflags_ktables(&workq);
1317 	if (ndel != NULL)
1318 		*ndel = xdel;
1319 	return (0);
1320 }
1321 
1322 int
1323 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1324 	int flags)
1325 {
1326 	struct pfr_ktable	*p;
1327 	int			 n, nn;
1328 
1329 	PF_RULES_RASSERT();
1330 
1331 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1332 	if (pfr_fix_anchor(filter->pfrt_anchor))
1333 		return (EINVAL);
1334 	n = nn = pfr_table_count(filter, flags);
1335 	if (n < 0)
1336 		return (ENOENT);
1337 	if (n > *size) {
1338 		*size = n;
1339 		return (0);
1340 	}
1341 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1342 		if (pfr_skip_table(filter, p, flags))
1343 			continue;
1344 		if (n-- <= 0)
1345 			continue;
1346 		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1347 	}
1348 
1349 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1350 
1351 	*size = nn;
1352 	return (0);
1353 }
1354 
1355 int
1356 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1357 	int flags)
1358 {
1359 	struct pfr_ktable	*p;
1360 	struct pfr_ktableworkq	 workq;
1361 	int			 n, nn;
1362 	time_t			 tzero = time_second;
1363 	int			 pfr_dir, pfr_op;
1364 
1365 	/* XXX PFR_FLAG_CLSTATS disabled */
1366 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1367 	if (pfr_fix_anchor(filter->pfrt_anchor))
1368 		return (EINVAL);
1369 	n = nn = pfr_table_count(filter, flags);
1370 	if (n < 0)
1371 		return (ENOENT);
1372 	if (n > *size) {
1373 		*size = n;
1374 		return (0);
1375 	}
1376 	SLIST_INIT(&workq);
1377 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1378 		if (pfr_skip_table(filter, p, flags))
1379 			continue;
1380 		if (n-- <= 0)
1381 			continue;
1382 		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1383 		    sizeof(struct pfr_table));
1384 		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1385 			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1386 				tbl->pfrts_packets[pfr_dir][pfr_op] =
1387 				    pfr_kstate_counter_fetch(
1388 					&p->pfrkt_packets[pfr_dir][pfr_op]);
1389 				tbl->pfrts_bytes[pfr_dir][pfr_op] =
1390 				    pfr_kstate_counter_fetch(
1391 					&p->pfrkt_bytes[pfr_dir][pfr_op]);
1392 			}
1393 		}
1394 		tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
1395 		tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
1396 		tbl->pfrts_tzero = p->pfrkt_tzero;
1397 		tbl->pfrts_cnt = p->pfrkt_cnt;
1398 		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1399 			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1400 		tbl++;
1401 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1402 	}
1403 	if (flags & PFR_FLAG_CLSTATS)
1404 		pfr_clstats_ktables(&workq, tzero,
1405 		    flags & PFR_FLAG_ADDRSTOO);
1406 
1407 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1408 
1409 	*size = nn;
1410 	return (0);
1411 }
1412 
1413 int
1414 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1415 {
1416 	struct pfr_ktableworkq	 workq;
1417 	struct pfr_ktable	*p, key;
1418 	int			 i, xzero = 0;
1419 	time_t			 tzero = time_second;
1420 
1421 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1422 	SLIST_INIT(&workq);
1423 	for (i = 0; i < size; i++) {
1424 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1425 		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1426 			return (EINVAL);
1427 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1428 		if (p != NULL) {
1429 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1430 			xzero++;
1431 		}
1432 	}
1433 	if (!(flags & PFR_FLAG_DUMMY))
1434 		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1435 	if (nzero != NULL)
1436 		*nzero = xzero;
1437 	return (0);
1438 }
1439 
1440 int
1441 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1442 	int *nchange, int *ndel, int flags)
1443 {
1444 	struct pfr_ktableworkq	 workq;
1445 	struct pfr_ktable	*p, *q, key;
1446 	int			 i, xchange = 0, xdel = 0;
1447 
1448 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1449 	if ((setflag & ~PFR_TFLAG_USRMASK) ||
1450 	    (clrflag & ~PFR_TFLAG_USRMASK) ||
1451 	    (setflag & clrflag))
1452 		return (EINVAL);
1453 	SLIST_INIT(&workq);
1454 	for (i = 0; i < size; i++) {
1455 		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1456 		if (pfr_validate_table(&key.pfrkt_t, 0,
1457 		    flags & PFR_FLAG_USERIOCTL))
1458 			return (EINVAL);
1459 		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1460 		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1461 			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1462 			    ~clrflag;
1463 			if (p->pfrkt_nflags == p->pfrkt_flags)
1464 				goto _skip;
1465 			SLIST_FOREACH(q, &workq, pfrkt_workq)
1466 				if (!pfr_ktable_compare(p, q))
1467 					goto _skip;
1468 			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1469 			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1470 			    (clrflag & PFR_TFLAG_PERSIST) &&
1471 			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1472 				xdel++;
1473 			else
1474 				xchange++;
1475 		}
1476 _skip:
1477 	;
1478 	}
1479 	if (!(flags & PFR_FLAG_DUMMY))
1480 		pfr_setflags_ktables(&workq);
1481 	if (nchange != NULL)
1482 		*nchange = xchange;
1483 	if (ndel != NULL)
1484 		*ndel = xdel;
1485 	return (0);
1486 }
1487 
1488 int
1489 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1490 {
1491 	struct pfr_ktableworkq	 workq;
1492 	struct pfr_ktable	*p;
1493 	struct pf_kruleset	*rs;
1494 	int			 xdel = 0;
1495 
1496 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1497 	rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
1498 	if (rs == NULL)
1499 		return (ENOMEM);
1500 	SLIST_INIT(&workq);
1501 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1502 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1503 		    pfr_skip_table(trs, p, 0))
1504 			continue;
1505 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1506 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1507 		xdel++;
1508 	}
1509 	if (!(flags & PFR_FLAG_DUMMY)) {
1510 		pfr_setflags_ktables(&workq);
1511 		if (ticket != NULL)
1512 			*ticket = ++rs->tticket;
1513 		rs->topen = 1;
1514 	} else
1515 		pf_remove_if_empty_kruleset(rs);
1516 	if (ndel != NULL)
1517 		*ndel = xdel;
1518 	return (0);
1519 }
1520 
1521 int
1522 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1523     int *nadd, int *naddr, u_int32_t ticket, int flags)
1524 {
1525 	struct pfr_ktableworkq	 tableq;
1526 	struct pfr_kentryworkq	 addrq;
1527 	struct pfr_ktable	*kt, *rt, *shadow, key;
1528 	struct pfr_kentry	*p;
1529 	struct pfr_addr		*ad;
1530 	struct pf_kruleset	*rs;
1531 	int			 i, rv, xadd = 0, xaddr = 0;
1532 
1533 	PF_RULES_WASSERT();
1534 
1535 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1536 	if (size && !(flags & PFR_FLAG_ADDRSTOO))
1537 		return (EINVAL);
1538 	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1539 	    flags & PFR_FLAG_USERIOCTL))
1540 		return (EINVAL);
1541 	rs = pf_find_kruleset(tbl->pfrt_anchor);
1542 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1543 		return (EBUSY);
1544 	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1545 	SLIST_INIT(&tableq);
1546 	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1547 	if (kt == NULL) {
1548 		kt = pfr_create_ktable(tbl, 0, 1);
1549 		if (kt == NULL)
1550 			return (ENOMEM);
1551 		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1552 		xadd++;
1553 		if (!tbl->pfrt_anchor[0])
1554 			goto _skip;
1555 
1556 		/* find or create root table */
1557 		bzero(&key, sizeof(key));
1558 		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1559 		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1560 		if (rt != NULL) {
1561 			kt->pfrkt_root = rt;
1562 			goto _skip;
1563 		}
1564 		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1565 		if (rt == NULL) {
1566 			pfr_destroy_ktables(&tableq, 0);
1567 			return (ENOMEM);
1568 		}
1569 		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1570 		kt->pfrkt_root = rt;
1571 	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1572 		xadd++;
1573 _skip:
1574 	shadow = pfr_create_ktable(tbl, 0, 0);
1575 	if (shadow == NULL) {
1576 		pfr_destroy_ktables(&tableq, 0);
1577 		return (ENOMEM);
1578 	}
1579 	SLIST_INIT(&addrq);
1580 	for (i = 0, ad = addr; i < size; i++, ad++) {
1581 		if (pfr_validate_addr(ad))
1582 			senderr(EINVAL);
1583 		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1584 			continue;
1585 		p = pfr_create_kentry(ad,
1586 		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1587 		if (p == NULL)
1588 			senderr(ENOMEM);
1589 		if (pfr_route_kentry(shadow, p)) {
1590 			pfr_destroy_kentry(p);
1591 			continue;
1592 		}
1593 		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1594 		xaddr++;
1595 	}
1596 	if (!(flags & PFR_FLAG_DUMMY)) {
1597 		if (kt->pfrkt_shadow != NULL)
1598 			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1599 		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1600 		pfr_insert_ktables(&tableq);
1601 		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1602 		    xaddr : NO_ADDRESSES;
1603 		kt->pfrkt_shadow = shadow;
1604 	} else {
1605 		pfr_clean_node_mask(shadow, &addrq);
1606 		pfr_destroy_ktable(shadow, 0);
1607 		pfr_destroy_ktables(&tableq, 0);
1608 		pfr_destroy_kentries(&addrq);
1609 	}
1610 	if (nadd != NULL)
1611 		*nadd = xadd;
1612 	if (naddr != NULL)
1613 		*naddr = xaddr;
1614 	return (0);
1615 _bad:
1616 	pfr_destroy_ktable(shadow, 0);
1617 	pfr_destroy_ktables(&tableq, 0);
1618 	pfr_destroy_kentries(&addrq);
1619 	return (rv);
1620 }
1621 
1622 int
1623 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1624 {
1625 	struct pfr_ktableworkq	 workq;
1626 	struct pfr_ktable	*p;
1627 	struct pf_kruleset	*rs;
1628 	int			 xdel = 0;
1629 
1630 	PF_RULES_WASSERT();
1631 
1632 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1633 	rs = pf_find_kruleset(trs->pfrt_anchor);
1634 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1635 		return (0);
1636 	SLIST_INIT(&workq);
1637 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1638 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1639 		    pfr_skip_table(trs, p, 0))
1640 			continue;
1641 		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1642 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1643 		xdel++;
1644 	}
1645 	if (!(flags & PFR_FLAG_DUMMY)) {
1646 		pfr_setflags_ktables(&workq);
1647 		rs->topen = 0;
1648 		pf_remove_if_empty_kruleset(rs);
1649 	}
1650 	if (ndel != NULL)
1651 		*ndel = xdel;
1652 	return (0);
1653 }
1654 
1655 int
1656 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1657     int *nchange, int flags)
1658 {
1659 	struct pfr_ktable	*p, *q;
1660 	struct pfr_ktableworkq	 workq;
1661 	struct pf_kruleset	*rs;
1662 	int			 xadd = 0, xchange = 0;
1663 	time_t			 tzero = time_second;
1664 
1665 	PF_RULES_WASSERT();
1666 
1667 	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1668 	rs = pf_find_kruleset(trs->pfrt_anchor);
1669 	if (rs == NULL || !rs->topen || ticket != rs->tticket)
1670 		return (EBUSY);
1671 
1672 	SLIST_INIT(&workq);
1673 	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1674 		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1675 		    pfr_skip_table(trs, p, 0))
1676 			continue;
1677 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1678 		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1679 			xchange++;
1680 		else
1681 			xadd++;
1682 	}
1683 
1684 	if (!(flags & PFR_FLAG_DUMMY)) {
1685 		SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
1686 			pfr_commit_ktable(p, tzero);
1687 		}
1688 		rs->topen = 0;
1689 		pf_remove_if_empty_kruleset(rs);
1690 	}
1691 	if (nadd != NULL)
1692 		*nadd = xadd;
1693 	if (nchange != NULL)
1694 		*nchange = xchange;
1695 
1696 	return (0);
1697 }
1698 
1699 static void
1700 pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
1701 {
1702 	counter_u64_t		*pkc, *qkc;
1703 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1704 	int			 nflags;
1705 
1706 	PF_RULES_WASSERT();
1707 
1708 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1709 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1710 			pfr_clstats_ktable(kt, tzero, 1);
1711 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1712 		/* kt might contain addresses */
1713 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1714 		struct pfr_kentry	*p, *q;
1715 		struct pfr_addr		 ad;
1716 
1717 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1718 		pfr_mark_addrs(kt);
1719 		SLIST_INIT(&addq);
1720 		SLIST_INIT(&changeq);
1721 		SLIST_INIT(&delq);
1722 		SLIST_INIT(&garbageq);
1723 		pfr_clean_node_mask(shadow, &addrq);
1724 		while ((p = SLIST_FIRST(&addrq)) != NULL) {
1725 			SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
1726 			pfr_copyout_addr(&ad, p);
1727 			q = pfr_lookup_addr(kt, &ad, 1);
1728 			if (q != NULL) {
1729 				if (q->pfrke_not != p->pfrke_not)
1730 					SLIST_INSERT_HEAD(&changeq, q,
1731 					    pfrke_workq);
1732 				pkc = &p->pfrke_counters.pfrkc_counters;
1733 				qkc = &q->pfrke_counters.pfrkc_counters;
1734 				if ((*pkc == NULL) != (*qkc == NULL))
1735 					SWAP(counter_u64_t, *pkc, *qkc);
1736 				q->pfrke_mark = 1;
1737 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1738 			} else {
1739 				p->pfrke_counters.pfrkc_tzero = tzero;
1740 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1741 			}
1742 		}
1743 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1744 		pfr_insert_kentries(kt, &addq, tzero);
1745 		pfr_remove_kentries(kt, &delq);
1746 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1747 		pfr_destroy_kentries(&garbageq);
1748 	} else {
1749 		/* kt cannot contain addresses */
1750 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1751 		    shadow->pfrkt_ip4);
1752 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1753 		    shadow->pfrkt_ip6);
1754 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1755 		pfr_clstats_ktable(kt, tzero, 1);
1756 	}
1757 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1758 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1759 		& ~PFR_TFLAG_INACTIVE;
1760 	pfr_destroy_ktable(shadow, 0);
1761 	kt->pfrkt_shadow = NULL;
1762 	pfr_setflags_ktable(kt, nflags);
1763 }
1764 
1765 static int
1766 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1767 {
1768 	int i;
1769 
1770 	if (!tbl->pfrt_name[0])
1771 		return (-1);
1772 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1773 		 return (-1);
1774 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1775 		return (-1);
1776 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1777 		if (tbl->pfrt_name[i])
1778 			return (-1);
1779 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1780 		return (-1);
1781 	if (tbl->pfrt_flags & ~allowedflags)
1782 		return (-1);
1783 	return (0);
1784 }
1785 
1786 /*
1787  * Rewrite anchors referenced by tables to remove slashes
1788  * and check for validity.
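 * e.g. leading slashes are stripped in place: "//foo/bar" -> "foo/bar".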
1789  */
1790 static int
1791 pfr_fix_anchor(char *anchor)
1792 {
1793 	size_t siz = MAXPATHLEN;
1794 	int i;
1795 
1796 	if (anchor[0] == '/') {
1797 		char *path;
1798 		int off;
1799 
1800 		path = anchor;
1801 		off = 1;
1802 		while (*++path == '/')
1803 			off++;
1804 		bcopy(path, anchor, siz - off);
1805 		memset(anchor + siz - off, 0, off);
1806 	}
1807 	if (anchor[siz - 1])
1808 		return (-1);
1809 	for (i = strlen(anchor); i < siz; i++)
1810 		if (anchor[i])
1811 			return (-1);
1812 	return (0);
1813 }
1814 
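/*
 * Count tables, either across all rulesets (PFR_FLAG_ALLRSETS), within
 * the anchor named by the filter, or in the main ruleset; returns -1
 * when the requested anchor does not exist.
 */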
1815 int
1816 pfr_table_count(struct pfr_table *filter, int flags)
1817 {
1818 	struct pf_kruleset *rs;
1819 
1820 	PF_RULES_ASSERT();
1821 
1822 	if (flags & PFR_FLAG_ALLRSETS)
1823 		return (V_pfr_ktable_cnt);
1824 	if (filter->pfrt_anchor[0]) {
1825 		rs = pf_find_kruleset(filter->pfrt_anchor);
1826 		return ((rs != NULL) ? rs->tables : -1);
1827 	}
1828 	return (pf_main_ruleset.tables);
1829 }
1830 
1831 static int
1832 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1833 {
1834 	if (flags & PFR_FLAG_ALLRSETS)
1835 		return (0);
1836 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1837 		return (1);
1838 	return (0);
1839 }
1840 
1841 static void
1842 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1843 {
1844 	struct pfr_ktable	*p;
1845 
1846 	SLIST_FOREACH(p, workq, pfrkt_workq)
1847 		pfr_insert_ktable(p);
1848 }
1849 
1850 static void
1851 pfr_insert_ktable(struct pfr_ktable *kt)
1852 {
1853 
1854 	PF_RULES_WASSERT();
1855 
1856 	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1857 	V_pfr_ktable_cnt++;
1858 	if (kt->pfrkt_root != NULL)
1859 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1860 			pfr_setflags_ktable(kt->pfrkt_root,
1861 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1862 }
1863 
1864 static void
1865 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1866 {
1867 	struct pfr_ktable	*p, *q;
1868 
1869 	SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
1870 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1871 	}
1872 }
1873 
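/*
 * Apply a new flag set to a table.  Clearing the last PFR_TFLAG_SETMASK
 * bit removes and destroys the table; toggling PFR_TFLAG_COUNTERS walks
 * both radix tries so the entries allocate or free their counters.
 */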
1874 static void
1875 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1876 {
1877 	struct pfr_kentryworkq	addrq;
1878 	struct pfr_walktree	w;
1879 
1880 	PF_RULES_WASSERT();
1881 
1882 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1883 	    !(newf & PFR_TFLAG_REFDANCHOR) &&
1884 	    !(newf & PFR_TFLAG_PERSIST))
1885 		newf &= ~PFR_TFLAG_ACTIVE;
1886 	if (!(newf & PFR_TFLAG_ACTIVE))
1887 		newf &= ~PFR_TFLAG_USRMASK;
1888 	if (!(newf & PFR_TFLAG_SETMASK)) {
1889 		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1890 		if (kt->pfrkt_root != NULL)
1891 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1892 				pfr_setflags_ktable(kt->pfrkt_root,
1893 				    kt->pfrkt_root->pfrkt_flags &
1894 					~PFR_TFLAG_REFDANCHOR);
1895 		pfr_destroy_ktable(kt, 1);
1896 		V_pfr_ktable_cnt--;
1897 		return;
1898 	}
1899 	if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1900 		bzero(&w, sizeof(w));
1901 		w.pfrw_op = PFRW_COUNTERS;
1902 		w.pfrw_flags |= PFR_TFLAG_COUNTERS;
1903 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1904 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1905 	}
1906 	if (! (newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1907 		bzero(&w, sizeof(w));
1908 		w.pfrw_op = PFRW_COUNTERS;
1909 		w.pfrw_flags = 0;	/* counters disabled: walk frees per-entry counters */
1910 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1911 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1912 	}
1913 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1914 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1915 		pfr_remove_kentries(kt, &addrq);
1916 	}
1917 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1918 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1919 		kt->pfrkt_shadow = NULL;
1920 	}
1921 	kt->pfrkt_flags = newf;
1922 }
1923 
1924 static void
1925 pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
1926 {
1927 	struct pfr_ktable	*p;
1928 
1929 	SLIST_FOREACH(p, workq, pfrkt_workq)
1930 		pfr_clstats_ktable(p, tzero, recurse);
1931 }
1932 
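/*
 * Zero a table's statistics; with recurse set, per-entry statistics are
 * cleared as well.  The MPASS below documents the locking contract:
 * either the table-stats lock or the rules write lock must be held.
 */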
1933 static void
1934 pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
1935 {
1936 	struct pfr_kentryworkq	 addrq;
1937 	int			 pfr_dir, pfr_op;
1938 
1939 	MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());
1940 
1941 	if (recurse) {
1942 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1943 		pfr_clstats_kentries(kt, &addrq, tzero, 0);
1944 	}
1945 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1946 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1947 			pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
1948 			pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
1949 		}
1950 	}
1951 	pfr_kstate_counter_zero(&kt->pfrkt_match);
1952 	pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
1953 	kt->pfrkt_tzero = tzero;
1954 }
1955 
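/*
 * Allocate and initialize a table, its counters and both radix heads;
 * with attachruleset set, the table is also accounted to its ruleset.
 * Returns NULL on allocation failure.
 */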
1956 static struct pfr_ktable *
1957 pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
1958 {
1959 	struct pfr_ktable	*kt;
1960 	struct pf_kruleset	*rs;
1961 	int			 pfr_dir, pfr_op;
1962 
1963 	PF_RULES_WASSERT();
1964 
1965 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1966 	if (kt == NULL)
1967 		return (NULL);
1968 	kt->pfrkt_t = *tbl;
1969 
1970 	if (attachruleset) {
1971 		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
1972 		if (!rs) {
1973 			pfr_destroy_ktable(kt, 0);
1974 			return (NULL);
1975 		}
1976 		kt->pfrkt_rs = rs;
1977 		rs->tables++;
1978 	}
1979 
1980 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1981 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1982 			if (pfr_kstate_counter_init(
1983 			    &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1984 				pfr_destroy_ktable(kt, 0);
1985 				return (NULL);
1986 			}
1987 			if (pfr_kstate_counter_init(
1988 			    &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1989 				pfr_destroy_ktable(kt, 0);
1990 				return (NULL);
1991 			}
1992 		}
1993 	}
1994 	if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
1995 		pfr_destroy_ktable(kt, 0);
1996 		return (NULL);
1997 	}
1998 
1999 	if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
2000 		pfr_destroy_ktable(kt, 0);
2001 		return (NULL);
2002 	}
2003 
2004 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
2005 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
2006 	    !rn_inithead((void **)&kt->pfrkt_ip6,
2007 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
2008 		pfr_destroy_ktable(kt, 0);
2009 		return (NULL);
2010 	}
2011 	kt->pfrkt_tzero = tzero;
2012 
2013 	return (kt);
2014 }
2015 
2016 static void
2017 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2018 {
2019 	struct pfr_ktable	*p;
2020 
2021 	while ((p = SLIST_FIRST(workq)) != NULL) {
2022 		SLIST_REMOVE_HEAD(workq, pfrkt_workq);
2023 		pfr_destroy_ktable(p, flushaddr);
2024 	}
2025 }
2026 
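/*
 * Tear down a table: optionally flush its entries, detach the radix
 * heads, recurse into any shadow table and drop the ruleset reference
 * before freeing the counters and the table itself.
 */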
2027 static void
2028 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2029 {
2030 	struct pfr_kentryworkq	 addrq;
2031 	int			 pfr_dir, pfr_op;
2032 
2033 	if (flushaddr) {
2034 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2035 		pfr_clean_node_mask(kt, &addrq);
2036 		pfr_destroy_kentries(&addrq);
2037 	}
2038 	if (kt->pfrkt_ip4 != NULL)
2039 		rn_detachhead((void **)&kt->pfrkt_ip4);
2040 	if (kt->pfrkt_ip6 != NULL)
2041 		rn_detachhead((void **)&kt->pfrkt_ip6);
2042 	if (kt->pfrkt_shadow != NULL)
2043 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2044 	if (kt->pfrkt_rs != NULL) {
2045 		kt->pfrkt_rs->tables--;
2046 		pf_remove_if_empty_kruleset(kt->pfrkt_rs);
2047 	}
2048 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2049 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2050 			pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
2051 			pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
2052 		}
2053 	}
2054 	pfr_kstate_counter_deinit(&kt->pfrkt_match);
2055 	pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);
2056 
2057 	free(kt, M_PFTABLE);
2058 }
2059 
2060 static int
2061 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2062 {
2063 	int d;
2064 
2065 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2066 		return (d);
2067 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2068 }
2069 
2070 static struct pfr_ktable *
2071 pfr_lookup_table(struct pfr_table *tbl)
2072 {
2073 	/* struct pfr_ktable starts like a struct pfr_table */
2074 	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2075 	    (struct pfr_ktable *)tbl));
2076 }
2077 
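/*
 * Longest-prefix lookup of address a in table kt; with exact set, only
 * a host (non-network) entry is returned.
 */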
2078 struct pfr_kentry *
2079 pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2080     int exact)
2081 {
2082 	struct pfr_kentry	*ke = NULL;
2083 
2084 	PF_RULES_RASSERT();
2085 
2086 	kt = pfr_ktable_select_active(kt);
2087 	if (kt == NULL)
2088 		return (NULL);
2089 
2090 	switch (af) {
2091 #ifdef INET
2092 	case AF_INET:
2093 	    {
2094 		struct sockaddr_in sin;
2095 
2096 		bzero(&sin, sizeof(sin));
2097 		sin.sin_len = sizeof(sin);
2098 		sin.sin_family = AF_INET;
2099 		sin.sin_addr.s_addr = a->addr32[0];
2100 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2101 		if (ke && KENTRY_RNF_ROOT(ke))
2102 			ke = NULL;
2103 		break;
2104 	    }
2105 #endif /* INET */
2106 #ifdef INET6
2107 	case AF_INET6:
2108 	    {
2109 		struct sockaddr_in6 sin6;
2110 
2111 		bzero(&sin6, sizeof(sin6));
2112 		sin6.sin6_len = sizeof(sin6);
2113 		sin6.sin6_family = AF_INET6;
2114 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2115 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2116 		if (ke && KENTRY_RNF_ROOT(ke))
2117 			ke = NULL;
2118 		break;
2119 	    }
2120 #endif /* INET6 */
2121 	default:
2122 		unhandled_af(af);
2123 	}
2124 	if (exact && ke && KENTRY_NETWORK(ke))
2125 		ke = NULL;
2126 
2127 	return (ke);
2128 }
2129 
2130 int
2131 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2132 {
2133 	struct pfr_kentry	*ke = NULL;
2134 	int match;
2135 
2136 	ke = pfr_kentry_byaddr(kt, a, af, 0);
2137 
2138 	match = (ke && !ke->pfrke_not);
2139 	if (match)
2140 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2141 	else
2142 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2143 
2144 	return (match);
2145 }
2146 
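/*
 * Account a packet of len bytes against the table and, when the table
 * keeps per-entry counters, against the matching entry.  A match or
 * no-match that contradicts notrule is counted as PFR_OP_XPASS.
 */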
2147 void
2148 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2149     u_int64_t len, int dir_out, int op_pass, int notrule)
2150 {
2151 	struct pfr_kentry	*ke = NULL;
2152 
2153 	kt = pfr_ktable_select_active(kt);
2154 	if (kt == NULL)
2155 		return;
2156 
2157 	switch (af) {
2158 #ifdef INET
2159 	case AF_INET:
2160 	    {
2161 		struct sockaddr_in sin;
2162 
2163 		bzero(&sin, sizeof(sin));
2164 		sin.sin_len = sizeof(sin);
2165 		sin.sin_family = AF_INET;
2166 		sin.sin_addr.s_addr = a->addr32[0];
2167 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2168 		if (ke && KENTRY_RNF_ROOT(ke))
2169 			ke = NULL;
2170 		break;
2171 	    }
2172 #endif /* INET */
2173 #ifdef INET6
2174 	case AF_INET6:
2175 	    {
2176 		struct sockaddr_in6 sin6;
2177 
2178 		bzero(&sin6, sizeof(sin6));
2179 		sin6.sin6_len = sizeof(sin6);
2180 		sin6.sin6_family = AF_INET6;
2181 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2182 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2183 		if (ke && KENTRY_RNF_ROOT(ke))
2184 			ke = NULL;
2185 		break;
2186 	    }
2187 #endif /* INET6 */
2188 	default:
2189 		unhandled_af(af);
2190 	}
2191 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2192 		if (op_pass != PFR_OP_PASS)
2193 			DPFPRINTF(PF_DEBUG_URGENT,
2194 			    "pfr_update_stats: assertion failed.");
2195 		op_pass = PFR_OP_XPASS;
2196 	}
2197 	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
2198 	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
2199 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2200 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2201 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2202 		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
2203 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2204 		    dir_out, op_pass, PFR_TYPE_BYTES), len);
2205 	}
2206 }
2207 
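/*
 * Find or create the named table and take a rule reference on it; inside
 * an anchor, a root table in the main ruleset is created and linked as
 * well.  pfr_attach_table() below is the same logic for non-Ethernet
 * rulesets.
 */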
2208 struct pfr_ktable *
2209 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
2210 {
2211 	struct pfr_ktable	*kt, *rt;
2212 	struct pfr_table	 tbl;
2213 	struct pf_keth_anchor	*ac = rs->anchor;
2214 
2215 	PF_RULES_WASSERT();
2216 
2217 	bzero(&tbl, sizeof(tbl));
2218 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2219 	if (ac != NULL)
2220 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2221 	kt = pfr_lookup_table(&tbl);
2222 	if (kt == NULL) {
2223 		kt = pfr_create_ktable(&tbl, time_second, 1);
2224 		if (kt == NULL)
2225 			return (NULL);
2226 		if (ac != NULL) {
2227 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2228 			rt = pfr_lookup_table(&tbl);
2229 			if (rt == NULL) {
2230 				rt = pfr_create_ktable(&tbl, 0, 1);
2231 				if (rt == NULL) {
2232 					pfr_destroy_ktable(kt, 0);
2233 					return (NULL);
2234 				}
2235 				pfr_insert_ktable(rt);
2236 			}
2237 			kt->pfrkt_root = rt;
2238 		}
2239 		pfr_insert_ktable(kt);
2240 	}
2241 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2242 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2243 	return (kt);
2244 }
2245 
2246 struct pfr_ktable *
2247 pfr_attach_table(struct pf_kruleset *rs, char *name)
2248 {
2249 	struct pfr_ktable	*kt, *rt;
2250 	struct pfr_table	 tbl;
2251 	struct pf_kanchor	*ac = rs->anchor;
2252 
2253 	PF_RULES_WASSERT();
2254 
2255 	bzero(&tbl, sizeof(tbl));
2256 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2257 	if (ac != NULL)
2258 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2259 	kt = pfr_lookup_table(&tbl);
2260 	if (kt == NULL) {
2261 		kt = pfr_create_ktable(&tbl, time_second, 1);
2262 		if (kt == NULL)
2263 			return (NULL);
2264 		if (ac != NULL) {
2265 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2266 			rt = pfr_lookup_table(&tbl);
2267 			if (rt == NULL) {
2268 				rt = pfr_create_ktable(&tbl, 0, 1);
2269 				if (rt == NULL) {
2270 					pfr_destroy_ktable(kt, 0);
2271 					return (NULL);
2272 				}
2273 				pfr_insert_ktable(rt);
2274 			}
2275 			kt->pfrkt_root = rt;
2276 		}
2277 		pfr_insert_ktable(kt);
2278 	}
2279 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2280 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2281 	return (kt);
2282 }
2283 
2284 void
2285 pfr_detach_table(struct pfr_ktable *kt)
2286 {
2287 
2288 	PF_RULES_WASSERT();
2289 	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2290 	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2291 
2292 	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2293 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2294 }
2295 
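/*
 * Round-robin address selection from a table-backed pool: resume at
 * index *pidx (and at the address in counter when it still lies within
 * that block), then return the next usable address, stepping past
 * nested, more-specific blocks and any address rejected by filter.
 * Returns 0 with the result written to counter and *pidx, 1 when no
 * address qualifies, -1 when the table is not active.
 */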
2296 int
2297 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2298     sa_family_t af, pf_addr_filter_func_t filter, bool loop_once)
2299 {
2300 	struct pf_addr		*addr, cur, mask, umask_addr;
2301 	union sockaddr_union	 uaddr, umask;
2302 	struct pfr_kentry	*ke, *ke2 = NULL;
2303 	int			 startidx, idx = -1, loop = 0, use_counter = 0;
2304 
2305 	MPASS(pidx != NULL);
2306 	MPASS(counter != NULL);
2307 
2308 	switch (af) {
2309 	case AF_INET:
2310 		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2311 		uaddr.sin.sin_family = AF_INET;
2312 		addr = (struct pf_addr *)&uaddr.sin.sin_addr;
2313 		break;
2314 	case AF_INET6:
2315 		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2316 		uaddr.sin6.sin6_family = AF_INET6;
2317 		addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
2318 		break;
2319 	default:
2320 		unhandled_af(af);
2321 	}
2322 
2323 	kt = pfr_ktable_select_active(kt);
2324 	if (kt == NULL)
2325 		return (-1);
2326 
2327 	idx = *pidx;
2328 	if (idx < 0 || idx >= kt->pfrkt_cnt)
2329 		idx = 0;
2330 	else if (counter != NULL)
2331 		use_counter = 1;
2332 	startidx = idx;
2333 
2334 _next_block:
2335 	if (loop && startidx == idx) {
2336 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2337 		return (1);
2338 	}
2339 
2340 	ke = pfr_kentry_byidx(kt, idx, af);
2341 	if (ke == NULL) {
2342 		/* we don't have this idx, try looping */
2343 		if ((loop || loop_once) || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2344 			pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2345 			return (1);
2346 		}
2347 		idx = 0;
2348 		loop++;
2349 	}
2350 	pfr_prepare_network(&umask, af, ke->pfrke_net);
2351 	pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
2352 	pfr_sockaddr_to_pf_addr(&umask, &mask);
2353 
2354 	if (use_counter && !PF_AZERO(counter, af)) {
2355 		/* is supplied address within block? */
2356 		if (!pf_match_addr(0, &cur, &mask, counter, af)) {
2357 			/* no, go to next block in table */
2358 			idx++;
2359 			use_counter = 0;
2360 			goto _next_block;
2361 		}
2362 		pf_addrcpy(addr, counter, af);
2363 	} else {
2364 		/* use first address of block */
2365 		pf_addrcpy(addr, &cur, af);
2366 	}
2367 
2368 	if (!KENTRY_NETWORK(ke)) {
2369 		/* this is a single IP address - no possible nested block */
2370 		if (filter && filter(af, addr)) {
2371 			idx++;
2372 			goto _next_block;
2373 		}
2374 		pf_addrcpy(counter, addr, af);
2375 		*pidx = idx;
2376 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2377 		return (0);
2378 	}
2379 	for (;;) {
2380 		/* we don't want to use a nested block */
2381 		switch (af) {
2382 		case AF_INET:
2383 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2384 			    &kt->pfrkt_ip4->rh);
2385 			break;
2386 		case AF_INET6:
2387 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2388 			    &kt->pfrkt_ip6->rh);
2389 			break;
2390 		default:
2391 			unhandled_af(af);
2392 		}
2393 		/* no need to check KENTRY_RNF_ROOT() here */
2394 		if (ke2 == ke) {
2395 			/* lookup returned the same block - perfect */
2396 			if (filter && filter(af, addr))
2397 				goto _next_entry;
2398 			pf_addrcpy(counter, addr, af);
2399 			*pidx = idx;
2400 			pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2401 			return (0);
2402 		}
2403 
2404 _next_entry:
2405 		/* we need to increase the counter past the nested block */
2406 		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2407 		pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
2408 		pf_poolmask(addr, addr, &umask_addr, &pfr_ffaddr, af);
2409 		pf_addr_inc(addr, af);
2410 		if (!pf_match_addr(0, &cur, &mask, addr, af)) {
2411 			/* ok, we reached the end of our main block */
2412 			/* go to next block in table */
2413 			idx++;
2414 			use_counter = 0;
2415 			goto _next_block;
2416 		}
2417 	}
2418 }
2419 
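/*
 * Return the idx-th entry of the given address family, counted in radix
 * walk order; used by pfr_pool_get() above.
 */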
2420 static struct pfr_kentry *
2421 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2422 {
2423 	struct pfr_walktree	w;
2424 
2425 	bzero(&w, sizeof(w));
2426 	w.pfrw_op = PFRW_POOL_GET;
2427 	w.pfrw_free = idx;
2428 
2429 	switch (af) {
2430 #ifdef INET
2431 	case AF_INET:
2432 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2433 		return (w.pfrw_kentry);
2434 #endif /* INET */
2435 #ifdef INET6
2436 	case AF_INET6:
2437 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2438 		return (w.pfrw_kentry);
2439 #endif /* INET6 */
2440 	default:
2441 		return (NULL);
2442 	}
2443 }
2444 
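/*
 * Refresh a dynaddr's derived state from the table: walk the tries for
 * the address families of interest and recompute the per-family address
 * counts.
 */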
2445 void
2446 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2447 {
2448 	struct pfr_walktree	w;
2449 
2450 	bzero(&w, sizeof(w));
2451 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2452 	w.pfrw_dyn = dyn;
2453 
2454 	dyn->pfid_acnt4 = 0;
2455 	dyn->pfid_acnt6 = 0;
2456 	switch (dyn->pfid_af) {
2457 	case AF_UNSPEC: /* look up both address families, IPv4 + IPv6 */
2458 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2459 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2460 		break;
2461 	case AF_INET:
2462 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2463 		break;
2464 	case AF_INET6:
2465 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2466 		break;
2467 	default:
2468 		unhandled_af(dyn->pfid_af);
2469 	}
2470 }
2471 
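/*
 * Prefer the table itself if active; otherwise fall back to its root
 * table (for tables defined inside anchors).  NULL if neither is active.
 */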
2472 struct pfr_ktable *
2473 pfr_ktable_select_active(struct pfr_ktable *kt)
2474 {
2475 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2476 		kt = kt->pfrkt_root;
2477 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2478 		return (NULL);
2479 
2480 	return (kt);
2481 }
2482