/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE,
		PFRW_COUNTERS
	}	 pfrw_op;
	union {
		struct pfr_addr		*pfrw_addr;
		struct pfr_astats	*pfrw_astats;
		struct pfr_kentryworkq	*pfrw_workq;
		struct pfr_kentry	*pfrw_kentry;
		struct pfi_dynaddr	*pfrw_dyn;
	};
	int	 pfrw_free;
	int	 pfrw_flags;
};

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
#define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_astats(struct pfr_astats *,
			    const struct pfr_kentry *,
			    const struct pfr_walktree *);
static void		 pfr_copyout_addr(struct pfr_addr *,
			    const struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t, int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, time_t);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, time_t, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)

VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)

VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
#define V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)

void
pfr_initialize(void)
{

	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kentry_counter_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * the other is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
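	/* Compute a rough log2(N) of the table size for the cost comparison. */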
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	/*
	 * The flags below are for backward compatibility. It used to be
	 * possible to have a table without per-entry counters. Now they are
	 * always allocated; we just discard the data when reading if the
	 * table is not configured to keep counters.
	 */
	w.pfrw_flags = kt->pfrkt_flags;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(kt, &workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(kt, &workq, time_second, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

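/*
 * Validate a user-supplied address: the prefix length must fit the address
 * family, all address bits beyond the prefix (and all padding bytes) must
 * be zero, and the "not" and feedback fields must be clean.
 */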
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_free;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

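/*
 * Look up an address in the table's radix tree. Network entries are found
 * with an exact prefix lookup; host addresses use best-match, optionally
 * rejecting network entries when an exact host match is required.
 */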
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

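/*
 * Allocate and fill a new table entry. Per-entry counters are allocated
 * separately from a pcpu zone, and only when the table keeps statistics.
 */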
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, bool counters)
{
	struct pfr_kentry	*ke;
	counter_u64_t		 c;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_counters.pfrkc_tzero = 0;
	if (counters) {
		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
		if (c == NULL) {
			pfr_destroy_kentry(ke);
			return (NULL);
		}
		ke->pfrke_counters.pfrkc_counters = c;
	}
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	counter_u64_t c;

	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_counters.pfrkc_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_counters.pfrkc_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    time_t tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 i;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
			for (i = 0; i < PFR_NUM_COUNTERS; i++)
				counter_u64_zero(
				    p->pfrke_counters.pfrkc_counters + i);
		p->pfrke_counters.pfrkc_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

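/*
 * Build the sockaddr representation of a netmask of 'net' bits for the
 * given address family, as expected by the radix tree routines.
 */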
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	switch (af) {
	case AF_INET:
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
		break;
	case AF_INET6:
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
		break;
	default:
		unhandled_af(af);
	}
}

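/*
 * Insert an entry into the table's radix tree, using the prefix-derived
 * mask for network entries and a host route otherwise.
 */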
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	switch (ke->pfrke_af) {
	case AF_INET:
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
}

static void
pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
    const struct pfr_walktree *w)
{
	int dir, op;
	const struct pfr_kcounters *kc = &ke->pfrke_counters;

	bzero(as, sizeof(*as));
	pfr_copyout_addr(&as->pfras_a, ke);
	as->pfras_tzero = kc->pfrkc_tzero;

	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
	    kc->pfrkc_counters == NULL) {
		bzero(as->pfras_packets, sizeof(as->pfras_packets));
		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
		return;
	}

	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
			as->pfras_packets[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
			as->pfras_bytes[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
		}
	}
}

static void
pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
{
	switch (sa->sa.sa_family) {
	case AF_INET:
		memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
		break;
	case AF_INET6:
		memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
		break;
	default:
		unhandled_af(sa->sa.sa_family);
	}
}

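/*
 * Radix tree walker callback; dispatches on the requested operation
 * (mark/sweep, enqueue, copyout, pool lookup, dynaddr and counter updates).
 */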
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_free++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_astats(&as, ke, w);

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_free--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		switch (ke->pfrke_af) {
		case AF_INET:
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr4);
			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask4);
			break;
		case AF_INET6:
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr6);
			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask6);
			break;
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	    }
	case PFRW_COUNTERS:
	    {
		if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
			if (ke->pfrke_counters.pfrkc_counters != NULL)
				break;
			ke->pfrke_counters.pfrkc_counters =
			    uma_zalloc_pcpu(V_pfr_kentry_counter_z,
			    M_NOWAIT | M_ZERO);
		} else {
			uma_zfree_pcpu(V_pfr_kentry_counter_z,
			    ke->pfrke_counters.pfrkc_counters);
			ke->pfrke_counters.pfrkc_counters = NULL;
		}
		break;
	    }
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		 pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	time_t			 tzero = time_second;
	int			 pfr_dir, pfr_op;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
		    sizeof(struct pfr_table));
		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
				tbl->pfrts_packets[pfr_dir][pfr_op] =
				    pfr_kstate_counter_fetch(
					&p->pfrkt_packets[pfr_dir][pfr_op]);
				tbl->pfrts_bytes[pfr_dir][pfr_op] =
				    pfr_kstate_counter_fetch(
					&p->pfrkt_bytes[pfr_dir][pfr_op]);
			}
		}
		tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
		tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
		tbl->pfrts_tzero = p->pfrkt_tzero;
		tbl->pfrts_cnt = p->pfrkt_cnt;
		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
		tbl++;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_kruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_kruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

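/*
 * Load a table definition (and optionally its addresses) into the
 * inactive set: entries are staged in a shadow table that replaces the
 * active contents at commit time.
 */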
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_kruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_kruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad,
		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_kruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_kruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_kruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_kruleset	*rs;
	int			 xadd = 0, xchange = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_kruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_kruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

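/*
 * Commit a shadow table into place: if no addresses were loaded, only the
 * statistics are reset; if the table is active, the two entry sets are
 * merged in place; otherwise the radix trees are simply swapped.
 */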
1698 static void
pfr_commit_ktable(struct pfr_ktable * kt,time_t tzero)1699 pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
1700 {
1701 	counter_u64_t		*pkc, *qkc;
1702 	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1703 	int			 nflags;
1704 
1705 	PF_RULES_WASSERT();
1706 
1707 	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1708 		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1709 			pfr_clstats_ktable(kt, tzero, 1);
1710 	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1711 		/* kt might contain addresses */
1712 		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1713 		struct pfr_kentry	*p, *q, *next;
1714 		struct pfr_addr		 ad;
1715 
1716 		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1717 		pfr_mark_addrs(kt);
1718 		SLIST_INIT(&addq);
1719 		SLIST_INIT(&changeq);
1720 		SLIST_INIT(&delq);
1721 		SLIST_INIT(&garbageq);
1722 		pfr_clean_node_mask(shadow, &addrq);
1723 		SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) {
1724 			pfr_copyout_addr(&ad, p);
1725 			q = pfr_lookup_addr(kt, &ad, 1);
1726 			if (q != NULL) {
1727 				if (q->pfrke_not != p->pfrke_not)
1728 					SLIST_INSERT_HEAD(&changeq, q,
1729 					    pfrke_workq);
1730 				pkc = &p->pfrke_counters.pfrkc_counters;
1731 				qkc = &q->pfrke_counters.pfrkc_counters;
1732 				if ((*pkc == NULL) != (*qkc == NULL))
1733 					SWAP(counter_u64_t, *pkc, *qkc);
1734 				q->pfrke_mark = 1;
1735 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1736 			} else {
1737 				p->pfrke_counters.pfrkc_tzero = tzero;
1738 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1739 			}
1740 		}
1741 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1742 		pfr_insert_kentries(kt, &addq, tzero);
1743 		pfr_remove_kentries(kt, &delq);
1744 		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1745 		pfr_destroy_kentries(&garbageq);
1746 	} else {
1747 		/* kt cannot contain addresses */
1748 		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1749 		    shadow->pfrkt_ip4);
1750 		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1751 		    shadow->pfrkt_ip6);
1752 		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1753 		pfr_clstats_ktable(kt, tzero, 1);
1754 	}
1755 	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1756 	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1757 		& ~PFR_TFLAG_INACTIVE;
1758 	pfr_destroy_ktable(shadow, 0);
1759 	kt->pfrkt_shadow = NULL;
1760 	pfr_setflags_ktable(kt, nflags);
1761 }
1762 
1763 static int
pfr_validate_table(struct pfr_table * tbl,int allowedflags,int no_reserved)1764 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1765 {
1766 	int i;
1767 
1768 	if (!tbl->pfrt_name[0])
1769 		return (-1);
1770 	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1771 		 return (-1);
1772 	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1773 		return (-1);
1774 	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1775 		if (tbl->pfrt_name[i])
1776 			return (-1);
1777 	if (pfr_fix_anchor(tbl->pfrt_anchor))
1778 		return (-1);
1779 	if (tbl->pfrt_flags & ~allowedflags)
1780 		return (-1);
1781 	return (0);
1782 }
1783 
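/*
 * Illustrative sketch, not part of the original source: how a caller
 * might validate a user-supplied table before acting on it.  The table
 * name and the flag mask chosen here are assumptions for the example.
 */
#if 0
	struct pfr_table t;

	bzero(&t, sizeof(t));
	strlcpy(t.pfrt_name, "badhosts", sizeof(t.pfrt_name));
	/* allow only user-settable flags and reject the reserved anchor */
	if (pfr_validate_table(&t, PFR_TFLAG_USRMASK, 1))
		return (EINVAL);
#endif
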
1784 /*
1785  * Rewrite anchors referenced by tables to remove slashes
1786  * and check for validity.
1787  */
1788 static int
1789 pfr_fix_anchor(char *anchor)
1790 {
1791 	size_t siz = MAXPATHLEN;
1792 	int i;
1793 
1794 	if (anchor[0] == '/') {
1795 		char *path;
1796 		int off;
1797 
1798 		path = anchor;
1799 		off = 1;
1800 		while (*++path == '/')
1801 			off++;
1802 		bcopy(path, anchor, siz - off);
1803 		memset(anchor + siz - off, 0, off);
1804 	}
1805 	if (anchor[siz - 1])
1806 		return (-1);
1807 	for (i = strlen(anchor); i < siz; i++)
1808 		if (anchor[i])
1809 			return (-1);
1810 	return (0);
1811 }
1812 
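/*
 * Illustrative sketch, not part of the original source: pfr_fix_anchor()
 * above rewrites in place, so "/foo/bar" becomes "foo/bar", while an
 * anchor with non-zero bytes past its terminating NUL is rejected.
 */
#if 0
	char anchor[MAXPATHLEN] = "/foo/bar";

	if (pfr_fix_anchor(anchor) == 0)
		printf("canonical anchor: %s\n", anchor);	/* "foo/bar" */
#endif
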
1813 int
1814 pfr_table_count(struct pfr_table *filter, int flags)
1815 {
1816 	struct pf_kruleset *rs;
1817 
1818 	PF_RULES_ASSERT();
1819 
1820 	if (flags & PFR_FLAG_ALLRSETS)
1821 		return (V_pfr_ktable_cnt);
1822 	if (filter->pfrt_anchor[0]) {
1823 		rs = pf_find_kruleset(filter->pfrt_anchor);
1824 		return ((rs != NULL) ? rs->tables : -1);
1825 	}
1826 	return (pf_main_ruleset.tables);
1827 }
1828 
1829 static int
1830 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1831 {
1832 	if (flags & PFR_FLAG_ALLRSETS)
1833 		return (0);
1834 	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1835 		return (1);
1836 	return (0);
1837 }
1838 
1839 static void
1840 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1841 {
1842 	struct pfr_ktable	*p;
1843 
1844 	SLIST_FOREACH(p, workq, pfrkt_workq)
1845 		pfr_insert_ktable(p);
1846 }
1847 
1848 static void
1849 pfr_insert_ktable(struct pfr_ktable *kt)
1850 {
1851 
1852 	PF_RULES_WASSERT();
1853 
1854 	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1855 	V_pfr_ktable_cnt++;
1856 	if (kt->pfrkt_root != NULL)
1857 		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1858 			pfr_setflags_ktable(kt->pfrkt_root,
1859 			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1860 }
1861 
1862 static void
1863 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1864 {
1865 	struct pfr_ktable	*p, *q;
1866 
1867 	for (p = SLIST_FIRST(workq); p; p = q) {
1868 		q = SLIST_NEXT(p, pfrkt_workq);
1869 		pfr_setflags_ktable(p, p->pfrkt_nflags);
1870 	}
1871 }
1872 
1873 static void
1874 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1875 {
1876 	struct pfr_kentryworkq	addrq;
1877 	struct pfr_walktree	w;
1878 
1879 	PF_RULES_WASSERT();
1880 
1881 	if (!(newf & PFR_TFLAG_REFERENCED) &&
1882 	    !(newf & PFR_TFLAG_REFDANCHOR) &&
1883 	    !(newf & PFR_TFLAG_PERSIST))
1884 		newf &= ~PFR_TFLAG_ACTIVE;
1885 	if (!(newf & PFR_TFLAG_ACTIVE))
1886 		newf &= ~PFR_TFLAG_USRMASK;
1887 	if (!(newf & PFR_TFLAG_SETMASK)) {
1888 		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1889 		if (kt->pfrkt_root != NULL)
1890 			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1891 				pfr_setflags_ktable(kt->pfrkt_root,
1892 				    kt->pfrkt_root->pfrkt_flags &
1893 					~PFR_TFLAG_REFDANCHOR);
1894 		pfr_destroy_ktable(kt, 1);
1895 		V_pfr_ktable_cnt--;
1896 		return;
1897 	}
1898 	if ((newf & PFR_TFLAG_COUNTERS) && !(kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1899 		bzero(&w, sizeof(w));
1900 		w.pfrw_op = PFRW_COUNTERS;
1901 		w.pfrw_flags |= PFR_TFLAG_COUNTERS;	/* walk allocates counters */
1902 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1903 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1904 	}
1905 	if (!(newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1906 		bzero(&w, sizeof(w));
1907 		w.pfrw_op = PFRW_COUNTERS;
1908 		w.pfrw_flags |= 0;	/* COUNTERS flag clear: walk frees counters */
1909 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1910 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1911 	}
1912 	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1913 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1914 		pfr_remove_kentries(kt, &addrq);
1915 	}
1916 	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1917 		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1918 		kt->pfrkt_shadow = NULL;
1919 	}
1920 	kt->pfrkt_flags = newf;
1921 }
1922 
1923 static void
1924 pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
1925 {
1926 	struct pfr_ktable	*p;
1927 
1928 	SLIST_FOREACH(p, workq, pfrkt_workq)
1929 		pfr_clstats_ktable(p, tzero, recurse);
1930 }
1931 
1932 static void
1933 pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
1934 {
1935 	struct pfr_kentryworkq	 addrq;
1936 	int			 pfr_dir, pfr_op;
1937 
1938 	MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());
1939 
1940 	if (recurse) {
1941 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1942 		pfr_clstats_kentries(kt, &addrq, tzero, 0);
1943 	}
1944 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
1945 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
1946 			pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
1947 			pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
1948 		}
1949 	}
1950 	pfr_kstate_counter_zero(&kt->pfrkt_match);
1951 	pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
1952 	kt->pfrkt_tzero = tzero;
1953 }
1954 
1955 static struct pfr_ktable *
1956 pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
1957 {
1958 	struct pfr_ktable	*kt;
1959 	struct pf_kruleset	*rs;
1960 	int			 pfr_dir, pfr_op;
1961 
1962 	PF_RULES_WASSERT();
1963 
1964 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1965 	if (kt == NULL)
1966 		return (NULL);
1967 	kt->pfrkt_t = *tbl;
1968 
1969 	if (attachruleset) {
1970 		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
1971 		if (!rs) {
1972 			pfr_destroy_ktable(kt, 0);
1973 			return (NULL);
1974 		}
1975 		kt->pfrkt_rs = rs;
1976 		rs->tables++;
1977 	}
1978 
1979 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
1980 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
1981 			if (pfr_kstate_counter_init(
1982 			    &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1983 				pfr_destroy_ktable(kt, 0);
1984 				return (NULL);
1985 			}
1986 			if (pfr_kstate_counter_init(
1987 			    &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1988 				pfr_destroy_ktable(kt, 0);
1989 				return (NULL);
1990 			}
1991 		}
1992 	}
1993 	if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
1994 		pfr_destroy_ktable(kt, 0);
1995 		return (NULL);
1996 	}
1997 
1998 	if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
1999 		pfr_destroy_ktable(kt, 0);
2000 		return (NULL);
2001 	}
2002 
2003 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
2004 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
2005 	    !rn_inithead((void **)&kt->pfrkt_ip6,
2006 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
2007 		pfr_destroy_ktable(kt, 0);
2008 		return (NULL);
2009 	}
2010 	kt->pfrkt_tzero = tzero;
2011 
2012 	return (kt);
2013 }
2014 
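/*
 * Illustrative sketch, not part of the original source: the usual
 * create/insert pairing under the rules write lock.  "tbl" is a
 * hypothetical, already-validated struct pfr_table.  On internal
 * failure pfr_create_ktable() has cleaned up after itself, so the
 * caller only sees NULL.
 */
#if 0
	struct pfr_ktable *kt;

	kt = pfr_create_ktable(&tbl, time_second, 1);
	if (kt == NULL)
		return (ENOMEM);
	pfr_insert_ktable(kt);
#endif
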
2015 static void
2016 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2017 {
2018 	struct pfr_ktable	*p, *q;
2019 
2020 	for (p = SLIST_FIRST(workq); p; p = q) {
2021 		q = SLIST_NEXT(p, pfrkt_workq);
2022 		pfr_destroy_ktable(p, flushaddr);
2023 	}
2024 }
2025 
2026 static void
2027 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2028 {
2029 	struct pfr_kentryworkq	 addrq;
2030 	int			 pfr_dir, pfr_op;
2031 
2032 	if (flushaddr) {
2033 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2034 		pfr_clean_node_mask(kt, &addrq);
2035 		pfr_destroy_kentries(&addrq);
2036 	}
2037 	if (kt->pfrkt_ip4 != NULL)
2038 		rn_detachhead((void **)&kt->pfrkt_ip4);
2039 	if (kt->pfrkt_ip6 != NULL)
2040 		rn_detachhead((void **)&kt->pfrkt_ip6);
2041 	if (kt->pfrkt_shadow != NULL)
2042 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2043 	if (kt->pfrkt_rs != NULL) {
2044 		kt->pfrkt_rs->tables--;
2045 		pf_remove_if_empty_kruleset(kt->pfrkt_rs);
2046 	}
2047 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
2048 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
2049 			pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
2050 			pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
2051 		}
2052 	}
2053 	pfr_kstate_counter_deinit(&kt->pfrkt_match);
2054 	pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);
2055 
2056 	free(kt, M_PFTABLE);
2057 }
2058 
2059 static int
2060 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2061 {
2062 	int d;
2063 
2064 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2065 		return (d);
2066 	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2067 }
2068 
2069 static struct pfr_ktable *
2070 pfr_lookup_table(struct pfr_table *tbl)
2071 {
2072 	/* struct pfr_ktable starts like a struct pfr_table */
2073 	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2074 	    (struct pfr_ktable *)tbl));
2075 }
2076 
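/*
 * Illustrative sketch, not part of the original source: because the
 * tree is keyed on the (name, anchor) pair compared above, a caller
 * only needs those fields filled in.  The table name is an assumption
 * for the example.
 */
#if 0
	struct pfr_table key;
	struct pfr_ktable *found;

	bzero(&key, sizeof(key));
	strlcpy(key.pfrt_name, "badhosts", sizeof(key.pfrt_name));
	found = pfr_lookup_table(&key);	/* NULL if no such table */
#endif
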
2077 int
2078 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2079 {
2080 	struct pfr_kentry	*ke = NULL;
2081 	int			 match;
2082 
2083 	PF_RULES_RASSERT();
2084 
2085 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2086 		kt = kt->pfrkt_root;
2087 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2088 		return (0);
2089 
2090 	switch (af) {
2091 #ifdef INET
2092 	case AF_INET:
2093 	    {
2094 		struct sockaddr_in sin;
2095 
2096 		bzero(&sin, sizeof(sin));
2097 		sin.sin_len = sizeof(sin);
2098 		sin.sin_family = AF_INET;
2099 		sin.sin_addr.s_addr = a->addr32[0];
2100 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2101 		if (ke && KENTRY_RNF_ROOT(ke))
2102 			ke = NULL;
2103 		break;
2104 	    }
2105 #endif /* INET */
2106 #ifdef INET6
2107 	case AF_INET6:
2108 	    {
2109 		struct sockaddr_in6 sin6;
2110 
2111 		bzero(&sin6, sizeof(sin6));
2112 		sin6.sin6_len = sizeof(sin6);
2113 		sin6.sin6_family = AF_INET6;
2114 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2115 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2116 		if (ke && KENTRY_RNF_ROOT(ke))
2117 			ke = NULL;
2118 		break;
2119 	    }
2120 #endif /* INET6 */
2121 	default:
2122 		unhandled_af(af);
2123 	}
2124 	match = (ke && !ke->pfrke_not);
2125 	if (match)
2126 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2127 	else
2128 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2129 	return (match);
2130 }
2131 
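/*
 * Illustrative sketch, not part of the original source: testing a host
 * address against a table, much as the pf match code does.  "kt" is
 * assumed to come from pfr_attach_table() or a table lookup.
 */
#if 0
	struct pf_addr a;

	a.addr32[0] = htonl(INADDR_LOOPBACK);	/* 127.0.0.1 */
	if (pfr_match_addr(kt, &a, AF_INET)) {
		/* the address is in the table and its entry is not negated */
	}
#endif
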
2132 void
2133 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2134     u_int64_t len, int dir_out, int op_pass, int notrule)
2135 {
2136 	struct pfr_kentry	*ke = NULL;
2137 
2138 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2139 		kt = kt->pfrkt_root;
2140 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2141 		return;
2142 
2143 	switch (af) {
2144 #ifdef INET
2145 	case AF_INET:
2146 	    {
2147 		struct sockaddr_in sin;
2148 
2149 		bzero(&sin, sizeof(sin));
2150 		sin.sin_len = sizeof(sin);
2151 		sin.sin_family = AF_INET;
2152 		sin.sin_addr.s_addr = a->addr32[0];
2153 		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2154 		if (ke && KENTRY_RNF_ROOT(ke))
2155 			ke = NULL;
2156 		break;
2157 	    }
2158 #endif /* INET */
2159 #ifdef INET6
2160 	case AF_INET6:
2161 	    {
2162 		struct sockaddr_in6 sin6;
2163 
2164 		bzero(&sin6, sizeof(sin6));
2165 		sin6.sin6_len = sizeof(sin6);
2166 		sin6.sin6_family = AF_INET6;
2167 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2168 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2169 		if (ke && KENTRY_RNF_ROOT(ke))
2170 			ke = NULL;
2171 		break;
2172 	    }
2173 #endif /* INET6 */
2174 	default:
2175 		unhandled_af(af);
2176 	}
2177 	if ((ke == NULL || ke->pfrke_not) != notrule) {
2178 		if (op_pass != PFR_OP_PASS)
2179 			DPFPRINTF(PF_DEBUG_URGENT,
2180 			    ("pfr_update_stats: assertion failed.\n"));
2181 		op_pass = PFR_OP_XPASS;
2182 	}
2183 	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
2184 	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
2185 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
2186 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2187 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2188 		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
2189 		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2190 		    dir_out, op_pass, PFR_TYPE_BYTES), len);
2191 	}
2192 }
2193 
2194 struct pfr_ktable *
2195 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
2196 {
2197 	struct pfr_ktable	*kt, *rt;
2198 	struct pfr_table	 tbl;
2199 	struct pf_keth_anchor	*ac = rs->anchor;
2200 
2201 	PF_RULES_WASSERT();
2202 
2203 	bzero(&tbl, sizeof(tbl));
2204 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2205 	if (ac != NULL)
2206 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2207 	kt = pfr_lookup_table(&tbl);
2208 	if (kt == NULL) {
2209 		kt = pfr_create_ktable(&tbl, time_second, 1);
2210 		if (kt == NULL)
2211 			return (NULL);
2212 		if (ac != NULL) {
2213 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2214 			rt = pfr_lookup_table(&tbl);
2215 			if (rt == NULL) {
2216 				rt = pfr_create_ktable(&tbl, 0, 1);
2217 				if (rt == NULL) {
2218 					pfr_destroy_ktable(kt, 0);
2219 					return (NULL);
2220 				}
2221 				pfr_insert_ktable(rt);
2222 			}
2223 			kt->pfrkt_root = rt;
2224 		}
2225 		pfr_insert_ktable(kt);
2226 	}
2227 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2228 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2229 	return (kt);
2230 }
2231 
2232 struct pfr_ktable *
2233 pfr_attach_table(struct pf_kruleset *rs, char *name)
2234 {
2235 	struct pfr_ktable	*kt, *rt;
2236 	struct pfr_table	 tbl;
2237 	struct pf_kanchor	*ac = rs->anchor;
2238 
2239 	PF_RULES_WASSERT();
2240 
2241 	bzero(&tbl, sizeof(tbl));
2242 	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2243 	if (ac != NULL)
2244 		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2245 	kt = pfr_lookup_table(&tbl);
2246 	if (kt == NULL) {
2247 		kt = pfr_create_ktable(&tbl, time_second, 1);
2248 		if (kt == NULL)
2249 			return (NULL);
2250 		if (ac != NULL) {
2251 			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2252 			rt = pfr_lookup_table(&tbl);
2253 			if (rt == NULL) {
2254 				rt = pfr_create_ktable(&tbl, 0, 1);
2255 				if (rt == NULL) {
2256 					pfr_destroy_ktable(kt, 0);
2257 					return (NULL);
2258 				}
2259 				pfr_insert_ktable(rt);
2260 			}
2261 			kt->pfrkt_root = rt;
2262 		}
2263 		pfr_insert_ktable(kt);
2264 	}
2265 	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2266 		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2267 	return (kt);
2268 }
2269 
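/*
 * Illustrative sketch, not part of the original source: rule load and
 * unload pair up around these helpers.  pfr_attach_table() takes a
 * rule reference, creating the table on first use, and
 * pfr_detach_table() below drops it; PFR_TFLAG_REFERENCED goes away
 * with the last rule.  "ruleset" is a hypothetical struct pf_kruleset
 * pointer.
 */
#if 0
	struct pfr_ktable *kt;

	kt = pfr_attach_table(ruleset, "badhosts");	/* on rule load */
	if (kt == NULL)
		return (ENOMEM);
	/* ... the rule is active and may consult kt ... */
	pfr_detach_table(kt);				/* on rule unload */
#endif
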
2270 void
2271 pfr_detach_table(struct pfr_ktable *kt)
2272 {
2273 
2274 	PF_RULES_WASSERT();
2275 	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2276 	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2277 
2278 	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2279 		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2280 }
2281 
2282 int
2283 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2284     sa_family_t af, pf_addr_filter_func_t filter)
2285 {
2286 	struct pf_addr		*addr, cur, mask, umask_addr;
2287 	union sockaddr_union	 uaddr, umask;
2288 	struct pfr_kentry	*ke, *ke2 = NULL;
2289 	int			 startidx, idx = -1, loop = 0, use_counter = 0;
2290 
2291 	MPASS(pidx != NULL);
2292 	MPASS(counter != NULL);
2293 
2294 	switch (af) {
2295 	case AF_INET:
2296 		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2297 		uaddr.sin.sin_family = AF_INET;
2298 		addr = (struct pf_addr *)&uaddr.sin.sin_addr;
2299 		break;
2300 	case AF_INET6:
2301 		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2302 		uaddr.sin6.sin6_family = AF_INET6;
2303 		addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
2304 		break;
2305 	default:
2306 		unhandled_af(af);
2307 	}
2308 
2309 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2310 		kt = kt->pfrkt_root;
2311 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2312 		return (-1);
2313 
2314 	idx = *pidx;
2315 	if (idx < 0 || idx >= kt->pfrkt_cnt)
2316 		idx = 0;
2317 	else if (counter != NULL)
2318 		use_counter = 1;
2319 	startidx = idx;
2320 
2321 _next_block:
2322 	if (loop && startidx == idx) {
2323 		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2324 		return (1);
2325 	}
2326 
2327 	ke = pfr_kentry_byidx(kt, idx, af);
2328 	if (ke == NULL) {
2329 		/* we don't have this idx, try looping */
2330 		if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2331 			pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2332 			return (1);
2333 		}
2334 		idx = 0;
2335 		loop++;
2336 	}
2337 	pfr_prepare_network(&umask, af, ke->pfrke_net);
2338 	pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
2339 	pfr_sockaddr_to_pf_addr(&umask, &mask);
2340 
2341 	if (use_counter && !PF_AZERO(counter, af)) {
2342 		/* is supplied address within block? */
2343 		if (!PF_MATCHA(0, &cur, &mask, counter, af)) {
2344 			/* no, go to next block in table */
2345 			idx++;
2346 			use_counter = 0;
2347 			goto _next_block;
2348 		}
2349 		PF_ACPY(addr, counter, af);
2350 	} else {
2351 		/* use first address of block */
2352 		PF_ACPY(addr, &cur, af);
2353 	}
2354 
2355 	if (!KENTRY_NETWORK(ke)) {
2356 		/* this is a single IP address - no possible nested block */
2357 		if (filter && filter(af, addr)) {
2358 			idx++;
2359 			goto _next_block;
2360 		}
2361 		PF_ACPY(counter, addr, af);
2362 		*pidx = idx;
2363 		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2364 		return (0);
2365 	}
2366 	for (;;) {
2367 		/* we don't want to use a nested block */
2368 		switch (af) {
2369 		case AF_INET:
2370 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2371 			    &kt->pfrkt_ip4->rh);
2372 			break;
2373 		case AF_INET6:
2374 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2375 			    &kt->pfrkt_ip6->rh);
2376 			break;
2377 		default:
2378 			unhandled_af(af);
2379 		}
2380 		/* no need to check KENTRY_RNF_ROOT() here */
2381 		if (ke2 == ke) {
2382 			/* the lookup returned the same block - perfect */
2383 			if (filter && filter(af, addr))
2384 				goto _next_entry;
2385 			PF_ACPY(counter, addr, af);
2386 			*pidx = idx;
2387 			pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2388 			return (0);
2389 		}
2390 
2391 _next_entry:
2392 		/* we need to increase the counter past the nested block */
2393 		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2394 		pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
2395 		PF_POOLMASK(addr, addr, &umask_addr, &pfr_ffaddr, af);
2396 		PF_AINC(addr, af);
2397 		if (!PF_MATCHA(0, &cur, &mask, addr, af)) {
2398 			/* ok, we reached the end of our main block */
2399 			/* go to next block in table */
2400 			idx++;
2401 			use_counter = 0;
2402 			goto _next_block;
2403 		}
2404 	}
2405 }
2406 
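/*
 * Illustrative sketch, not part of the original source: round-robin
 * selection in the style of pf's address pools.  The index and counter
 * persist across calls, and the caller advances the counter with
 * PF_AINC() so the next call continues within the current block.
 */
#if 0
	struct pf_addr cur;
	int idx = 0;

	bzero(&cur, sizeof(cur));
	if (pfr_pool_get(kt, &idx, &cur, AF_INET, NULL) == 0) {
		/* "cur" now holds the selected address */
		PF_AINC(&cur, AF_INET);	/* step to the next candidate */
	}
#endif
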
2407 static struct pfr_kentry *
2408 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2409 {
2410 	struct pfr_walktree	w;
2411 
2412 	bzero(&w, sizeof(w));
2413 	w.pfrw_op = PFRW_POOL_GET;
2414 	w.pfrw_free = idx;
2415 
2416 	switch (af) {
2417 #ifdef INET
2418 	case AF_INET:
2419 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2420 		return (w.pfrw_kentry);
2421 #endif /* INET */
2422 #ifdef INET6
2423 	case AF_INET6:
2424 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2425 		return (w.pfrw_kentry);
2426 #endif /* INET6 */
2427 	default:
2428 		return (NULL);
2429 	}
2430 }
2431 
2432 void
2433 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2434 {
2435 	struct pfr_walktree	w;
2436 
2437 	bzero(&w, sizeof(w));
2438 	w.pfrw_op = PFRW_DYNADDR_UPDATE;
2439 	w.pfrw_dyn = dyn;
2440 
2441 	dyn->pfid_acnt4 = 0;
2442 	dyn->pfid_acnt6 = 0;
2443 	switch (dyn->pfid_af) {
2444 	case AF_UNSPEC: /* look up both IPv4 and IPv6 addresses */
2445 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2446 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2447 		break;
2448 	case AF_INET:
2449 		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2450 		break;
2451 	case AF_INET6:
2452 		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2453 		break;
2454 	default:
2455 		unhandled_af(dyn->pfid_af);
2456 	}
2457 }
2458