1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
32 */
33
34 #include <sys/cdefs.h>
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/mutex.h>
44 #include <sys/refcount.h>
45 #include <sys/socket.h>
46 #include <vm/uma.h>
47
48 #include <net/if.h>
49 #include <net/vnet.h>
50 #include <net/pfvar.h>
51
52 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
53
54 #define ACCEPT_FLAGS(flags, oklist) \
55 do { \
56 if ((flags & ~(oklist)) & \
57 PFR_FLAG_ALLMASK) \
58 return (EINVAL); \
59 } while (0)
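/*
 * Usage note: callers hand ACCEPT_FLAGS() the ioctl flags plus the set of
 * PFR_FLAG_* bits valid for that operation; any other bit within
 * PFR_FLAG_ALLMASK makes the calling function return EINVAL, e.g.:
 *
 *	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
 */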
60
61 #define FILLIN_SIN(sin, addr) \
62 do { \
63 (sin).sin_len = sizeof(sin); \
64 (sin).sin_family = AF_INET; \
65 (sin).sin_addr = (addr); \
66 } while (0)
67
68 #define FILLIN_SIN6(sin6, addr) \
69 do { \
70 (sin6).sin6_len = sizeof(sin6); \
71 (sin6).sin6_family = AF_INET6; \
72 (sin6).sin6_addr = (addr); \
73 } while (0)
74
75 #define SWAP(type, a1, a2) \
76 do { \
77 type tmp = a1; \
78 a1 = a2; \
79 a2 = tmp; \
80 } while (0)
81
82 #define AF_BITS(af) (((af)==AF_INET)?32:128)
83 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
84 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
85 #define KENTRY_RNF_ROOT(ke) \
86 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
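/*
 * Illustrative example: an AF_INET entry with pfra_net == 24 is treated as
 * a network by ADDR_NETWORK()/KENTRY_NETWORK(), while pfra_net == 32 (the
 * full AF_BITS(AF_INET)) denotes a single host.
 */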
87
88 #define NO_ADDRESSES (-1)
89 #define ENQUEUE_UNMARKED_ONLY (1)
90 #define INVERT_NEG_FLAG (1)
91
92 struct pfr_walktree {
93 enum pfrw_op {
94 PFRW_MARK,
95 PFRW_SWEEP,
96 PFRW_ENQUEUE,
97 PFRW_GET_ADDRS,
98 PFRW_GET_ASTATS,
99 PFRW_POOL_GET,
100 PFRW_DYNADDR_UPDATE,
101 PFRW_COUNTERS
102 } pfrw_op;
103 union {
104 struct pfr_addr *pfrw_addr;
105 struct pfr_astats *pfrw_astats;
106 struct pfr_kentryworkq *pfrw_workq;
107 struct pfr_kentry *pfrw_kentry;
108 struct pfi_dynaddr *pfrw_dyn;
109 };
110 int pfrw_free;
111 int pfrw_flags;
112 };
113
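/* Record the error code and jump to the function-local _bad cleanup label. */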
114 #define senderr(e) do { rv = (e); goto _bad; } while (0)
115
116 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
117 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
118 #define V_pfr_kentry_z VNET(pfr_kentry_z)
119 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
120 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z)
121
122 static struct pf_addr pfr_ffaddr = {
123 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
124 };
125
126 static void pfr_copyout_astats(struct pfr_astats *,
127 const struct pfr_kentry *,
128 const struct pfr_walktree *);
129 static void pfr_copyout_addr(struct pfr_addr *,
130 const struct pfr_kentry *ke);
131 static int pfr_validate_addr(struct pfr_addr *);
132 static void pfr_enqueue_addrs(struct pfr_ktable *,
133 struct pfr_kentryworkq *, int *, int);
134 static void pfr_mark_addrs(struct pfr_ktable *);
135 static struct pfr_kentry
136 *pfr_lookup_addr(struct pfr_ktable *,
137 struct pfr_addr *, int);
138 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
139 static void pfr_destroy_kentries(struct pfr_kentryworkq *);
140 static void pfr_destroy_kentry(struct pfr_kentry *);
141 static void pfr_insert_kentries(struct pfr_ktable *,
142 struct pfr_kentryworkq *, time_t);
143 static void pfr_remove_kentries(struct pfr_ktable *,
144 struct pfr_kentryworkq *);
145 static void pfr_clstats_kentries(struct pfr_ktable *,
146 struct pfr_kentryworkq *, time_t, int);
147 static void pfr_reset_feedback(struct pfr_addr *, int);
148 static void pfr_prepare_network(union sockaddr_union *, int, int);
149 static int pfr_route_kentry(struct pfr_ktable *,
150 struct pfr_kentry *);
151 static int pfr_unroute_kentry(struct pfr_ktable *,
152 struct pfr_kentry *);
153 static int pfr_walktree(struct radix_node *, void *);
154 static int pfr_validate_table(struct pfr_table *, int, int);
155 static int pfr_fix_anchor(char *);
156 static void pfr_commit_ktable(struct pfr_ktable *, time_t);
157 static void pfr_insert_ktables(struct pfr_ktableworkq *);
158 static void pfr_insert_ktable(struct pfr_ktable *);
159 static void pfr_setflags_ktables(struct pfr_ktableworkq *);
160 static void pfr_setflags_ktable(struct pfr_ktable *, int);
161 static void pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
162 int);
163 static void pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
164 static struct pfr_ktable
165 *pfr_create_ktable(struct pfr_table *, time_t, int);
166 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
167 static void pfr_destroy_ktable(struct pfr_ktable *, int);
168 static int pfr_ktable_compare(struct pfr_ktable *,
169 struct pfr_ktable *);
170 static struct pfr_ktable
171 *pfr_lookup_table(struct pfr_table *);
172 static void pfr_clean_node_mask(struct pfr_ktable *,
173 struct pfr_kentryworkq *);
174 static int pfr_skip_table(struct pfr_table *,
175 struct pfr_ktable *, int);
176 static struct pfr_kentry
177 *pfr_kentry_byidx(struct pfr_ktable *, int, int);
178
179 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
180 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
181
182 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
183 #define V_pfr_ktables VNET(pfr_ktables)
184
185 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
186 #define V_pfr_nulltable VNET(pfr_nulltable)
187
188 VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
189 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt)
190
191 void
192 pfr_initialize(void)
193 {
194
195 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
196 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
197 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
198 V_pfr_kentry_z = uma_zcreate("pf table entries",
199 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
200 0);
201 uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
202 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
203 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
204 }
205
206 void
207 pfr_cleanup(void)
208 {
209
210 uma_zdestroy(V_pfr_kentry_z);
211 uma_zdestroy(V_pfr_kentry_counter_z);
212 }
213
214 int
215 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
216 {
217 struct pfr_ktable *kt;
218 struct pfr_kentryworkq workq;
219
220 PF_RULES_WASSERT();
221
222 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
223 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
224 return (EINVAL);
225 kt = pfr_lookup_table(tbl);
226 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
227 return (ESRCH);
228 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
229 return (EPERM);
230 pfr_enqueue_addrs(kt, &workq, ndel, 0);
231
232 if (!(flags & PFR_FLAG_DUMMY)) {
233 pfr_remove_kentries(kt, &workq);
234 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
235 }
236 return (0);
237 }
238
239 int
240 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
241 int *nadd, int flags)
242 {
243 struct pfr_ktable *kt, *tmpkt;
244 struct pfr_kentryworkq workq;
245 struct pfr_kentry *p, *q;
246 struct pfr_addr *ad;
247 int i, rv, xadd = 0;
248 time_t tzero = time_second;
249
250 PF_RULES_WASSERT();
251
252 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
253 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
254 return (EINVAL);
255 kt = pfr_lookup_table(tbl);
256 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
257 return (ESRCH);
258 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
259 return (EPERM);
260 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
261 if (tmpkt == NULL)
262 return (ENOMEM);
263 SLIST_INIT(&workq);
264 for (i = 0, ad = addr; i < size; i++, ad++) {
265 if (pfr_validate_addr(ad))
266 senderr(EINVAL);
267 p = pfr_lookup_addr(kt, ad, 1);
268 q = pfr_lookup_addr(tmpkt, ad, 1);
269 if (flags & PFR_FLAG_FEEDBACK) {
270 if (q != NULL)
271 ad->pfra_fback = PFR_FB_DUPLICATE;
272 else if (p == NULL)
273 ad->pfra_fback = PFR_FB_ADDED;
274 else if (p->pfrke_not != ad->pfra_not)
275 ad->pfra_fback = PFR_FB_CONFLICT;
276 else
277 ad->pfra_fback = PFR_FB_NONE;
278 }
279 if (p == NULL && q == NULL) {
280 p = pfr_create_kentry(ad,
281 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
282 if (p == NULL)
283 senderr(ENOMEM);
284 if (pfr_route_kentry(tmpkt, p)) {
285 pfr_destroy_kentry(p);
286 ad->pfra_fback = PFR_FB_NONE;
287 } else {
288 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
289 xadd++;
290 }
291 }
292 }
293 pfr_clean_node_mask(tmpkt, &workq);
294 if (!(flags & PFR_FLAG_DUMMY))
295 pfr_insert_kentries(kt, &workq, tzero);
296 else
297 pfr_destroy_kentries(&workq);
298 if (nadd != NULL)
299 *nadd = xadd;
300 pfr_destroy_ktable(tmpkt, 0);
301 return (0);
302 _bad:
303 pfr_clean_node_mask(tmpkt, &workq);
304 pfr_destroy_kentries(&workq);
305 if (flags & PFR_FLAG_FEEDBACK)
306 pfr_reset_feedback(addr, size);
307 pfr_destroy_ktable(tmpkt, 0);
308 return (rv);
309 }
310
311 int
312 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
313 int *ndel, int flags)
314 {
315 struct pfr_ktable *kt;
316 struct pfr_kentryworkq workq;
317 struct pfr_kentry *p;
318 struct pfr_addr *ad;
319 int i, rv, xdel = 0, log = 1;
320
321 PF_RULES_WASSERT();
322
323 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
324 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
325 return (EINVAL);
326 kt = pfr_lookup_table(tbl);
327 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
328 return (ESRCH);
329 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
330 return (EPERM);
331 /*
332 * there are two algorithms to choose from here.
333 * with:
334 * n: number of addresses to delete
335 * N: number of addresses in the table
336 *
337 * one is O(N) and is better for large 'n'
338 * one is O(n*LOG(N)) and is better for small 'n'
339 *
340 * the following code tries to decide which one is best.
341 */
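/*
 * Worked example (illustrative): with pfrkt_cnt == 1024 the loop below
 * computes log == 12, so the full table scan is chosen only when more than
 * 1024/12 == 85 addresses are to be deleted.
 */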
342 for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
343 log++;
344 if (size > kt->pfrkt_cnt/log) {
345 /* full table scan */
346 pfr_mark_addrs(kt);
347 } else {
348 /* iterate over addresses to delete */
349 for (i = 0, ad = addr; i < size; i++, ad++) {
350 if (pfr_validate_addr(ad))
351 return (EINVAL);
352 p = pfr_lookup_addr(kt, ad, 1);
353 if (p != NULL)
354 p->pfrke_mark = 0;
355 }
356 }
357 SLIST_INIT(&workq);
358 for (i = 0, ad = addr; i < size; i++, ad++) {
359 if (pfr_validate_addr(ad))
360 senderr(EINVAL);
361 p = pfr_lookup_addr(kt, ad, 1);
362 if (flags & PFR_FLAG_FEEDBACK) {
363 if (p == NULL)
364 ad->pfra_fback = PFR_FB_NONE;
365 else if (p->pfrke_not != ad->pfra_not)
366 ad->pfra_fback = PFR_FB_CONFLICT;
367 else if (p->pfrke_mark)
368 ad->pfra_fback = PFR_FB_DUPLICATE;
369 else
370 ad->pfra_fback = PFR_FB_DELETED;
371 }
372 if (p != NULL && p->pfrke_not == ad->pfra_not &&
373 !p->pfrke_mark) {
374 p->pfrke_mark = 1;
375 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
376 xdel++;
377 }
378 }
379 if (!(flags & PFR_FLAG_DUMMY))
380 pfr_remove_kentries(kt, &workq);
381 if (ndel != NULL)
382 *ndel = xdel;
383 return (0);
384 _bad:
385 if (flags & PFR_FLAG_FEEDBACK)
386 pfr_reset_feedback(addr, size);
387 return (rv);
388 }
389
390 int
391 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
392 int *size2, int *nadd, int *ndel, int *nchange, int flags,
393 u_int32_t ignore_pfrt_flags)
394 {
395 struct pfr_ktable *kt, *tmpkt;
396 struct pfr_kentryworkq addq, delq, changeq;
397 struct pfr_kentry *p, *q;
398 struct pfr_addr ad;
399 int i, rv, xadd = 0, xdel = 0, xchange = 0;
400 time_t tzero = time_second;
401
402 PF_RULES_WASSERT();
403
404 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
405 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
406 PFR_FLAG_USERIOCTL))
407 return (EINVAL);
408 kt = pfr_lookup_table(tbl);
409 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
410 return (ESRCH);
411 if (kt->pfrkt_flags & PFR_TFLAG_CONST)
412 return (EPERM);
413 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
414 if (tmpkt == NULL)
415 return (ENOMEM);
416 pfr_mark_addrs(kt);
417 SLIST_INIT(&addq);
418 SLIST_INIT(&delq);
419 SLIST_INIT(&changeq);
420 for (i = 0; i < size; i++) {
421 /*
422 * XXXGL: understand pf_if usage of this function
423 * and make ad a moving pointer
424 */
425 bcopy(addr + i, &ad, sizeof(ad));
426 if (pfr_validate_addr(&ad))
427 senderr(EINVAL);
428 ad.pfra_fback = PFR_FB_NONE;
429 p = pfr_lookup_addr(kt, &ad, 1);
430 if (p != NULL) {
431 if (p->pfrke_mark) {
432 ad.pfra_fback = PFR_FB_DUPLICATE;
433 goto _skip;
434 }
435 p->pfrke_mark = 1;
436 if (p->pfrke_not != ad.pfra_not) {
437 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
438 ad.pfra_fback = PFR_FB_CHANGED;
439 xchange++;
440 }
441 } else {
442 q = pfr_lookup_addr(tmpkt, &ad, 1);
443 if (q != NULL) {
444 ad.pfra_fback = PFR_FB_DUPLICATE;
445 goto _skip;
446 }
447 p = pfr_create_kentry(&ad,
448 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
449 if (p == NULL)
450 senderr(ENOMEM);
451 if (pfr_route_kentry(tmpkt, p)) {
452 pfr_destroy_kentry(p);
453 ad.pfra_fback = PFR_FB_NONE;
454 } else {
455 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
456 ad.pfra_fback = PFR_FB_ADDED;
457 xadd++;
458 }
459 }
460 _skip:
461 if (flags & PFR_FLAG_FEEDBACK)
462 bcopy(&ad, addr + i, sizeof(ad));
463 }
464 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
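/*
 * With PFR_FLAG_FEEDBACK and a caller-provided size2, the entries about to
 * be deleted are reported back in the addr slots following the first
 * 'size' elements; *size2 is updated to tell the caller how much room is
 * needed.
 */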
465 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
466 if (*size2 < size+xdel) {
467 *size2 = size+xdel;
468 senderr(0);
469 }
470 i = 0;
471 SLIST_FOREACH(p, &delq, pfrke_workq) {
472 pfr_copyout_addr(&ad, p);
473 ad.pfra_fback = PFR_FB_DELETED;
474 bcopy(&ad, addr + size + i, sizeof(ad));
475 i++;
476 }
477 }
478 pfr_clean_node_mask(tmpkt, &addq);
479 if (!(flags & PFR_FLAG_DUMMY)) {
480 pfr_insert_kentries(kt, &addq, tzero);
481 pfr_remove_kentries(kt, &delq);
482 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
483 } else
484 pfr_destroy_kentries(&addq);
485 if (nadd != NULL)
486 *nadd = xadd;
487 if (ndel != NULL)
488 *ndel = xdel;
489 if (nchange != NULL)
490 *nchange = xchange;
491 if ((flags & PFR_FLAG_FEEDBACK) && size2)
492 *size2 = size+xdel;
493 pfr_destroy_ktable(tmpkt, 0);
494 return (0);
495 _bad:
496 pfr_clean_node_mask(tmpkt, &addq);
497 pfr_destroy_kentries(&addq);
498 if (flags & PFR_FLAG_FEEDBACK)
499 pfr_reset_feedback(addr, size);
500 pfr_destroy_ktable(tmpkt, 0);
501 return (rv);
502 }
503
504 int
505 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
506 int *nmatch, int flags)
507 {
508 struct pfr_ktable *kt;
509 struct pfr_kentry *p;
510 struct pfr_addr *ad;
511 int i, xmatch = 0;
512
513 PF_RULES_RASSERT();
514
515 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
516 if (pfr_validate_table(tbl, 0, 0))
517 return (EINVAL);
518 kt = pfr_lookup_table(tbl);
519 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
520 return (ESRCH);
521
522 for (i = 0, ad = addr; i < size; i++, ad++) {
523 if (pfr_validate_addr(ad))
524 return (EINVAL);
525 if (ADDR_NETWORK(ad))
526 return (EINVAL);
527 p = pfr_lookup_addr(kt, ad, 0);
528 if (flags & PFR_FLAG_REPLACE)
529 pfr_copyout_addr(ad, p);
530 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
531 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
532 if (p != NULL && !p->pfrke_not)
533 xmatch++;
534 }
535 if (nmatch != NULL)
536 *nmatch = xmatch;
537 return (0);
538 }
539
540 int
541 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
542 int flags)
543 {
544 struct pfr_ktable *kt;
545 struct pfr_walktree w;
546 int rv;
547
548 PF_RULES_RASSERT();
549
550 ACCEPT_FLAGS(flags, 0);
551 if (pfr_validate_table(tbl, 0, 0))
552 return (EINVAL);
553 kt = pfr_lookup_table(tbl);
554 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
555 return (ESRCH);
556 if (kt->pfrkt_cnt > *size) {
557 *size = kt->pfrkt_cnt;
558 return (0);
559 }
560
561 bzero(&w, sizeof(w));
562 w.pfrw_op = PFRW_GET_ADDRS;
563 w.pfrw_addr = addr;
564 w.pfrw_free = kt->pfrkt_cnt;
565 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
566 if (!rv)
567 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
568 pfr_walktree, &w);
569 if (rv)
570 return (rv);
571
572 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
573 w.pfrw_free));
574
575 *size = kt->pfrkt_cnt;
576 return (0);
577 }
578
579 int
580 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
581 int flags)
582 {
583 struct pfr_ktable *kt;
584 struct pfr_walktree w;
585 struct pfr_kentryworkq workq;
586 int rv;
587 time_t tzero = time_second;
588
589 PF_RULES_RASSERT();
590
591 /* XXX PFR_FLAG_CLSTATS disabled */
592 ACCEPT_FLAGS(flags, 0);
593 if (pfr_validate_table(tbl, 0, 0))
594 return (EINVAL);
595 kt = pfr_lookup_table(tbl);
596 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
597 return (ESRCH);
598 if (kt->pfrkt_cnt > *size) {
599 *size = kt->pfrkt_cnt;
600 return (0);
601 }
602
603 bzero(&w, sizeof(w));
604 w.pfrw_op = PFRW_GET_ASTATS;
605 w.pfrw_astats = addr;
606 w.pfrw_free = kt->pfrkt_cnt;
607 /*
608 * Flags below are for backward compatibility. It was possible to have
609 * a table without per-entry counters.  Now they are always allocated;
610 * we just discard the data when reading it if the table is not
611 * configured to have counters.
612 */
613 w.pfrw_flags = kt->pfrkt_flags;
614 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
615 if (!rv)
616 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
617 pfr_walktree, &w);
618 if (!rv && (flags & PFR_FLAG_CLSTATS)) {
619 pfr_enqueue_addrs(kt, &workq, NULL, 0);
620 pfr_clstats_kentries(kt, &workq, tzero, 0);
621 }
622 if (rv)
623 return (rv);
624
625 if (w.pfrw_free) {
626 printf("pfr_get_astats: corruption detected (%d).\n",
627 w.pfrw_free);
628 return (ENOTTY);
629 }
630 *size = kt->pfrkt_cnt;
631 return (0);
632 }
633
634 int
635 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
636 int *nzero, int flags)
637 {
638 struct pfr_ktable *kt;
639 struct pfr_kentryworkq workq;
640 struct pfr_kentry *p;
641 struct pfr_addr *ad;
642 int i, rv, xzero = 0;
643
644 PF_RULES_WASSERT();
645
646 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
647 if (pfr_validate_table(tbl, 0, 0))
648 return (EINVAL);
649 kt = pfr_lookup_table(tbl);
650 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
651 return (ESRCH);
652 SLIST_INIT(&workq);
653 for (i = 0, ad = addr; i < size; i++, ad++) {
654 if (pfr_validate_addr(ad))
655 senderr(EINVAL);
656 p = pfr_lookup_addr(kt, ad, 1);
657 if (flags & PFR_FLAG_FEEDBACK) {
658 ad->pfra_fback = (p != NULL) ?
659 PFR_FB_CLEARED : PFR_FB_NONE;
660 }
661 if (p != NULL) {
662 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
663 xzero++;
664 }
665 }
666
667 if (!(flags & PFR_FLAG_DUMMY))
668 pfr_clstats_kentries(kt, &workq, time_second, 0);
669 if (nzero != NULL)
670 *nzero = xzero;
671 return (0);
672 _bad:
673 if (flags & PFR_FLAG_FEEDBACK)
674 pfr_reset_feedback(addr, size);
675 return (rv);
676 }
677
678 static int
679 pfr_validate_addr(struct pfr_addr *ad)
680 {
681 int i;
682
683 switch (ad->pfra_af) {
684 #ifdef INET
685 case AF_INET:
686 if (ad->pfra_net > 32)
687 return (-1);
688 break;
689 #endif /* INET */
690 #ifdef INET6
691 case AF_INET6:
692 if (ad->pfra_net > 128)
693 return (-1);
694 break;
695 #endif /* INET6 */
696 default:
697 return (-1);
698 }
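/*
 * Reject addresses with host bits set past the prefix length, e.g. an
 * AF_INET entry of 10.0.0.1/24.  The byte-wise check below relies on
 * pfra_u being the first member of struct pfr_addr, so the structure can
 * be indexed as raw address bytes.
 */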
699 if (ad->pfra_net < 128 &&
700 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
701 return (-1);
702 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
703 if (((caddr_t)ad)[i])
704 return (-1);
705 if (ad->pfra_not && ad->pfra_not != 1)
706 return (-1);
707 if (ad->pfra_fback)
708 return (-1);
709 return (0);
710 }
711
712 static void
713 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
714 int *naddr, int sweep)
715 {
716 struct pfr_walktree w;
717
718 SLIST_INIT(workq);
719 bzero(&w, sizeof(w));
720 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
721 w.pfrw_workq = workq;
722 if (kt->pfrkt_ip4 != NULL)
723 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
724 pfr_walktree, &w))
725 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
726 if (kt->pfrkt_ip6 != NULL)
727 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
728 pfr_walktree, &w))
729 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
730 if (naddr != NULL)
731 *naddr = w.pfrw_free;
732 }
733
734 static void
735 pfr_mark_addrs(struct pfr_ktable *kt)
736 {
737 struct pfr_walktree w;
738
739 bzero(&w, sizeof(w));
740 w.pfrw_op = PFRW_MARK;
741 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
742 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
743 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
744 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
745 }
746
747 static struct pfr_kentry *
748 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
749 {
750 union sockaddr_union sa, mask;
751 struct radix_head *head = NULL;
752 struct pfr_kentry *ke;
753
754 PF_RULES_ASSERT();
755
756 bzero(&sa, sizeof(sa));
757 switch (ad->pfra_af) {
758 case AF_INET:
759 FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
760 head = &kt->pfrkt_ip4->rh;
761 break;
762 case AF_INET6:
763 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
764 head = &kt->pfrkt_ip6->rh;
765 break;
766 default:
767 unhandled_af(ad->pfra_af);
768 }
769 if (ADDR_NETWORK(ad)) {
770 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
771 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
772 if (ke && KENTRY_RNF_ROOT(ke))
773 ke = NULL;
774 } else {
775 ke = (struct pfr_kentry *)rn_match(&sa, head);
776 if (ke && KENTRY_RNF_ROOT(ke))
777 ke = NULL;
778 if (exact && ke && KENTRY_NETWORK(ke))
779 ke = NULL;
780 }
781 return (ke);
782 }
783
784 static struct pfr_kentry *
785 pfr_create_kentry(struct pfr_addr *ad, bool counters)
786 {
787 struct pfr_kentry *ke;
788 counter_u64_t c;
789
790 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
791 if (ke == NULL)
792 return (NULL);
793
794 if (ad->pfra_af == AF_INET)
795 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
796 else if (ad->pfra_af == AF_INET6)
797 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
798 ke->pfrke_af = ad->pfra_af;
799 ke->pfrke_net = ad->pfra_net;
800 ke->pfrke_not = ad->pfra_not;
801 ke->pfrke_counters.pfrkc_tzero = 0;
802 if (counters) {
803 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
804 if (c == NULL) {
805 pfr_destroy_kentry(ke);
806 return (NULL);
807 }
808 ke->pfrke_counters.pfrkc_counters = c;
809 }
810 return (ke);
811 }
812
813 static void
814 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
815 {
816 struct pfr_kentry *p, *q;
817
818 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
819 q = SLIST_NEXT(p, pfrke_workq);
820 pfr_destroy_kentry(p);
821 }
822 }
823
824 static void
825 pfr_destroy_kentry(struct pfr_kentry *ke)
826 {
827 counter_u64_t c;
828
829 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
830 uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
831 uma_zfree(V_pfr_kentry_z, ke);
832 }
833
834 static void
835 pfr_insert_kentries(struct pfr_ktable *kt,
836 struct pfr_kentryworkq *workq, time_t tzero)
837 {
838 struct pfr_kentry *p;
839 int rv, n = 0;
840
841 SLIST_FOREACH(p, workq, pfrke_workq) {
842 rv = pfr_route_kentry(kt, p);
843 if (rv) {
844 printf("pfr_insert_kentries: cannot route entry "
845 "(code=%d).\n", rv);
846 break;
847 }
848 p->pfrke_counters.pfrkc_tzero = tzero;
849 n++;
850 }
851 kt->pfrkt_cnt += n;
852 }
853
854 int
855 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
856 {
857 struct pfr_kentry *p;
858 int rv;
859
860 p = pfr_lookup_addr(kt, ad, 1);
861 if (p != NULL)
862 return (0);
863 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
864 if (p == NULL)
865 return (ENOMEM);
866
867 rv = pfr_route_kentry(kt, p);
868 if (rv)
869 return (rv);
870
871 p->pfrke_counters.pfrkc_tzero = tzero;
872 kt->pfrkt_cnt++;
873
874 return (0);
875 }
876
877 static void
878 pfr_remove_kentries(struct pfr_ktable *kt,
879 struct pfr_kentryworkq *workq)
880 {
881 struct pfr_kentry *p;
882 int n = 0;
883
884 SLIST_FOREACH(p, workq, pfrke_workq) {
885 pfr_unroute_kentry(kt, p);
886 n++;
887 }
888 kt->pfrkt_cnt -= n;
889 pfr_destroy_kentries(workq);
890 }
891
892 static void
893 pfr_clean_node_mask(struct pfr_ktable *kt,
894 struct pfr_kentryworkq *workq)
895 {
896 struct pfr_kentry *p;
897
898 SLIST_FOREACH(p, workq, pfrke_workq)
899 pfr_unroute_kentry(kt, p);
900 }
901
902 static void
903 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
904 time_t tzero, int negchange)
905 {
906 struct pfr_kentry *p;
907 int i;
908
909 SLIST_FOREACH(p, workq, pfrke_workq) {
910 if (negchange)
911 p->pfrke_not = !p->pfrke_not;
912 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
913 for (i = 0; i < PFR_NUM_COUNTERS; i++)
914 counter_u64_zero(
915 p->pfrke_counters.pfrkc_counters + i);
916 p->pfrke_counters.pfrkc_tzero = tzero;
917 }
918 }
919
920 static void
921 pfr_reset_feedback(struct pfr_addr *addr, int size)
922 {
923 struct pfr_addr *ad;
924 int i;
925
926 for (i = 0, ad = addr; i < size; i++, ad++)
927 ad->pfra_fback = PFR_FB_NONE;
928 }
929
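/*
 * Build a netmask sockaddr for the given address family and prefix length.
 * For example (illustrative), AF_INET with net == 24 yields 255.255.255.0
 * and AF_INET6 with net == 48 yields ffff:ffff:ffff::.
 */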
930 static void
931 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
932 {
933 int i;
934
935 bzero(sa, sizeof(*sa));
936 if (af == AF_INET) {
937 sa->sin.sin_len = sizeof(sa->sin);
938 sa->sin.sin_family = AF_INET;
939 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
940 } else if (af == AF_INET6) {
941 sa->sin6.sin6_len = sizeof(sa->sin6);
942 sa->sin6.sin6_family = AF_INET6;
943 for (i = 0; i < 4; i++) {
944 if (net <= 32) {
945 sa->sin6.sin6_addr.s6_addr32[i] =
946 net ? htonl(-1 << (32-net)) : 0;
947 break;
948 }
949 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
950 net -= 32;
951 }
952 }
953 }
954
955 static int
956 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
957 {
958 union sockaddr_union mask;
959 struct radix_node *rn;
960 struct radix_head *head = NULL;
961
962 PF_RULES_WASSERT();
963
964 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
965 switch (ke->pfrke_af) {
966 case AF_INET:
967 head = &kt->pfrkt_ip4->rh;
968 break;
969 case AF_INET6:
970 head = &kt->pfrkt_ip6->rh;
971 break;
972 default:
973 unhandled_af(ke->pfrke_af);
974 }
975
976 if (KENTRY_NETWORK(ke)) {
977 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
978 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
979 } else
980 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
981
982 return (rn == NULL ? -1 : 0);
983 }
984
985 static int
986 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
987 {
988 union sockaddr_union mask;
989 struct radix_node *rn;
990 struct radix_head *head = NULL;
991
992 switch (ke->pfrke_af) {
993 case AF_INET:
994 head = &kt->pfrkt_ip4->rh;
995 break;
996 case AF_INET6:
997 head = &kt->pfrkt_ip6->rh;
998 break;
999 default:
1000 unhandled_af(ke->pfrke_af);
1001 }
1002
1003 if (KENTRY_NETWORK(ke)) {
1004 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1005 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1006 } else
1007 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1008
1009 if (rn == NULL) {
1010 printf("pfr_unroute_kentry: delete failed.\n");
1011 return (-1);
1012 }
1013 return (0);
1014 }
1015
1016 static void
1017 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
1018 {
1019 bzero(ad, sizeof(*ad));
1020 if (ke == NULL)
1021 return;
1022 ad->pfra_af = ke->pfrke_af;
1023 ad->pfra_net = ke->pfrke_net;
1024 ad->pfra_not = ke->pfrke_not;
1025 if (ad->pfra_af == AF_INET)
1026 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1027 else if (ad->pfra_af == AF_INET6)
1028 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1029 }
1030
1031 static void
1032 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
1033 const struct pfr_walktree *w)
1034 {
1035 int dir, op;
1036 const struct pfr_kcounters *kc = &ke->pfrke_counters;
1037
1038 bzero(as, sizeof(*as));
1039 pfr_copyout_addr(&as->pfras_a, ke);
1040 as->pfras_tzero = kc->pfrkc_tzero;
1041
1042 if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
1043 kc->pfrkc_counters == NULL) {
1044 bzero(as->pfras_packets, sizeof(as->pfras_packets));
1045 bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
1046 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
1047 return;
1048 }
1049
1050 for (dir = 0; dir < PFR_DIR_MAX; dir++) {
1051 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
1052 as->pfras_packets[dir][op] = counter_u64_fetch(
1053 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
1054 as->pfras_bytes[dir][op] = counter_u64_fetch(
1055 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
1056 }
1057 }
1058 }
1059
1060 static void
1061 pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
1062 {
1063 switch (sa->sa.sa_family) {
1064 case AF_INET:
1065 memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
1066 break;
1067 case AF_INET6:
1068 memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
1069 break;
1070 default:
1071 unhandled_af(sa->sa.sa_family);
1072 }
1073 }
1074
1075 static int
1076 pfr_walktree(struct radix_node *rn, void *arg)
1077 {
1078 struct pfr_kentry *ke = (struct pfr_kentry *)rn;
1079 struct pfr_walktree *w = arg;
1080
1081 switch (w->pfrw_op) {
1082 case PFRW_MARK:
1083 ke->pfrke_mark = 0;
1084 break;
1085 case PFRW_SWEEP:
1086 if (ke->pfrke_mark)
1087 break;
1088 /* FALLTHROUGH */
1089 case PFRW_ENQUEUE:
1090 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
1091 w->pfrw_free++;
1092 break;
1093 case PFRW_GET_ADDRS:
1094 if (w->pfrw_free-- > 0) {
1095 pfr_copyout_addr(w->pfrw_addr, ke);
1096 w->pfrw_addr++;
1097 }
1098 break;
1099 case PFRW_GET_ASTATS:
1100 if (w->pfrw_free-- > 0) {
1101 struct pfr_astats as;
1102
1103 pfr_copyout_astats(&as, ke, w);
1104
1105 bcopy(&as, w->pfrw_astats, sizeof(as));
1106 w->pfrw_astats++;
1107 }
1108 break;
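/*
 * For PFRW_POOL_GET, pfrw_free is expected to hold the zero-based index of
 * the wanted entry; the walk returns the pfrw_free'th non-negated entry.
 */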
1109 case PFRW_POOL_GET:
1110 if (ke->pfrke_not)
1111 break; /* negative entries are ignored */
1112 if (!w->pfrw_free--) {
1113 w->pfrw_kentry = ke;
1114 return (1); /* finish search */
1115 }
1116 break;
1117 case PFRW_DYNADDR_UPDATE:
1118 {
1119 union sockaddr_union pfr_mask;
1120
1121 if (ke->pfrke_af == AF_INET) {
1122 if (w->pfrw_dyn->pfid_acnt4++ > 0)
1123 break;
1124 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
1125 pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr4);
1126 pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask4);
1127 } else if (ke->pfrke_af == AF_INET6){
1128 if (w->pfrw_dyn->pfid_acnt6++ > 0)
1129 break;
1130 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
1131 pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr6);
1132 pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask6);
1133 }
1134 break;
1135 }
1136 case PFRW_COUNTERS:
1137 {
1138 if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
1139 if (ke->pfrke_counters.pfrkc_counters != NULL)
1140 break;
1141 ke->pfrke_counters.pfrkc_counters =
1142 uma_zalloc_pcpu(V_pfr_kentry_counter_z,
1143 M_NOWAIT | M_ZERO);
1144 } else {
1145 uma_zfree_pcpu(V_pfr_kentry_counter_z,
1146 ke->pfrke_counters.pfrkc_counters);
1147 ke->pfrke_counters.pfrkc_counters = NULL;
1148 }
1149 break;
1150 }
1151 }
1152 return (0);
1153 }
1154
1155 int
1156 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1157 {
1158 struct pfr_ktableworkq workq;
1159 struct pfr_ktable *p;
1160 int xdel = 0;
1161
1162 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1163 if (pfr_fix_anchor(filter->pfrt_anchor))
1164 return (EINVAL);
1165 if (pfr_table_count(filter, flags) < 0)
1166 return (ENOENT);
1167
1168 SLIST_INIT(&workq);
1169 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1170 if (pfr_skip_table(filter, p, flags))
1171 continue;
1172 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1173 continue;
1174 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1175 continue;
1176 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1177 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1178 xdel++;
1179 }
1180 if (!(flags & PFR_FLAG_DUMMY))
1181 pfr_setflags_ktables(&workq);
1182 if (ndel != NULL)
1183 *ndel = xdel;
1184 return (0);
1185 }
1186
1187 int
1188 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
1189 {
1190 struct pfr_ktableworkq addq, changeq;
1191 struct pfr_ktable *p, *q, *r, key;
1192 int i, rv, xadd = 0;
1193 time_t tzero = time_second;
1194
1195 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1196 SLIST_INIT(&addq);
1197 SLIST_INIT(&changeq);
1198 for (i = 0; i < size; i++) {
1199 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1200 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
1201 flags & PFR_FLAG_USERIOCTL))
1202 senderr(EINVAL);
1203 key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
1204 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1205 if (p == NULL) {
1206 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
1207 if (p == NULL)
1208 senderr(ENOMEM);
1209 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1210 if (!pfr_ktable_compare(p, q)) {
1211 pfr_destroy_ktable(p, 0);
1212 goto _skip;
1213 }
1214 }
1215 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
1216 xadd++;
1217 if (!key.pfrkt_anchor[0])
1218 goto _skip;
1219
1220 /* find or create root table */
1221 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
1222 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1223 if (r != NULL) {
1224 p->pfrkt_root = r;
1225 goto _skip;
1226 }
1227 SLIST_FOREACH(q, &addq, pfrkt_workq) {
1228 if (!pfr_ktable_compare(&key, q)) {
1229 p->pfrkt_root = q;
1230 goto _skip;
1231 }
1232 }
1233 key.pfrkt_flags = 0;
1234 r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1235 if (r == NULL)
1236 senderr(ENOMEM);
1237 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
1238 p->pfrkt_root = r;
1239 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1240 SLIST_FOREACH(q, &changeq, pfrkt_workq)
1241 if (!pfr_ktable_compare(&key, q))
1242 goto _skip;
1243 p->pfrkt_nflags = (p->pfrkt_flags &
1244 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
1245 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
1246 xadd++;
1247 }
1248 _skip:
1249 ;
1250 }
1251 if (!(flags & PFR_FLAG_DUMMY)) {
1252 pfr_insert_ktables(&addq);
1253 pfr_setflags_ktables(&changeq);
1254 } else
1255 pfr_destroy_ktables(&addq, 0);
1256 if (nadd != NULL)
1257 *nadd = xadd;
1258 return (0);
1259 _bad:
1260 pfr_destroy_ktables(&addq, 0);
1261 return (rv);
1262 }
1263
1264 int
1265 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
1266 {
1267 struct pfr_ktableworkq workq;
1268 struct pfr_ktable *p, *q, key;
1269 int i, xdel = 0;
1270
1271 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1272 SLIST_INIT(&workq);
1273 for (i = 0; i < size; i++) {
1274 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1275 if (pfr_validate_table(&key.pfrkt_t, 0,
1276 flags & PFR_FLAG_USERIOCTL))
1277 return (EINVAL);
1278 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1279 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1280 SLIST_FOREACH(q, &workq, pfrkt_workq)
1281 if (!pfr_ktable_compare(p, q))
1282 goto _skip;
1283 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1284 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1285 xdel++;
1286 }
1287 _skip:
1288 ;
1289 }
1290
1291 if (!(flags & PFR_FLAG_DUMMY))
1292 pfr_setflags_ktables(&workq);
1293 if (ndel != NULL)
1294 *ndel = xdel;
1295 return (0);
1296 }
1297
1298 int
1299 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
1300 int flags)
1301 {
1302 struct pfr_ktable *p;
1303 int n, nn;
1304
1305 PF_RULES_RASSERT();
1306
1307 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1308 if (pfr_fix_anchor(filter->pfrt_anchor))
1309 return (EINVAL);
1310 n = nn = pfr_table_count(filter, flags);
1311 if (n < 0)
1312 return (ENOENT);
1313 if (n > *size) {
1314 *size = n;
1315 return (0);
1316 }
1317 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1318 if (pfr_skip_table(filter, p, flags))
1319 continue;
1320 if (n-- <= 0)
1321 continue;
1322 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
1323 }
1324
1325 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1326
1327 *size = nn;
1328 return (0);
1329 }
1330
1331 int
1332 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
1333 int flags)
1334 {
1335 struct pfr_ktable *p;
1336 struct pfr_ktableworkq workq;
1337 int n, nn;
1338 time_t tzero = time_second;
1339 int pfr_dir, pfr_op;
1340
1341 /* XXX PFR_FLAG_CLSTATS disabled */
1342 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
1343 if (pfr_fix_anchor(filter->pfrt_anchor))
1344 return (EINVAL);
1345 n = nn = pfr_table_count(filter, flags);
1346 if (n < 0)
1347 return (ENOENT);
1348 if (n > *size) {
1349 *size = n;
1350 return (0);
1351 }
1352 SLIST_INIT(&workq);
1353 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1354 if (pfr_skip_table(filter, p, flags))
1355 continue;
1356 if (n-- <= 0)
1357 continue;
1358 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
1359 sizeof(struct pfr_table));
1360 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1361 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1362 tbl->pfrts_packets[pfr_dir][pfr_op] =
1363 pfr_kstate_counter_fetch(
1364 &p->pfrkt_packets[pfr_dir][pfr_op]);
1365 tbl->pfrts_bytes[pfr_dir][pfr_op] =
1366 pfr_kstate_counter_fetch(
1367 &p->pfrkt_bytes[pfr_dir][pfr_op]);
1368 }
1369 }
1370 tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
1371 tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
1372 tbl->pfrts_tzero = p->pfrkt_tzero;
1373 tbl->pfrts_cnt = p->pfrkt_cnt;
1374 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
1375 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
1376 tbl++;
1377 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1378 }
1379 if (flags & PFR_FLAG_CLSTATS)
1380 pfr_clstats_ktables(&workq, tzero,
1381 flags & PFR_FLAG_ADDRSTOO);
1382
1383 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
1384
1385 *size = nn;
1386 return (0);
1387 }
1388
1389 int
1390 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
1391 {
1392 struct pfr_ktableworkq workq;
1393 struct pfr_ktable *p, key;
1394 int i, xzero = 0;
1395 time_t tzero = time_second;
1396
1397 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1398 SLIST_INIT(&workq);
1399 for (i = 0; i < size; i++) {
1400 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1401 if (pfr_validate_table(&key.pfrkt_t, 0, 0))
1402 return (EINVAL);
1403 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1404 if (p != NULL) {
1405 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1406 xzero++;
1407 }
1408 }
1409 if (!(flags & PFR_FLAG_DUMMY))
1410 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
1411 if (nzero != NULL)
1412 *nzero = xzero;
1413 return (0);
1414 }
1415
1416 int
1417 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
1418 int *nchange, int *ndel, int flags)
1419 {
1420 struct pfr_ktableworkq workq;
1421 struct pfr_ktable *p, *q, key;
1422 int i, xchange = 0, xdel = 0;
1423
1424 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1425 if ((setflag & ~PFR_TFLAG_USRMASK) ||
1426 (clrflag & ~PFR_TFLAG_USRMASK) ||
1427 (setflag & clrflag))
1428 return (EINVAL);
1429 SLIST_INIT(&workq);
1430 for (i = 0; i < size; i++) {
1431 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
1432 if (pfr_validate_table(&key.pfrkt_t, 0,
1433 flags & PFR_FLAG_USERIOCTL))
1434 return (EINVAL);
1435 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1436 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
1437 p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
1438 ~clrflag;
1439 if (p->pfrkt_nflags == p->pfrkt_flags)
1440 goto _skip;
1441 SLIST_FOREACH(q, &workq, pfrkt_workq)
1442 if (!pfr_ktable_compare(p, q))
1443 goto _skip;
1444 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1445 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
1446 (clrflag & PFR_TFLAG_PERSIST) &&
1447 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
1448 xdel++;
1449 else
1450 xchange++;
1451 }
1452 _skip:
1453 ;
1454 }
1455 if (!(flags & PFR_FLAG_DUMMY))
1456 pfr_setflags_ktables(&workq);
1457 if (nchange != NULL)
1458 *nchange = xchange;
1459 if (ndel != NULL)
1460 *ndel = xdel;
1461 return (0);
1462 }
1463
1464 int
1465 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
1466 {
1467 struct pfr_ktableworkq workq;
1468 struct pfr_ktable *p;
1469 struct pf_kruleset *rs;
1470 int xdel = 0;
1471
1472 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1473 rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
1474 if (rs == NULL)
1475 return (ENOMEM);
1476 SLIST_INIT(&workq);
1477 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1478 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1479 pfr_skip_table(trs, p, 0))
1480 continue;
1481 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1482 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1483 xdel++;
1484 }
1485 if (!(flags & PFR_FLAG_DUMMY)) {
1486 pfr_setflags_ktables(&workq);
1487 if (ticket != NULL)
1488 *ticket = ++rs->tticket;
1489 rs->topen = 1;
1490 } else
1491 pf_remove_if_empty_kruleset(rs);
1492 if (ndel != NULL)
1493 *ndel = xdel;
1494 return (0);
1495 }
1496
1497 int
1498 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
1499 int *nadd, int *naddr, u_int32_t ticket, int flags)
1500 {
1501 struct pfr_ktableworkq tableq;
1502 struct pfr_kentryworkq addrq;
1503 struct pfr_ktable *kt, *rt, *shadow, key;
1504 struct pfr_kentry *p;
1505 struct pfr_addr *ad;
1506 struct pf_kruleset *rs;
1507 int i, rv, xadd = 0, xaddr = 0;
1508
1509 PF_RULES_WASSERT();
1510
1511 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
1512 if (size && !(flags & PFR_FLAG_ADDRSTOO))
1513 return (EINVAL);
1514 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
1515 flags & PFR_FLAG_USERIOCTL))
1516 return (EINVAL);
1517 rs = pf_find_kruleset(tbl->pfrt_anchor);
1518 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1519 return (EBUSY);
1520 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
1521 SLIST_INIT(&tableq);
1522 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
1523 if (kt == NULL) {
1524 kt = pfr_create_ktable(tbl, 0, 1);
1525 if (kt == NULL)
1526 return (ENOMEM);
1527 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
1528 xadd++;
1529 if (!tbl->pfrt_anchor[0])
1530 goto _skip;
1531
1532 /* find or create root table */
1533 bzero(&key, sizeof(key));
1534 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
1535 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
1536 if (rt != NULL) {
1537 kt->pfrkt_root = rt;
1538 goto _skip;
1539 }
1540 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
1541 if (rt == NULL) {
1542 pfr_destroy_ktables(&tableq, 0);
1543 return (ENOMEM);
1544 }
1545 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
1546 kt->pfrkt_root = rt;
1547 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
1548 xadd++;
1549 _skip:
1550 shadow = pfr_create_ktable(tbl, 0, 0);
1551 if (shadow == NULL) {
1552 pfr_destroy_ktables(&tableq, 0);
1553 return (ENOMEM);
1554 }
1555 SLIST_INIT(&addrq);
1556 for (i = 0, ad = addr; i < size; i++, ad++) {
1557 if (pfr_validate_addr(ad))
1558 senderr(EINVAL);
1559 if (pfr_lookup_addr(shadow, ad, 1) != NULL)
1560 continue;
1561 p = pfr_create_kentry(ad,
1562 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
1563 if (p == NULL)
1564 senderr(ENOMEM);
1565 if (pfr_route_kentry(shadow, p)) {
1566 pfr_destroy_kentry(p);
1567 continue;
1568 }
1569 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
1570 xaddr++;
1571 }
1572 if (!(flags & PFR_FLAG_DUMMY)) {
1573 if (kt->pfrkt_shadow != NULL)
1574 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1575 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
1576 pfr_insert_ktables(&tableq);
1577 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
1578 xaddr : NO_ADDRESSES;
1579 kt->pfrkt_shadow = shadow;
1580 } else {
1581 pfr_clean_node_mask(shadow, &addrq);
1582 pfr_destroy_ktable(shadow, 0);
1583 pfr_destroy_ktables(&tableq, 0);
1584 pfr_destroy_kentries(&addrq);
1585 }
1586 if (nadd != NULL)
1587 *nadd = xadd;
1588 if (naddr != NULL)
1589 *naddr = xaddr;
1590 return (0);
1591 _bad:
1592 pfr_destroy_ktable(shadow, 0);
1593 pfr_destroy_ktables(&tableq, 0);
1594 pfr_destroy_kentries(&addrq);
1595 return (rv);
1596 }
1597
1598 int
1599 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
1600 {
1601 struct pfr_ktableworkq workq;
1602 struct pfr_ktable *p;
1603 struct pf_kruleset *rs;
1604 int xdel = 0;
1605
1606 PF_RULES_WASSERT();
1607
1608 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1609 rs = pf_find_kruleset(trs->pfrt_anchor);
1610 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1611 return (0);
1612 SLIST_INIT(&workq);
1613 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1614 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1615 pfr_skip_table(trs, p, 0))
1616 continue;
1617 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
1618 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1619 xdel++;
1620 }
1621 if (!(flags & PFR_FLAG_DUMMY)) {
1622 pfr_setflags_ktables(&workq);
1623 rs->topen = 0;
1624 pf_remove_if_empty_kruleset(rs);
1625 }
1626 if (ndel != NULL)
1627 *ndel = xdel;
1628 return (0);
1629 }
1630
1631 int
1632 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
1633 int *nchange, int flags)
1634 {
1635 struct pfr_ktable *p, *q;
1636 struct pfr_ktableworkq workq;
1637 struct pf_kruleset *rs;
1638 int xadd = 0, xchange = 0;
1639 time_t tzero = time_second;
1640
1641 PF_RULES_WASSERT();
1642
1643 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
1644 rs = pf_find_kruleset(trs->pfrt_anchor);
1645 if (rs == NULL || !rs->topen || ticket != rs->tticket)
1646 return (EBUSY);
1647
1648 SLIST_INIT(&workq);
1649 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
1650 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
1651 pfr_skip_table(trs, p, 0))
1652 continue;
1653 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1654 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
1655 xchange++;
1656 else
1657 xadd++;
1658 }
1659
1660 if (!(flags & PFR_FLAG_DUMMY)) {
1661 for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
1662 q = SLIST_NEXT(p, pfrkt_workq);
1663 pfr_commit_ktable(p, tzero);
1664 }
1665 rs->topen = 0;
1666 pf_remove_if_empty_kruleset(rs);
1667 }
1668 if (nadd != NULL)
1669 *nadd = xadd;
1670 if (nchange != NULL)
1671 *nchange = xchange;
1672
1673 return (0);
1674 }
1675
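/*
 * Merge a table's inactive shadow copy into the active table.  A shadow
 * defined without an address list leaves the existing addresses untouched;
 * an active table is merged address by address via add/delete/change work
 * queues; an inactive one simply swaps in the shadow's radix heads.
 */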
1676 static void
1677 pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
1678 {
1679 counter_u64_t *pkc, *qkc;
1680 struct pfr_ktable *shadow = kt->pfrkt_shadow;
1681 int nflags;
1682
1683 PF_RULES_WASSERT();
1684
1685 if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1686 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1687 pfr_clstats_ktable(kt, tzero, 1);
1688 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1689 /* kt might contain addresses */
1690 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
1691 struct pfr_kentry *p, *q, *next;
1692 struct pfr_addr ad;
1693
1694 pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1695 pfr_mark_addrs(kt);
1696 SLIST_INIT(&addq);
1697 SLIST_INIT(&changeq);
1698 SLIST_INIT(&delq);
1699 SLIST_INIT(&garbageq);
1700 pfr_clean_node_mask(shadow, &addrq);
1701 SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) {
1702 pfr_copyout_addr(&ad, p);
1703 q = pfr_lookup_addr(kt, &ad, 1);
1704 if (q != NULL) {
1705 if (q->pfrke_not != p->pfrke_not)
1706 SLIST_INSERT_HEAD(&changeq, q,
1707 pfrke_workq);
1708 pkc = &p->pfrke_counters.pfrkc_counters;
1709 qkc = &q->pfrke_counters.pfrkc_counters;
1710 if ((*pkc == NULL) != (*qkc == NULL))
1711 SWAP(counter_u64_t, *pkc, *qkc);
1712 q->pfrke_mark = 1;
1713 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1714 } else {
1715 p->pfrke_counters.pfrkc_tzero = tzero;
1716 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1717 }
1718 }
1719 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1720 pfr_insert_kentries(kt, &addq, tzero);
1721 pfr_remove_kentries(kt, &delq);
1722 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
1723 pfr_destroy_kentries(&garbageq);
1724 } else {
1725 /* kt cannot contain addresses */
1726 SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1727 shadow->pfrkt_ip4);
1728 SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1729 shadow->pfrkt_ip6);
1730 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1731 pfr_clstats_ktable(kt, tzero, 1);
1732 }
1733 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1734 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
1735 & ~PFR_TFLAG_INACTIVE;
1736 pfr_destroy_ktable(shadow, 0);
1737 kt->pfrkt_shadow = NULL;
1738 pfr_setflags_ktable(kt, nflags);
1739 }
1740
1741 static int
1742 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1743 {
1744 int i;
1745
1746 if (!tbl->pfrt_name[0])
1747 return (-1);
1748 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
1749 return (-1);
1750 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1751 return (-1);
1752 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1753 if (tbl->pfrt_name[i])
1754 return (-1);
1755 if (pfr_fix_anchor(tbl->pfrt_anchor))
1756 return (-1);
1757 if (tbl->pfrt_flags & ~allowedflags)
1758 return (-1);
1759 return (0);
1760 }
1761
1762 /*
1763 * Rewrite anchors referenced by tables to remove slashes
1764 * and check for validity.
1765 */
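/*
 * Example (illustrative): "/foo/bar" and "//foo/bar" both become "foo/bar";
 * anchors not NUL-terminated within MAXPATHLEN bytes are rejected.
 */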
1766 static int
1767 pfr_fix_anchor(char *anchor)
1768 {
1769 size_t siz = MAXPATHLEN;
1770 int i;
1771
1772 if (anchor[0] == '/') {
1773 char *path;
1774 int off;
1775
1776 path = anchor;
1777 off = 1;
1778 while (*++path == '/')
1779 off++;
1780 bcopy(path, anchor, siz - off);
1781 memset(anchor + siz - off, 0, off);
1782 }
1783 if (anchor[siz - 1])
1784 return (-1);
1785 for (i = strlen(anchor); i < siz; i++)
1786 if (anchor[i])
1787 return (-1);
1788 return (0);
1789 }
1790
1791 int
1792 pfr_table_count(struct pfr_table *filter, int flags)
1793 {
1794 struct pf_kruleset *rs;
1795
1796 PF_RULES_ASSERT();
1797
1798 if (flags & PFR_FLAG_ALLRSETS)
1799 return (V_pfr_ktable_cnt);
1800 if (filter->pfrt_anchor[0]) {
1801 rs = pf_find_kruleset(filter->pfrt_anchor);
1802 return ((rs != NULL) ? rs->tables : -1);
1803 }
1804 return (pf_main_ruleset.tables);
1805 }
1806
1807 static int
1808 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1809 {
1810 if (flags & PFR_FLAG_ALLRSETS)
1811 return (0);
1812 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1813 return (1);
1814 return (0);
1815 }
1816
1817 static void
1818 pfr_insert_ktables(struct pfr_ktableworkq *workq)
1819 {
1820 struct pfr_ktable *p;
1821
1822 SLIST_FOREACH(p, workq, pfrkt_workq)
1823 pfr_insert_ktable(p);
1824 }
1825
1826 static void
1827 pfr_insert_ktable(struct pfr_ktable *kt)
1828 {
1829
1830 PF_RULES_WASSERT();
1831
1832 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
1833 V_pfr_ktable_cnt++;
1834 if (kt->pfrkt_root != NULL)
1835 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1836 pfr_setflags_ktable(kt->pfrkt_root,
1837 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1838 }
1839
1840 static void
1841 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1842 {
1843 struct pfr_ktable *p, *q;
1844
1845 for (p = SLIST_FIRST(workq); p; p = q) {
1846 q = SLIST_NEXT(p, pfrkt_workq);
1847 pfr_setflags_ktable(p, p->pfrkt_nflags);
1848 }
1849 }
1850
1851 static void
1852 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1853 {
1854 struct pfr_kentryworkq addrq;
1855 struct pfr_walktree w;
1856
1857 PF_RULES_WASSERT();
1858
1859 if (!(newf & PFR_TFLAG_REFERENCED) &&
1860 !(newf & PFR_TFLAG_REFDANCHOR) &&
1861 !(newf & PFR_TFLAG_PERSIST))
1862 newf &= ~PFR_TFLAG_ACTIVE;
1863 if (!(newf & PFR_TFLAG_ACTIVE))
1864 newf &= ~PFR_TFLAG_USRMASK;
1865 if (!(newf & PFR_TFLAG_SETMASK)) {
1866 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
1867 if (kt->pfrkt_root != NULL)
1868 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1869 pfr_setflags_ktable(kt->pfrkt_root,
1870 kt->pfrkt_root->pfrkt_flags &
1871 ~PFR_TFLAG_REFDANCHOR);
1872 pfr_destroy_ktable(kt, 1);
1873 V_pfr_ktable_cnt--;
1874 return;
1875 }
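/*
 * Toggling PFR_TFLAG_COUNTERS walks both radix trees: switching the flag on
 * allocates per-entry counters, switching it off frees them (see the
 * PFRW_COUNTERS case in pfr_walktree()).
 */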
1876 if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1877 bzero(&w, sizeof(w));
1878 w.pfrw_op = PFRW_COUNTERS;
1879 w.pfrw_flags |= PFR_TFLAG_COUNTERS;
1880 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1881 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1882 }
1883 if (! (newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
1884 bzero(&w, sizeof(w));
1885 w.pfrw_op = PFRW_COUNTERS;
1886 w.pfrw_flags |= 0;
1887 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
1888 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
1889 }
1890 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1891 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1892 pfr_remove_kentries(kt, &addrq);
1893 }
1894 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1895 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1896 kt->pfrkt_shadow = NULL;
1897 }
1898 kt->pfrkt_flags = newf;
1899 }
1900
1901 static void
1902 pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
1903 {
1904 struct pfr_ktable *p;
1905
1906 SLIST_FOREACH(p, workq, pfrkt_workq)
1907 pfr_clstats_ktable(p, tzero, recurse);
1908 }
1909
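/*
 * Zero the table's packet/byte and match/nomatch counters; with 'recurse'
 * set, the per-entry statistics are cleared as well.
 */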
1910 static void
1911 pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
1912 {
1913 struct pfr_kentryworkq addrq;
1914 int pfr_dir, pfr_op;
1915
1916 MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());
1917
1918 if (recurse) {
1919 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1920 pfr_clstats_kentries(kt, &addrq, tzero, 0);
1921 }
1922 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1923 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1924 pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
1925 pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
1926 }
1927 }
1928 pfr_kstate_counter_zero(&kt->pfrkt_match);
1929 pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
1930 kt->pfrkt_tzero = tzero;
1931 }
1932
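/*
 * Allocate and initialize a kernel table: copy the user-visible header,
 * optionally attach it to its ruleset, and set up the packet/byte counters
 * and the IPv4/IPv6 radix heads.  Returns NULL if any allocation fails.
 */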
1933 static struct pfr_ktable *
1934 pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
1935 {
1936 struct pfr_ktable *kt;
1937 struct pf_kruleset *rs;
1938 int pfr_dir, pfr_op;
1939
1940 PF_RULES_WASSERT();
1941
1942 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
1943 if (kt == NULL)
1944 return (NULL);
1945 kt->pfrkt_t = *tbl;
1946
1947 if (attachruleset) {
1948 rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
1949 if (!rs) {
1950 pfr_destroy_ktable(kt, 0);
1951 return (NULL);
1952 }
1953 kt->pfrkt_rs = rs;
1954 rs->tables++;
1955 }
1956
1957 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
1958 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
1959 if (pfr_kstate_counter_init(
1960 &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1961 pfr_destroy_ktable(kt, 0);
1962 return (NULL);
1963 }
1964 if (pfr_kstate_counter_init(
1965 &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
1966 pfr_destroy_ktable(kt, 0);
1967 return (NULL);
1968 }
1969 }
1970 }
1971 if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
1972 pfr_destroy_ktable(kt, 0);
1973 return (NULL);
1974 }
1975
1976 if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
1977 pfr_destroy_ktable(kt, 0);
1978 return (NULL);
1979 }
1980
1981 if (!rn_inithead((void **)&kt->pfrkt_ip4,
1982 offsetof(struct sockaddr_in, sin_addr) * 8) ||
1983 !rn_inithead((void **)&kt->pfrkt_ip6,
1984 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
1985 pfr_destroy_ktable(kt, 0);
1986 return (NULL);
1987 }
1988 kt->pfrkt_tzero = tzero;
1989
1990 return (kt);
1991 }
1992
1993 static void
1994 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1995 {
1996 struct pfr_ktable *p, *q;
1997
1998 for (p = SLIST_FIRST(workq); p; p = q) {
1999 q = SLIST_NEXT(p, pfrkt_workq);
2000 pfr_destroy_ktable(p, flushaddr);
2001 }
2002 }
2003
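/*
 * Tear down a table: optionally flush its addresses, then release the
 * radix heads, any shadow table, the ruleset reference and the counters.
 */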
2004 static void
2005 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2006 {
2007 struct pfr_kentryworkq addrq;
2008 int pfr_dir, pfr_op;
2009
2010 if (flushaddr) {
2011 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2012 pfr_clean_node_mask(kt, &addrq);
2013 pfr_destroy_kentries(&addrq);
2014 }
2015 if (kt->pfrkt_ip4 != NULL)
2016 rn_detachhead((void **)&kt->pfrkt_ip4);
2017 if (kt->pfrkt_ip6 != NULL)
2018 rn_detachhead((void **)&kt->pfrkt_ip6);
2019 if (kt->pfrkt_shadow != NULL)
2020 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2021 if (kt->pfrkt_rs != NULL) {
2022 kt->pfrkt_rs->tables--;
2023 pf_remove_if_empty_kruleset(kt->pfrkt_rs);
2024 }
2025 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
2026 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
2027 pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
2028 pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
2029 }
2030 }
2031 pfr_kstate_counter_deinit(&kt->pfrkt_match);
2032 pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);
2033
2034 free(kt, M_PFTABLE);
2035 }
2036
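/* RB-tree ordering: by table name first, then by anchor path. */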
2037 static int
2038 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2039 {
2040 int d;
2041
2042 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2043 return (d);
2044 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2045 }
2046
2047 static struct pfr_ktable *
2048 pfr_lookup_table(struct pfr_table *tbl)
2049 {
2050 /* struct pfr_ktable starts like a struct pfr_table */
2051 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
2052 (struct pfr_ktable *)tbl));
2053 }
2054
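/*
 * Return 1 if address 'a' is covered by a non-negated entry of 'kt'
 * (falling back to the root table when 'kt' itself is inactive),
 * 0 otherwise; the table's match/nomatch counters are updated.
 */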
2055 int
2056 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2057 {
2058 struct pfr_kentry *ke = NULL;
2059 int match;
2060
2061 PF_RULES_RASSERT();
2062
2063 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2064 kt = kt->pfrkt_root;
2065 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2066 return (0);
2067
2068 switch (af) {
2069 #ifdef INET
2070 case AF_INET:
2071 {
2072 struct sockaddr_in sin;
2073
2074 bzero(&sin, sizeof(sin));
2075 sin.sin_len = sizeof(sin);
2076 sin.sin_family = AF_INET;
2077 sin.sin_addr.s_addr = a->addr32[0];
2078 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2079 if (ke && KENTRY_RNF_ROOT(ke))
2080 ke = NULL;
2081 break;
2082 }
2083 #endif /* INET */
2084 #ifdef INET6
2085 case AF_INET6:
2086 {
2087 struct sockaddr_in6 sin6;
2088
2089 bzero(&sin6, sizeof(sin6));
2090 sin6.sin6_len = sizeof(sin6);
2091 sin6.sin6_family = AF_INET6;
2092 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2093 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2094 if (ke && KENTRY_RNF_ROOT(ke))
2095 ke = NULL;
2096 break;
2097 }
2098 #endif /* INET6 */
2099 default:
2100 unhandled_af(af);
2101 }
2102 match = (ke && !ke->pfrke_not);
2103 if (match)
2104 pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2105 else
2106 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2107 return (match);
2108 }
2109
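/*
 * Account a packet of 'len' bytes against the table's packet/byte counters
 * and, when PFR_TFLAG_COUNTERS is set, against the matching entry's
 * counters.  A lookup result that contradicts 'notrule' is recorded under
 * PFR_OP_XPASS instead of the caller's op.
 */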
2110 void
2111 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2112 u_int64_t len, int dir_out, int op_pass, int notrule)
2113 {
2114 struct pfr_kentry *ke = NULL;
2115
2116 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2117 kt = kt->pfrkt_root;
2118 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2119 return;
2120
2121 switch (af) {
2122 #ifdef INET
2123 case AF_INET:
2124 {
2125 struct sockaddr_in sin;
2126
2127 bzero(&sin, sizeof(sin));
2128 sin.sin_len = sizeof(sin);
2129 sin.sin_family = AF_INET;
2130 sin.sin_addr.s_addr = a->addr32[0];
2131 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
2132 if (ke && KENTRY_RNF_ROOT(ke))
2133 ke = NULL;
2134 break;
2135 }
2136 #endif /* INET */
2137 #ifdef INET6
2138 case AF_INET6:
2139 {
2140 struct sockaddr_in6 sin6;
2141
2142 bzero(&sin6, sizeof(sin6));
2143 sin6.sin6_len = sizeof(sin6);
2144 sin6.sin6_family = AF_INET6;
2145 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
2146 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
2147 if (ke && KENTRY_RNF_ROOT(ke))
2148 ke = NULL;
2149 break;
2150 }
2151 #endif /* INET6 */
2152 default:
2153 unhandled_af(af);
2154 }
2155 if ((ke == NULL || ke->pfrke_not) != notrule) {
2156 if (op_pass != PFR_OP_PASS)
2157 DPFPRINTF(PF_DEBUG_URGENT,
2158 ("pfr_update_stats: assertion failed.\n"));
2159 op_pass = PFR_OP_XPASS;
2160 }
2161 pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
2162 pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
2163 if (ke != NULL && op_pass != PFR_OP_XPASS &&
2164 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
2165 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2166 dir_out, op_pass, PFR_TYPE_PACKETS), 1);
2167 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
2168 dir_out, op_pass, PFR_TYPE_BYTES), len);
2169 }
2170 }
2171
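/*
 * Ethernet-ruleset counterpart of pfr_attach_table() below: find or create
 * the named table (plus a root table in the main ruleset when inside an
 * anchor) and take a rule reference on it.
 */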
2172 struct pfr_ktable *
2173 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
2174 {
2175 struct pfr_ktable *kt, *rt;
2176 struct pfr_table tbl;
2177 struct pf_keth_anchor *ac = rs->anchor;
2178
2179 PF_RULES_WASSERT();
2180
2181 bzero(&tbl, sizeof(tbl));
2182 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2183 if (ac != NULL)
2184 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2185 kt = pfr_lookup_table(&tbl);
2186 if (kt == NULL) {
2187 kt = pfr_create_ktable(&tbl, time_second, 1);
2188 if (kt == NULL)
2189 return (NULL);
2190 if (ac != NULL) {
2191 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2192 rt = pfr_lookup_table(&tbl);
2193 if (rt == NULL) {
2194 rt = pfr_create_ktable(&tbl, 0, 1);
2195 if (rt == NULL) {
2196 pfr_destroy_ktable(kt, 0);
2197 return (NULL);
2198 }
2199 pfr_insert_ktable(rt);
2200 }
2201 kt->pfrkt_root = rt;
2202 }
2203 pfr_insert_ktable(kt);
2204 }
2205 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2206 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2207 return (kt);
2208 }
2209
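/*
 * Find or create the table 'name' on behalf of a rule in ruleset 'rs'.
 * A table inside an anchor also gets a root table at the top level.  The
 * first rule reference marks the table PFR_TFLAG_REFERENCED.
 */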
2210 struct pfr_ktable *
2211 pfr_attach_table(struct pf_kruleset *rs, char *name)
2212 {
2213 struct pfr_ktable *kt, *rt;
2214 struct pfr_table tbl;
2215 struct pf_kanchor *ac = rs->anchor;
2216
2217 PF_RULES_WASSERT();
2218
2219 bzero(&tbl, sizeof(tbl));
2220 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2221 if (ac != NULL)
2222 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2223 kt = pfr_lookup_table(&tbl);
2224 if (kt == NULL) {
2225 kt = pfr_create_ktable(&tbl, time_second, 1);
2226 if (kt == NULL)
2227 return (NULL);
2228 if (ac != NULL) {
2229 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2230 rt = pfr_lookup_table(&tbl);
2231 if (rt == NULL) {
2232 rt = pfr_create_ktable(&tbl, 0, 1);
2233 if (rt == NULL) {
2234 pfr_destroy_ktable(kt, 0);
2235 return (NULL);
2236 }
2237 pfr_insert_ktable(rt);
2238 }
2239 kt->pfrkt_root = rt;
2240 }
2241 pfr_insert_ktable(kt);
2242 }
2243 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
2244 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
2245 return (kt);
2246 }
2247
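/* Drop one rule reference; the last one clears PFR_TFLAG_REFERENCED. */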
2248 void
2249 pfr_detach_table(struct pfr_ktable *kt)
2250 {
2251
2252 PF_RULES_WASSERT();
2253 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
2254 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));
2255
2256 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2257 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2258 }
2259
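/*
 * Pick an address from the table for a table-backed address pool.
 * '*pidx' and '*counter' carry the iteration state (current block index
 * and last address handed out); nested blocks and addresses rejected by
 * 'filter' are skipped.  Returns 0 on success, 1 when no usable address
 * is found, -1 when the table is not active.
 */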
2260 int
2261 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2262 sa_family_t af, pf_addr_filter_func_t filter)
2263 {
2264 struct pf_addr *addr, cur, mask, umask_addr;
2265 union sockaddr_union uaddr, umask;
2266 struct pfr_kentry *ke, *ke2 = NULL;
2267 int startidx, idx = -1, loop = 0, use_counter = 0;
2268
2269 MPASS(pidx != NULL);
2270 MPASS(counter != NULL);
2271
2272 switch (af) {
2273 case AF_INET:
2274 uaddr.sin.sin_len = sizeof(struct sockaddr_in);
2275 uaddr.sin.sin_family = AF_INET;
2276 addr = (struct pf_addr *)&uaddr.sin.sin_addr;
2277 break;
2278 case AF_INET6:
2279 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
2280 uaddr.sin6.sin6_family = AF_INET6;
2281 addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
2282 break;
2283 default:
2284 unhandled_af(af);
2285 }
2286
2287 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2288 kt = kt->pfrkt_root;
2289 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2290 return (-1);
2291
2292 idx = *pidx;
2293 if (idx < 0 || idx >= kt->pfrkt_cnt)
2294 idx = 0;
2295 else if (counter != NULL)
2296 use_counter = 1;
2297 startidx = idx;
2298
2299 _next_block:
2300 if (loop && startidx == idx) {
2301 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2302 return (1);
2303 }
2304
2305 ke = pfr_kentry_byidx(kt, idx, af);
2306 if (ke == NULL) {
2307 /* we don't have this idx, try looping */
2308 if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2309 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
2310 return (1);
2311 }
2312 idx = 0;
2313 loop++;
2314 }
2315 pfr_prepare_network(&umask, af, ke->pfrke_net);
2316 pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
2317 pfr_sockaddr_to_pf_addr(&umask, &mask);
2318
2319 if (use_counter && !PF_AZERO(counter, af)) {
2320 /* is supplied address within block? */
2321 if (!PF_MATCHA(0, &cur, &mask, counter, af)) {
2322 /* no, go to next block in table */
2323 idx++;
2324 use_counter = 0;
2325 goto _next_block;
2326 }
2327 PF_ACPY(addr, counter, af);
2328 } else {
2329 /* use first address of block */
2330 PF_ACPY(addr, &cur, af);
2331 }
2332
2333 if (!KENTRY_NETWORK(ke)) {
2334 /* this is a single IP address - no possible nested block */
2335 if (filter && filter(af, addr)) {
2336 idx++;
2337 goto _next_block;
2338 }
2339 PF_ACPY(counter, addr, af);
2340 *pidx = idx;
2341 pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2342 return (0);
2343 }
2344 for (;;) {
2345 /* we don't want to use a nested block */
2346 switch (af) {
2347 case AF_INET:
2348 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2349 &kt->pfrkt_ip4->rh);
2350 break;
2351 case AF_INET6:
2352 ke2 = (struct pfr_kentry *)rn_match(&uaddr,
2353 &kt->pfrkt_ip6->rh);
2354 break;
2355 }
2356 /* no need to check KENTRY_RNF_ROOT() here */
2357 if (ke2 == ke) {
2358 /* lookup returned the same block - perfect */
2359 if (filter && filter(af, addr))
2360 goto _next_entry;
2361 PF_ACPY(counter, addr, af);
2362 *pidx = idx;
2363 pfr_kstate_counter_add(&kt->pfrkt_match, 1);
2364 return (0);
2365 }
2366
2367 _next_entry:
2368 /* we need to increase the counter past the nested block */
2369 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
2370 pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
2371 PF_POOLMASK(addr, addr, &umask_addr, &pfr_ffaddr, af);
2372 PF_AINC(addr, af);
2373 if (!PF_MATCHA(0, &cur, &mask, addr, af)) {
2374 /* ok, we reached the end of our main block */
2375 /* go to next block in table */
2376 idx++;
2377 use_counter = 0;
2378 goto _next_block;
2379 }
2380 }
2381 }
2382
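/* Return the idx-th entry of the given address family, or NULL. */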
2383 static struct pfr_kentry *
2384 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2385 {
2386 struct pfr_walktree w;
2387
2388 bzero(&w, sizeof(w));
2389 w.pfrw_op = PFRW_POOL_GET;
2390 w.pfrw_free = idx;
2391
2392 switch (af) {
2393 #ifdef INET
2394 case AF_INET:
2395 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2396 return (w.pfrw_kentry);
2397 #endif /* INET */
2398 #ifdef INET6
2399 case AF_INET6:
2400 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2401 return (w.pfrw_kentry);
2402 #endif /* INET6 */
2403 default:
2404 return (NULL);
2405 }
2406 }
2407
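/*
 * Refresh a dynamic address from the table: reset its v4/v6 address
 * counts and re-walk both radix trees with PFRW_DYNADDR_UPDATE.
 */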
2408 void
2409 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2410 {
2411 struct pfr_walktree w;
2412
2413 bzero(&w, sizeof(w));
2414 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2415 w.pfrw_dyn = dyn;
2416
2417 dyn->pfid_acnt4 = 0;
2418 dyn->pfid_acnt6 = 0;
2419 if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
2420 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
2421 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
2422 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
2423 }
2424