/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke)						\
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE,
		PFRW_COUNTERS
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw_addr;
		struct pfr_astats	*pfrw_astats;
		struct pfr_kentryworkq	*pfrw_workq;
		struct pfr_kentry	*pfrw_kentry;
		struct pfi_dynaddr	*pfrw_dyn;
	};
	int	 pfrw_free;
	int	 pfrw_flags;
};

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
#define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_astats(struct pfr_astats *,
			    const struct pfr_kentry *,
			    const struct pfr_walktree *);
static void		 pfr_copyout_addr(struct pfr_addr *,
			    const struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t, int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, time_t);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, time_t, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)

VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)

VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
#define	V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)

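/*
 * Set up the per-VNET UMA zones backing table entries: a PCPU zone for
 * the per-entry counter arrays and a regular zone for the entries
 * themselves, capped at PFR_KENTRY_HIWAT and exported through
 * V_pf_limits as the PF_LIMIT_TABLE_ENTRIES limit.
 */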
void
pfr_initialize(void)
{

	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kentry_counter_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n', the other is
	 * O(n*log(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
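	/*
	 * log is now roughly log2(pfrkt_cnt) + 1; e.g. for a table of
	 * 1024 entries log ends up at 12, so the full scan below is
	 * taken once more than 1024/12 = 85 addresses are deleted.
	 */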
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

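/*
 * Replace the contents of a table with the given address list:
 * existing entries are marked, unmarked ones are deleted, new ones
 * are added, and entries whose negation flag differs are toggled.
 */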
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	/*
	 * The flags below are for backward compatibility. It used to be
	 * possible to have a table without per-entry counters. Now they
	 * are always allocated; we just discard the data when reading it
	 * if the table is not configured to keep counters.
	 */
	w.pfrw_flags = kt->pfrkt_flags;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(kt, &workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(kt, &workq, time_second, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
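	/*
	 * Reject addresses with host bits set beyond the prefix length,
	 * and any nonzero bytes in the unused tail of the address union;
	 * the byte indexing below assumes pfra_u is the first member of
	 * struct pfr_addr.
	 */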
	if (ad->pfra_net < 128 &&
		(((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback != PFR_FB_NONE)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_free;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

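/*
 * Look up an address in the table's radix trie.  With 'exact' set, a
 * host lookup that lands on a covering network entry returns NULL
 * instead of that prefix.
 */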
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

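/*
 * Allocate a table entry from the UMA zone and fill it in from the
 * user-visible pfr_addr; a per-CPU counter array is attached only
 * when the owning table has PFR_TFLAG_COUNTERS set.
 */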
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, bool counters)
{
	struct pfr_kentry	*ke;
	counter_u64_t		 c;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_counters.pfrkc_tzero = 0;
	if (counters) {
		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
		if (c == NULL) {
			pfr_destroy_kentry(ke);
			return (NULL);
		}
		ke->pfrke_counters.pfrkc_counters = c;
	}
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	while ((p = SLIST_FIRST(workq)) != NULL) {
		SLIST_REMOVE_HEAD(workq, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	counter_u64_t c;

	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_counters.pfrkc_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_counters.pfrkc_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    time_t tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 i;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
			for (i = 0; i < PFR_NUM_COUNTERS; i++)
				counter_u64_zero(
				    p->pfrke_counters.pfrkc_counters + i);
		p->pfrke_counters.pfrkc_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

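/*
 * Build a sockaddr mask with the top 'net' bits set; e.g. for AF_INET
 * and net = 20 this yields htonl(-1 << 12), the 255.255.240.0 netmask.
 * For AF_INET6, whole 32-bit words are filled until fewer than 32
 * mask bits remain.
 */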
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	 i;

	bzero(sa, sizeof(*sa));
	switch (af) {
	case AF_INET:
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
		break;
	case AF_INET6:
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
		break;
	default:
		unhandled_af(af);
	}
}

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	switch (ke->pfrke_af) {
	case AF_INET:
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
}

static void
pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
    const struct pfr_walktree *w)
{
	int dir, op;
	const struct pfr_kcounters *kc = &ke->pfrke_counters;

	bzero(as, sizeof(*as));
	pfr_copyout_addr(&as->pfras_a, ke);
	as->pfras_tzero = kc->pfrkc_tzero;

	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
	    kc->pfrkc_counters == NULL) {
		bzero(as->pfras_packets, sizeof(as->pfras_packets));
		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
		return;
	}

	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
		for (op = 0; op < PFR_OP_ADDR_MAX; op++) {
			as->pfras_packets[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
			as->pfras_bytes[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
		}
	}
}

static void
pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
{
	switch (sa->sa.sa_family) {
	case AF_INET:
		memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
		break;
	case AF_INET6:
		memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
		break;
	default:
		unhandled_af(sa->sa.sa_family);
	}
}

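/*
 * Radix trie walker callback; the action applied to each entry is
 * selected by pfrw_op in the pfr_walktree argument.
 */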
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_free++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_astats(&as, ke, w);

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_free--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		switch (ke->pfrke_af) {
		case AF_INET:
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa,
			    &w->pfrw_dyn->pfid_addr4);
			pfr_sockaddr_to_pf_addr(&pfr_mask,
			    &w->pfrw_dyn->pfid_mask4);
			break;
		case AF_INET6:
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa,
			    &w->pfrw_dyn->pfid_addr6);
			pfr_sockaddr_to_pf_addr(&pfr_mask,
			    &w->pfrw_dyn->pfid_mask6);
			break;
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	    }
	case PFRW_COUNTERS:
	    {
		if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
			if (ke->pfrke_counters.pfrkc_counters != NULL)
				break;
			ke->pfrke_counters.pfrkc_counters =
			    uma_zalloc_pcpu(V_pfr_kentry_counter_z,
			    M_NOWAIT | M_ZERO);
		} else {
			uma_zfree_pcpu(V_pfr_kentry_counter_z,
			    ke->pfrke_counters.pfrkc_counters);
			ke->pfrke_counters.pfrkc_counters = NULL;
		}
		break;
	    }
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	time_t			 tzero = time_second;
	int			 pfr_dir, pfr_op;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
		    sizeof(struct pfr_table));
		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
				tbl->pfrts_packets[pfr_dir][pfr_op] =
				    pfr_kstate_counter_fetch(
					&p->pfrkt_packets[pfr_dir][pfr_op]);
				tbl->pfrts_bytes[pfr_dir][pfr_op] =
				    pfr_kstate_counter_fetch(
					&p->pfrkt_bytes[pfr_dir][pfr_op]);
			}
		}
		tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
		tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
		tbl->pfrts_tzero = p->pfrkt_tzero;
		tbl->pfrts_cnt = p->pfrkt_cnt;
		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
		tbl++;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_kruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_kruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

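/*
 * Load a table definition (and optionally its addresses) into the
 * inactive set for the given transaction ticket; the addresses are
 * staged in a shadow table that pfr_ina_commit() later folds in.
 */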
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_kruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_kruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad,
		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_kruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_kruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_kruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_kruleset	*rs;
	int			 xadd = 0, xchange = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_kruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_kruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

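/*
 * Fold a shadow table built by pfr_ina_define() into its active
 * counterpart: entries present in both keep their counters, entries
 * only in the shadow are inserted, unmarked active entries are
 * deleted, and if the table was not previously active the radix
 * heads are simply swapped.
 */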
static void
pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
{
	counter_u64_t		*pkc, *qkc;
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		while ((p = SLIST_FIRST(&addrq)) != NULL) {
			SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				pkc = &p->pfrke_counters.pfrkc_counters;
				qkc = &q->pfrke_counters.pfrkc_counters;
				if ((*pkc == NULL) != (*qkc == NULL))
					SWAP(counter_u64_t, *pkc, *qkc);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_counters.pfrkc_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_kruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (V_pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_kruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
	V_pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

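/*
 * Apply a new flag word to a table: a table left without any
 * PFR_TFLAG_SETMASK flag is removed from the tree and destroyed,
 * toggling PFR_TFLAG_COUNTERS walks both tries to attach or release
 * per-entry counter arrays, and deactivation flushes all addresses.
 */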
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
    struct pfr_kentryworkq addrq;
    struct pfr_walktree w;

    PF_RULES_WASSERT();

    if (!(newf & PFR_TFLAG_REFERENCED) &&
        !(newf & PFR_TFLAG_REFDANCHOR) &&
        !(newf & PFR_TFLAG_PERSIST))
        newf &= ~PFR_TFLAG_ACTIVE;
    if (!(newf & PFR_TFLAG_ACTIVE))
        newf &= ~PFR_TFLAG_USRMASK;
    if (!(newf & PFR_TFLAG_SETMASK)) {
        RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
        if (kt->pfrkt_root != NULL)
            if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
                pfr_setflags_ktable(kt->pfrkt_root,
                    kt->pfrkt_root->pfrkt_flags &
                    ~PFR_TFLAG_REFDANCHOR);
        pfr_destroy_ktable(kt, 1);
        V_pfr_ktable_cnt--;
        return;
    }
    if ((newf & PFR_TFLAG_COUNTERS) &&
        !(kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_COUNTERS;
        w.pfrw_flags |= PFR_TFLAG_COUNTERS;
        kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
    }
    if (!(newf & PFR_TFLAG_COUNTERS) &&
        (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
        bzero(&w, sizeof(w));
        w.pfrw_op = PFRW_COUNTERS;
        /* pfrw_flags stays zero: the walker frees each entry's counters. */
        kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
    }
    if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
        pfr_enqueue_addrs(kt, &addrq, NULL, 0);
        pfr_remove_kentries(kt, &addrq);
    }
    if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
        pfr_destroy_ktable(kt->pfrkt_shadow, 1);
        kt->pfrkt_shadow = NULL;
    }
    kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
{
    struct pfr_ktable *p;

    SLIST_FOREACH(p, workq, pfrkt_workq)
        pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
    struct pfr_kentryworkq addrq;
    int pfr_dir, pfr_op;

    MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());

    if (recurse) {
        pfr_enqueue_addrs(kt, &addrq, NULL, 0);
        pfr_clstats_kentries(kt, &addrq, tzero, 0);
    }
    for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
        for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
            pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
            pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
        }
    }
    pfr_kstate_counter_zero(&kt->pfrkt_match);
    pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
    kt->pfrkt_tzero = tzero;
}

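/*
 * Allocate and initialize a table: copy the descriptor, optionally
 * attach it to (creating, if needed) its ruleset, set up the
 * per-direction packet/byte counters, and initialize one radix head
 * each for IPv4 and IPv6, keyed on the bit offset of
 * sin_addr/sin6_addr.  Any allocation failure tears down the partial
 * table and returns NULL.
 */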
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
{
    struct pfr_ktable *kt;
    struct pf_kruleset *rs;
    int pfr_dir, pfr_op;

    PF_RULES_WASSERT();

    kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
    if (kt == NULL)
        return (NULL);
    kt->pfrkt_t = *tbl;

    if (attachruleset) {
        rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
        if (!rs) {
            pfr_destroy_ktable(kt, 0);
            return (NULL);
        }
        kt->pfrkt_rs = rs;
        rs->tables++;
    }

    for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
        for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
            if (pfr_kstate_counter_init(
                &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
                pfr_destroy_ktable(kt, 0);
                return (NULL);
            }
            if (pfr_kstate_counter_init(
                &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
                pfr_destroy_ktable(kt, 0);
                return (NULL);
            }
        }
    }
    if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
        pfr_destroy_ktable(kt, 0);
        return (NULL);
    }

    if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
        pfr_destroy_ktable(kt, 0);
        return (NULL);
    }

    if (!rn_inithead((void **)&kt->pfrkt_ip4,
        offsetof(struct sockaddr_in, sin_addr) * 8) ||
        !rn_inithead((void **)&kt->pfrkt_ip6,
        offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
        pfr_destroy_ktable(kt, 0);
        return (NULL);
    }
    kt->pfrkt_tzero = tzero;

    return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
    struct pfr_ktable *p;

    while ((p = SLIST_FIRST(workq)) != NULL) {
        SLIST_REMOVE_HEAD(workq, pfrkt_workq);
        pfr_destroy_ktable(p, flushaddr);
    }
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
    struct pfr_kentryworkq addrq;
    int pfr_dir, pfr_op;

    if (flushaddr) {
        pfr_enqueue_addrs(kt, &addrq, NULL, 0);
        pfr_clean_node_mask(kt, &addrq);
        pfr_destroy_kentries(&addrq);
    }
    if (kt->pfrkt_ip4 != NULL)
        rn_detachhead((void **)&kt->pfrkt_ip4);
    if (kt->pfrkt_ip6 != NULL)
        rn_detachhead((void **)&kt->pfrkt_ip6);
    if (kt->pfrkt_shadow != NULL)
        pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
    if (kt->pfrkt_rs != NULL) {
        kt->pfrkt_rs->tables--;
        pf_remove_if_empty_kruleset(kt->pfrkt_rs);
    }
    for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
        for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
            pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
            pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
        }
    }
    pfr_kstate_counter_deinit(&kt->pfrkt_match);
    pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);

    free(kt, M_PFTABLE);
}

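/*
 * RB-tree comparator: order tables by name first, then by anchor
 * path, so a (name, anchor) pair uniquely identifies a table.
 */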
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
    int d;

    if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
        return (d);
    return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
    /* a struct pfr_ktable starts like a struct pfr_table */
    return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
        (struct pfr_ktable *)tbl));
}

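/*
 * Longest-prefix lookup of an address in a table, after redirecting
 * to the active root table if necessary.  With 'exact' set, network
 * (non-host) entries are rejected, so only a full-width match counts.
 */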
static struct pfr_kentry *
pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    int exact)
{
    struct pfr_kentry *ke = NULL;

    PF_RULES_RASSERT();

    kt = pfr_ktable_select_active(kt);
    if (kt == NULL)
        return (NULL);

    switch (af) {
#ifdef INET
    case AF_INET:
    {
        struct sockaddr_in sin;

        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = a->addr32[0];
        ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6:
    {
        struct sockaddr_in6 sin6;

        bzero(&sin6, sizeof(sin6));
        sin6.sin6_len = sizeof(sin6);
        sin6.sin6_family = AF_INET6;
        bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
        ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
    }
#endif /* INET6 */
    default:
        unhandled_af(af);
    }
    if (exact && ke && KENTRY_NETWORK(ke))
        ke = NULL;

    return (ke);
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
    struct pfr_kentry *ke = NULL;
    int match;

    ke = pfr_kentry_byaddr(kt, a, af, 0);

    match = (ke && !ke->pfrke_not);
    if (match)
        pfr_kstate_counter_add(&kt->pfrkt_match, 1);
    else
        pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);

    return (match);
}

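/*
 * Account one packet of 'len' bytes against a table.  When the
 * lookup result disagrees with what the rule expected (notrule), the
 * packet is booked under PFR_OP_XPASS instead and per-entry counters
 * are skipped.  Per-entry packet/byte counters are maintained only
 * for tables flagged PFR_TFLAG_COUNTERS.
 */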
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
    struct pfr_kentry *ke = NULL;

    kt = pfr_ktable_select_active(kt);
    if (kt == NULL)
        return;

    switch (af) {
#ifdef INET
    case AF_INET:
    {
        struct sockaddr_in sin;

        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = a->addr32[0];
        ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
    }
#endif /* INET */
#ifdef INET6
    case AF_INET6:
    {
        struct sockaddr_in6 sin6;

        bzero(&sin6, sizeof(sin6));
        sin6.sin6_len = sizeof(sin6);
        sin6.sin6_family = AF_INET6;
        bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
        ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
    }
#endif /* INET6 */
    default:
        unhandled_af(af);
    }
    if ((ke == NULL || ke->pfrke_not) != notrule) {
        if (op_pass != PFR_OP_PASS)
            DPFPRINTF(PF_DEBUG_URGENT,
                "pfr_update_stats: assertion failed.");
        op_pass = PFR_OP_XPASS;
    }
    pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
    pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
    if (ke != NULL && op_pass != PFR_OP_XPASS &&
        (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
        counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
            dir_out, op_pass, PFR_TYPE_PACKETS), 1);
        counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
            dir_out, op_pass, PFR_TYPE_BYTES), len);
    }
}

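/*
 * Find or create the table 'name' for a ruleset and take a rule
 * reference on it.  Inside an anchor, the table is also linked to a
 * same-named root table in the main ruleset, created on demand.
 * pfr_eth_attach_table() is the Ethernet-rules twin of
 * pfr_attach_table() below.
 */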
struct pfr_ktable *
pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
{
    struct pfr_ktable *kt, *rt;
    struct pfr_table tbl;
    struct pf_keth_anchor *ac = rs->anchor;

    PF_RULES_WASSERT();

    bzero(&tbl, sizeof(tbl));
    strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
    if (ac != NULL)
        strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
    kt = pfr_lookup_table(&tbl);
    if (kt == NULL) {
        kt = pfr_create_ktable(&tbl, time_second, 1);
        if (kt == NULL)
            return (NULL);
        if (ac != NULL) {
            bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
            rt = pfr_lookup_table(&tbl);
            if (rt == NULL) {
                rt = pfr_create_ktable(&tbl, 0, 1);
                if (rt == NULL) {
                    pfr_destroy_ktable(kt, 0);
                    return (NULL);
                }
                pfr_insert_ktable(rt);
            }
            kt->pfrkt_root = rt;
        }
        pfr_insert_ktable(kt);
    }
    if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
        pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
    return (kt);
}

struct pfr_ktable *
pfr_attach_table(struct pf_kruleset *rs, char *name)
{
    struct pfr_ktable *kt, *rt;
    struct pfr_table tbl;
    struct pf_kanchor *ac = rs->anchor;

    PF_RULES_WASSERT();

    bzero(&tbl, sizeof(tbl));
    strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
    if (ac != NULL)
        strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
    kt = pfr_lookup_table(&tbl);
    if (kt == NULL) {
        kt = pfr_create_ktable(&tbl, time_second, 1);
        if (kt == NULL)
            return (NULL);
        if (ac != NULL) {
            bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
            rt = pfr_lookup_table(&tbl);
            if (rt == NULL) {
                rt = pfr_create_ktable(&tbl, 0, 1);
                if (rt == NULL) {
                    pfr_destroy_ktable(kt, 0);
                    return (NULL);
                }
                pfr_insert_ktable(rt);
            }
            kt->pfrkt_root = rt;
        }
        pfr_insert_ktable(kt);
    }
    if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
        pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
    return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

    PF_RULES_WASSERT();
    KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
        __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

    if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
        pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

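/*
 * Round-robin address selection for address pools.  *pidx names the
 * block to resume from and 'counter' the last address handed out;
 * the walk continues from there, steps over nested (more specific)
 * blocks and addresses rejected by 'filter', and wraps to index 0 at
 * the end of the table (at most once when loop_once is set).
 * Returns 0 with *pidx and counter updated, 1 when the table is
 * exhausted, and -1 when no active table exists.
 *
 * Each successive call hands out the next address.  A caller might
 * look like this (a sketch only; use() is a made-up consumer):
 *
 *      int idx = 0;
 *      struct pf_addr ctr;
 *
 *      bzero(&ctr, sizeof(ctr));
 *      if (pfr_pool_get(kt, &idx, &ctr, AF_INET, NULL, false) == 0)
 *              use(&ctr);      -- ctr now holds the next pool address
 */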
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af, pf_addr_filter_func_t filter, bool loop_once)
{
    struct pf_addr *addr, cur, mask, umask_addr;
    union sockaddr_union uaddr, umask;
    struct pfr_kentry *ke, *ke2 = NULL;
    int startidx, idx = -1, loop = 0, use_counter = 0;

    MPASS(pidx != NULL);
    MPASS(counter != NULL);

    switch (af) {
    case AF_INET:
        uaddr.sin.sin_len = sizeof(struct sockaddr_in);
        uaddr.sin.sin_family = AF_INET;
        addr = (struct pf_addr *)&uaddr.sin.sin_addr;
        break;
    case AF_INET6:
        uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
        uaddr.sin6.sin6_family = AF_INET6;
        addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
        break;
    default:
        unhandled_af(af);
    }

    kt = pfr_ktable_select_active(kt);
    if (kt == NULL)
        return (-1);

    idx = *pidx;
    if (idx < 0 || idx >= kt->pfrkt_cnt)
        idx = 0;
    else if (counter != NULL)
        use_counter = 1;
    startidx = idx;

_next_block:
    if (loop && startidx == idx) {
        pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
        return (1);
    }

    ke = pfr_kentry_byidx(kt, idx, af);
    if (ke == NULL) {
        /* we don't have this idx, try looping */
        if ((loop || loop_once) ||
            (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
            pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
            return (1);
        }
        idx = 0;
        loop++;
    }
    pfr_prepare_network(&umask, af, ke->pfrke_net);
    pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
    pfr_sockaddr_to_pf_addr(&umask, &mask);

    if (use_counter && !PF_AZERO(counter, af)) {
        /* is supplied address within block? */
        if (!pf_match_addr(0, &cur, &mask, counter, af)) {
            /* no, go to next block in table */
            idx++;
            use_counter = 0;
            goto _next_block;
        }
        pf_addrcpy(addr, counter, af);
    } else {
        /* use first address of block */
        pf_addrcpy(addr, &cur, af);
    }

    if (!KENTRY_NETWORK(ke)) {
        /* this is a single IP address - no possible nested block */
        if (filter && filter(af, addr)) {
            idx++;
            goto _next_block;
        }
        pf_addrcpy(counter, addr, af);
        *pidx = idx;
        pfr_kstate_counter_add(&kt->pfrkt_match, 1);
        return (0);
    }
    for (;;) {
        /* we don't want to use a nested block */
        switch (af) {
        case AF_INET:
            ke2 = (struct pfr_kentry *)rn_match(&uaddr,
                &kt->pfrkt_ip4->rh);
            break;
        case AF_INET6:
            ke2 = (struct pfr_kentry *)rn_match(&uaddr,
                &kt->pfrkt_ip6->rh);
            break;
        default:
            unhandled_af(af);
        }
        /* no need to check KENTRY_RNF_ROOT() here */
        if (ke2 == ke) {
            /* lookup returned the same block - perfect */
            if (filter && filter(af, addr))
                goto _next_entry;
            pf_addrcpy(counter, addr, af);
            *pidx = idx;
            pfr_kstate_counter_add(&kt->pfrkt_match, 1);
            return (0);
        }

_next_entry:
        /* we need to increase the counter past the nested block */
        pfr_prepare_network(&umask, af, ke2->pfrke_net);
        pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
        pf_poolmask(addr, addr, &umask_addr, &pfr_ffaddr, af);
        pf_addr_inc(addr, af);
        if (!pf_match_addr(0, &cur, &mask, addr, af)) {
            /* ok, we reached the end of our main block */
            /* go to next block in table */
            idx++;
            use_counter = 0;
            goto _next_block;
        }
    }
}

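/*
 * Return the idx'th entry (in radix-walk order) of the given address
 * family, or NULL if the table has fewer entries.  The walker counts
 * pfrw_free down to zero to find it.
 */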
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
    struct pfr_walktree w;

    bzero(&w, sizeof(w));
    w.pfrw_op = PFRW_POOL_GET;
    w.pfrw_free = idx;

    switch (af) {
#ifdef INET
    case AF_INET:
        kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
    case AF_INET6:
        kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
        return (w.pfrw_kentry);
#endif /* INET6 */
    default:
        return (NULL);
    }
}

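/*
 * Recompute a dynaddr's view of a table: reset the per-family
 * address counts and re-walk the radix tree(s) for the dynaddr's
 * address family (both trees for AF_UNSPEC).
 */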
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
    struct pfr_walktree w;

    bzero(&w, sizeof(w));
    w.pfrw_op = PFRW_DYNADDR_UPDATE;
    w.pfrw_dyn = dyn;

    dyn->pfid_acnt4 = 0;
    dyn->pfid_acnt6 = 0;
    switch (dyn->pfid_af) {
    case AF_UNSPEC: /* look up both IPv4 and IPv6 addresses */
        kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
        break;
    case AF_INET:
        kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
        break;
    case AF_INET6:
        kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
        break;
    default:
        unhandled_af(dyn->pfid_af);
    }
}

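/*
 * Resolve a table reference to the table that should actually be
 * consulted: an inactive table defers to its root (main-ruleset)
 * table, and NULL is returned when neither is active.
 */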
struct pfr_ktable *
pfr_ktable_select_active(struct pfr_ktable *kt)
{
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
        kt = kt->pfrkt_root;
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
        return (NULL);

    return (kt);
}
