1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2002 Cedric Berger 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * - Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $ 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 40 #include <sys/param.h> 41 #include <sys/kernel.h> 42 #include <sys/lock.h> 43 #include <sys/malloc.h> 44 #include <sys/mbuf.h> 45 #include <sys/mutex.h> 46 #include <sys/refcount.h> 47 #include <sys/socket.h> 48 #include <vm/uma.h> 49 50 #include <net/if.h> 51 #include <net/vnet.h> 52 #include <net/pfvar.h> 53 54 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 55 56 #define ACCEPT_FLAGS(flags, oklist) \ 57 do { \ 58 if ((flags & ~(oklist)) & \ 59 PFR_FLAG_ALLMASK) \ 60 return (EINVAL); \ 61 } while (0) 62 63 #define FILLIN_SIN(sin, addr) \ 64 do { \ 65 (sin).sin_len = sizeof(sin); \ 66 (sin).sin_family = AF_INET; \ 67 (sin).sin_addr = (addr); \ 68 } while (0) 69 70 #define FILLIN_SIN6(sin6, addr) \ 71 do { \ 72 (sin6).sin6_len = sizeof(sin6); \ 73 (sin6).sin6_family = AF_INET6; \ 74 (sin6).sin6_addr = (addr); \ 75 } while (0) 76 77 #define SWAP(type, a1, a2) \ 78 do { \ 79 type tmp = a1; \ 80 a1 = a2; \ 81 a2 = tmp; \ 82 } while (0) 83 84 #define SUNION2PF(su, af) (((af)==AF_INET) ? \ 85 (struct pf_addr *)&(su)->sin.sin_addr : \ 86 (struct pf_addr *)&(su)->sin6.sin6_addr) 87 88 #define AF_BITS(af) (((af)==AF_INET)?32:128) 89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 91 #define KENTRY_RNF_ROOT(ke) \ 92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 93 94 #define NO_ADDRESSES (-1) 95 #define ENQUEUE_UNMARKED_ONLY (1) 96 #define INVERT_NEG_FLAG (1) 97 98 struct pfr_walktree { 99 enum pfrw_op { 100 PFRW_MARK, 101 PFRW_SWEEP, 102 PFRW_ENQUEUE, 103 PFRW_GET_ADDRS, 104 PFRW_GET_ASTATS, 105 PFRW_POOL_GET, 106 PFRW_DYNADDR_UPDATE, 107 PFRW_COUNTERS 108 } pfrw_op; 109 union { 110 struct pfr_addr *pfrw_addr; 111 struct pfr_astats *pfrw_astats; 112 struct pfr_kentryworkq *pfrw_workq; 113 struct pfr_kentry *pfrw_kentry; 114 struct pfi_dynaddr *pfrw_dyn; 115 }; 116 int pfrw_free; 117 int pfrw_flags; 118 }; 119 120 #define senderr(e) do { rv = (e); goto _bad; } while (0) 121 122 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures"); 123 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z); 124 #define V_pfr_kentry_z VNET(pfr_kentry_z) 125 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z); 126 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z) 127 128 static struct pf_addr pfr_ffaddr = { 129 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } 130 }; 131 132 static void pfr_copyout_astats(struct pfr_astats *, 133 const struct pfr_kentry *, 134 const struct pfr_walktree *); 135 static void pfr_copyout_addr(struct pfr_addr *, 136 const struct pfr_kentry *ke); 137 static int pfr_validate_addr(struct pfr_addr *); 138 static void pfr_enqueue_addrs(struct pfr_ktable *, 139 struct pfr_kentryworkq *, int *, int); 140 static void pfr_mark_addrs(struct pfr_ktable *); 141 static struct pfr_kentry 142 *pfr_lookup_addr(struct pfr_ktable *, 143 struct pfr_addr *, int); 144 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool); 145 static void pfr_destroy_kentries(struct pfr_kentryworkq *); 146 static void pfr_destroy_kentry(struct pfr_kentry *); 147 static void pfr_insert_kentries(struct pfr_ktable *, 148 struct pfr_kentryworkq *, long); 149 static void pfr_remove_kentries(struct pfr_ktable *, 150 struct pfr_kentryworkq *); 151 static void pfr_clstats_kentries(struct pfr_ktable *, 152 struct pfr_kentryworkq *, long, int); 153 static void pfr_reset_feedback(struct pfr_addr *, int); 154 static void pfr_prepare_network(union sockaddr_union *, int, int); 155 static int pfr_route_kentry(struct pfr_ktable *, 156 struct pfr_kentry *); 157 static int pfr_unroute_kentry(struct pfr_ktable *, 158 struct pfr_kentry *); 159 static int pfr_walktree(struct radix_node *, void *); 160 static int pfr_validate_table(struct pfr_table *, int, int); 161 static int pfr_fix_anchor(char *); 162 static void pfr_commit_ktable(struct pfr_ktable *, long); 163 static void pfr_insert_ktables(struct pfr_ktableworkq *); 164 static void pfr_insert_ktable(struct pfr_ktable *); 165 static void pfr_setflags_ktables(struct pfr_ktableworkq *); 166 static void pfr_setflags_ktable(struct pfr_ktable *, int); 167 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 168 int); 169 static void pfr_clstats_ktable(struct pfr_ktable *, long, int); 170 static struct pfr_ktable 171 *pfr_create_ktable(struct pfr_table *, long, int); 172 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 173 static void pfr_destroy_ktable(struct pfr_ktable *, int); 174 static int pfr_ktable_compare(struct pfr_ktable *, 175 struct pfr_ktable *); 176 static struct pfr_ktable 177 *pfr_lookup_table(struct pfr_table *); 178 static void pfr_clean_node_mask(struct pfr_ktable *, 179 struct pfr_kentryworkq *); 180 static int pfr_skip_table(struct pfr_table *, 181 struct pfr_ktable *, int); 182 static struct pfr_kentry 183 *pfr_kentry_byidx(struct pfr_ktable *, int, int); 184 185 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 186 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 187 188 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables); 189 #define V_pfr_ktables VNET(pfr_ktables) 190 191 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable); 192 #define V_pfr_nulltable VNET(pfr_nulltable) 193 194 VNET_DEFINE_STATIC(int, pfr_ktable_cnt); 195 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt) 196 197 void 198 pfr_initialize(void) 199 { 200 201 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters", 202 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL, 203 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 204 V_pfr_kentry_z = uma_zcreate("pf table entries", 205 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 206 0); 207 uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT); 208 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z; 209 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; 210 } 211 212 void 213 pfr_cleanup(void) 214 { 215 216 uma_zdestroy(V_pfr_kentry_z); 217 uma_zdestroy(V_pfr_kentry_counter_z); 218 } 219 220 int 221 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 222 { 223 struct pfr_ktable *kt; 224 struct pfr_kentryworkq workq; 225 226 PF_RULES_WASSERT(); 227 228 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 229 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 230 return (EINVAL); 231 kt = pfr_lookup_table(tbl); 232 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 233 return (ESRCH); 234 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 235 return (EPERM); 236 pfr_enqueue_addrs(kt, &workq, ndel, 0); 237 238 if (!(flags & PFR_FLAG_DUMMY)) { 239 pfr_remove_kentries(kt, &workq); 240 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__)); 241 } 242 return (0); 243 } 244 245 int 246 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 247 int *nadd, int flags) 248 { 249 struct pfr_ktable *kt, *tmpkt; 250 struct pfr_kentryworkq workq; 251 struct pfr_kentry *p, *q; 252 struct pfr_addr *ad; 253 int i, rv, xadd = 0; 254 long tzero = time_second; 255 256 PF_RULES_WASSERT(); 257 258 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 259 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 260 return (EINVAL); 261 kt = pfr_lookup_table(tbl); 262 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 263 return (ESRCH); 264 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 265 return (EPERM); 266 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 267 if (tmpkt == NULL) 268 return (ENOMEM); 269 SLIST_INIT(&workq); 270 for (i = 0, ad = addr; i < size; i++, ad++) { 271 if (pfr_validate_addr(ad)) 272 senderr(EINVAL); 273 p = pfr_lookup_addr(kt, ad, 1); 274 q = pfr_lookup_addr(tmpkt, ad, 1); 275 if (flags & PFR_FLAG_FEEDBACK) { 276 if (q != NULL) 277 ad->pfra_fback = PFR_FB_DUPLICATE; 278 else if (p == NULL) 279 ad->pfra_fback = PFR_FB_ADDED; 280 else if (p->pfrke_not != ad->pfra_not) 281 ad->pfra_fback = PFR_FB_CONFLICT; 282 else 283 ad->pfra_fback = PFR_FB_NONE; 284 } 285 if (p == NULL && q == NULL) { 286 p = pfr_create_kentry(ad, 287 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 288 if (p == NULL) 289 senderr(ENOMEM); 290 if (pfr_route_kentry(tmpkt, p)) { 291 pfr_destroy_kentry(p); 292 ad->pfra_fback = PFR_FB_NONE; 293 } else { 294 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 295 xadd++; 296 } 297 } 298 } 299 pfr_clean_node_mask(tmpkt, &workq); 300 if (!(flags & PFR_FLAG_DUMMY)) 301 pfr_insert_kentries(kt, &workq, tzero); 302 else 303 pfr_destroy_kentries(&workq); 304 if (nadd != NULL) 305 *nadd = xadd; 306 pfr_destroy_ktable(tmpkt, 0); 307 return (0); 308 _bad: 309 pfr_clean_node_mask(tmpkt, &workq); 310 pfr_destroy_kentries(&workq); 311 if (flags & PFR_FLAG_FEEDBACK) 312 pfr_reset_feedback(addr, size); 313 pfr_destroy_ktable(tmpkt, 0); 314 return (rv); 315 } 316 317 int 318 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 319 int *ndel, int flags) 320 { 321 struct pfr_ktable *kt; 322 struct pfr_kentryworkq workq; 323 struct pfr_kentry *p; 324 struct pfr_addr *ad; 325 int i, rv, xdel = 0, log = 1; 326 327 PF_RULES_WASSERT(); 328 329 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 330 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 331 return (EINVAL); 332 kt = pfr_lookup_table(tbl); 333 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 334 return (ESRCH); 335 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 336 return (EPERM); 337 /* 338 * there are two algorithms to choose from here. 339 * with: 340 * n: number of addresses to delete 341 * N: number of addresses in the table 342 * 343 * one is O(N) and is better for large 'n' 344 * one is O(n*LOG(N)) and is better for small 'n' 345 * 346 * following code try to decide which one is best. 347 */ 348 for (i = kt->pfrkt_cnt; i > 0; i >>= 1) 349 log++; 350 if (size > kt->pfrkt_cnt/log) { 351 /* full table scan */ 352 pfr_mark_addrs(kt); 353 } else { 354 /* iterate over addresses to delete */ 355 for (i = 0, ad = addr; i < size; i++, ad++) { 356 if (pfr_validate_addr(ad)) 357 return (EINVAL); 358 p = pfr_lookup_addr(kt, ad, 1); 359 if (p != NULL) 360 p->pfrke_mark = 0; 361 } 362 } 363 SLIST_INIT(&workq); 364 for (i = 0, ad = addr; i < size; i++, ad++) { 365 if (pfr_validate_addr(ad)) 366 senderr(EINVAL); 367 p = pfr_lookup_addr(kt, ad, 1); 368 if (flags & PFR_FLAG_FEEDBACK) { 369 if (p == NULL) 370 ad->pfra_fback = PFR_FB_NONE; 371 else if (p->pfrke_not != ad->pfra_not) 372 ad->pfra_fback = PFR_FB_CONFLICT; 373 else if (p->pfrke_mark) 374 ad->pfra_fback = PFR_FB_DUPLICATE; 375 else 376 ad->pfra_fback = PFR_FB_DELETED; 377 } 378 if (p != NULL && p->pfrke_not == ad->pfra_not && 379 !p->pfrke_mark) { 380 p->pfrke_mark = 1; 381 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 382 xdel++; 383 } 384 } 385 if (!(flags & PFR_FLAG_DUMMY)) 386 pfr_remove_kentries(kt, &workq); 387 if (ndel != NULL) 388 *ndel = xdel; 389 return (0); 390 _bad: 391 if (flags & PFR_FLAG_FEEDBACK) 392 pfr_reset_feedback(addr, size); 393 return (rv); 394 } 395 396 int 397 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 398 int *size2, int *nadd, int *ndel, int *nchange, int flags, 399 u_int32_t ignore_pfrt_flags) 400 { 401 struct pfr_ktable *kt, *tmpkt; 402 struct pfr_kentryworkq addq, delq, changeq; 403 struct pfr_kentry *p, *q; 404 struct pfr_addr ad; 405 int i, rv, xadd = 0, xdel = 0, xchange = 0; 406 long tzero = time_second; 407 408 PF_RULES_WASSERT(); 409 410 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 411 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags & 412 PFR_FLAG_USERIOCTL)) 413 return (EINVAL); 414 kt = pfr_lookup_table(tbl); 415 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 416 return (ESRCH); 417 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 418 return (EPERM); 419 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 420 if (tmpkt == NULL) 421 return (ENOMEM); 422 pfr_mark_addrs(kt); 423 SLIST_INIT(&addq); 424 SLIST_INIT(&delq); 425 SLIST_INIT(&changeq); 426 for (i = 0; i < size; i++) { 427 /* 428 * XXXGL: undertand pf_if usage of this function 429 * and make ad a moving pointer 430 */ 431 bcopy(addr + i, &ad, sizeof(ad)); 432 if (pfr_validate_addr(&ad)) 433 senderr(EINVAL); 434 ad.pfra_fback = PFR_FB_NONE; 435 p = pfr_lookup_addr(kt, &ad, 1); 436 if (p != NULL) { 437 if (p->pfrke_mark) { 438 ad.pfra_fback = PFR_FB_DUPLICATE; 439 goto _skip; 440 } 441 p->pfrke_mark = 1; 442 if (p->pfrke_not != ad.pfra_not) { 443 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq); 444 ad.pfra_fback = PFR_FB_CHANGED; 445 xchange++; 446 } 447 } else { 448 q = pfr_lookup_addr(tmpkt, &ad, 1); 449 if (q != NULL) { 450 ad.pfra_fback = PFR_FB_DUPLICATE; 451 goto _skip; 452 } 453 p = pfr_create_kentry(&ad, 454 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 455 if (p == NULL) 456 senderr(ENOMEM); 457 if (pfr_route_kentry(tmpkt, p)) { 458 pfr_destroy_kentry(p); 459 ad.pfra_fback = PFR_FB_NONE; 460 } else { 461 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 462 ad.pfra_fback = PFR_FB_ADDED; 463 xadd++; 464 } 465 } 466 _skip: 467 if (flags & PFR_FLAG_FEEDBACK) 468 bcopy(&ad, addr + i, sizeof(ad)); 469 } 470 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); 471 if ((flags & PFR_FLAG_FEEDBACK) && *size2) { 472 if (*size2 < size+xdel) { 473 *size2 = size+xdel; 474 senderr(0); 475 } 476 i = 0; 477 SLIST_FOREACH(p, &delq, pfrke_workq) { 478 pfr_copyout_addr(&ad, p); 479 ad.pfra_fback = PFR_FB_DELETED; 480 bcopy(&ad, addr + size + i, sizeof(ad)); 481 i++; 482 } 483 } 484 pfr_clean_node_mask(tmpkt, &addq); 485 if (!(flags & PFR_FLAG_DUMMY)) { 486 pfr_insert_kentries(kt, &addq, tzero); 487 pfr_remove_kentries(kt, &delq); 488 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 489 } else 490 pfr_destroy_kentries(&addq); 491 if (nadd != NULL) 492 *nadd = xadd; 493 if (ndel != NULL) 494 *ndel = xdel; 495 if (nchange != NULL) 496 *nchange = xchange; 497 if ((flags & PFR_FLAG_FEEDBACK) && size2) 498 *size2 = size+xdel; 499 pfr_destroy_ktable(tmpkt, 0); 500 return (0); 501 _bad: 502 pfr_clean_node_mask(tmpkt, &addq); 503 pfr_destroy_kentries(&addq); 504 if (flags & PFR_FLAG_FEEDBACK) 505 pfr_reset_feedback(addr, size); 506 pfr_destroy_ktable(tmpkt, 0); 507 return (rv); 508 } 509 510 int 511 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 512 int *nmatch, int flags) 513 { 514 struct pfr_ktable *kt; 515 struct pfr_kentry *p; 516 struct pfr_addr *ad; 517 int i, xmatch = 0; 518 519 PF_RULES_RASSERT(); 520 521 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE); 522 if (pfr_validate_table(tbl, 0, 0)) 523 return (EINVAL); 524 kt = pfr_lookup_table(tbl); 525 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 526 return (ESRCH); 527 528 for (i = 0, ad = addr; i < size; i++, ad++) { 529 if (pfr_validate_addr(ad)) 530 return (EINVAL); 531 if (ADDR_NETWORK(ad)) 532 return (EINVAL); 533 p = pfr_lookup_addr(kt, ad, 0); 534 if (flags & PFR_FLAG_REPLACE) 535 pfr_copyout_addr(ad, p); 536 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE : 537 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); 538 if (p != NULL && !p->pfrke_not) 539 xmatch++; 540 } 541 if (nmatch != NULL) 542 *nmatch = xmatch; 543 return (0); 544 } 545 546 int 547 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 548 int flags) 549 { 550 struct pfr_ktable *kt; 551 struct pfr_walktree w; 552 int rv; 553 554 PF_RULES_RASSERT(); 555 556 ACCEPT_FLAGS(flags, 0); 557 if (pfr_validate_table(tbl, 0, 0)) 558 return (EINVAL); 559 kt = pfr_lookup_table(tbl); 560 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 561 return (ESRCH); 562 if (kt->pfrkt_cnt > *size) { 563 *size = kt->pfrkt_cnt; 564 return (0); 565 } 566 567 bzero(&w, sizeof(w)); 568 w.pfrw_op = PFRW_GET_ADDRS; 569 w.pfrw_addr = addr; 570 w.pfrw_free = kt->pfrkt_cnt; 571 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 572 if (!rv) 573 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 574 pfr_walktree, &w); 575 if (rv) 576 return (rv); 577 578 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__, 579 w.pfrw_free)); 580 581 *size = kt->pfrkt_cnt; 582 return (0); 583 } 584 585 int 586 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 587 int flags) 588 { 589 struct pfr_ktable *kt; 590 struct pfr_walktree w; 591 struct pfr_kentryworkq workq; 592 int rv; 593 long tzero = time_second; 594 595 PF_RULES_RASSERT(); 596 597 /* XXX PFR_FLAG_CLSTATS disabled */ 598 ACCEPT_FLAGS(flags, 0); 599 if (pfr_validate_table(tbl, 0, 0)) 600 return (EINVAL); 601 kt = pfr_lookup_table(tbl); 602 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 603 return (ESRCH); 604 if (kt->pfrkt_cnt > *size) { 605 *size = kt->pfrkt_cnt; 606 return (0); 607 } 608 609 bzero(&w, sizeof(w)); 610 w.pfrw_op = PFRW_GET_ASTATS; 611 w.pfrw_astats = addr; 612 w.pfrw_free = kt->pfrkt_cnt; 613 /* 614 * Flags below are for backward compatibility. It was possible to have 615 * a table without per-entry counters. Now they are always allocated, 616 * we just discard data when reading it if table is not configured to 617 * have counters. 618 */ 619 w.pfrw_flags = kt->pfrkt_flags; 620 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 621 if (!rv) 622 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 623 pfr_walktree, &w); 624 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 625 pfr_enqueue_addrs(kt, &workq, NULL, 0); 626 pfr_clstats_kentries(kt, &workq, tzero, 0); 627 } 628 if (rv) 629 return (rv); 630 631 if (w.pfrw_free) { 632 printf("pfr_get_astats: corruption detected (%d).\n", 633 w.pfrw_free); 634 return (ENOTTY); 635 } 636 *size = kt->pfrkt_cnt; 637 return (0); 638 } 639 640 int 641 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 642 int *nzero, int flags) 643 { 644 struct pfr_ktable *kt; 645 struct pfr_kentryworkq workq; 646 struct pfr_kentry *p; 647 struct pfr_addr *ad; 648 int i, rv, xzero = 0; 649 650 PF_RULES_WASSERT(); 651 652 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 653 if (pfr_validate_table(tbl, 0, 0)) 654 return (EINVAL); 655 kt = pfr_lookup_table(tbl); 656 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 657 return (ESRCH); 658 SLIST_INIT(&workq); 659 for (i = 0, ad = addr; i < size; i++, ad++) { 660 if (pfr_validate_addr(ad)) 661 senderr(EINVAL); 662 p = pfr_lookup_addr(kt, ad, 1); 663 if (flags & PFR_FLAG_FEEDBACK) { 664 ad->pfra_fback = (p != NULL) ? 665 PFR_FB_CLEARED : PFR_FB_NONE; 666 } 667 if (p != NULL) { 668 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 669 xzero++; 670 } 671 } 672 673 if (!(flags & PFR_FLAG_DUMMY)) 674 pfr_clstats_kentries(kt, &workq, 0, 0); 675 if (nzero != NULL) 676 *nzero = xzero; 677 return (0); 678 _bad: 679 if (flags & PFR_FLAG_FEEDBACK) 680 pfr_reset_feedback(addr, size); 681 return (rv); 682 } 683 684 static int 685 pfr_validate_addr(struct pfr_addr *ad) 686 { 687 int i; 688 689 switch (ad->pfra_af) { 690 #ifdef INET 691 case AF_INET: 692 if (ad->pfra_net > 32) 693 return (-1); 694 break; 695 #endif /* INET */ 696 #ifdef INET6 697 case AF_INET6: 698 if (ad->pfra_net > 128) 699 return (-1); 700 break; 701 #endif /* INET6 */ 702 default: 703 return (-1); 704 } 705 if (ad->pfra_net < 128 && 706 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 707 return (-1); 708 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 709 if (((caddr_t)ad)[i]) 710 return (-1); 711 if (ad->pfra_not && ad->pfra_not != 1) 712 return (-1); 713 if (ad->pfra_fback) 714 return (-1); 715 return (0); 716 } 717 718 static void 719 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 720 int *naddr, int sweep) 721 { 722 struct pfr_walktree w; 723 724 SLIST_INIT(workq); 725 bzero(&w, sizeof(w)); 726 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE; 727 w.pfrw_workq = workq; 728 if (kt->pfrkt_ip4 != NULL) 729 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, 730 pfr_walktree, &w)) 731 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 732 if (kt->pfrkt_ip6 != NULL) 733 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 734 pfr_walktree, &w)) 735 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 736 if (naddr != NULL) 737 *naddr = w.pfrw_free; 738 } 739 740 static void 741 pfr_mark_addrs(struct pfr_ktable *kt) 742 { 743 struct pfr_walktree w; 744 745 bzero(&w, sizeof(w)); 746 w.pfrw_op = PFRW_MARK; 747 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w)) 748 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 749 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w)) 750 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 751 } 752 753 static struct pfr_kentry * 754 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 755 { 756 union sockaddr_union sa, mask; 757 struct radix_head *head = NULL; 758 struct pfr_kentry *ke; 759 760 PF_RULES_ASSERT(); 761 762 bzero(&sa, sizeof(sa)); 763 if (ad->pfra_af == AF_INET) { 764 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 765 head = &kt->pfrkt_ip4->rh; 766 } else if ( ad->pfra_af == AF_INET6 ) { 767 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 768 head = &kt->pfrkt_ip6->rh; 769 } 770 if (ADDR_NETWORK(ad)) { 771 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 772 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 773 if (ke && KENTRY_RNF_ROOT(ke)) 774 ke = NULL; 775 } else { 776 ke = (struct pfr_kentry *)rn_match(&sa, head); 777 if (ke && KENTRY_RNF_ROOT(ke)) 778 ke = NULL; 779 if (exact && ke && KENTRY_NETWORK(ke)) 780 ke = NULL; 781 } 782 return (ke); 783 } 784 785 static struct pfr_kentry * 786 pfr_create_kentry(struct pfr_addr *ad, bool counters) 787 { 788 struct pfr_kentry *ke; 789 counter_u64_t c; 790 791 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); 792 if (ke == NULL) 793 return (NULL); 794 795 if (ad->pfra_af == AF_INET) 796 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 797 else if (ad->pfra_af == AF_INET6) 798 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 799 ke->pfrke_af = ad->pfra_af; 800 ke->pfrke_net = ad->pfra_net; 801 ke->pfrke_not = ad->pfra_not; 802 ke->pfrke_counters.pfrkc_tzero = 0; 803 if (counters) { 804 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO); 805 if (c == NULL) { 806 pfr_destroy_kentry(ke); 807 return (NULL); 808 } 809 ke->pfrke_counters.pfrkc_counters = c; 810 } 811 return (ke); 812 } 813 814 static void 815 pfr_destroy_kentries(struct pfr_kentryworkq *workq) 816 { 817 struct pfr_kentry *p, *q; 818 819 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 820 q = SLIST_NEXT(p, pfrke_workq); 821 pfr_destroy_kentry(p); 822 } 823 } 824 825 static void 826 pfr_destroy_kentry(struct pfr_kentry *ke) 827 { 828 counter_u64_t c; 829 830 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL) 831 uma_zfree_pcpu(V_pfr_kentry_counter_z, c); 832 uma_zfree(V_pfr_kentry_z, ke); 833 } 834 835 static void 836 pfr_insert_kentries(struct pfr_ktable *kt, 837 struct pfr_kentryworkq *workq, long tzero) 838 { 839 struct pfr_kentry *p; 840 int rv, n = 0; 841 842 SLIST_FOREACH(p, workq, pfrke_workq) { 843 rv = pfr_route_kentry(kt, p); 844 if (rv) { 845 printf("pfr_insert_kentries: cannot route entry " 846 "(code=%d).\n", rv); 847 break; 848 } 849 p->pfrke_counters.pfrkc_tzero = tzero; 850 n++; 851 } 852 kt->pfrkt_cnt += n; 853 } 854 855 int 856 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero) 857 { 858 struct pfr_kentry *p; 859 int rv; 860 861 p = pfr_lookup_addr(kt, ad, 1); 862 if (p != NULL) 863 return (0); 864 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 865 if (p == NULL) 866 return (ENOMEM); 867 868 rv = pfr_route_kentry(kt, p); 869 if (rv) 870 return (rv); 871 872 p->pfrke_counters.pfrkc_tzero = tzero; 873 kt->pfrkt_cnt++; 874 875 return (0); 876 } 877 878 static void 879 pfr_remove_kentries(struct pfr_ktable *kt, 880 struct pfr_kentryworkq *workq) 881 { 882 struct pfr_kentry *p; 883 int n = 0; 884 885 SLIST_FOREACH(p, workq, pfrke_workq) { 886 pfr_unroute_kentry(kt, p); 887 n++; 888 } 889 kt->pfrkt_cnt -= n; 890 pfr_destroy_kentries(workq); 891 } 892 893 static void 894 pfr_clean_node_mask(struct pfr_ktable *kt, 895 struct pfr_kentryworkq *workq) 896 { 897 struct pfr_kentry *p; 898 899 SLIST_FOREACH(p, workq, pfrke_workq) 900 pfr_unroute_kentry(kt, p); 901 } 902 903 static void 904 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 905 long tzero, int negchange) 906 { 907 struct pfr_kentry *p; 908 int i; 909 910 SLIST_FOREACH(p, workq, pfrke_workq) { 911 if (negchange) 912 p->pfrke_not = !p->pfrke_not; 913 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0) 914 for (i = 0; i < PFR_NUM_COUNTERS; i++) 915 counter_u64_zero( 916 p->pfrke_counters.pfrkc_counters + i); 917 p->pfrke_counters.pfrkc_tzero = tzero; 918 } 919 } 920 921 static void 922 pfr_reset_feedback(struct pfr_addr *addr, int size) 923 { 924 struct pfr_addr *ad; 925 int i; 926 927 for (i = 0, ad = addr; i < size; i++, ad++) 928 ad->pfra_fback = PFR_FB_NONE; 929 } 930 931 static void 932 pfr_prepare_network(union sockaddr_union *sa, int af, int net) 933 { 934 int i; 935 936 bzero(sa, sizeof(*sa)); 937 if (af == AF_INET) { 938 sa->sin.sin_len = sizeof(sa->sin); 939 sa->sin.sin_family = AF_INET; 940 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; 941 } else if (af == AF_INET6) { 942 sa->sin6.sin6_len = sizeof(sa->sin6); 943 sa->sin6.sin6_family = AF_INET6; 944 for (i = 0; i < 4; i++) { 945 if (net <= 32) { 946 sa->sin6.sin6_addr.s6_addr32[i] = 947 net ? htonl(-1 << (32-net)) : 0; 948 break; 949 } 950 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 951 net -= 32; 952 } 953 } 954 } 955 956 static int 957 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 958 { 959 union sockaddr_union mask; 960 struct radix_node *rn; 961 struct radix_head *head = NULL; 962 963 PF_RULES_WASSERT(); 964 965 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 966 if (ke->pfrke_af == AF_INET) 967 head = &kt->pfrkt_ip4->rh; 968 else if (ke->pfrke_af == AF_INET6) 969 head = &kt->pfrkt_ip6->rh; 970 971 if (KENTRY_NETWORK(ke)) { 972 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 973 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 974 } else 975 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 976 977 return (rn == NULL ? -1 : 0); 978 } 979 980 static int 981 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 982 { 983 union sockaddr_union mask; 984 struct radix_node *rn; 985 struct radix_head *head = NULL; 986 987 if (ke->pfrke_af == AF_INET) 988 head = &kt->pfrkt_ip4->rh; 989 else if (ke->pfrke_af == AF_INET6) 990 head = &kt->pfrkt_ip6->rh; 991 992 if (KENTRY_NETWORK(ke)) { 993 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 994 rn = rn_delete(&ke->pfrke_sa, &mask, head); 995 } else 996 rn = rn_delete(&ke->pfrke_sa, NULL, head); 997 998 if (rn == NULL) { 999 printf("pfr_unroute_kentry: delete failed.\n"); 1000 return (-1); 1001 } 1002 return (0); 1003 } 1004 1005 static void 1006 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke) 1007 { 1008 bzero(ad, sizeof(*ad)); 1009 if (ke == NULL) 1010 return; 1011 ad->pfra_af = ke->pfrke_af; 1012 ad->pfra_net = ke->pfrke_net; 1013 ad->pfra_not = ke->pfrke_not; 1014 if (ad->pfra_af == AF_INET) 1015 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 1016 else if (ad->pfra_af == AF_INET6) 1017 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 1018 } 1019 1020 static void 1021 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke, 1022 const struct pfr_walktree *w) 1023 { 1024 int dir, op; 1025 const struct pfr_kcounters *kc = &ke->pfrke_counters; 1026 1027 bzero(as, sizeof(*as)); 1028 pfr_copyout_addr(&as->pfras_a, ke); 1029 as->pfras_tzero = kc->pfrkc_tzero; 1030 1031 if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) || 1032 kc->pfrkc_counters == NULL) { 1033 bzero(as->pfras_packets, sizeof(as->pfras_packets)); 1034 bzero(as->pfras_bytes, sizeof(as->pfras_bytes)); 1035 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT; 1036 return; 1037 } 1038 1039 for (dir = 0; dir < PFR_DIR_MAX; dir++) { 1040 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) { 1041 as->pfras_packets[dir][op] = counter_u64_fetch( 1042 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS)); 1043 as->pfras_bytes[dir][op] = counter_u64_fetch( 1044 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES)); 1045 } 1046 } 1047 } 1048 1049 static int 1050 pfr_walktree(struct radix_node *rn, void *arg) 1051 { 1052 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1053 struct pfr_walktree *w = arg; 1054 1055 switch (w->pfrw_op) { 1056 case PFRW_MARK: 1057 ke->pfrke_mark = 0; 1058 break; 1059 case PFRW_SWEEP: 1060 if (ke->pfrke_mark) 1061 break; 1062 /* FALLTHROUGH */ 1063 case PFRW_ENQUEUE: 1064 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1065 w->pfrw_free++; 1066 break; 1067 case PFRW_GET_ADDRS: 1068 if (w->pfrw_free-- > 0) { 1069 pfr_copyout_addr(w->pfrw_addr, ke); 1070 w->pfrw_addr++; 1071 } 1072 break; 1073 case PFRW_GET_ASTATS: 1074 if (w->pfrw_free-- > 0) { 1075 struct pfr_astats as; 1076 1077 pfr_copyout_astats(&as, ke, w); 1078 1079 bcopy(&as, w->pfrw_astats, sizeof(as)); 1080 w->pfrw_astats++; 1081 } 1082 break; 1083 case PFRW_POOL_GET: 1084 if (ke->pfrke_not) 1085 break; /* negative entries are ignored */ 1086 if (!w->pfrw_free--) { 1087 w->pfrw_kentry = ke; 1088 return (1); /* finish search */ 1089 } 1090 break; 1091 case PFRW_DYNADDR_UPDATE: 1092 { 1093 union sockaddr_union pfr_mask; 1094 1095 if (ke->pfrke_af == AF_INET) { 1096 if (w->pfrw_dyn->pfid_acnt4++ > 0) 1097 break; 1098 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); 1099 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa, 1100 AF_INET); 1101 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask, 1102 AF_INET); 1103 } else if (ke->pfrke_af == AF_INET6){ 1104 if (w->pfrw_dyn->pfid_acnt6++ > 0) 1105 break; 1106 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); 1107 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa, 1108 AF_INET6); 1109 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask, 1110 AF_INET6); 1111 } 1112 break; 1113 } 1114 case PFRW_COUNTERS: 1115 { 1116 if (w->pfrw_flags & PFR_TFLAG_COUNTERS) { 1117 if (ke->pfrke_counters.pfrkc_counters != NULL) 1118 break; 1119 ke->pfrke_counters.pfrkc_counters = 1120 uma_zalloc_pcpu(V_pfr_kentry_counter_z, 1121 M_NOWAIT | M_ZERO); 1122 } else { 1123 uma_zfree_pcpu(V_pfr_kentry_counter_z, 1124 ke->pfrke_counters.pfrkc_counters); 1125 ke->pfrke_counters.pfrkc_counters = NULL; 1126 } 1127 break; 1128 } 1129 } 1130 return (0); 1131 } 1132 1133 int 1134 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1135 { 1136 struct pfr_ktableworkq workq; 1137 struct pfr_ktable *p; 1138 int xdel = 0; 1139 1140 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS); 1141 if (pfr_fix_anchor(filter->pfrt_anchor)) 1142 return (EINVAL); 1143 if (pfr_table_count(filter, flags) < 0) 1144 return (ENOENT); 1145 1146 SLIST_INIT(&workq); 1147 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1148 if (pfr_skip_table(filter, p, flags)) 1149 continue; 1150 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) 1151 continue; 1152 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1153 continue; 1154 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1155 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1156 xdel++; 1157 } 1158 if (!(flags & PFR_FLAG_DUMMY)) 1159 pfr_setflags_ktables(&workq); 1160 if (ndel != NULL) 1161 *ndel = xdel; 1162 return (0); 1163 } 1164 1165 int 1166 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1167 { 1168 struct pfr_ktableworkq addq, changeq; 1169 struct pfr_ktable *p, *q, *r, key; 1170 int i, rv, xadd = 0; 1171 long tzero = time_second; 1172 1173 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1174 SLIST_INIT(&addq); 1175 SLIST_INIT(&changeq); 1176 for (i = 0; i < size; i++) { 1177 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1178 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, 1179 flags & PFR_FLAG_USERIOCTL)) 1180 senderr(EINVAL); 1181 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1182 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1183 if (p == NULL) { 1184 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1185 if (p == NULL) 1186 senderr(ENOMEM); 1187 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1188 if (!pfr_ktable_compare(p, q)) { 1189 pfr_destroy_ktable(p, 0); 1190 goto _skip; 1191 } 1192 } 1193 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1194 xadd++; 1195 if (!key.pfrkt_anchor[0]) 1196 goto _skip; 1197 1198 /* find or create root table */ 1199 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1200 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1201 if (r != NULL) { 1202 p->pfrkt_root = r; 1203 goto _skip; 1204 } 1205 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1206 if (!pfr_ktable_compare(&key, q)) { 1207 p->pfrkt_root = q; 1208 goto _skip; 1209 } 1210 } 1211 key.pfrkt_flags = 0; 1212 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1213 if (r == NULL) 1214 senderr(ENOMEM); 1215 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1216 p->pfrkt_root = r; 1217 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1218 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1219 if (!pfr_ktable_compare(&key, q)) 1220 goto _skip; 1221 p->pfrkt_nflags = (p->pfrkt_flags & 1222 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1223 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1224 xadd++; 1225 } 1226 _skip: 1227 ; 1228 } 1229 if (!(flags & PFR_FLAG_DUMMY)) { 1230 pfr_insert_ktables(&addq); 1231 pfr_setflags_ktables(&changeq); 1232 } else 1233 pfr_destroy_ktables(&addq, 0); 1234 if (nadd != NULL) 1235 *nadd = xadd; 1236 return (0); 1237 _bad: 1238 pfr_destroy_ktables(&addq, 0); 1239 return (rv); 1240 } 1241 1242 int 1243 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1244 { 1245 struct pfr_ktableworkq workq; 1246 struct pfr_ktable *p, *q, key; 1247 int i, xdel = 0; 1248 1249 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1250 SLIST_INIT(&workq); 1251 for (i = 0; i < size; i++) { 1252 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1253 if (pfr_validate_table(&key.pfrkt_t, 0, 1254 flags & PFR_FLAG_USERIOCTL)) 1255 return (EINVAL); 1256 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1257 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1258 SLIST_FOREACH(q, &workq, pfrkt_workq) 1259 if (!pfr_ktable_compare(p, q)) 1260 goto _skip; 1261 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1262 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1263 xdel++; 1264 } 1265 _skip: 1266 ; 1267 } 1268 1269 if (!(flags & PFR_FLAG_DUMMY)) 1270 pfr_setflags_ktables(&workq); 1271 if (ndel != NULL) 1272 *ndel = xdel; 1273 return (0); 1274 } 1275 1276 int 1277 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1278 int flags) 1279 { 1280 struct pfr_ktable *p; 1281 int n, nn; 1282 1283 PF_RULES_RASSERT(); 1284 1285 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1286 if (pfr_fix_anchor(filter->pfrt_anchor)) 1287 return (EINVAL); 1288 n = nn = pfr_table_count(filter, flags); 1289 if (n < 0) 1290 return (ENOENT); 1291 if (n > *size) { 1292 *size = n; 1293 return (0); 1294 } 1295 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1296 if (pfr_skip_table(filter, p, flags)) 1297 continue; 1298 if (n-- <= 0) 1299 continue; 1300 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl)); 1301 } 1302 1303 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1304 1305 *size = nn; 1306 return (0); 1307 } 1308 1309 int 1310 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1311 int flags) 1312 { 1313 struct pfr_ktable *p; 1314 struct pfr_ktableworkq workq; 1315 int n, nn; 1316 long tzero = time_second; 1317 int pfr_dir, pfr_op; 1318 1319 /* XXX PFR_FLAG_CLSTATS disabled */ 1320 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1321 if (pfr_fix_anchor(filter->pfrt_anchor)) 1322 return (EINVAL); 1323 n = nn = pfr_table_count(filter, flags); 1324 if (n < 0) 1325 return (ENOENT); 1326 if (n > *size) { 1327 *size = n; 1328 return (0); 1329 } 1330 SLIST_INIT(&workq); 1331 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1332 if (pfr_skip_table(filter, p, flags)) 1333 continue; 1334 if (n-- <= 0) 1335 continue; 1336 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t, 1337 sizeof(struct pfr_table)); 1338 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1339 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1340 tbl->pfrts_packets[pfr_dir][pfr_op] = 1341 pfr_kstate_counter_fetch( 1342 &p->pfrkt_packets[pfr_dir][pfr_op]); 1343 tbl->pfrts_bytes[pfr_dir][pfr_op] = 1344 pfr_kstate_counter_fetch( 1345 &p->pfrkt_bytes[pfr_dir][pfr_op]); 1346 } 1347 } 1348 tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match); 1349 tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch); 1350 tbl->pfrts_tzero = p->pfrkt_tzero; 1351 tbl->pfrts_cnt = p->pfrkt_cnt; 1352 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++) 1353 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op]; 1354 tbl++; 1355 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1356 } 1357 if (flags & PFR_FLAG_CLSTATS) 1358 pfr_clstats_ktables(&workq, tzero, 1359 flags & PFR_FLAG_ADDRSTOO); 1360 1361 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1362 1363 *size = nn; 1364 return (0); 1365 } 1366 1367 int 1368 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1369 { 1370 struct pfr_ktableworkq workq; 1371 struct pfr_ktable *p, key; 1372 int i, xzero = 0; 1373 long tzero = time_second; 1374 1375 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1376 SLIST_INIT(&workq); 1377 for (i = 0; i < size; i++) { 1378 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1379 if (pfr_validate_table(&key.pfrkt_t, 0, 0)) 1380 return (EINVAL); 1381 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1382 if (p != NULL) { 1383 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1384 xzero++; 1385 } 1386 } 1387 if (!(flags & PFR_FLAG_DUMMY)) 1388 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1389 if (nzero != NULL) 1390 *nzero = xzero; 1391 return (0); 1392 } 1393 1394 int 1395 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1396 int *nchange, int *ndel, int flags) 1397 { 1398 struct pfr_ktableworkq workq; 1399 struct pfr_ktable *p, *q, key; 1400 int i, xchange = 0, xdel = 0; 1401 1402 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1403 if ((setflag & ~PFR_TFLAG_USRMASK) || 1404 (clrflag & ~PFR_TFLAG_USRMASK) || 1405 (setflag & clrflag)) 1406 return (EINVAL); 1407 SLIST_INIT(&workq); 1408 for (i = 0; i < size; i++) { 1409 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1410 if (pfr_validate_table(&key.pfrkt_t, 0, 1411 flags & PFR_FLAG_USERIOCTL)) 1412 return (EINVAL); 1413 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1414 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1415 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1416 ~clrflag; 1417 if (p->pfrkt_nflags == p->pfrkt_flags) 1418 goto _skip; 1419 SLIST_FOREACH(q, &workq, pfrkt_workq) 1420 if (!pfr_ktable_compare(p, q)) 1421 goto _skip; 1422 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1423 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1424 (clrflag & PFR_TFLAG_PERSIST) && 1425 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1426 xdel++; 1427 else 1428 xchange++; 1429 } 1430 _skip: 1431 ; 1432 } 1433 if (!(flags & PFR_FLAG_DUMMY)) 1434 pfr_setflags_ktables(&workq); 1435 if (nchange != NULL) 1436 *nchange = xchange; 1437 if (ndel != NULL) 1438 *ndel = xdel; 1439 return (0); 1440 } 1441 1442 int 1443 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1444 { 1445 struct pfr_ktableworkq workq; 1446 struct pfr_ktable *p; 1447 struct pf_kruleset *rs; 1448 int xdel = 0; 1449 1450 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1451 rs = pf_find_or_create_kruleset(trs->pfrt_anchor); 1452 if (rs == NULL) 1453 return (ENOMEM); 1454 SLIST_INIT(&workq); 1455 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1456 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1457 pfr_skip_table(trs, p, 0)) 1458 continue; 1459 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1460 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1461 xdel++; 1462 } 1463 if (!(flags & PFR_FLAG_DUMMY)) { 1464 pfr_setflags_ktables(&workq); 1465 if (ticket != NULL) 1466 *ticket = ++rs->tticket; 1467 rs->topen = 1; 1468 } else 1469 pf_remove_if_empty_kruleset(rs); 1470 if (ndel != NULL) 1471 *ndel = xdel; 1472 return (0); 1473 } 1474 1475 int 1476 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1477 int *nadd, int *naddr, u_int32_t ticket, int flags) 1478 { 1479 struct pfr_ktableworkq tableq; 1480 struct pfr_kentryworkq addrq; 1481 struct pfr_ktable *kt, *rt, *shadow, key; 1482 struct pfr_kentry *p; 1483 struct pfr_addr *ad; 1484 struct pf_kruleset *rs; 1485 int i, rv, xadd = 0, xaddr = 0; 1486 1487 PF_RULES_WASSERT(); 1488 1489 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1490 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1491 return (EINVAL); 1492 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, 1493 flags & PFR_FLAG_USERIOCTL)) 1494 return (EINVAL); 1495 rs = pf_find_kruleset(tbl->pfrt_anchor); 1496 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1497 return (EBUSY); 1498 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1499 SLIST_INIT(&tableq); 1500 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl); 1501 if (kt == NULL) { 1502 kt = pfr_create_ktable(tbl, 0, 1); 1503 if (kt == NULL) 1504 return (ENOMEM); 1505 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1506 xadd++; 1507 if (!tbl->pfrt_anchor[0]) 1508 goto _skip; 1509 1510 /* find or create root table */ 1511 bzero(&key, sizeof(key)); 1512 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1513 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1514 if (rt != NULL) { 1515 kt->pfrkt_root = rt; 1516 goto _skip; 1517 } 1518 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1519 if (rt == NULL) { 1520 pfr_destroy_ktables(&tableq, 0); 1521 return (ENOMEM); 1522 } 1523 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1524 kt->pfrkt_root = rt; 1525 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) 1526 xadd++; 1527 _skip: 1528 shadow = pfr_create_ktable(tbl, 0, 0); 1529 if (shadow == NULL) { 1530 pfr_destroy_ktables(&tableq, 0); 1531 return (ENOMEM); 1532 } 1533 SLIST_INIT(&addrq); 1534 for (i = 0, ad = addr; i < size; i++, ad++) { 1535 if (pfr_validate_addr(ad)) 1536 senderr(EINVAL); 1537 if (pfr_lookup_addr(shadow, ad, 1) != NULL) 1538 continue; 1539 p = pfr_create_kentry(ad, 1540 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 1541 if (p == NULL) 1542 senderr(ENOMEM); 1543 if (pfr_route_kentry(shadow, p)) { 1544 pfr_destroy_kentry(p); 1545 continue; 1546 } 1547 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1548 xaddr++; 1549 } 1550 if (!(flags & PFR_FLAG_DUMMY)) { 1551 if (kt->pfrkt_shadow != NULL) 1552 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1553 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1554 pfr_insert_ktables(&tableq); 1555 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 1556 xaddr : NO_ADDRESSES; 1557 kt->pfrkt_shadow = shadow; 1558 } else { 1559 pfr_clean_node_mask(shadow, &addrq); 1560 pfr_destroy_ktable(shadow, 0); 1561 pfr_destroy_ktables(&tableq, 0); 1562 pfr_destroy_kentries(&addrq); 1563 } 1564 if (nadd != NULL) 1565 *nadd = xadd; 1566 if (naddr != NULL) 1567 *naddr = xaddr; 1568 return (0); 1569 _bad: 1570 pfr_destroy_ktable(shadow, 0); 1571 pfr_destroy_ktables(&tableq, 0); 1572 pfr_destroy_kentries(&addrq); 1573 return (rv); 1574 } 1575 1576 int 1577 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) 1578 { 1579 struct pfr_ktableworkq workq; 1580 struct pfr_ktable *p; 1581 struct pf_kruleset *rs; 1582 int xdel = 0; 1583 1584 PF_RULES_WASSERT(); 1585 1586 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1587 rs = pf_find_kruleset(trs->pfrt_anchor); 1588 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1589 return (0); 1590 SLIST_INIT(&workq); 1591 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1592 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1593 pfr_skip_table(trs, p, 0)) 1594 continue; 1595 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1596 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1597 xdel++; 1598 } 1599 if (!(flags & PFR_FLAG_DUMMY)) { 1600 pfr_setflags_ktables(&workq); 1601 rs->topen = 0; 1602 pf_remove_if_empty_kruleset(rs); 1603 } 1604 if (ndel != NULL) 1605 *ndel = xdel; 1606 return (0); 1607 } 1608 1609 int 1610 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1611 int *nchange, int flags) 1612 { 1613 struct pfr_ktable *p, *q; 1614 struct pfr_ktableworkq workq; 1615 struct pf_kruleset *rs; 1616 int xadd = 0, xchange = 0; 1617 long tzero = time_second; 1618 1619 PF_RULES_WASSERT(); 1620 1621 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1622 rs = pf_find_kruleset(trs->pfrt_anchor); 1623 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1624 return (EBUSY); 1625 1626 SLIST_INIT(&workq); 1627 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1628 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1629 pfr_skip_table(trs, p, 0)) 1630 continue; 1631 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1632 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1633 xchange++; 1634 else 1635 xadd++; 1636 } 1637 1638 if (!(flags & PFR_FLAG_DUMMY)) { 1639 for (p = SLIST_FIRST(&workq); p != NULL; p = q) { 1640 q = SLIST_NEXT(p, pfrkt_workq); 1641 pfr_commit_ktable(p, tzero); 1642 } 1643 rs->topen = 0; 1644 pf_remove_if_empty_kruleset(rs); 1645 } 1646 if (nadd != NULL) 1647 *nadd = xadd; 1648 if (nchange != NULL) 1649 *nchange = xchange; 1650 1651 return (0); 1652 } 1653 1654 static void 1655 pfr_commit_ktable(struct pfr_ktable *kt, long tzero) 1656 { 1657 counter_u64_t *pkc, *qkc; 1658 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1659 int nflags; 1660 1661 PF_RULES_WASSERT(); 1662 1663 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1664 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1665 pfr_clstats_ktable(kt, tzero, 1); 1666 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1667 /* kt might contain addresses */ 1668 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1669 struct pfr_kentry *p, *q, *next; 1670 struct pfr_addr ad; 1671 1672 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1673 pfr_mark_addrs(kt); 1674 SLIST_INIT(&addq); 1675 SLIST_INIT(&changeq); 1676 SLIST_INIT(&delq); 1677 SLIST_INIT(&garbageq); 1678 pfr_clean_node_mask(shadow, &addrq); 1679 SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) { 1680 pfr_copyout_addr(&ad, p); 1681 q = pfr_lookup_addr(kt, &ad, 1); 1682 if (q != NULL) { 1683 if (q->pfrke_not != p->pfrke_not) 1684 SLIST_INSERT_HEAD(&changeq, q, 1685 pfrke_workq); 1686 pkc = &p->pfrke_counters.pfrkc_counters; 1687 qkc = &q->pfrke_counters.pfrkc_counters; 1688 if ((*pkc == NULL) != (*qkc == NULL)) 1689 SWAP(counter_u64_t, *pkc, *qkc); 1690 q->pfrke_mark = 1; 1691 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1692 } else { 1693 p->pfrke_counters.pfrkc_tzero = tzero; 1694 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1695 } 1696 } 1697 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1698 pfr_insert_kentries(kt, &addq, tzero); 1699 pfr_remove_kentries(kt, &delq); 1700 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 1701 pfr_destroy_kentries(&garbageq); 1702 } else { 1703 /* kt cannot contain addresses */ 1704 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1705 shadow->pfrkt_ip4); 1706 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1707 shadow->pfrkt_ip6); 1708 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1709 pfr_clstats_ktable(kt, tzero, 1); 1710 } 1711 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1712 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1713 & ~PFR_TFLAG_INACTIVE; 1714 pfr_destroy_ktable(shadow, 0); 1715 kt->pfrkt_shadow = NULL; 1716 pfr_setflags_ktable(kt, nflags); 1717 } 1718 1719 static int 1720 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) 1721 { 1722 int i; 1723 1724 if (!tbl->pfrt_name[0]) 1725 return (-1); 1726 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) 1727 return (-1); 1728 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1729 return (-1); 1730 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1731 if (tbl->pfrt_name[i]) 1732 return (-1); 1733 if (pfr_fix_anchor(tbl->pfrt_anchor)) 1734 return (-1); 1735 if (tbl->pfrt_flags & ~allowedflags) 1736 return (-1); 1737 return (0); 1738 } 1739 1740 /* 1741 * Rewrite anchors referenced by tables to remove slashes 1742 * and check for validity. 1743 */ 1744 static int 1745 pfr_fix_anchor(char *anchor) 1746 { 1747 size_t siz = MAXPATHLEN; 1748 int i; 1749 1750 if (anchor[0] == '/') { 1751 char *path; 1752 int off; 1753 1754 path = anchor; 1755 off = 1; 1756 while (*++path == '/') 1757 off++; 1758 bcopy(path, anchor, siz - off); 1759 memset(anchor + siz - off, 0, off); 1760 } 1761 if (anchor[siz - 1]) 1762 return (-1); 1763 for (i = strlen(anchor); i < siz; i++) 1764 if (anchor[i]) 1765 return (-1); 1766 return (0); 1767 } 1768 1769 int 1770 pfr_table_count(struct pfr_table *filter, int flags) 1771 { 1772 struct pf_kruleset *rs; 1773 1774 PF_RULES_ASSERT(); 1775 1776 if (flags & PFR_FLAG_ALLRSETS) 1777 return (V_pfr_ktable_cnt); 1778 if (filter->pfrt_anchor[0]) { 1779 rs = pf_find_kruleset(filter->pfrt_anchor); 1780 return ((rs != NULL) ? rs->tables : -1); 1781 } 1782 return (pf_main_ruleset.tables); 1783 } 1784 1785 static int 1786 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1787 { 1788 if (flags & PFR_FLAG_ALLRSETS) 1789 return (0); 1790 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) 1791 return (1); 1792 return (0); 1793 } 1794 1795 static void 1796 pfr_insert_ktables(struct pfr_ktableworkq *workq) 1797 { 1798 struct pfr_ktable *p; 1799 1800 SLIST_FOREACH(p, workq, pfrkt_workq) 1801 pfr_insert_ktable(p); 1802 } 1803 1804 static void 1805 pfr_insert_ktable(struct pfr_ktable *kt) 1806 { 1807 1808 PF_RULES_WASSERT(); 1809 1810 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt); 1811 V_pfr_ktable_cnt++; 1812 if (kt->pfrkt_root != NULL) 1813 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1814 pfr_setflags_ktable(kt->pfrkt_root, 1815 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1816 } 1817 1818 static void 1819 pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1820 { 1821 struct pfr_ktable *p, *q; 1822 1823 for (p = SLIST_FIRST(workq); p; p = q) { 1824 q = SLIST_NEXT(p, pfrkt_workq); 1825 pfr_setflags_ktable(p, p->pfrkt_nflags); 1826 } 1827 } 1828 1829 static void 1830 pfr_setflags_ktable(struct pfr_ktable *kt, int newf) 1831 { 1832 struct pfr_kentryworkq addrq; 1833 struct pfr_walktree w; 1834 1835 PF_RULES_WASSERT(); 1836 1837 if (!(newf & PFR_TFLAG_REFERENCED) && 1838 !(newf & PFR_TFLAG_REFDANCHOR) && 1839 !(newf & PFR_TFLAG_PERSIST)) 1840 newf &= ~PFR_TFLAG_ACTIVE; 1841 if (!(newf & PFR_TFLAG_ACTIVE)) 1842 newf &= ~PFR_TFLAG_USRMASK; 1843 if (!(newf & PFR_TFLAG_SETMASK)) { 1844 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt); 1845 if (kt->pfrkt_root != NULL) 1846 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1847 pfr_setflags_ktable(kt->pfrkt_root, 1848 kt->pfrkt_root->pfrkt_flags & 1849 ~PFR_TFLAG_REFDANCHOR); 1850 pfr_destroy_ktable(kt, 1); 1851 V_pfr_ktable_cnt--; 1852 return; 1853 } 1854 if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 1855 bzero(&w, sizeof(w)); 1856 w.pfrw_op = PFRW_COUNTERS; 1857 w.pfrw_flags |= PFR_TFLAG_COUNTERS; 1858 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 1859 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 1860 } 1861 if (! (newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 1862 bzero(&w, sizeof(w)); 1863 w.pfrw_op = PFRW_COUNTERS; 1864 w.pfrw_flags |= 0; 1865 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 1866 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 1867 } 1868 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1869 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1870 pfr_remove_kentries(kt, &addrq); 1871 } 1872 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1873 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1874 kt->pfrkt_shadow = NULL; 1875 } 1876 kt->pfrkt_flags = newf; 1877 } 1878 1879 static void 1880 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1881 { 1882 struct pfr_ktable *p; 1883 1884 SLIST_FOREACH(p, workq, pfrkt_workq) 1885 pfr_clstats_ktable(p, tzero, recurse); 1886 } 1887 1888 static void 1889 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1890 { 1891 struct pfr_kentryworkq addrq; 1892 int pfr_dir, pfr_op; 1893 1894 MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED()); 1895 1896 if (recurse) { 1897 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1898 pfr_clstats_kentries(kt, &addrq, tzero, 0); 1899 } 1900 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1901 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1902 pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]); 1903 pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]); 1904 } 1905 } 1906 pfr_kstate_counter_zero(&kt->pfrkt_match); 1907 pfr_kstate_counter_zero(&kt->pfrkt_nomatch); 1908 kt->pfrkt_tzero = tzero; 1909 } 1910 1911 static struct pfr_ktable * 1912 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 1913 { 1914 struct pfr_ktable *kt; 1915 struct pf_kruleset *rs; 1916 int pfr_dir, pfr_op; 1917 1918 PF_RULES_WASSERT(); 1919 1920 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO); 1921 if (kt == NULL) 1922 return (NULL); 1923 kt->pfrkt_t = *tbl; 1924 1925 if (attachruleset) { 1926 rs = pf_find_or_create_kruleset(tbl->pfrt_anchor); 1927 if (!rs) { 1928 pfr_destroy_ktable(kt, 0); 1929 return (NULL); 1930 } 1931 kt->pfrkt_rs = rs; 1932 rs->tables++; 1933 } 1934 1935 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1936 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1937 if (pfr_kstate_counter_init( 1938 &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) { 1939 pfr_destroy_ktable(kt, 0); 1940 return (NULL); 1941 } 1942 if (pfr_kstate_counter_init( 1943 &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) { 1944 pfr_destroy_ktable(kt, 0); 1945 return (NULL); 1946 } 1947 } 1948 } 1949 if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) { 1950 pfr_destroy_ktable(kt, 0); 1951 return (NULL); 1952 } 1953 1954 if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) { 1955 pfr_destroy_ktable(kt, 0); 1956 return (NULL); 1957 } 1958 1959 if (!rn_inithead((void **)&kt->pfrkt_ip4, 1960 offsetof(struct sockaddr_in, sin_addr) * 8) || 1961 !rn_inithead((void **)&kt->pfrkt_ip6, 1962 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 1963 pfr_destroy_ktable(kt, 0); 1964 return (NULL); 1965 } 1966 kt->pfrkt_tzero = tzero; 1967 1968 return (kt); 1969 } 1970 1971 static void 1972 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 1973 { 1974 struct pfr_ktable *p, *q; 1975 1976 for (p = SLIST_FIRST(workq); p; p = q) { 1977 q = SLIST_NEXT(p, pfrkt_workq); 1978 pfr_destroy_ktable(p, flushaddr); 1979 } 1980 } 1981 1982 static void 1983 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 1984 { 1985 struct pfr_kentryworkq addrq; 1986 int pfr_dir, pfr_op; 1987 1988 if (flushaddr) { 1989 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1990 pfr_clean_node_mask(kt, &addrq); 1991 pfr_destroy_kentries(&addrq); 1992 } 1993 if (kt->pfrkt_ip4 != NULL) 1994 rn_detachhead((void **)&kt->pfrkt_ip4); 1995 if (kt->pfrkt_ip6 != NULL) 1996 rn_detachhead((void **)&kt->pfrkt_ip6); 1997 if (kt->pfrkt_shadow != NULL) 1998 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 1999 if (kt->pfrkt_rs != NULL) { 2000 kt->pfrkt_rs->tables--; 2001 pf_remove_if_empty_kruleset(kt->pfrkt_rs); 2002 } 2003 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 2004 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 2005 pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]); 2006 pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]); 2007 } 2008 } 2009 pfr_kstate_counter_deinit(&kt->pfrkt_match); 2010 pfr_kstate_counter_deinit(&kt->pfrkt_nomatch); 2011 2012 free(kt, M_PFTABLE); 2013 } 2014 2015 static int 2016 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) 2017 { 2018 int d; 2019 2020 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 2021 return (d); 2022 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); 2023 } 2024 2025 static struct pfr_ktable * 2026 pfr_lookup_table(struct pfr_table *tbl) 2027 { 2028 /* struct pfr_ktable start like a struct pfr_table */ 2029 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables, 2030 (struct pfr_ktable *)tbl)); 2031 } 2032 2033 int 2034 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 2035 { 2036 struct pfr_kentry *ke = NULL; 2037 int match; 2038 2039 PF_RULES_RASSERT(); 2040 2041 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2042 kt = kt->pfrkt_root; 2043 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2044 return (0); 2045 2046 switch (af) { 2047 #ifdef INET 2048 case AF_INET: 2049 { 2050 struct sockaddr_in sin; 2051 2052 bzero(&sin, sizeof(sin)); 2053 sin.sin_len = sizeof(sin); 2054 sin.sin_family = AF_INET; 2055 sin.sin_addr.s_addr = a->addr32[0]; 2056 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 2057 if (ke && KENTRY_RNF_ROOT(ke)) 2058 ke = NULL; 2059 break; 2060 } 2061 #endif /* INET */ 2062 #ifdef INET6 2063 case AF_INET6: 2064 { 2065 struct sockaddr_in6 sin6; 2066 2067 bzero(&sin6, sizeof(sin6)); 2068 sin6.sin6_len = sizeof(sin6); 2069 sin6.sin6_family = AF_INET6; 2070 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 2071 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 2072 if (ke && KENTRY_RNF_ROOT(ke)) 2073 ke = NULL; 2074 break; 2075 } 2076 #endif /* INET6 */ 2077 } 2078 match = (ke && !ke->pfrke_not); 2079 if (match) 2080 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2081 else 2082 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1); 2083 return (match); 2084 } 2085 2086 void 2087 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 2088 u_int64_t len, int dir_out, int op_pass, int notrule) 2089 { 2090 struct pfr_kentry *ke = NULL; 2091 2092 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2093 kt = kt->pfrkt_root; 2094 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2095 return; 2096 2097 switch (af) { 2098 #ifdef INET 2099 case AF_INET: 2100 { 2101 struct sockaddr_in sin; 2102 2103 bzero(&sin, sizeof(sin)); 2104 sin.sin_len = sizeof(sin); 2105 sin.sin_family = AF_INET; 2106 sin.sin_addr.s_addr = a->addr32[0]; 2107 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 2108 if (ke && KENTRY_RNF_ROOT(ke)) 2109 ke = NULL; 2110 break; 2111 } 2112 #endif /* INET */ 2113 #ifdef INET6 2114 case AF_INET6: 2115 { 2116 struct sockaddr_in6 sin6; 2117 2118 bzero(&sin6, sizeof(sin6)); 2119 sin6.sin6_len = sizeof(sin6); 2120 sin6.sin6_family = AF_INET6; 2121 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 2122 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 2123 if (ke && KENTRY_RNF_ROOT(ke)) 2124 ke = NULL; 2125 break; 2126 } 2127 #endif /* INET6 */ 2128 default: 2129 panic("%s: unknown address family %u", __func__, af); 2130 } 2131 if ((ke == NULL || ke->pfrke_not) != notrule) { 2132 if (op_pass != PFR_OP_PASS) 2133 DPFPRINTF(PF_DEBUG_URGENT, 2134 ("pfr_update_stats: assertion failed.\n")); 2135 op_pass = PFR_OP_XPASS; 2136 } 2137 pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1); 2138 pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len); 2139 if (ke != NULL && op_pass != PFR_OP_XPASS && 2140 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 2141 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters, 2142 dir_out, op_pass, PFR_TYPE_PACKETS), 1); 2143 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters, 2144 dir_out, op_pass, PFR_TYPE_BYTES), len); 2145 } 2146 } 2147 2148 struct pfr_ktable * 2149 pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name) 2150 { 2151 struct pfr_ktable *kt, *rt; 2152 struct pfr_table tbl; 2153 struct pf_keth_anchor *ac = rs->anchor; 2154 2155 PF_RULES_WASSERT(); 2156 2157 bzero(&tbl, sizeof(tbl)); 2158 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); 2159 if (ac != NULL) 2160 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor)); 2161 kt = pfr_lookup_table(&tbl); 2162 if (kt == NULL) { 2163 kt = pfr_create_ktable(&tbl, time_second, 1); 2164 if (kt == NULL) 2165 return (NULL); 2166 if (ac != NULL) { 2167 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); 2168 rt = pfr_lookup_table(&tbl); 2169 if (rt == NULL) { 2170 rt = pfr_create_ktable(&tbl, 0, 1); 2171 if (rt == NULL) { 2172 pfr_destroy_ktable(kt, 0); 2173 return (NULL); 2174 } 2175 pfr_insert_ktable(rt); 2176 } 2177 kt->pfrkt_root = rt; 2178 } 2179 pfr_insert_ktable(kt); 2180 } 2181 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) 2182 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED); 2183 return (kt); 2184 } 2185 2186 struct pfr_ktable * 2187 pfr_attach_table(struct pf_kruleset *rs, char *name) 2188 { 2189 struct pfr_ktable *kt, *rt; 2190 struct pfr_table tbl; 2191 struct pf_kanchor *ac = rs->anchor; 2192 2193 PF_RULES_WASSERT(); 2194 2195 bzero(&tbl, sizeof(tbl)); 2196 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); 2197 if (ac != NULL) 2198 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor)); 2199 kt = pfr_lookup_table(&tbl); 2200 if (kt == NULL) { 2201 kt = pfr_create_ktable(&tbl, time_second, 1); 2202 if (kt == NULL) 2203 return (NULL); 2204 if (ac != NULL) { 2205 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); 2206 rt = pfr_lookup_table(&tbl); 2207 if (rt == NULL) { 2208 rt = pfr_create_ktable(&tbl, 0, 1); 2209 if (rt == NULL) { 2210 pfr_destroy_ktable(kt, 0); 2211 return (NULL); 2212 } 2213 pfr_insert_ktable(rt); 2214 } 2215 kt->pfrkt_root = rt; 2216 } 2217 pfr_insert_ktable(kt); 2218 } 2219 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) 2220 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED); 2221 return (kt); 2222 } 2223 2224 void 2225 pfr_detach_table(struct pfr_ktable *kt) 2226 { 2227 2228 PF_RULES_WASSERT(); 2229 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n", 2230 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE])); 2231 2232 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2233 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2234 } 2235 2236 int 2237 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, 2238 sa_family_t af) 2239 { 2240 struct pf_addr *addr, *cur, *mask; 2241 union sockaddr_union uaddr, umask; 2242 struct pfr_kentry *ke, *ke2 = NULL; 2243 int idx = -1, use_counter = 0; 2244 2245 MPASS(pidx != NULL); 2246 MPASS(counter != NULL); 2247 2248 switch (af) { 2249 case AF_INET: 2250 uaddr.sin.sin_len = sizeof(struct sockaddr_in); 2251 uaddr.sin.sin_family = AF_INET; 2252 break; 2253 case AF_INET6: 2254 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6); 2255 uaddr.sin6.sin6_family = AF_INET6; 2256 break; 2257 } 2258 addr = SUNION2PF(&uaddr, af); 2259 2260 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2261 kt = kt->pfrkt_root; 2262 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2263 return (-1); 2264 2265 idx = *pidx; 2266 if (idx >= 0) 2267 use_counter = 1; 2268 if (idx < 0) 2269 idx = 0; 2270 2271 _next_block: 2272 ke = pfr_kentry_byidx(kt, idx, af); 2273 if (ke == NULL) { 2274 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1); 2275 return (1); 2276 } 2277 pfr_prepare_network(&umask, af, ke->pfrke_net); 2278 cur = SUNION2PF(&ke->pfrke_sa, af); 2279 mask = SUNION2PF(&umask, af); 2280 2281 if (use_counter) { 2282 /* is supplied address within block? */ 2283 if (!PF_MATCHA(0, cur, mask, counter, af)) { 2284 /* no, go to next block in table */ 2285 idx++; 2286 use_counter = 0; 2287 goto _next_block; 2288 } 2289 PF_ACPY(addr, counter, af); 2290 } else { 2291 /* use first address of block */ 2292 PF_ACPY(addr, cur, af); 2293 } 2294 2295 if (!KENTRY_NETWORK(ke)) { 2296 /* this is a single IP address - no possible nested block */ 2297 PF_ACPY(counter, addr, af); 2298 *pidx = idx; 2299 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2300 return (0); 2301 } 2302 for (;;) { 2303 /* we don't want to use a nested block */ 2304 switch (af) { 2305 case AF_INET: 2306 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2307 &kt->pfrkt_ip4->rh); 2308 break; 2309 case AF_INET6: 2310 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2311 &kt->pfrkt_ip6->rh); 2312 break; 2313 } 2314 /* no need to check KENTRY_RNF_ROOT() here */ 2315 if (ke2 == ke) { 2316 /* lookup return the same block - perfect */ 2317 PF_ACPY(counter, addr, af); 2318 *pidx = idx; 2319 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2320 return (0); 2321 } 2322 2323 /* we need to increase the counter past the nested block */ 2324 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net); 2325 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af); 2326 PF_AINC(addr, af); 2327 if (!PF_MATCHA(0, cur, mask, addr, af)) { 2328 /* ok, we reached the end of our main block */ 2329 /* go to next block in table */ 2330 idx++; 2331 use_counter = 0; 2332 goto _next_block; 2333 } 2334 } 2335 } 2336 2337 static struct pfr_kentry * 2338 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) 2339 { 2340 struct pfr_walktree w; 2341 2342 bzero(&w, sizeof(w)); 2343 w.pfrw_op = PFRW_POOL_GET; 2344 w.pfrw_free = idx; 2345 2346 switch (af) { 2347 #ifdef INET 2348 case AF_INET: 2349 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2350 return (w.pfrw_kentry); 2351 #endif /* INET */ 2352 #ifdef INET6 2353 case AF_INET6: 2354 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2355 return (w.pfrw_kentry); 2356 #endif /* INET6 */ 2357 default: 2358 return (NULL); 2359 } 2360 } 2361 2362 void 2363 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn) 2364 { 2365 struct pfr_walktree w; 2366 2367 bzero(&w, sizeof(w)); 2368 w.pfrw_op = PFRW_DYNADDR_UPDATE; 2369 w.pfrw_dyn = dyn; 2370 2371 dyn->pfid_acnt4 = 0; 2372 dyn->pfid_acnt6 = 0; 2373 if (!dyn->pfid_af || dyn->pfid_af == AF_INET) 2374 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2375 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) 2376 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2377 } 2378