/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af)==AF_INET) ?
\ 83 (struct pf_addr *)&(su)->sin.sin_addr : \ 84 (struct pf_addr *)&(su)->sin6.sin6_addr) 85 86 #define AF_BITS(af) (((af)==AF_INET)?32:128) 87 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 88 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 89 #define KENTRY_RNF_ROOT(ke) \ 90 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 91 92 #define NO_ADDRESSES (-1) 93 #define ENQUEUE_UNMARKED_ONLY (1) 94 #define INVERT_NEG_FLAG (1) 95 96 struct pfr_walktree { 97 enum pfrw_op { 98 PFRW_MARK, 99 PFRW_SWEEP, 100 PFRW_ENQUEUE, 101 PFRW_GET_ADDRS, 102 PFRW_GET_ASTATS, 103 PFRW_POOL_GET, 104 PFRW_DYNADDR_UPDATE, 105 PFRW_COUNTERS 106 } pfrw_op; 107 union { 108 struct pfr_addr *pfrw_addr; 109 struct pfr_astats *pfrw_astats; 110 struct pfr_kentryworkq *pfrw_workq; 111 struct pfr_kentry *pfrw_kentry; 112 struct pfi_dynaddr *pfrw_dyn; 113 }; 114 int pfrw_free; 115 int pfrw_flags; 116 }; 117 118 #define senderr(e) do { rv = (e); goto _bad; } while (0) 119 120 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures"); 121 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z); 122 #define V_pfr_kentry_z VNET(pfr_kentry_z) 123 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z); 124 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z) 125 126 static struct pf_addr pfr_ffaddr = { 127 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } 128 }; 129 130 static void pfr_copyout_astats(struct pfr_astats *, 131 const struct pfr_kentry *, 132 const struct pfr_walktree *); 133 static void pfr_copyout_addr(struct pfr_addr *, 134 const struct pfr_kentry *ke); 135 static int pfr_validate_addr(struct pfr_addr *); 136 static void pfr_enqueue_addrs(struct pfr_ktable *, 137 struct pfr_kentryworkq *, int *, int); 138 static void pfr_mark_addrs(struct pfr_ktable *); 139 static struct pfr_kentry 140 *pfr_lookup_addr(struct pfr_ktable *, 141 struct pfr_addr *, int); 142 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool); 143 static void pfr_destroy_kentries(struct pfr_kentryworkq *); 144 static void pfr_destroy_kentry(struct pfr_kentry *); 145 static void pfr_insert_kentries(struct pfr_ktable *, 146 struct pfr_kentryworkq *, long); 147 static void pfr_remove_kentries(struct pfr_ktable *, 148 struct pfr_kentryworkq *); 149 static void pfr_clstats_kentries(struct pfr_ktable *, 150 struct pfr_kentryworkq *, long, int); 151 static void pfr_reset_feedback(struct pfr_addr *, int); 152 static void pfr_prepare_network(union sockaddr_union *, int, int); 153 static int pfr_route_kentry(struct pfr_ktable *, 154 struct pfr_kentry *); 155 static int pfr_unroute_kentry(struct pfr_ktable *, 156 struct pfr_kentry *); 157 static int pfr_walktree(struct radix_node *, void *); 158 static int pfr_validate_table(struct pfr_table *, int, int); 159 static int pfr_fix_anchor(char *); 160 static void pfr_commit_ktable(struct pfr_ktable *, long); 161 static void pfr_insert_ktables(struct pfr_ktableworkq *); 162 static void pfr_insert_ktable(struct pfr_ktable *); 163 static void pfr_setflags_ktables(struct pfr_ktableworkq *); 164 static void pfr_setflags_ktable(struct pfr_ktable *, int); 165 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 166 int); 167 static void pfr_clstats_ktable(struct pfr_ktable *, long, int); 168 static struct pfr_ktable 169 *pfr_create_ktable(struct pfr_table *, long, int); 170 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 171 static void pfr_destroy_ktable(struct pfr_ktable *, int); 172 static int 
pfr_ktable_compare(struct pfr_ktable *, 173 struct pfr_ktable *); 174 static struct pfr_ktable 175 *pfr_lookup_table(struct pfr_table *); 176 static void pfr_clean_node_mask(struct pfr_ktable *, 177 struct pfr_kentryworkq *); 178 static int pfr_skip_table(struct pfr_table *, 179 struct pfr_ktable *, int); 180 static struct pfr_kentry 181 *pfr_kentry_byidx(struct pfr_ktable *, int, int); 182 183 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 184 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 185 186 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables); 187 #define V_pfr_ktables VNET(pfr_ktables) 188 189 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable); 190 #define V_pfr_nulltable VNET(pfr_nulltable) 191 192 VNET_DEFINE_STATIC(int, pfr_ktable_cnt); 193 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt) 194 195 void 196 pfr_initialize(void) 197 { 198 199 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters", 200 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL, 201 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 202 V_pfr_kentry_z = uma_zcreate("pf table entries", 203 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 204 0); 205 uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT); 206 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z; 207 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; 208 } 209 210 void 211 pfr_cleanup(void) 212 { 213 214 uma_zdestroy(V_pfr_kentry_z); 215 uma_zdestroy(V_pfr_kentry_counter_z); 216 } 217 218 int 219 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 220 { 221 struct pfr_ktable *kt; 222 struct pfr_kentryworkq workq; 223 224 PF_RULES_WASSERT(); 225 226 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 227 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 228 return (EINVAL); 229 kt = pfr_lookup_table(tbl); 230 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 231 return (ESRCH); 232 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 233 return (EPERM); 234 pfr_enqueue_addrs(kt, &workq, ndel, 0); 235 236 if (!(flags & PFR_FLAG_DUMMY)) { 237 pfr_remove_kentries(kt, &workq); 238 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__)); 239 } 240 return (0); 241 } 242 243 int 244 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 245 int *nadd, int flags) 246 { 247 struct pfr_ktable *kt, *tmpkt; 248 struct pfr_kentryworkq workq; 249 struct pfr_kentry *p, *q; 250 struct pfr_addr *ad; 251 int i, rv, xadd = 0; 252 long tzero = time_second; 253 254 PF_RULES_WASSERT(); 255 256 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 257 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 258 return (EINVAL); 259 kt = pfr_lookup_table(tbl); 260 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 261 return (ESRCH); 262 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 263 return (EPERM); 264 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 265 if (tmpkt == NULL) 266 return (ENOMEM); 267 SLIST_INIT(&workq); 268 for (i = 0, ad = addr; i < size; i++, ad++) { 269 if (pfr_validate_addr(ad)) 270 senderr(EINVAL); 271 p = pfr_lookup_addr(kt, ad, 1); 272 q = pfr_lookup_addr(tmpkt, ad, 1); 273 if (flags & PFR_FLAG_FEEDBACK) { 274 if (q != NULL) 275 ad->pfra_fback = PFR_FB_DUPLICATE; 276 else if (p == NULL) 277 ad->pfra_fback = PFR_FB_ADDED; 278 else if (p->pfrke_not != ad->pfra_not) 279 ad->pfra_fback = PFR_FB_CONFLICT; 280 else 281 ad->pfra_fback = PFR_FB_NONE; 282 } 283 if (p == NULL && q == NULL) { 284 p = pfr_create_kentry(ad, 285 
		    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr *ad;
	int i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq addq, delq, changeq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0, xdel = 0, xchange = 0;
	long tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentry *p;
	struct pfr_addr *ad;
	int i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ?
PFR_FB_NOTMATCH : PFR_FB_MATCH); 536 if (p != NULL && !p->pfrke_not) 537 xmatch++; 538 } 539 if (nmatch != NULL) 540 *nmatch = xmatch; 541 return (0); 542 } 543 544 int 545 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 546 int flags) 547 { 548 struct pfr_ktable *kt; 549 struct pfr_walktree w; 550 int rv; 551 552 PF_RULES_RASSERT(); 553 554 ACCEPT_FLAGS(flags, 0); 555 if (pfr_validate_table(tbl, 0, 0)) 556 return (EINVAL); 557 kt = pfr_lookup_table(tbl); 558 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 559 return (ESRCH); 560 if (kt->pfrkt_cnt > *size) { 561 *size = kt->pfrkt_cnt; 562 return (0); 563 } 564 565 bzero(&w, sizeof(w)); 566 w.pfrw_op = PFRW_GET_ADDRS; 567 w.pfrw_addr = addr; 568 w.pfrw_free = kt->pfrkt_cnt; 569 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 570 if (!rv) 571 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 572 pfr_walktree, &w); 573 if (rv) 574 return (rv); 575 576 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__, 577 w.pfrw_free)); 578 579 *size = kt->pfrkt_cnt; 580 return (0); 581 } 582 583 int 584 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 585 int flags) 586 { 587 struct pfr_ktable *kt; 588 struct pfr_walktree w; 589 struct pfr_kentryworkq workq; 590 int rv; 591 long tzero = time_second; 592 593 PF_RULES_RASSERT(); 594 595 /* XXX PFR_FLAG_CLSTATS disabled */ 596 ACCEPT_FLAGS(flags, 0); 597 if (pfr_validate_table(tbl, 0, 0)) 598 return (EINVAL); 599 kt = pfr_lookup_table(tbl); 600 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 601 return (ESRCH); 602 if (kt->pfrkt_cnt > *size) { 603 *size = kt->pfrkt_cnt; 604 return (0); 605 } 606 607 bzero(&w, sizeof(w)); 608 w.pfrw_op = PFRW_GET_ASTATS; 609 w.pfrw_astats = addr; 610 w.pfrw_free = kt->pfrkt_cnt; 611 /* 612 * Flags below are for backward compatibility. It was possible to have 613 * a table without per-entry counters. Now they are always allocated, 614 * we just discard data when reading it if table is not configured to 615 * have counters. 616 */ 617 w.pfrw_flags = kt->pfrkt_flags; 618 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 619 if (!rv) 620 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 621 pfr_walktree, &w); 622 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 623 pfr_enqueue_addrs(kt, &workq, NULL, 0); 624 pfr_clstats_kentries(kt, &workq, tzero, 0); 625 } 626 if (rv) 627 return (rv); 628 629 if (w.pfrw_free) { 630 printf("pfr_get_astats: corruption detected (%d).\n", 631 w.pfrw_free); 632 return (ENOTTY); 633 } 634 *size = kt->pfrkt_cnt; 635 return (0); 636 } 637 638 int 639 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 640 int *nzero, int flags) 641 { 642 struct pfr_ktable *kt; 643 struct pfr_kentryworkq workq; 644 struct pfr_kentry *p; 645 struct pfr_addr *ad; 646 int i, rv, xzero = 0; 647 648 PF_RULES_WASSERT(); 649 650 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 651 if (pfr_validate_table(tbl, 0, 0)) 652 return (EINVAL); 653 kt = pfr_lookup_table(tbl); 654 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 655 return (ESRCH); 656 SLIST_INIT(&workq); 657 for (i = 0, ad = addr; i < size; i++, ad++) { 658 if (pfr_validate_addr(ad)) 659 senderr(EINVAL); 660 p = pfr_lookup_addr(kt, ad, 1); 661 if (flags & PFR_FLAG_FEEDBACK) { 662 ad->pfra_fback = (p != NULL) ? 
663 PFR_FB_CLEARED : PFR_FB_NONE; 664 } 665 if (p != NULL) { 666 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 667 xzero++; 668 } 669 } 670 671 if (!(flags & PFR_FLAG_DUMMY)) 672 pfr_clstats_kentries(kt, &workq, 0, 0); 673 if (nzero != NULL) 674 *nzero = xzero; 675 return (0); 676 _bad: 677 if (flags & PFR_FLAG_FEEDBACK) 678 pfr_reset_feedback(addr, size); 679 return (rv); 680 } 681 682 static int 683 pfr_validate_addr(struct pfr_addr *ad) 684 { 685 int i; 686 687 switch (ad->pfra_af) { 688 #ifdef INET 689 case AF_INET: 690 if (ad->pfra_net > 32) 691 return (-1); 692 break; 693 #endif /* INET */ 694 #ifdef INET6 695 case AF_INET6: 696 if (ad->pfra_net > 128) 697 return (-1); 698 break; 699 #endif /* INET6 */ 700 default: 701 return (-1); 702 } 703 if (ad->pfra_net < 128 && 704 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 705 return (-1); 706 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 707 if (((caddr_t)ad)[i]) 708 return (-1); 709 if (ad->pfra_not && ad->pfra_not != 1) 710 return (-1); 711 if (ad->pfra_fback) 712 return (-1); 713 return (0); 714 } 715 716 static void 717 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 718 int *naddr, int sweep) 719 { 720 struct pfr_walktree w; 721 722 SLIST_INIT(workq); 723 bzero(&w, sizeof(w)); 724 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE; 725 w.pfrw_workq = workq; 726 if (kt->pfrkt_ip4 != NULL) 727 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, 728 pfr_walktree, &w)) 729 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 730 if (kt->pfrkt_ip6 != NULL) 731 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 732 pfr_walktree, &w)) 733 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 734 if (naddr != NULL) 735 *naddr = w.pfrw_free; 736 } 737 738 static void 739 pfr_mark_addrs(struct pfr_ktable *kt) 740 { 741 struct pfr_walktree w; 742 743 bzero(&w, sizeof(w)); 744 w.pfrw_op = PFRW_MARK; 745 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w)) 746 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 747 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w)) 748 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 749 } 750 751 static struct pfr_kentry * 752 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 753 { 754 union sockaddr_union sa, mask; 755 struct radix_head *head = NULL; 756 struct pfr_kentry *ke; 757 758 PF_RULES_ASSERT(); 759 760 bzero(&sa, sizeof(sa)); 761 if (ad->pfra_af == AF_INET) { 762 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 763 head = &kt->pfrkt_ip4->rh; 764 } else if ( ad->pfra_af == AF_INET6 ) { 765 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 766 head = &kt->pfrkt_ip6->rh; 767 } 768 if (ADDR_NETWORK(ad)) { 769 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 770 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 771 if (ke && KENTRY_RNF_ROOT(ke)) 772 ke = NULL; 773 } else { 774 ke = (struct pfr_kentry *)rn_match(&sa, head); 775 if (ke && KENTRY_RNF_ROOT(ke)) 776 ke = NULL; 777 if (exact && ke && KENTRY_NETWORK(ke)) 778 ke = NULL; 779 } 780 return (ke); 781 } 782 783 static struct pfr_kentry * 784 pfr_create_kentry(struct pfr_addr *ad, bool counters) 785 { 786 struct pfr_kentry *ke; 787 counter_u64_t c; 788 789 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); 790 if (ke == NULL) 791 return (NULL); 792 793 if (ad->pfra_af == AF_INET) 794 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 795 else if (ad->pfra_af == AF_INET6) 796 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 797 ke->pfrke_af = ad->pfra_af; 798 
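	/*
	 * The remaining fields mirror the user-supplied pfr_addr: prefix
	 * length and negation flag.  Per-entry pcpu counters are allocated
	 * below only when the caller asked for them, i.e. when the owning
	 * table has PFR_TFLAG_COUNTERS set.
	 */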
ke->pfrke_net = ad->pfra_net; 799 ke->pfrke_not = ad->pfra_not; 800 ke->pfrke_counters.pfrkc_tzero = 0; 801 if (counters) { 802 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO); 803 if (c == NULL) { 804 pfr_destroy_kentry(ke); 805 return (NULL); 806 } 807 ke->pfrke_counters.pfrkc_counters = c; 808 } 809 return (ke); 810 } 811 812 static void 813 pfr_destroy_kentries(struct pfr_kentryworkq *workq) 814 { 815 struct pfr_kentry *p, *q; 816 817 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 818 q = SLIST_NEXT(p, pfrke_workq); 819 pfr_destroy_kentry(p); 820 } 821 } 822 823 static void 824 pfr_destroy_kentry(struct pfr_kentry *ke) 825 { 826 counter_u64_t c; 827 828 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL) 829 uma_zfree_pcpu(V_pfr_kentry_counter_z, c); 830 uma_zfree(V_pfr_kentry_z, ke); 831 } 832 833 static void 834 pfr_insert_kentries(struct pfr_ktable *kt, 835 struct pfr_kentryworkq *workq, long tzero) 836 { 837 struct pfr_kentry *p; 838 int rv, n = 0; 839 840 SLIST_FOREACH(p, workq, pfrke_workq) { 841 rv = pfr_route_kentry(kt, p); 842 if (rv) { 843 printf("pfr_insert_kentries: cannot route entry " 844 "(code=%d).\n", rv); 845 break; 846 } 847 p->pfrke_counters.pfrkc_tzero = tzero; 848 n++; 849 } 850 kt->pfrkt_cnt += n; 851 } 852 853 int 854 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero) 855 { 856 struct pfr_kentry *p; 857 int rv; 858 859 p = pfr_lookup_addr(kt, ad, 1); 860 if (p != NULL) 861 return (0); 862 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 863 if (p == NULL) 864 return (ENOMEM); 865 866 rv = pfr_route_kentry(kt, p); 867 if (rv) 868 return (rv); 869 870 p->pfrke_counters.pfrkc_tzero = tzero; 871 kt->pfrkt_cnt++; 872 873 return (0); 874 } 875 876 static void 877 pfr_remove_kentries(struct pfr_ktable *kt, 878 struct pfr_kentryworkq *workq) 879 { 880 struct pfr_kentry *p; 881 int n = 0; 882 883 SLIST_FOREACH(p, workq, pfrke_workq) { 884 pfr_unroute_kentry(kt, p); 885 n++; 886 } 887 kt->pfrkt_cnt -= n; 888 pfr_destroy_kentries(workq); 889 } 890 891 static void 892 pfr_clean_node_mask(struct pfr_ktable *kt, 893 struct pfr_kentryworkq *workq) 894 { 895 struct pfr_kentry *p; 896 897 SLIST_FOREACH(p, workq, pfrke_workq) 898 pfr_unroute_kentry(kt, p); 899 } 900 901 static void 902 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 903 long tzero, int negchange) 904 { 905 struct pfr_kentry *p; 906 int i; 907 908 SLIST_FOREACH(p, workq, pfrke_workq) { 909 if (negchange) 910 p->pfrke_not = !p->pfrke_not; 911 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0) 912 for (i = 0; i < PFR_NUM_COUNTERS; i++) 913 counter_u64_zero( 914 p->pfrke_counters.pfrkc_counters + i); 915 p->pfrke_counters.pfrkc_tzero = tzero; 916 } 917 } 918 919 static void 920 pfr_reset_feedback(struct pfr_addr *addr, int size) 921 { 922 struct pfr_addr *ad; 923 int i; 924 925 for (i = 0, ad = addr; i < size; i++, ad++) 926 ad->pfra_fback = PFR_FB_NONE; 927 } 928 929 static void 930 pfr_prepare_network(union sockaddr_union *sa, int af, int net) 931 { 932 int i; 933 934 bzero(sa, sizeof(*sa)); 935 if (af == AF_INET) { 936 sa->sin.sin_len = sizeof(sa->sin); 937 sa->sin.sin_family = AF_INET; 938 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; 939 } else if (af == AF_INET6) { 940 sa->sin6.sin6_len = sizeof(sa->sin6); 941 sa->sin6.sin6_family = AF_INET6; 942 for (i = 0; i < 4; i++) { 943 if (net <= 32) { 944 sa->sin6.sin6_addr.s6_addr32[i] = 945 net ? 
htonl(-1 << (32-net)) : 0; 946 break; 947 } 948 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 949 net -= 32; 950 } 951 } 952 } 953 954 static int 955 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 956 { 957 union sockaddr_union mask; 958 struct radix_node *rn; 959 struct radix_head *head = NULL; 960 961 PF_RULES_WASSERT(); 962 963 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 964 if (ke->pfrke_af == AF_INET) 965 head = &kt->pfrkt_ip4->rh; 966 else if (ke->pfrke_af == AF_INET6) 967 head = &kt->pfrkt_ip6->rh; 968 969 if (KENTRY_NETWORK(ke)) { 970 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 971 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 972 } else 973 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 974 975 return (rn == NULL ? -1 : 0); 976 } 977 978 static int 979 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 980 { 981 union sockaddr_union mask; 982 struct radix_node *rn; 983 struct radix_head *head = NULL; 984 985 if (ke->pfrke_af == AF_INET) 986 head = &kt->pfrkt_ip4->rh; 987 else if (ke->pfrke_af == AF_INET6) 988 head = &kt->pfrkt_ip6->rh; 989 990 if (KENTRY_NETWORK(ke)) { 991 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 992 rn = rn_delete(&ke->pfrke_sa, &mask, head); 993 } else 994 rn = rn_delete(&ke->pfrke_sa, NULL, head); 995 996 if (rn == NULL) { 997 printf("pfr_unroute_kentry: delete failed.\n"); 998 return (-1); 999 } 1000 return (0); 1001 } 1002 1003 static void 1004 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke) 1005 { 1006 bzero(ad, sizeof(*ad)); 1007 if (ke == NULL) 1008 return; 1009 ad->pfra_af = ke->pfrke_af; 1010 ad->pfra_net = ke->pfrke_net; 1011 ad->pfra_not = ke->pfrke_not; 1012 if (ad->pfra_af == AF_INET) 1013 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 1014 else if (ad->pfra_af == AF_INET6) 1015 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 1016 } 1017 1018 static void 1019 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke, 1020 const struct pfr_walktree *w) 1021 { 1022 int dir, op; 1023 const struct pfr_kcounters *kc = &ke->pfrke_counters; 1024 1025 bzero(as, sizeof(*as)); 1026 pfr_copyout_addr(&as->pfras_a, ke); 1027 as->pfras_tzero = kc->pfrkc_tzero; 1028 1029 if (! 
(w->pfrw_flags & PFR_TFLAG_COUNTERS) || 1030 kc->pfrkc_counters == NULL) { 1031 bzero(as->pfras_packets, sizeof(as->pfras_packets)); 1032 bzero(as->pfras_bytes, sizeof(as->pfras_bytes)); 1033 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT; 1034 return; 1035 } 1036 1037 for (dir = 0; dir < PFR_DIR_MAX; dir++) { 1038 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) { 1039 as->pfras_packets[dir][op] = counter_u64_fetch( 1040 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS)); 1041 as->pfras_bytes[dir][op] = counter_u64_fetch( 1042 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES)); 1043 } 1044 } 1045 } 1046 1047 static int 1048 pfr_walktree(struct radix_node *rn, void *arg) 1049 { 1050 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1051 struct pfr_walktree *w = arg; 1052 1053 switch (w->pfrw_op) { 1054 case PFRW_MARK: 1055 ke->pfrke_mark = 0; 1056 break; 1057 case PFRW_SWEEP: 1058 if (ke->pfrke_mark) 1059 break; 1060 /* FALLTHROUGH */ 1061 case PFRW_ENQUEUE: 1062 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1063 w->pfrw_free++; 1064 break; 1065 case PFRW_GET_ADDRS: 1066 if (w->pfrw_free-- > 0) { 1067 pfr_copyout_addr(w->pfrw_addr, ke); 1068 w->pfrw_addr++; 1069 } 1070 break; 1071 case PFRW_GET_ASTATS: 1072 if (w->pfrw_free-- > 0) { 1073 struct pfr_astats as; 1074 1075 pfr_copyout_astats(&as, ke, w); 1076 1077 bcopy(&as, w->pfrw_astats, sizeof(as)); 1078 w->pfrw_astats++; 1079 } 1080 break; 1081 case PFRW_POOL_GET: 1082 if (ke->pfrke_not) 1083 break; /* negative entries are ignored */ 1084 if (!w->pfrw_free--) { 1085 w->pfrw_kentry = ke; 1086 return (1); /* finish search */ 1087 } 1088 break; 1089 case PFRW_DYNADDR_UPDATE: 1090 { 1091 union sockaddr_union pfr_mask; 1092 1093 if (ke->pfrke_af == AF_INET) { 1094 if (w->pfrw_dyn->pfid_acnt4++ > 0) 1095 break; 1096 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); 1097 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa, 1098 AF_INET); 1099 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask, 1100 AF_INET); 1101 } else if (ke->pfrke_af == AF_INET6){ 1102 if (w->pfrw_dyn->pfid_acnt6++ > 0) 1103 break; 1104 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); 1105 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa, 1106 AF_INET6); 1107 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask, 1108 AF_INET6); 1109 } 1110 break; 1111 } 1112 case PFRW_COUNTERS: 1113 { 1114 if (w->pfrw_flags & PFR_TFLAG_COUNTERS) { 1115 if (ke->pfrke_counters.pfrkc_counters != NULL) 1116 break; 1117 ke->pfrke_counters.pfrkc_counters = 1118 uma_zalloc_pcpu(V_pfr_kentry_counter_z, 1119 M_NOWAIT | M_ZERO); 1120 } else { 1121 uma_zfree_pcpu(V_pfr_kentry_counter_z, 1122 ke->pfrke_counters.pfrkc_counters); 1123 ke->pfrke_counters.pfrkc_counters = NULL; 1124 } 1125 break; 1126 } 1127 } 1128 return (0); 1129 } 1130 1131 int 1132 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1133 { 1134 struct pfr_ktableworkq workq; 1135 struct pfr_ktable *p; 1136 int xdel = 0; 1137 1138 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS); 1139 if (pfr_fix_anchor(filter->pfrt_anchor)) 1140 return (EINVAL); 1141 if (pfr_table_count(filter, flags) < 0) 1142 return (ENOENT); 1143 1144 SLIST_INIT(&workq); 1145 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1146 if (pfr_skip_table(filter, p, flags)) 1147 continue; 1148 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) 1149 continue; 1150 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1151 continue; 1152 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1153 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1154 xdel++; 1155 } 1156 if (!(flags & 
PFR_FLAG_DUMMY)) 1157 pfr_setflags_ktables(&workq); 1158 if (ndel != NULL) 1159 *ndel = xdel; 1160 return (0); 1161 } 1162 1163 int 1164 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1165 { 1166 struct pfr_ktableworkq addq, changeq; 1167 struct pfr_ktable *p, *q, *r, key; 1168 int i, rv, xadd = 0; 1169 long tzero = time_second; 1170 1171 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1172 SLIST_INIT(&addq); 1173 SLIST_INIT(&changeq); 1174 for (i = 0; i < size; i++) { 1175 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1176 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, 1177 flags & PFR_FLAG_USERIOCTL)) 1178 senderr(EINVAL); 1179 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1180 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1181 if (p == NULL) { 1182 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1183 if (p == NULL) 1184 senderr(ENOMEM); 1185 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1186 if (!pfr_ktable_compare(p, q)) { 1187 pfr_destroy_ktable(p, 0); 1188 goto _skip; 1189 } 1190 } 1191 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1192 xadd++; 1193 if (!key.pfrkt_anchor[0]) 1194 goto _skip; 1195 1196 /* find or create root table */ 1197 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1198 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1199 if (r != NULL) { 1200 p->pfrkt_root = r; 1201 goto _skip; 1202 } 1203 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1204 if (!pfr_ktable_compare(&key, q)) { 1205 p->pfrkt_root = q; 1206 goto _skip; 1207 } 1208 } 1209 key.pfrkt_flags = 0; 1210 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1211 if (r == NULL) 1212 senderr(ENOMEM); 1213 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1214 p->pfrkt_root = r; 1215 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1216 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1217 if (!pfr_ktable_compare(&key, q)) 1218 goto _skip; 1219 p->pfrkt_nflags = (p->pfrkt_flags & 1220 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1221 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1222 xadd++; 1223 } 1224 _skip: 1225 ; 1226 } 1227 if (!(flags & PFR_FLAG_DUMMY)) { 1228 pfr_insert_ktables(&addq); 1229 pfr_setflags_ktables(&changeq); 1230 } else 1231 pfr_destroy_ktables(&addq, 0); 1232 if (nadd != NULL) 1233 *nadd = xadd; 1234 return (0); 1235 _bad: 1236 pfr_destroy_ktables(&addq, 0); 1237 return (rv); 1238 } 1239 1240 int 1241 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1242 { 1243 struct pfr_ktableworkq workq; 1244 struct pfr_ktable *p, *q, key; 1245 int i, xdel = 0; 1246 1247 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1248 SLIST_INIT(&workq); 1249 for (i = 0; i < size; i++) { 1250 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1251 if (pfr_validate_table(&key.pfrkt_t, 0, 1252 flags & PFR_FLAG_USERIOCTL)) 1253 return (EINVAL); 1254 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1255 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1256 SLIST_FOREACH(q, &workq, pfrkt_workq) 1257 if (!pfr_ktable_compare(p, q)) 1258 goto _skip; 1259 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1260 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1261 xdel++; 1262 } 1263 _skip: 1264 ; 1265 } 1266 1267 if (!(flags & PFR_FLAG_DUMMY)) 1268 pfr_setflags_ktables(&workq); 1269 if (ndel != NULL) 1270 *ndel = xdel; 1271 return (0); 1272 } 1273 1274 int 1275 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1276 int flags) 1277 { 1278 struct pfr_ktable *p; 1279 int n, nn; 1280 1281 PF_RULES_RASSERT(); 1282 1283 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1284 if (pfr_fix_anchor(filter->pfrt_anchor)) 1285 return 
(EINVAL); 1286 n = nn = pfr_table_count(filter, flags); 1287 if (n < 0) 1288 return (ENOENT); 1289 if (n > *size) { 1290 *size = n; 1291 return (0); 1292 } 1293 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1294 if (pfr_skip_table(filter, p, flags)) 1295 continue; 1296 if (n-- <= 0) 1297 continue; 1298 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl)); 1299 } 1300 1301 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1302 1303 *size = nn; 1304 return (0); 1305 } 1306 1307 int 1308 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1309 int flags) 1310 { 1311 struct pfr_ktable *p; 1312 struct pfr_ktableworkq workq; 1313 int n, nn; 1314 long tzero = time_second; 1315 int pfr_dir, pfr_op; 1316 1317 /* XXX PFR_FLAG_CLSTATS disabled */ 1318 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1319 if (pfr_fix_anchor(filter->pfrt_anchor)) 1320 return (EINVAL); 1321 n = nn = pfr_table_count(filter, flags); 1322 if (n < 0) 1323 return (ENOENT); 1324 if (n > *size) { 1325 *size = n; 1326 return (0); 1327 } 1328 SLIST_INIT(&workq); 1329 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1330 if (pfr_skip_table(filter, p, flags)) 1331 continue; 1332 if (n-- <= 0) 1333 continue; 1334 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t, 1335 sizeof(struct pfr_table)); 1336 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1337 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1338 tbl->pfrts_packets[pfr_dir][pfr_op] = 1339 pfr_kstate_counter_fetch( 1340 &p->pfrkt_packets[pfr_dir][pfr_op]); 1341 tbl->pfrts_bytes[pfr_dir][pfr_op] = 1342 pfr_kstate_counter_fetch( 1343 &p->pfrkt_bytes[pfr_dir][pfr_op]); 1344 } 1345 } 1346 tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match); 1347 tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch); 1348 tbl->pfrts_tzero = p->pfrkt_tzero; 1349 tbl->pfrts_cnt = p->pfrkt_cnt; 1350 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++) 1351 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op]; 1352 tbl++; 1353 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1354 } 1355 if (flags & PFR_FLAG_CLSTATS) 1356 pfr_clstats_ktables(&workq, tzero, 1357 flags & PFR_FLAG_ADDRSTOO); 1358 1359 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1360 1361 *size = nn; 1362 return (0); 1363 } 1364 1365 int 1366 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1367 { 1368 struct pfr_ktableworkq workq; 1369 struct pfr_ktable *p, key; 1370 int i, xzero = 0; 1371 long tzero = time_second; 1372 1373 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1374 SLIST_INIT(&workq); 1375 for (i = 0; i < size; i++) { 1376 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1377 if (pfr_validate_table(&key.pfrkt_t, 0, 0)) 1378 return (EINVAL); 1379 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1380 if (p != NULL) { 1381 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1382 xzero++; 1383 } 1384 } 1385 if (!(flags & PFR_FLAG_DUMMY)) 1386 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1387 if (nzero != NULL) 1388 *nzero = xzero; 1389 return (0); 1390 } 1391 1392 int 1393 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1394 int *nchange, int *ndel, int flags) 1395 { 1396 struct pfr_ktableworkq workq; 1397 struct pfr_ktable *p, *q, key; 1398 int i, xchange = 0, xdel = 0; 1399 1400 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1401 if ((setflag & ~PFR_TFLAG_USRMASK) || 1402 (clrflag & ~PFR_TFLAG_USRMASK) || 1403 (setflag & clrflag)) 1404 return (EINVAL); 1405 SLIST_INIT(&workq); 1406 for (i = 0; i < size; i++) { 
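		/*
		 * For each table handed in by userland: validate it, look it
		 * up in the kernel table tree and compute the new flag word
		 * ((current flags | setflag) & ~clrflag).  Tables whose flags
		 * would not change, or that are already queued, are skipped;
		 * clearing PERSIST on an otherwise unreferenced table counts
		 * as a deletion, everything else as a change.
		 */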
1407 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1408 if (pfr_validate_table(&key.pfrkt_t, 0, 1409 flags & PFR_FLAG_USERIOCTL)) 1410 return (EINVAL); 1411 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1412 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1413 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1414 ~clrflag; 1415 if (p->pfrkt_nflags == p->pfrkt_flags) 1416 goto _skip; 1417 SLIST_FOREACH(q, &workq, pfrkt_workq) 1418 if (!pfr_ktable_compare(p, q)) 1419 goto _skip; 1420 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1421 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1422 (clrflag & PFR_TFLAG_PERSIST) && 1423 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1424 xdel++; 1425 else 1426 xchange++; 1427 } 1428 _skip: 1429 ; 1430 } 1431 if (!(flags & PFR_FLAG_DUMMY)) 1432 pfr_setflags_ktables(&workq); 1433 if (nchange != NULL) 1434 *nchange = xchange; 1435 if (ndel != NULL) 1436 *ndel = xdel; 1437 return (0); 1438 } 1439 1440 int 1441 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1442 { 1443 struct pfr_ktableworkq workq; 1444 struct pfr_ktable *p; 1445 struct pf_kruleset *rs; 1446 int xdel = 0; 1447 1448 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1449 rs = pf_find_or_create_kruleset(trs->pfrt_anchor); 1450 if (rs == NULL) 1451 return (ENOMEM); 1452 SLIST_INIT(&workq); 1453 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1454 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1455 pfr_skip_table(trs, p, 0)) 1456 continue; 1457 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1458 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1459 xdel++; 1460 } 1461 if (!(flags & PFR_FLAG_DUMMY)) { 1462 pfr_setflags_ktables(&workq); 1463 if (ticket != NULL) 1464 *ticket = ++rs->tticket; 1465 rs->topen = 1; 1466 } else 1467 pf_remove_if_empty_kruleset(rs); 1468 if (ndel != NULL) 1469 *ndel = xdel; 1470 return (0); 1471 } 1472 1473 int 1474 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1475 int *nadd, int *naddr, u_int32_t ticket, int flags) 1476 { 1477 struct pfr_ktableworkq tableq; 1478 struct pfr_kentryworkq addrq; 1479 struct pfr_ktable *kt, *rt, *shadow, key; 1480 struct pfr_kentry *p; 1481 struct pfr_addr *ad; 1482 struct pf_kruleset *rs; 1483 int i, rv, xadd = 0, xaddr = 0; 1484 1485 PF_RULES_WASSERT(); 1486 1487 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1488 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1489 return (EINVAL); 1490 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, 1491 flags & PFR_FLAG_USERIOCTL)) 1492 return (EINVAL); 1493 rs = pf_find_kruleset(tbl->pfrt_anchor); 1494 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1495 return (EBUSY); 1496 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1497 SLIST_INIT(&tableq); 1498 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl); 1499 if (kt == NULL) { 1500 kt = pfr_create_ktable(tbl, 0, 1); 1501 if (kt == NULL) 1502 return (ENOMEM); 1503 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1504 xadd++; 1505 if (!tbl->pfrt_anchor[0]) 1506 goto _skip; 1507 1508 /* find or create root table */ 1509 bzero(&key, sizeof(key)); 1510 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1511 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1512 if (rt != NULL) { 1513 kt->pfrkt_root = rt; 1514 goto _skip; 1515 } 1516 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1517 if (rt == NULL) { 1518 pfr_destroy_ktables(&tableq, 0); 1519 return (ENOMEM); 1520 } 1521 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1522 kt->pfrkt_root = rt; 1523 } else if (!(kt->pfrkt_flags & 
PFR_TFLAG_INACTIVE)) 1524 xadd++; 1525 _skip: 1526 shadow = pfr_create_ktable(tbl, 0, 0); 1527 if (shadow == NULL) { 1528 pfr_destroy_ktables(&tableq, 0); 1529 return (ENOMEM); 1530 } 1531 SLIST_INIT(&addrq); 1532 for (i = 0, ad = addr; i < size; i++, ad++) { 1533 if (pfr_validate_addr(ad)) 1534 senderr(EINVAL); 1535 if (pfr_lookup_addr(shadow, ad, 1) != NULL) 1536 continue; 1537 p = pfr_create_kentry(ad, 1538 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 1539 if (p == NULL) 1540 senderr(ENOMEM); 1541 if (pfr_route_kentry(shadow, p)) { 1542 pfr_destroy_kentry(p); 1543 continue; 1544 } 1545 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1546 xaddr++; 1547 } 1548 if (!(flags & PFR_FLAG_DUMMY)) { 1549 if (kt->pfrkt_shadow != NULL) 1550 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1551 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1552 pfr_insert_ktables(&tableq); 1553 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 1554 xaddr : NO_ADDRESSES; 1555 kt->pfrkt_shadow = shadow; 1556 } else { 1557 pfr_clean_node_mask(shadow, &addrq); 1558 pfr_destroy_ktable(shadow, 0); 1559 pfr_destroy_ktables(&tableq, 0); 1560 pfr_destroy_kentries(&addrq); 1561 } 1562 if (nadd != NULL) 1563 *nadd = xadd; 1564 if (naddr != NULL) 1565 *naddr = xaddr; 1566 return (0); 1567 _bad: 1568 pfr_destroy_ktable(shadow, 0); 1569 pfr_destroy_ktables(&tableq, 0); 1570 pfr_destroy_kentries(&addrq); 1571 return (rv); 1572 } 1573 1574 int 1575 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) 1576 { 1577 struct pfr_ktableworkq workq; 1578 struct pfr_ktable *p; 1579 struct pf_kruleset *rs; 1580 int xdel = 0; 1581 1582 PF_RULES_WASSERT(); 1583 1584 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1585 rs = pf_find_kruleset(trs->pfrt_anchor); 1586 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1587 return (0); 1588 SLIST_INIT(&workq); 1589 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1590 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1591 pfr_skip_table(trs, p, 0)) 1592 continue; 1593 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1594 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1595 xdel++; 1596 } 1597 if (!(flags & PFR_FLAG_DUMMY)) { 1598 pfr_setflags_ktables(&workq); 1599 rs->topen = 0; 1600 pf_remove_if_empty_kruleset(rs); 1601 } 1602 if (ndel != NULL) 1603 *ndel = xdel; 1604 return (0); 1605 } 1606 1607 int 1608 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1609 int *nchange, int flags) 1610 { 1611 struct pfr_ktable *p, *q; 1612 struct pfr_ktableworkq workq; 1613 struct pf_kruleset *rs; 1614 int xadd = 0, xchange = 0; 1615 long tzero = time_second; 1616 1617 PF_RULES_WASSERT(); 1618 1619 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1620 rs = pf_find_kruleset(trs->pfrt_anchor); 1621 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1622 return (EBUSY); 1623 1624 SLIST_INIT(&workq); 1625 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1626 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1627 pfr_skip_table(trs, p, 0)) 1628 continue; 1629 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1630 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1631 xchange++; 1632 else 1633 xadd++; 1634 } 1635 1636 if (!(flags & PFR_FLAG_DUMMY)) { 1637 for (p = SLIST_FIRST(&workq); p != NULL; p = q) { 1638 q = SLIST_NEXT(p, pfrkt_workq); 1639 pfr_commit_ktable(p, tzero); 1640 } 1641 rs->topen = 0; 1642 pf_remove_if_empty_kruleset(rs); 1643 } 1644 if (nadd != NULL) 1645 *nadd = xadd; 1646 if (nchange != NULL) 1647 *nchange = xchange; 1648 1649 return (0); 1650 } 1651 1652 static void 1653 pfr_commit_ktable(struct pfr_ktable 
*kt, long tzero) 1654 { 1655 counter_u64_t *pkc, *qkc; 1656 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1657 int nflags; 1658 1659 PF_RULES_WASSERT(); 1660 1661 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1662 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1663 pfr_clstats_ktable(kt, tzero, 1); 1664 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1665 /* kt might contain addresses */ 1666 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1667 struct pfr_kentry *p, *q, *next; 1668 struct pfr_addr ad; 1669 1670 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1671 pfr_mark_addrs(kt); 1672 SLIST_INIT(&addq); 1673 SLIST_INIT(&changeq); 1674 SLIST_INIT(&delq); 1675 SLIST_INIT(&garbageq); 1676 pfr_clean_node_mask(shadow, &addrq); 1677 SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) { 1678 pfr_copyout_addr(&ad, p); 1679 q = pfr_lookup_addr(kt, &ad, 1); 1680 if (q != NULL) { 1681 if (q->pfrke_not != p->pfrke_not) 1682 SLIST_INSERT_HEAD(&changeq, q, 1683 pfrke_workq); 1684 pkc = &p->pfrke_counters.pfrkc_counters; 1685 qkc = &q->pfrke_counters.pfrkc_counters; 1686 if ((*pkc == NULL) != (*qkc == NULL)) 1687 SWAP(counter_u64_t, *pkc, *qkc); 1688 q->pfrke_mark = 1; 1689 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1690 } else { 1691 p->pfrke_counters.pfrkc_tzero = tzero; 1692 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1693 } 1694 } 1695 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1696 pfr_insert_kentries(kt, &addq, tzero); 1697 pfr_remove_kentries(kt, &delq); 1698 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 1699 pfr_destroy_kentries(&garbageq); 1700 } else { 1701 /* kt cannot contain addresses */ 1702 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1703 shadow->pfrkt_ip4); 1704 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1705 shadow->pfrkt_ip6); 1706 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1707 pfr_clstats_ktable(kt, tzero, 1); 1708 } 1709 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1710 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1711 & ~PFR_TFLAG_INACTIVE; 1712 pfr_destroy_ktable(shadow, 0); 1713 kt->pfrkt_shadow = NULL; 1714 pfr_setflags_ktable(kt, nflags); 1715 } 1716 1717 static int 1718 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) 1719 { 1720 int i; 1721 1722 if (!tbl->pfrt_name[0]) 1723 return (-1); 1724 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) 1725 return (-1); 1726 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1727 return (-1); 1728 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1729 if (tbl->pfrt_name[i]) 1730 return (-1); 1731 if (pfr_fix_anchor(tbl->pfrt_anchor)) 1732 return (-1); 1733 if (tbl->pfrt_flags & ~allowedflags) 1734 return (-1); 1735 return (0); 1736 } 1737 1738 /* 1739 * Rewrite anchors referenced by tables to remove slashes 1740 * and check for validity. 
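 * For example (illustrative): an anchor passed in as "/foo/bar" is rewritten
 * in place to "foo/bar"; anchors that are not NUL-terminated and zero-padded
 * out to MAXPATHLEN are rejected with -1.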
1741 */ 1742 static int 1743 pfr_fix_anchor(char *anchor) 1744 { 1745 size_t siz = MAXPATHLEN; 1746 int i; 1747 1748 if (anchor[0] == '/') { 1749 char *path; 1750 int off; 1751 1752 path = anchor; 1753 off = 1; 1754 while (*++path == '/') 1755 off++; 1756 bcopy(path, anchor, siz - off); 1757 memset(anchor + siz - off, 0, off); 1758 } 1759 if (anchor[siz - 1]) 1760 return (-1); 1761 for (i = strlen(anchor); i < siz; i++) 1762 if (anchor[i]) 1763 return (-1); 1764 return (0); 1765 } 1766 1767 int 1768 pfr_table_count(struct pfr_table *filter, int flags) 1769 { 1770 struct pf_kruleset *rs; 1771 1772 PF_RULES_ASSERT(); 1773 1774 if (flags & PFR_FLAG_ALLRSETS) 1775 return (V_pfr_ktable_cnt); 1776 if (filter->pfrt_anchor[0]) { 1777 rs = pf_find_kruleset(filter->pfrt_anchor); 1778 return ((rs != NULL) ? rs->tables : -1); 1779 } 1780 return (pf_main_ruleset.tables); 1781 } 1782 1783 static int 1784 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1785 { 1786 if (flags & PFR_FLAG_ALLRSETS) 1787 return (0); 1788 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) 1789 return (1); 1790 return (0); 1791 } 1792 1793 static void 1794 pfr_insert_ktables(struct pfr_ktableworkq *workq) 1795 { 1796 struct pfr_ktable *p; 1797 1798 SLIST_FOREACH(p, workq, pfrkt_workq) 1799 pfr_insert_ktable(p); 1800 } 1801 1802 static void 1803 pfr_insert_ktable(struct pfr_ktable *kt) 1804 { 1805 1806 PF_RULES_WASSERT(); 1807 1808 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt); 1809 V_pfr_ktable_cnt++; 1810 if (kt->pfrkt_root != NULL) 1811 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1812 pfr_setflags_ktable(kt->pfrkt_root, 1813 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1814 } 1815 1816 static void 1817 pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1818 { 1819 struct pfr_ktable *p, *q; 1820 1821 for (p = SLIST_FIRST(workq); p; p = q) { 1822 q = SLIST_NEXT(p, pfrkt_workq); 1823 pfr_setflags_ktable(p, p->pfrkt_nflags); 1824 } 1825 } 1826 1827 static void 1828 pfr_setflags_ktable(struct pfr_ktable *kt, int newf) 1829 { 1830 struct pfr_kentryworkq addrq; 1831 struct pfr_walktree w; 1832 1833 PF_RULES_WASSERT(); 1834 1835 if (!(newf & PFR_TFLAG_REFERENCED) && 1836 !(newf & PFR_TFLAG_REFDANCHOR) && 1837 !(newf & PFR_TFLAG_PERSIST)) 1838 newf &= ~PFR_TFLAG_ACTIVE; 1839 if (!(newf & PFR_TFLAG_ACTIVE)) 1840 newf &= ~PFR_TFLAG_USRMASK; 1841 if (!(newf & PFR_TFLAG_SETMASK)) { 1842 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt); 1843 if (kt->pfrkt_root != NULL) 1844 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1845 pfr_setflags_ktable(kt->pfrkt_root, 1846 kt->pfrkt_root->pfrkt_flags & 1847 ~PFR_TFLAG_REFDANCHOR); 1848 pfr_destroy_ktable(kt, 1); 1849 V_pfr_ktable_cnt--; 1850 return; 1851 } 1852 if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 1853 bzero(&w, sizeof(w)); 1854 w.pfrw_op = PFRW_COUNTERS; 1855 w.pfrw_flags |= PFR_TFLAG_COUNTERS; 1856 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 1857 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 1858 } 1859 if (! 
(newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 1860 bzero(&w, sizeof(w)); 1861 w.pfrw_op = PFRW_COUNTERS; 1862 w.pfrw_flags |= 0; 1863 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 1864 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 1865 } 1866 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1867 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1868 pfr_remove_kentries(kt, &addrq); 1869 } 1870 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1871 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1872 kt->pfrkt_shadow = NULL; 1873 } 1874 kt->pfrkt_flags = newf; 1875 } 1876 1877 static void 1878 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1879 { 1880 struct pfr_ktable *p; 1881 1882 SLIST_FOREACH(p, workq, pfrkt_workq) 1883 pfr_clstats_ktable(p, tzero, recurse); 1884 } 1885 1886 static void 1887 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1888 { 1889 struct pfr_kentryworkq addrq; 1890 int pfr_dir, pfr_op; 1891 1892 MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED()); 1893 1894 if (recurse) { 1895 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1896 pfr_clstats_kentries(kt, &addrq, tzero, 0); 1897 } 1898 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1899 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1900 pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]); 1901 pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]); 1902 } 1903 } 1904 pfr_kstate_counter_zero(&kt->pfrkt_match); 1905 pfr_kstate_counter_zero(&kt->pfrkt_nomatch); 1906 kt->pfrkt_tzero = tzero; 1907 } 1908 1909 static struct pfr_ktable * 1910 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 1911 { 1912 struct pfr_ktable *kt; 1913 struct pf_kruleset *rs; 1914 int pfr_dir, pfr_op; 1915 1916 PF_RULES_WASSERT(); 1917 1918 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO); 1919 if (kt == NULL) 1920 return (NULL); 1921 kt->pfrkt_t = *tbl; 1922 1923 if (attachruleset) { 1924 rs = pf_find_or_create_kruleset(tbl->pfrt_anchor); 1925 if (!rs) { 1926 pfr_destroy_ktable(kt, 0); 1927 return (NULL); 1928 } 1929 kt->pfrkt_rs = rs; 1930 rs->tables++; 1931 } 1932 1933 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1934 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1935 if (pfr_kstate_counter_init( 1936 &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) { 1937 pfr_destroy_ktable(kt, 0); 1938 return (NULL); 1939 } 1940 if (pfr_kstate_counter_init( 1941 &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) { 1942 pfr_destroy_ktable(kt, 0); 1943 return (NULL); 1944 } 1945 } 1946 } 1947 if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) { 1948 pfr_destroy_ktable(kt, 0); 1949 return (NULL); 1950 } 1951 1952 if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) { 1953 pfr_destroy_ktable(kt, 0); 1954 return (NULL); 1955 } 1956 1957 if (!rn_inithead((void **)&kt->pfrkt_ip4, 1958 offsetof(struct sockaddr_in, sin_addr) * 8) || 1959 !rn_inithead((void **)&kt->pfrkt_ip6, 1960 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 1961 pfr_destroy_ktable(kt, 0); 1962 return (NULL); 1963 } 1964 kt->pfrkt_tzero = tzero; 1965 1966 return (kt); 1967 } 1968 1969 static void 1970 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 1971 { 1972 struct pfr_ktable *p, *q; 1973 1974 for (p = SLIST_FIRST(workq); p; p = q) { 1975 q = SLIST_NEXT(p, pfrkt_workq); 1976 pfr_destroy_ktable(p, flushaddr); 1977 } 1978 } 1979 1980 static void 1981 
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 1982 { 1983 struct pfr_kentryworkq addrq; 1984 int pfr_dir, pfr_op; 1985 1986 if (flushaddr) { 1987 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1988 pfr_clean_node_mask(kt, &addrq); 1989 pfr_destroy_kentries(&addrq); 1990 } 1991 if (kt->pfrkt_ip4 != NULL) 1992 rn_detachhead((void **)&kt->pfrkt_ip4); 1993 if (kt->pfrkt_ip6 != NULL) 1994 rn_detachhead((void **)&kt->pfrkt_ip6); 1995 if (kt->pfrkt_shadow != NULL) 1996 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 1997 if (kt->pfrkt_rs != NULL) { 1998 kt->pfrkt_rs->tables--; 1999 pf_remove_if_empty_kruleset(kt->pfrkt_rs); 2000 } 2001 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 2002 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 2003 pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]); 2004 pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]); 2005 } 2006 } 2007 pfr_kstate_counter_deinit(&kt->pfrkt_match); 2008 pfr_kstate_counter_deinit(&kt->pfrkt_nomatch); 2009 2010 free(kt, M_PFTABLE); 2011 } 2012 2013 static int 2014 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) 2015 { 2016 int d; 2017 2018 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 2019 return (d); 2020 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); 2021 } 2022 2023 static struct pfr_ktable * 2024 pfr_lookup_table(struct pfr_table *tbl) 2025 { 2026 /* struct pfr_ktable start like a struct pfr_table */ 2027 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables, 2028 (struct pfr_ktable *)tbl)); 2029 } 2030 2031 int 2032 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 2033 { 2034 struct pfr_kentry *ke = NULL; 2035 int match; 2036 2037 PF_RULES_RASSERT(); 2038 2039 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2040 kt = kt->pfrkt_root; 2041 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2042 return (0); 2043 2044 switch (af) { 2045 #ifdef INET 2046 case AF_INET: 2047 { 2048 struct sockaddr_in sin; 2049 2050 bzero(&sin, sizeof(sin)); 2051 sin.sin_len = sizeof(sin); 2052 sin.sin_family = AF_INET; 2053 sin.sin_addr.s_addr = a->addr32[0]; 2054 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 2055 if (ke && KENTRY_RNF_ROOT(ke)) 2056 ke = NULL; 2057 break; 2058 } 2059 #endif /* INET */ 2060 #ifdef INET6 2061 case AF_INET6: 2062 { 2063 struct sockaddr_in6 sin6; 2064 2065 bzero(&sin6, sizeof(sin6)); 2066 sin6.sin6_len = sizeof(sin6); 2067 sin6.sin6_family = AF_INET6; 2068 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 2069 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 2070 if (ke && KENTRY_RNF_ROOT(ke)) 2071 ke = NULL; 2072 break; 2073 } 2074 #endif /* INET6 */ 2075 } 2076 match = (ke && !ke->pfrke_not); 2077 if (match) 2078 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2079 else 2080 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1); 2081 return (match); 2082 } 2083 2084 void 2085 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 2086 u_int64_t len, int dir_out, int op_pass, int notrule) 2087 { 2088 struct pfr_kentry *ke = NULL; 2089 2090 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2091 kt = kt->pfrkt_root; 2092 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2093 return; 2094 2095 switch (af) { 2096 #ifdef INET 2097 case AF_INET: 2098 { 2099 struct sockaddr_in sin; 2100 2101 bzero(&sin, sizeof(sin)); 2102 sin.sin_len = sizeof(sin); 2103 sin.sin_family = AF_INET; 2104 sin.sin_addr.s_addr = a->addr32[0]; 2105 ke = (struct pfr_kentry *)rn_match(&sin, 
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_BYTES), len);
	}
}

/*
 * Attach a table to an Ethernet ruleset: find or create it (and, inside an
 * anchor, its root table) and take a rule reference on it.
 */
struct pfr_ktable *
pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_keth_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

/*
 * Same as above for regular rulesets: find or create the named table and
 * take a rule reference on it.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_kruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_kanchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

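/*
 * Release a rule reference on the table; the REFERENCED flag is cleared
 * once the last reference is dropped.
 */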
%d\n", 2228 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE])); 2229 2230 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2231 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2232 } 2233 2234 int 2235 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, 2236 sa_family_t af) 2237 { 2238 struct pf_addr *addr, *cur, *mask; 2239 union sockaddr_union uaddr, umask; 2240 struct pfr_kentry *ke, *ke2 = NULL; 2241 int idx = -1, use_counter = 0; 2242 2243 MPASS(pidx != NULL); 2244 MPASS(counter != NULL); 2245 2246 switch (af) { 2247 case AF_INET: 2248 uaddr.sin.sin_len = sizeof(struct sockaddr_in); 2249 uaddr.sin.sin_family = AF_INET; 2250 break; 2251 case AF_INET6: 2252 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6); 2253 uaddr.sin6.sin6_family = AF_INET6; 2254 break; 2255 } 2256 addr = SUNION2PF(&uaddr, af); 2257 2258 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2259 kt = kt->pfrkt_root; 2260 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2261 return (-1); 2262 2263 idx = *pidx; 2264 if (idx >= 0) 2265 use_counter = 1; 2266 if (idx < 0) 2267 idx = 0; 2268 2269 _next_block: 2270 ke = pfr_kentry_byidx(kt, idx, af); 2271 if (ke == NULL) { 2272 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1); 2273 return (1); 2274 } 2275 pfr_prepare_network(&umask, af, ke->pfrke_net); 2276 cur = SUNION2PF(&ke->pfrke_sa, af); 2277 mask = SUNION2PF(&umask, af); 2278 2279 if (use_counter) { 2280 /* is supplied address within block? */ 2281 if (!PF_MATCHA(0, cur, mask, counter, af)) { 2282 /* no, go to next block in table */ 2283 idx++; 2284 use_counter = 0; 2285 goto _next_block; 2286 } 2287 PF_ACPY(addr, counter, af); 2288 } else { 2289 /* use first address of block */ 2290 PF_ACPY(addr, cur, af); 2291 } 2292 2293 if (!KENTRY_NETWORK(ke)) { 2294 /* this is a single IP address - no possible nested block */ 2295 PF_ACPY(counter, addr, af); 2296 *pidx = idx; 2297 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2298 return (0); 2299 } 2300 for (;;) { 2301 /* we don't want to use a nested block */ 2302 switch (af) { 2303 case AF_INET: 2304 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2305 &kt->pfrkt_ip4->rh); 2306 break; 2307 case AF_INET6: 2308 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2309 &kt->pfrkt_ip6->rh); 2310 break; 2311 } 2312 /* no need to check KENTRY_RNF_ROOT() here */ 2313 if (ke2 == ke) { 2314 /* lookup return the same block - perfect */ 2315 PF_ACPY(counter, addr, af); 2316 *pidx = idx; 2317 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2318 return (0); 2319 } 2320 2321 /* we need to increase the counter past the nested block */ 2322 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net); 2323 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af); 2324 PF_AINC(addr, af); 2325 if (!PF_MATCHA(0, cur, mask, addr, af)) { 2326 /* ok, we reached the end of our main block */ 2327 /* go to next block in table */ 2328 idx++; 2329 use_counter = 0; 2330 goto _next_block; 2331 } 2332 } 2333 } 2334 2335 static struct pfr_kentry * 2336 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) 2337 { 2338 struct pfr_walktree w; 2339 2340 bzero(&w, sizeof(w)); 2341 w.pfrw_op = PFRW_POOL_GET; 2342 w.pfrw_free = idx; 2343 2344 switch (af) { 2345 #ifdef INET 2346 case AF_INET: 2347 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2348 return (w.pfrw_kentry); 2349 #endif /* INET */ 2350 #ifdef INET6 2351 case AF_INET6: 2352 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2353 return (w.pfrw_kentry); 2354 #endif /* INET6 */ 2355 default: 2356 
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_free = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

/*
 * Refresh a dynamic address from the table: reset the per-family address
 * counts and rewalk the radix trees for the requested address family.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}