/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE,
		PFRW_COUNTERS
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw_addr;
		struct pfr_astats	*pfrw_astats;
		struct pfr_kentryworkq	*pfrw_workq;
		struct pfr_kentry	*pfrw_kentry;
		struct pfi_dynaddr	*pfrw_dyn;
	};
	int	 pfrw_free;
	int	 pfrw_flags;
};

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
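/*
 * Illustrative sketch (not part of the original source): a table entry is
 * treated as a network (prefix) when its prefix length is shorter than the
 * full width of its address family, which is what the AF_BITS()/ADDR_NETWORK()
 * macros above express.  The helper below is hypothetical and only restates
 * that predicate for a bare (af, prefix length) pair.
 */
#if 0
#include <stdbool.h>
#include <sys/socket.h>		/* AF_INET, AF_INET6 */

static bool
is_network_entry(int af, int net)
{
	int width = (af == AF_INET) ? 32 : 128;	/* AF_BITS() */

	/* A /32 (or /128) entry is a single host; anything shorter is a net. */
	return (net < width);
}
#endif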
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
#define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_astats(struct pfr_astats *,
			    const struct pfr_kentry *,
			    const struct pfr_walktree *);
static void		 pfr_copyout_addr(struct pfr_addr *,
			    const struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, time_t, int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, time_t);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, time_t, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)

VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)

VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
#define	V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)

void
pfr_initialize(void)
{

	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	uma_zone_set_max(V_pfr_kentry_z, PFR_KENTRY_HIWAT);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}
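/*
 * Illustrative sketch (not from the original source): pfr_initialize() above
 * backs table entries with a UMA zone capped at PFR_KENTRY_HIWAT, so entry
 * allocation fails under M_NOWAIT once the limit is reached instead of
 * exhausting kernel memory.  The zone name and item type below are
 * hypothetical; the calls follow the uma(9) pattern used in this file.
 */
#if 0
#include <vm/uma.h>

struct example_item { int v; };
static uma_zone_t example_zone;

static void
example_zone_setup(void)
{
	example_zone = uma_zcreate("example items", sizeof(struct example_item),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(example_zone, 1000);	/* analogous to PFR_KENTRY_HIWAT */
}

static struct example_item *
example_alloc(void)
{
	/* May return NULL once the zone limit is reached. */
	return (uma_zalloc(example_zone, M_NOWAIT | M_ZERO));
}
#endif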
void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kentry_counter_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
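/*
 * Illustrative userland sketch (not from the original source): pfr_add_addrs()
 * above is reached through the DIOCRADDADDRS ioctl on /dev/pf.  The table name
 * ("example") is hypothetical and error handling is omitted; with
 * PFR_FLAG_FEEDBACK set, the kernel writes a PFR_FB_* code back into each
 * pfr_addr to report what happened to that address, and pfrio_nadd returns
 * how many entries were actually added.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <string.h>

static int
example_add_addr(const char *table, struct in_addr a)
{
	struct pfioc_table io;
	struct pfr_addr ad;
	int dev = open("/dev/pf", O_RDWR);

	memset(&io, 0, sizeof(io));
	memset(&ad, 0, sizeof(ad));
	ad.pfra_af = AF_INET;
	ad.pfra_net = 32;		/* host entry */
	ad.pfra_ip4addr = a;
	strlcpy(io.pfrio_table.pfrt_name, table,
	    sizeof(io.pfrio_table.pfrt_name));
	io.pfrio_buffer = &ad;
	io.pfrio_esize = sizeof(ad);
	io.pfrio_size = 1;
	io.pfrio_flags = PFR_FLAG_FEEDBACK;
	return (ioctl(dev, DIOCRADDADDRS, &io));	/* ad.pfra_fback is updated */
}
#endif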
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
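/*
 * Illustrative sketch (not from the original source): the heuristic above
 * approximates log2(N) by counting how often the table size can be halved,
 * then prefers the O(N) mark-and-sweep only when n > N/log2(N), i.e. when
 * n * log2(N) individual lookups would cost more than one full walk.  The
 * helper below is hypothetical and just restates that decision.
 */
#if 0
static int
use_full_scan(int n, int table_cnt)
{
	int log = 1, i;

	for (i = table_cnt; i > 0; i >>= 1)
		log++;			/* roughly log2(table_cnt) */
	/* e.g. N = 65536 gives log ~ 17: scan the table once when n > ~3855 */
	return (n > table_cnt / log);
}
#endif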
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	/*
	 * Flags below are for backward compatibility.  It was possible to
	 * have a table without per-entry counters.  Now they are always
	 * allocated; we just discard the data when reading it if the table
	 * is not configured to keep counters.
	 */
	w.pfrw_flags = kt->pfrkt_flags;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(kt, &workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(kt, &workq, time_second, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
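/*
 * Illustrative userland sketch (not from the original source): pfr_get_addrs()
 * and pfr_get_astats() above implement a two-step size negotiation.  When the
 * supplied buffer is too small they only report the required element count,
 * so callers retry with a larger buffer, as sketched below for DIOCRGETADDRS.
 * The wrapper name is hypothetical; a production caller would loop in case
 * the table grows between the two calls, and would check errors.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <stdlib.h>
#include <string.h>

static struct pfr_addr *
example_get_addrs(int dev, const char *table, int *count)
{
	struct pfioc_table io;
	struct pfr_addr *buf;

	memset(&io, 0, sizeof(io));
	strlcpy(io.pfrio_table.pfrt_name, table,
	    sizeof(io.pfrio_table.pfrt_name));
	io.pfrio_esize = sizeof(struct pfr_addr);
	io.pfrio_size = 0;			/* first call: ask for the size */
	if (ioctl(dev, DIOCRGETADDRS, &io) == -1)
		return (NULL);
	buf = calloc(io.pfrio_size, sizeof(*buf));
	io.pfrio_buffer = buf;			/* second call: fetch entries */
	if (ioctl(dev, DIOCRGETADDRS, &io) == -1)
		return (NULL);
	*count = io.pfrio_size;
	return (buf);
}
#endif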
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback != PFR_FB_NONE)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_free;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, bool counters)
{
	struct pfr_kentry	*ke;
	counter_u64_t		 c;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_counters.pfrkc_tzero = 0;
	if (counters) {
		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
		if (c == NULL) {
			pfr_destroy_kentry(ke);
			return (NULL);
		}
		ke->pfrke_counters.pfrkc_counters = c;
	}
	return (ke);
}
static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	while ((p = SLIST_FIRST(workq)) != NULL) {
		SLIST_REMOVE_HEAD(workq, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	counter_u64_t c;

	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_counters.pfrkc_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_counters.pfrkc_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    time_t tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 i;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
			for (i = 0; i < PFR_NUM_COUNTERS; i++)
				counter_u64_zero(
				    p->pfrke_counters.pfrkc_counters + i);
		p->pfrke_counters.pfrkc_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	switch (af) {
	case AF_INET:
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
		break;
	case AF_INET6:
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
		break;
	default:
		unhandled_af(af);
	}
}
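/*
 * Illustrative sketch (not from the original source): pfr_prepare_network()
 * above turns a prefix length into a contiguous netmask 32 bits at a time.
 * For IPv4, net = 20 gives -1 << 12 = 0xfffff000 (255.255.240.0); an IPv6
 * prefix of 80 fills two whole 32-bit words and then masks 16 bits of the
 * third.  The fragment below is a hypothetical userland restatement.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int
main(void)
{
	int net = 20;
	uint32_t mask = net ? htonl((uint32_t)-1 << (32 - net)) : 0;

	/* Prints "/20 -> fffff000" regardless of host byte order. */
	printf("/%d -> %08x\n", net, ntohl(mask));
	return (0);
}
#endif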
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	switch (ke->pfrke_af) {
	case AF_INET:
		head = &kt->pfrkt_ip4->rh;
		break;
	case AF_INET6:
		head = &kt->pfrkt_ip6->rh;
		break;
	default:
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
	default:
		unhandled_af(ad->pfra_af);
	}
}

static void
pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
    const struct pfr_walktree *w)
{
	int dir, op;
	const struct pfr_kcounters *kc = &ke->pfrke_counters;

	bzero(as, sizeof(*as));
	pfr_copyout_addr(&as->pfras_a, ke);
	as->pfras_tzero = kc->pfrkc_tzero;

	if (!(w->pfrw_flags & PFR_TFLAG_COUNTERS) ||
	    kc->pfrkc_counters == NULL) {
		bzero(as->pfras_packets, sizeof(as->pfras_packets));
		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
		return;
	}

	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
		for (op = 0; op < PFR_OP_ADDR_MAX; op++) {
			as->pfras_packets[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
			as->pfras_bytes[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
		}
	}
}
static void
pfr_sockaddr_to_pf_addr(const union sockaddr_union *sa, struct pf_addr *a)
{
	switch (sa->sa.sa_family) {
	case AF_INET:
		memcpy(&a->v4, &sa->sin.sin_addr, sizeof(a->v4));
		break;
	case AF_INET6:
		memcpy(&a->v6, &sa->sin6.sin6_addr, sizeof(a->v6));
		break;
	default:
		unhandled_af(sa->sa.sa_family);
	}
}

static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_free++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_astats(&as, ke, w);

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_free--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		switch (ke->pfrke_af) {
		case AF_INET:
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr4);
			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask4);
			break;
		case AF_INET6:
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &w->pfrw_dyn->pfid_addr6);
			pfr_sockaddr_to_pf_addr(&pfr_mask, &w->pfrw_dyn->pfid_mask6);
			break;
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	    }
	case PFRW_COUNTERS:
	    {
		if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
			if (ke->pfrke_counters.pfrkc_counters != NULL)
				break;
			ke->pfrke_counters.pfrkc_counters =
			    uma_zalloc_pcpu(V_pfr_kentry_counter_z,
			    M_NOWAIT | M_ZERO);
		} else {
			uma_zfree_pcpu(V_pfr_kentry_counter_z,
			    ke->pfrke_counters.pfrkc_counters);
			ke->pfrke_counters.pfrkc_counters = NULL;
		}
		break;
	    }
	}
	return (0);
}
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
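/*
 * Illustrative sketch (not from the original source): kernel tables are kept
 * in an RB tree keyed by pfr_ktable_compare(), i.e. by (name, anchor), which
 * is why RB_FIND() above can locate a table from a caller-supplied key that
 * only has pfrkt_t filled in.  The userland example below uses the same
 * tree(3) macros with a hypothetical element type.
 */
#if 0
#include <sys/tree.h>
#include <string.h>

struct ex_table {
	RB_ENTRY(ex_table)	entry;
	char			name[32];
	char			anchor[64];
};

static int
ex_cmp(struct ex_table *a, struct ex_table *b)
{
	int d;

	if ((d = strcmp(a->name, b->name)) != 0)
		return (d);
	return (strcmp(a->anchor, b->anchor));
}

RB_HEAD(ex_tree, ex_table) ex_head = RB_INITIALIZER(&ex_head);
RB_GENERATE_STATIC(ex_tree, ex_table, entry, ex_cmp);

/* Lookup works from a stack "key" that only carries the compared fields. */
static struct ex_table *
ex_lookup(const char *name, const char *anchor)
{
	struct ex_table key;

	strlcpy(key.name, name, sizeof(key.name));
	strlcpy(key.anchor, anchor, sizeof(key.anchor));
	return (RB_FIND(ex_tree, &ex_head, &key));
}
#endif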
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	time_t			 tzero = time_second;
	int			 pfr_dir, pfr_op;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
		    sizeof(struct pfr_table));
		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
				tbl->pfrts_packets[pfr_dir][pfr_op] =
				    pfr_kstate_counter_fetch(
					&p->pfrkt_packets[pfr_dir][pfr_op]);
				tbl->pfrts_bytes[pfr_dir][pfr_op] =
				    pfr_kstate_counter_fetch(
					&p->pfrkt_bytes[pfr_dir][pfr_op]);
			}
		}
		tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match);
		tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch);
		tbl->pfrts_tzero = p->pfrkt_tzero;
		tbl->pfrts_cnt = p->pfrkt_cnt;
		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
		tbl++;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	time_t			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_kruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_kruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_kruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_kruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_kruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad,
		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_kruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_kruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_kruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_kruleset	*rs;
	int			 xadd = 0, xchange = 0;
	time_t			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_kruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_kruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
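/*
 * Illustrative sketch (not from the original source): pfr_commit_ktable()
 * below merges a shadow table into the live one with a mark-and-sweep diff:
 * live entries are unmarked, every shadow entry either marks its live twin
 * or becomes an insertion, and whatever stays unmarked is deleted.  The
 * hypothetical array-based fragment below shows the same three-way split.
 */
#if 0
#include <stdbool.h>
#include <string.h>

struct ex_entry { char key[32]; bool mark; };

static void
ex_diff(struct ex_entry *live, int nlive, struct ex_entry *shadow, int nshadow)
{
	int i, j;

	for (i = 0; i < nlive; i++)
		live[i].mark = false;		/* PFRW_MARK pass */
	for (j = 0; j < nshadow; j++) {
		for (i = 0; i < nlive; i++)
			if (strcmp(live[i].key, shadow[j].key) == 0)
				break;
		if (i < nlive)
			live[i].mark = true;	/* kept (possibly changed) */
		else
			; /* shadow[j] would be inserted */
	}
	/* every live[i] with mark == false would be removed (PFRW_SWEEP) */
}
#endif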
static void
pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
{
	counter_u64_t		*pkc, *qkc;
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		while ((p = SLIST_FIRST(&addrq)) != NULL) {
			SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				pkc = &p->pfrke_counters.pfrkc_counters;
				qkc = &q->pfrke_counters.pfrkc_counters;
				if ((*pkc == NULL) != (*qkc == NULL))
					SWAP(counter_u64_t, *pkc, *qkc);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_counters.pfrkc_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
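/*
 * Illustrative sketch (not from the original source): pfr_fix_anchor() above
 * canonicalizes an anchor path by dropping leading '/' characters and then
 * insisting the rest of the fixed-size buffer is NUL-filled.  The hypothetical
 * userland fragment below shows the same leading-slash strip.
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char anchor[64] = "//foo/bar";
	size_t off = strspn(anchor, "/");	/* number of leading slashes */

	memmove(anchor, anchor + off, strlen(anchor + off) + 1);
	printf("%s\n", anchor);			/* prints "foo/bar" */
	return (0);
}
#endif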
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_kruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (V_pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_kruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
	V_pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;
	struct pfr_walktree	w;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		V_pfr_ktable_cnt--;
		return;
	}
	if (newf & PFR_TFLAG_COUNTERS && !(kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		bzero(&w, sizeof(w));
		w.pfrw_op = PFRW_COUNTERS;
		w.pfrw_flags |= PFR_TFLAG_COUNTERS;
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
	}
	if (!(newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		bzero(&w, sizeof(w));
		w.pfrw_op = PFRW_COUNTERS;
		w.pfrw_flags |= 0;
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
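/*
 * Illustrative sketch (not from the original source): pfr_setflags_ktable()
 * above keeps a table ACTIVE only while something still needs it, i.e. while
 * it is referenced by a rule, referenced as an anchor root, or marked
 * persistent.  The hypothetical predicate below restates that condition.
 */
#if 0
static int
table_stays_active(int newf)
{
	return ((newf & (PFR_TFLAG_REFERENCED | PFR_TFLAG_REFDANCHOR |
	    PFR_TFLAG_PERSIST)) != 0);
}
#endif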
static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 pfr_dir, pfr_op;

	MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(kt, &addrq, tzero, 0);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
			pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	pfr_kstate_counter_zero(&kt->pfrkt_match);
	pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
	kt->pfrkt_tzero = tzero;
}

static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_kruleset	*rs;
	int			 pfr_dir, pfr_op;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			if (pfr_kstate_counter_init(
			    &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
			if (pfr_kstate_counter_init(
			    &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
		}
	}
	if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p;

	while ((p = SLIST_FIRST(workq)) != NULL) {
		SLIST_REMOVE_HEAD(workq, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}
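/*
 * Illustrative note (not from the original source): rn_inithead() takes the
 * offset, in bits, of the key within the sockaddr handed to the radix code.
 * For struct sockaddr_in, sin_addr sits after sin_len, sin_family and
 * sin_port, so offsetof(struct sockaddr_in, sin_addr) is 4 bytes and the
 * value passed above is 32.  The hypothetical fragment below just prints
 * both offsets.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	printf("v4 key offset: %zu bits\n",
	    offsetof(struct sockaddr_in, sin_addr) * 8);	/* 32 */
	printf("v6 key offset: %zu bits\n",
	    offsetof(struct sockaddr_in6, sin6_addr) * 8);	/* 64 */
	return (0);
}
#endif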
static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
{
	struct pfr_ktable *p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
	struct pfr_kentryworkq addrq;
	int pfr_dir, pfr_op;

	MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED());

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(kt, &addrq, tzero, 0);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]);
			pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	pfr_kstate_counter_zero(&kt->pfrkt_match);
	pfr_kstate_counter_zero(&kt->pfrkt_nomatch);
	kt->pfrkt_tzero = tzero;
}

static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset)
{
	struct pfr_ktable *kt;
	struct pf_kruleset *rs;
	int pfr_dir, pfr_op;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_kruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			if (pfr_kstate_counter_init(
			    &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
			if (pfr_kstate_counter_init(
			    &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
		}
	}
	if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable *p;

	while ((p = SLIST_FIRST(workq)) != NULL) {
		SLIST_REMOVE_HEAD(workq, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;
	int pfr_dir, pfr_op;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_kruleset(kt->pfrkt_rs);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op++) {
			pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]);
			pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	pfr_kstate_counter_deinit(&kt->pfrkt_match);
	pfr_kstate_counter_deinit(&kt->pfrkt_nomatch);

	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
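/*
 * Usage sketch (illustrative only): because struct pfr_ktable begins with
 * its struct pfr_table, and pfr_ktable_compare() only reads pfrkt_name and
 * pfrkt_anchor, a plain pfr_table can serve directly as the RB_FIND() key.
 * A hypothetical lookup of a table "badhosts" in the main anchor might read:
 *
 *	struct pfr_table	 key;
 *	struct pfr_ktable	*kt;
 *
 *	bzero(&key, sizeof(key));
 *	strlcpy(key.pfrt_name, "badhosts", sizeof(key.pfrt_name));
 *	kt = pfr_lookup_table(&key);	// NULL if no such table exists
 */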
static struct pfr_kentry *
pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    int exact)
{
	struct pfr_kentry *ke = NULL;

	PF_RULES_RASSERT();

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return (NULL);

	switch (af) {
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
#endif /* INET6 */
	default:
		unhandled_af(af);
	}
	if (exact && ke && KENTRY_NETWORK(ke))
		ke = NULL;

	return (ke);
}

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *ke = NULL;
	int match;

	ke = pfr_kentry_byaddr(kt, a, af, 0);

	match = (ke && !ke->pfrke_not);
	if (match)
		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
	else
		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);

	return (match);
}
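/*
 * Usage sketch (illustrative only): the packet-matching path asks a table
 * whether an address is covered by it.  With "kt" a table attached to a
 * rule and "src"/"af" the parsed source address and family (hypothetical
 * caller variables), a rule-evaluation fragment could look like:
 *
 *	struct pf_addr	*src;		// source address from the packet
 *
 *	if (pfr_match_addr(kt, src, af)) {
 *		// address is in the table and the entry is not negated;
 *		// the table's match counter has already been bumped
 *	}
 */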
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	}
#endif /* INET6 */
	default:
		unhandled_af(af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_BYTES), len);
	}
}

struct pfr_ktable *
pfr_eth_attach_table(struct pf_keth_ruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_keth_anchor *ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

struct pfr_ktable *
pfr_attach_table(struct pf_kruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_kanchor *ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af, pf_addr_filter_func_t filter, bool loop_once)
{
	struct pf_addr *addr, cur, mask, umask_addr;
	union sockaddr_union uaddr, umask;
	struct pfr_kentry *ke, *ke2 = NULL;
	int startidx, idx = -1, loop = 0, use_counter = 0;

	MPASS(pidx != NULL);
	MPASS(counter != NULL);

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		addr = (struct pf_addr *)&uaddr.sin.sin_addr;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		addr = (struct pf_addr *)&uaddr.sin6.sin6_addr;
		break;
	default:
		unhandled_af(af);
	}

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return (-1);

	idx = *pidx;
	if (idx < 0 || idx >= kt->pfrkt_cnt)
		idx = 0;
	else if (counter != NULL)
		use_counter = 1;
	startidx = idx;

_next_block:
	if (loop && startidx == idx) {
		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
		return (1);
	}

	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		/* we don't have this idx, try looping */
		if ((loop || loop_once) || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
			pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
			return (1);
		}
		idx = 0;
		loop++;
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	pfr_sockaddr_to_pf_addr(&ke->pfrke_sa, &cur);
	pfr_sockaddr_to_pf_addr(&umask, &mask);

	if (use_counter && !PF_AZERO(counter, af)) {
		/* is the supplied address within the block? */
		if (!pf_match_addr(0, &cur, &mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		pf_addrcpy(addr, counter, af);
	} else {
		/* use first address of block */
		pf_addrcpy(addr, &cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		if (filter && filter(af, addr)) {
			idx++;
			goto _next_block;
		}
		pf_addrcpy(counter, addr, af);
		*pidx = idx;
		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		default:
			unhandled_af(af);
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			if (filter && filter(af, addr))
				goto _next_entry;
			pf_addrcpy(counter, addr, af);
			*pidx = idx;
			pfr_kstate_counter_add(&kt->pfrkt_match, 1);
			return (0);
		}

	_next_entry:
		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
		pfr_sockaddr_to_pf_addr(&umask, &umask_addr);
		pf_poolmask(addr, addr, &umask_addr, &pfr_ffaddr, af);
		pf_addr_inc(addr, af);
		if (!pf_match_addr(0, &cur, &mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
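/*
 * Usage sketch (illustrative only): round-robin address pools backed by a
 * table walk it with pfr_pool_get().  The caller keeps the last index and
 * counter address between packets; "tblidx" and "rr_counter" below are
 * hypothetical caller state, not fields defined in this file.
 *
 *	struct pf_addr	 next;
 *	struct pf_addr	 rr_counter;	// persists across calls
 *	int		 tblidx = 0;	// persists across calls
 *
 *	switch (pfr_pool_get(kt, &tblidx, &rr_counter, af, NULL, false)) {
 *	case 0:		// rr_counter now holds the address to hand out
 *		pf_addrcpy(&next, &rr_counter, af);
 *		break;
 *	case 1:		// table has no usable address
 *	case -1:	// table (and its root) is not active
 *		break;
 *	}
 */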
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_free = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	switch (dyn->pfid_af) {
	case AF_UNSPEC:	/* look up both IPv4 and IPv6 addresses */
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		break;
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		break;
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		break;
	default:
		unhandled_af(dyn->pfid_af);
	}
}

struct pfr_ktable *
pfr_ktable_select_active(struct pfr_ktable *kt)
{
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (NULL);

	return (kt);
}