/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af)==AF_INET) ?
\ 85 (struct pf_addr *)&(su)->sin.sin_addr : \ 86 (struct pf_addr *)&(su)->sin6.sin6_addr) 87 88 #define AF_BITS(af) (((af)==AF_INET)?32:128) 89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 91 #define KENTRY_RNF_ROOT(ke) \ 92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 93 94 #define NO_ADDRESSES (-1) 95 #define ENQUEUE_UNMARKED_ONLY (1) 96 #define INVERT_NEG_FLAG (1) 97 98 struct pfr_walktree { 99 enum pfrw_op { 100 PFRW_MARK, 101 PFRW_SWEEP, 102 PFRW_ENQUEUE, 103 PFRW_GET_ADDRS, 104 PFRW_GET_ASTATS, 105 PFRW_POOL_GET, 106 PFRW_DYNADDR_UPDATE, 107 PFRW_COUNTERS 108 } pfrw_op; 109 union { 110 struct pfr_addr *pfrw1_addr; 111 struct pfr_astats *pfrw1_astats; 112 struct pfr_kentryworkq *pfrw1_workq; 113 struct pfr_kentry *pfrw1_kentry; 114 struct pfi_dynaddr *pfrw1_dyn; 115 } pfrw_1; 116 int pfrw_free; 117 int pfrw_flags; 118 }; 119 #define pfrw_addr pfrw_1.pfrw1_addr 120 #define pfrw_astats pfrw_1.pfrw1_astats 121 #define pfrw_workq pfrw_1.pfrw1_workq 122 #define pfrw_kentry pfrw_1.pfrw1_kentry 123 #define pfrw_dyn pfrw_1.pfrw1_dyn 124 #define pfrw_cnt pfrw_free 125 126 #define senderr(e) do { rv = (e); goto _bad; } while (0) 127 128 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures"); 129 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z); 130 #define V_pfr_kentry_z VNET(pfr_kentry_z) 131 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z); 132 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z) 133 134 static struct pf_addr pfr_ffaddr = { 135 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } 136 }; 137 138 static void pfr_copyout_astats(struct pfr_astats *, 139 const struct pfr_kentry *, 140 const struct pfr_walktree *); 141 static void pfr_copyout_addr(struct pfr_addr *, 142 const struct pfr_kentry *ke); 143 static int pfr_validate_addr(struct pfr_addr *); 144 static void pfr_enqueue_addrs(struct pfr_ktable *, 145 struct pfr_kentryworkq *, int *, int); 146 static void pfr_mark_addrs(struct pfr_ktable *); 147 static struct pfr_kentry 148 *pfr_lookup_addr(struct pfr_ktable *, 149 struct pfr_addr *, int); 150 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool); 151 static void pfr_destroy_kentries(struct pfr_kentryworkq *); 152 static void pfr_destroy_kentry(struct pfr_kentry *); 153 static void pfr_insert_kentries(struct pfr_ktable *, 154 struct pfr_kentryworkq *, long); 155 static void pfr_remove_kentries(struct pfr_ktable *, 156 struct pfr_kentryworkq *); 157 static void pfr_clstats_kentries(struct pfr_ktable *, 158 struct pfr_kentryworkq *, long, int); 159 static void pfr_reset_feedback(struct pfr_addr *, int); 160 static void pfr_prepare_network(union sockaddr_union *, int, int); 161 static int pfr_route_kentry(struct pfr_ktable *, 162 struct pfr_kentry *); 163 static int pfr_unroute_kentry(struct pfr_ktable *, 164 struct pfr_kentry *); 165 static int pfr_walktree(struct radix_node *, void *); 166 static int pfr_validate_table(struct pfr_table *, int, int); 167 static int pfr_fix_anchor(char *); 168 static void pfr_commit_ktable(struct pfr_ktable *, long); 169 static void pfr_insert_ktables(struct pfr_ktableworkq *); 170 static void pfr_insert_ktable(struct pfr_ktable *); 171 static void pfr_setflags_ktables(struct pfr_ktableworkq *); 172 static void pfr_setflags_ktable(struct pfr_ktable *, int); 173 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 174 int); 175 static void pfr_clstats_ktable(struct pfr_ktable *, 
long, int); 176 static struct pfr_ktable 177 *pfr_create_ktable(struct pfr_table *, long, int); 178 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 179 static void pfr_destroy_ktable(struct pfr_ktable *, int); 180 static int pfr_ktable_compare(struct pfr_ktable *, 181 struct pfr_ktable *); 182 static struct pfr_ktable 183 *pfr_lookup_table(struct pfr_table *); 184 static void pfr_clean_node_mask(struct pfr_ktable *, 185 struct pfr_kentryworkq *); 186 static int pfr_skip_table(struct pfr_table *, 187 struct pfr_ktable *, int); 188 static struct pfr_kentry 189 *pfr_kentry_byidx(struct pfr_ktable *, int, int); 190 191 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 192 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 193 194 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables); 195 #define V_pfr_ktables VNET(pfr_ktables) 196 197 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable); 198 #define V_pfr_nulltable VNET(pfr_nulltable) 199 200 VNET_DEFINE_STATIC(int, pfr_ktable_cnt); 201 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt) 202 203 void 204 pfr_initialize(void) 205 { 206 207 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters", 208 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL, 209 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 210 V_pfr_kentry_z = uma_zcreate("pf table entries", 211 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 212 0); 213 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z; 214 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; 215 } 216 217 void 218 pfr_cleanup(void) 219 { 220 221 uma_zdestroy(V_pfr_kentry_z); 222 uma_zdestroy(V_pfr_kentry_counter_z); 223 } 224 225 int 226 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 227 { 228 struct pfr_ktable *kt; 229 struct pfr_kentryworkq workq; 230 231 PF_RULES_WASSERT(); 232 233 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 234 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 235 return (EINVAL); 236 kt = pfr_lookup_table(tbl); 237 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 238 return (ESRCH); 239 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 240 return (EPERM); 241 pfr_enqueue_addrs(kt, &workq, ndel, 0); 242 243 if (!(flags & PFR_FLAG_DUMMY)) { 244 pfr_remove_kentries(kt, &workq); 245 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__)); 246 } 247 return (0); 248 } 249 250 int 251 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 252 int *nadd, int flags) 253 { 254 struct pfr_ktable *kt, *tmpkt; 255 struct pfr_kentryworkq workq; 256 struct pfr_kentry *p, *q; 257 struct pfr_addr *ad; 258 int i, rv, xadd = 0; 259 long tzero = time_second; 260 261 PF_RULES_WASSERT(); 262 263 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 264 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 265 return (EINVAL); 266 kt = pfr_lookup_table(tbl); 267 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 268 return (ESRCH); 269 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 270 return (EPERM); 271 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 272 if (tmpkt == NULL) 273 return (ENOMEM); 274 SLIST_INIT(&workq); 275 for (i = 0, ad = addr; i < size; i++, ad++) { 276 if (pfr_validate_addr(ad)) 277 senderr(EINVAL); 278 p = pfr_lookup_addr(kt, ad, 1); 279 q = pfr_lookup_addr(tmpkt, ad, 1); 280 if (flags & PFR_FLAG_FEEDBACK) { 281 if (q != NULL) 282 ad->pfra_fback = PFR_FB_DUPLICATE; 283 else if (p == NULL) 284 ad->pfra_fback = PFR_FB_ADDED; 285 else if 
			    (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n'; the other is
	 * O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
352 */ 353 for (i = kt->pfrkt_cnt; i > 0; i >>= 1) 354 log++; 355 if (size > kt->pfrkt_cnt/log) { 356 /* full table scan */ 357 pfr_mark_addrs(kt); 358 } else { 359 /* iterate over addresses to delete */ 360 for (i = 0, ad = addr; i < size; i++, ad++) { 361 if (pfr_validate_addr(ad)) 362 return (EINVAL); 363 p = pfr_lookup_addr(kt, ad, 1); 364 if (p != NULL) 365 p->pfrke_mark = 0; 366 } 367 } 368 SLIST_INIT(&workq); 369 for (i = 0, ad = addr; i < size; i++, ad++) { 370 if (pfr_validate_addr(ad)) 371 senderr(EINVAL); 372 p = pfr_lookup_addr(kt, ad, 1); 373 if (flags & PFR_FLAG_FEEDBACK) { 374 if (p == NULL) 375 ad->pfra_fback = PFR_FB_NONE; 376 else if (p->pfrke_not != ad->pfra_not) 377 ad->pfra_fback = PFR_FB_CONFLICT; 378 else if (p->pfrke_mark) 379 ad->pfra_fback = PFR_FB_DUPLICATE; 380 else 381 ad->pfra_fback = PFR_FB_DELETED; 382 } 383 if (p != NULL && p->pfrke_not == ad->pfra_not && 384 !p->pfrke_mark) { 385 p->pfrke_mark = 1; 386 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 387 xdel++; 388 } 389 } 390 if (!(flags & PFR_FLAG_DUMMY)) 391 pfr_remove_kentries(kt, &workq); 392 if (ndel != NULL) 393 *ndel = xdel; 394 return (0); 395 _bad: 396 if (flags & PFR_FLAG_FEEDBACK) 397 pfr_reset_feedback(addr, size); 398 return (rv); 399 } 400 401 int 402 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 403 int *size2, int *nadd, int *ndel, int *nchange, int flags, 404 u_int32_t ignore_pfrt_flags) 405 { 406 struct pfr_ktable *kt, *tmpkt; 407 struct pfr_kentryworkq addq, delq, changeq; 408 struct pfr_kentry *p, *q; 409 struct pfr_addr ad; 410 int i, rv, xadd = 0, xdel = 0, xchange = 0; 411 long tzero = time_second; 412 413 PF_RULES_WASSERT(); 414 415 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 416 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags & 417 PFR_FLAG_USERIOCTL)) 418 return (EINVAL); 419 kt = pfr_lookup_table(tbl); 420 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 421 return (ESRCH); 422 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 423 return (EPERM); 424 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 425 if (tmpkt == NULL) 426 return (ENOMEM); 427 pfr_mark_addrs(kt); 428 SLIST_INIT(&addq); 429 SLIST_INIT(&delq); 430 SLIST_INIT(&changeq); 431 for (i = 0; i < size; i++) { 432 /* 433 * XXXGL: undertand pf_if usage of this function 434 * and make ad a moving pointer 435 */ 436 bcopy(addr + i, &ad, sizeof(ad)); 437 if (pfr_validate_addr(&ad)) 438 senderr(EINVAL); 439 ad.pfra_fback = PFR_FB_NONE; 440 p = pfr_lookup_addr(kt, &ad, 1); 441 if (p != NULL) { 442 if (p->pfrke_mark) { 443 ad.pfra_fback = PFR_FB_DUPLICATE; 444 goto _skip; 445 } 446 p->pfrke_mark = 1; 447 if (p->pfrke_not != ad.pfra_not) { 448 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq); 449 ad.pfra_fback = PFR_FB_CHANGED; 450 xchange++; 451 } 452 } else { 453 q = pfr_lookup_addr(tmpkt, &ad, 1); 454 if (q != NULL) { 455 ad.pfra_fback = PFR_FB_DUPLICATE; 456 goto _skip; 457 } 458 p = pfr_create_kentry(&ad, 459 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 460 if (p == NULL) 461 senderr(ENOMEM); 462 if (pfr_route_kentry(tmpkt, p)) { 463 pfr_destroy_kentry(p); 464 ad.pfra_fback = PFR_FB_NONE; 465 } else { 466 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 467 ad.pfra_fback = PFR_FB_ADDED; 468 xadd++; 469 } 470 } 471 _skip: 472 if (flags & PFR_FLAG_FEEDBACK) 473 bcopy(&ad, addr + i, sizeof(ad)); 474 } 475 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); 476 if ((flags & PFR_FLAG_FEEDBACK) && *size2) { 477 if (*size2 < size+xdel) { 478 *size2 = size+xdel; 479 senderr(0); 480 } 481 i = 0; 482 
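/*
 * Feedback was requested and the caller left room: append the entries
 * queued for deletion to the caller's buffer, each marked PFR_FB_DELETED.
 */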
SLIST_FOREACH(p, &delq, pfrke_workq) { 483 pfr_copyout_addr(&ad, p); 484 ad.pfra_fback = PFR_FB_DELETED; 485 bcopy(&ad, addr + size + i, sizeof(ad)); 486 i++; 487 } 488 } 489 pfr_clean_node_mask(tmpkt, &addq); 490 if (!(flags & PFR_FLAG_DUMMY)) { 491 pfr_insert_kentries(kt, &addq, tzero); 492 pfr_remove_kentries(kt, &delq); 493 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 494 } else 495 pfr_destroy_kentries(&addq); 496 if (nadd != NULL) 497 *nadd = xadd; 498 if (ndel != NULL) 499 *ndel = xdel; 500 if (nchange != NULL) 501 *nchange = xchange; 502 if ((flags & PFR_FLAG_FEEDBACK) && size2) 503 *size2 = size+xdel; 504 pfr_destroy_ktable(tmpkt, 0); 505 return (0); 506 _bad: 507 pfr_clean_node_mask(tmpkt, &addq); 508 pfr_destroy_kentries(&addq); 509 if (flags & PFR_FLAG_FEEDBACK) 510 pfr_reset_feedback(addr, size); 511 pfr_destroy_ktable(tmpkt, 0); 512 return (rv); 513 } 514 515 int 516 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 517 int *nmatch, int flags) 518 { 519 struct pfr_ktable *kt; 520 struct pfr_kentry *p; 521 struct pfr_addr *ad; 522 int i, xmatch = 0; 523 524 PF_RULES_RASSERT(); 525 526 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE); 527 if (pfr_validate_table(tbl, 0, 0)) 528 return (EINVAL); 529 kt = pfr_lookup_table(tbl); 530 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 531 return (ESRCH); 532 533 for (i = 0, ad = addr; i < size; i++, ad++) { 534 if (pfr_validate_addr(ad)) 535 return (EINVAL); 536 if (ADDR_NETWORK(ad)) 537 return (EINVAL); 538 p = pfr_lookup_addr(kt, ad, 0); 539 if (flags & PFR_FLAG_REPLACE) 540 pfr_copyout_addr(ad, p); 541 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE : 542 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); 543 if (p != NULL && !p->pfrke_not) 544 xmatch++; 545 } 546 if (nmatch != NULL) 547 *nmatch = xmatch; 548 return (0); 549 } 550 551 int 552 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 553 int flags) 554 { 555 struct pfr_ktable *kt; 556 struct pfr_walktree w; 557 int rv; 558 559 PF_RULES_RASSERT(); 560 561 ACCEPT_FLAGS(flags, 0); 562 if (pfr_validate_table(tbl, 0, 0)) 563 return (EINVAL); 564 kt = pfr_lookup_table(tbl); 565 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 566 return (ESRCH); 567 if (kt->pfrkt_cnt > *size) { 568 *size = kt->pfrkt_cnt; 569 return (0); 570 } 571 572 bzero(&w, sizeof(w)); 573 w.pfrw_op = PFRW_GET_ADDRS; 574 w.pfrw_addr = addr; 575 w.pfrw_free = kt->pfrkt_cnt; 576 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 577 if (!rv) 578 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 579 pfr_walktree, &w); 580 if (rv) 581 return (rv); 582 583 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__, 584 w.pfrw_free)); 585 586 *size = kt->pfrkt_cnt; 587 return (0); 588 } 589 590 int 591 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 592 int flags) 593 { 594 struct pfr_ktable *kt; 595 struct pfr_walktree w; 596 struct pfr_kentryworkq workq; 597 int rv; 598 long tzero = time_second; 599 600 PF_RULES_RASSERT(); 601 602 /* XXX PFR_FLAG_CLSTATS disabled */ 603 ACCEPT_FLAGS(flags, 0); 604 if (pfr_validate_table(tbl, 0, 0)) 605 return (EINVAL); 606 kt = pfr_lookup_table(tbl); 607 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 608 return (ESRCH); 609 if (kt->pfrkt_cnt > *size) { 610 *size = kt->pfrkt_cnt; 611 return (0); 612 } 613 614 bzero(&w, sizeof(w)); 615 w.pfrw_op = PFRW_GET_ASTATS; 616 w.pfrw_astats = addr; 617 w.pfrw_free = kt->pfrkt_cnt; 618 /* 619 * Flags below are for backward 
compatibility. It was possible to have 620 * a table without per-entry counters. Now they are always allocated, 621 * we just discard data when reading it if table is not configured to 622 * have counters. 623 */ 624 w.pfrw_flags = kt->pfrkt_flags; 625 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 626 if (!rv) 627 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 628 pfr_walktree, &w); 629 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 630 pfr_enqueue_addrs(kt, &workq, NULL, 0); 631 pfr_clstats_kentries(kt, &workq, tzero, 0); 632 } 633 if (rv) 634 return (rv); 635 636 if (w.pfrw_free) { 637 printf("pfr_get_astats: corruption detected (%d).\n", 638 w.pfrw_free); 639 return (ENOTTY); 640 } 641 *size = kt->pfrkt_cnt; 642 return (0); 643 } 644 645 int 646 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 647 int *nzero, int flags) 648 { 649 struct pfr_ktable *kt; 650 struct pfr_kentryworkq workq; 651 struct pfr_kentry *p; 652 struct pfr_addr *ad; 653 int i, rv, xzero = 0; 654 655 PF_RULES_WASSERT(); 656 657 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 658 if (pfr_validate_table(tbl, 0, 0)) 659 return (EINVAL); 660 kt = pfr_lookup_table(tbl); 661 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 662 return (ESRCH); 663 SLIST_INIT(&workq); 664 for (i = 0, ad = addr; i < size; i++, ad++) { 665 if (pfr_validate_addr(ad)) 666 senderr(EINVAL); 667 p = pfr_lookup_addr(kt, ad, 1); 668 if (flags & PFR_FLAG_FEEDBACK) { 669 ad->pfra_fback = (p != NULL) ? 670 PFR_FB_CLEARED : PFR_FB_NONE; 671 } 672 if (p != NULL) { 673 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 674 xzero++; 675 } 676 } 677 678 if (!(flags & PFR_FLAG_DUMMY)) 679 pfr_clstats_kentries(kt, &workq, 0, 0); 680 if (nzero != NULL) 681 *nzero = xzero; 682 return (0); 683 _bad: 684 if (flags & PFR_FLAG_FEEDBACK) 685 pfr_reset_feedback(addr, size); 686 return (rv); 687 } 688 689 static int 690 pfr_validate_addr(struct pfr_addr *ad) 691 { 692 int i; 693 694 switch (ad->pfra_af) { 695 #ifdef INET 696 case AF_INET: 697 if (ad->pfra_net > 32) 698 return (-1); 699 break; 700 #endif /* INET */ 701 #ifdef INET6 702 case AF_INET6: 703 if (ad->pfra_net > 128) 704 return (-1); 705 break; 706 #endif /* INET6 */ 707 default: 708 return (-1); 709 } 710 if (ad->pfra_net < 128 && 711 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 712 return (-1); 713 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 714 if (((caddr_t)ad)[i]) 715 return (-1); 716 if (ad->pfra_not && ad->pfra_not != 1) 717 return (-1); 718 if (ad->pfra_fback) 719 return (-1); 720 return (0); 721 } 722 723 static void 724 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 725 int *naddr, int sweep) 726 { 727 struct pfr_walktree w; 728 729 SLIST_INIT(workq); 730 bzero(&w, sizeof(w)); 731 w.pfrw_op = sweep ? 
PFRW_SWEEP : PFRW_ENQUEUE; 732 w.pfrw_workq = workq; 733 if (kt->pfrkt_ip4 != NULL) 734 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, 735 pfr_walktree, &w)) 736 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 737 if (kt->pfrkt_ip6 != NULL) 738 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 739 pfr_walktree, &w)) 740 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 741 if (naddr != NULL) 742 *naddr = w.pfrw_cnt; 743 } 744 745 static void 746 pfr_mark_addrs(struct pfr_ktable *kt) 747 { 748 struct pfr_walktree w; 749 750 bzero(&w, sizeof(w)); 751 w.pfrw_op = PFRW_MARK; 752 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w)) 753 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 754 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w)) 755 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 756 } 757 758 static struct pfr_kentry * 759 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 760 { 761 union sockaddr_union sa, mask; 762 struct radix_head *head = NULL; 763 struct pfr_kentry *ke; 764 765 PF_RULES_ASSERT(); 766 767 bzero(&sa, sizeof(sa)); 768 if (ad->pfra_af == AF_INET) { 769 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 770 head = &kt->pfrkt_ip4->rh; 771 } else if ( ad->pfra_af == AF_INET6 ) { 772 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 773 head = &kt->pfrkt_ip6->rh; 774 } 775 if (ADDR_NETWORK(ad)) { 776 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 777 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 778 if (ke && KENTRY_RNF_ROOT(ke)) 779 ke = NULL; 780 } else { 781 ke = (struct pfr_kentry *)rn_match(&sa, head); 782 if (ke && KENTRY_RNF_ROOT(ke)) 783 ke = NULL; 784 if (exact && ke && KENTRY_NETWORK(ke)) 785 ke = NULL; 786 } 787 return (ke); 788 } 789 790 static struct pfr_kentry * 791 pfr_create_kentry(struct pfr_addr *ad, bool counters) 792 { 793 struct pfr_kentry *ke; 794 counter_u64_t c; 795 796 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); 797 if (ke == NULL) 798 return (NULL); 799 800 if (ad->pfra_af == AF_INET) 801 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 802 else if (ad->pfra_af == AF_INET6) 803 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 804 ke->pfrke_af = ad->pfra_af; 805 ke->pfrke_net = ad->pfra_net; 806 ke->pfrke_not = ad->pfra_not; 807 ke->pfrke_counters.pfrkc_tzero = 0; 808 if (counters) { 809 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO); 810 if (c == NULL) { 811 pfr_destroy_kentry(ke); 812 return (NULL); 813 } 814 ke->pfrke_counters.pfrkc_counters = c; 815 } 816 return (ke); 817 } 818 819 static void 820 pfr_destroy_kentries(struct pfr_kentryworkq *workq) 821 { 822 struct pfr_kentry *p, *q; 823 824 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 825 q = SLIST_NEXT(p, pfrke_workq); 826 pfr_destroy_kentry(p); 827 } 828 } 829 830 static void 831 pfr_destroy_kentry(struct pfr_kentry *ke) 832 { 833 counter_u64_t c; 834 835 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL) 836 uma_zfree_pcpu(V_pfr_kentry_counter_z, c); 837 uma_zfree(V_pfr_kentry_z, ke); 838 } 839 840 static void 841 pfr_insert_kentries(struct pfr_ktable *kt, 842 struct pfr_kentryworkq *workq, long tzero) 843 { 844 struct pfr_kentry *p; 845 int rv, n = 0; 846 847 SLIST_FOREACH(p, workq, pfrke_workq) { 848 rv = pfr_route_kentry(kt, p); 849 if (rv) { 850 printf("pfr_insert_kentries: cannot route entry " 851 "(code=%d).\n", rv); 852 break; 853 } 854 p->pfrke_counters.pfrkc_tzero = tzero; 855 n++; 856 } 857 kt->pfrkt_cnt += n; 858 } 859 860 int 861 pfr_insert_kentry(struct pfr_ktable *kt, 
struct pfr_addr *ad, long tzero) 862 { 863 struct pfr_kentry *p; 864 int rv; 865 866 p = pfr_lookup_addr(kt, ad, 1); 867 if (p != NULL) 868 return (0); 869 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 870 if (p == NULL) 871 return (ENOMEM); 872 873 rv = pfr_route_kentry(kt, p); 874 if (rv) 875 return (rv); 876 877 p->pfrke_counters.pfrkc_tzero = tzero; 878 kt->pfrkt_cnt++; 879 880 return (0); 881 } 882 883 static void 884 pfr_remove_kentries(struct pfr_ktable *kt, 885 struct pfr_kentryworkq *workq) 886 { 887 struct pfr_kentry *p; 888 int n = 0; 889 890 SLIST_FOREACH(p, workq, pfrke_workq) { 891 pfr_unroute_kentry(kt, p); 892 n++; 893 } 894 kt->pfrkt_cnt -= n; 895 pfr_destroy_kentries(workq); 896 } 897 898 static void 899 pfr_clean_node_mask(struct pfr_ktable *kt, 900 struct pfr_kentryworkq *workq) 901 { 902 struct pfr_kentry *p; 903 904 SLIST_FOREACH(p, workq, pfrke_workq) 905 pfr_unroute_kentry(kt, p); 906 } 907 908 static void 909 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 910 long tzero, int negchange) 911 { 912 struct pfr_kentry *p; 913 int i; 914 915 SLIST_FOREACH(p, workq, pfrke_workq) { 916 if (negchange) 917 p->pfrke_not = !p->pfrke_not; 918 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0) 919 for (i = 0; i < PFR_NUM_COUNTERS; i++) 920 counter_u64_zero( 921 p->pfrke_counters.pfrkc_counters + i); 922 p->pfrke_counters.pfrkc_tzero = tzero; 923 } 924 } 925 926 static void 927 pfr_reset_feedback(struct pfr_addr *addr, int size) 928 { 929 struct pfr_addr *ad; 930 int i; 931 932 for (i = 0, ad = addr; i < size; i++, ad++) 933 ad->pfra_fback = PFR_FB_NONE; 934 } 935 936 static void 937 pfr_prepare_network(union sockaddr_union *sa, int af, int net) 938 { 939 int i; 940 941 bzero(sa, sizeof(*sa)); 942 if (af == AF_INET) { 943 sa->sin.sin_len = sizeof(sa->sin); 944 sa->sin.sin_family = AF_INET; 945 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; 946 } else if (af == AF_INET6) { 947 sa->sin6.sin6_len = sizeof(sa->sin6); 948 sa->sin6.sin6_family = AF_INET6; 949 for (i = 0; i < 4; i++) { 950 if (net <= 32) { 951 sa->sin6.sin6_addr.s6_addr32[i] = 952 net ? htonl(-1 << (32-net)) : 0; 953 break; 954 } 955 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 956 net -= 32; 957 } 958 } 959 } 960 961 static int 962 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 963 { 964 union sockaddr_union mask; 965 struct radix_node *rn; 966 struct radix_head *head = NULL; 967 968 PF_RULES_WASSERT(); 969 970 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 971 if (ke->pfrke_af == AF_INET) 972 head = &kt->pfrkt_ip4->rh; 973 else if (ke->pfrke_af == AF_INET6) 974 head = &kt->pfrkt_ip6->rh; 975 976 if (KENTRY_NETWORK(ke)) { 977 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 978 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 979 } else 980 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 981 982 return (rn == NULL ? 
-1 : 0); 983 } 984 985 static int 986 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 987 { 988 union sockaddr_union mask; 989 struct radix_node *rn; 990 struct radix_head *head = NULL; 991 992 if (ke->pfrke_af == AF_INET) 993 head = &kt->pfrkt_ip4->rh; 994 else if (ke->pfrke_af == AF_INET6) 995 head = &kt->pfrkt_ip6->rh; 996 997 if (KENTRY_NETWORK(ke)) { 998 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 999 rn = rn_delete(&ke->pfrke_sa, &mask, head); 1000 } else 1001 rn = rn_delete(&ke->pfrke_sa, NULL, head); 1002 1003 if (rn == NULL) { 1004 printf("pfr_unroute_kentry: delete failed.\n"); 1005 return (-1); 1006 } 1007 return (0); 1008 } 1009 1010 static void 1011 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke) 1012 { 1013 bzero(ad, sizeof(*ad)); 1014 if (ke == NULL) 1015 return; 1016 ad->pfra_af = ke->pfrke_af; 1017 ad->pfra_net = ke->pfrke_net; 1018 ad->pfra_not = ke->pfrke_not; 1019 if (ad->pfra_af == AF_INET) 1020 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 1021 else if (ad->pfra_af == AF_INET6) 1022 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 1023 } 1024 1025 static void 1026 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke, 1027 const struct pfr_walktree *w) 1028 { 1029 int dir, op; 1030 const struct pfr_kcounters *kc = &ke->pfrke_counters; 1031 1032 bzero(as, sizeof(*as)); 1033 pfr_copyout_addr(&as->pfras_a, ke); 1034 as->pfras_tzero = kc->pfrkc_tzero; 1035 1036 if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS) || 1037 kc->pfrkc_counters == NULL) { 1038 bzero(as->pfras_packets, sizeof(as->pfras_packets)); 1039 bzero(as->pfras_bytes, sizeof(as->pfras_bytes)); 1040 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT; 1041 return; 1042 } 1043 1044 for (dir = 0; dir < PFR_DIR_MAX; dir++) { 1045 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) { 1046 as->pfras_packets[dir][op] = counter_u64_fetch( 1047 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS)); 1048 as->pfras_bytes[dir][op] = counter_u64_fetch( 1049 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES)); 1050 } 1051 } 1052 } 1053 1054 static int 1055 pfr_walktree(struct radix_node *rn, void *arg) 1056 { 1057 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1058 struct pfr_walktree *w = arg; 1059 1060 switch (w->pfrw_op) { 1061 case PFRW_MARK: 1062 ke->pfrke_mark = 0; 1063 break; 1064 case PFRW_SWEEP: 1065 if (ke->pfrke_mark) 1066 break; 1067 /* FALLTHROUGH */ 1068 case PFRW_ENQUEUE: 1069 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1070 w->pfrw_cnt++; 1071 break; 1072 case PFRW_GET_ADDRS: 1073 if (w->pfrw_free-- > 0) { 1074 pfr_copyout_addr(w->pfrw_addr, ke); 1075 w->pfrw_addr++; 1076 } 1077 break; 1078 case PFRW_GET_ASTATS: 1079 if (w->pfrw_free-- > 0) { 1080 struct pfr_astats as; 1081 1082 pfr_copyout_astats(&as, ke, w); 1083 1084 bcopy(&as, w->pfrw_astats, sizeof(as)); 1085 w->pfrw_astats++; 1086 } 1087 break; 1088 case PFRW_POOL_GET: 1089 if (ke->pfrke_not) 1090 break; /* negative entries are ignored */ 1091 if (!w->pfrw_cnt--) { 1092 w->pfrw_kentry = ke; 1093 return (1); /* finish search */ 1094 } 1095 break; 1096 case PFRW_DYNADDR_UPDATE: 1097 { 1098 union sockaddr_union pfr_mask; 1099 1100 if (ke->pfrke_af == AF_INET) { 1101 if (w->pfrw_dyn->pfid_acnt4++ > 0) 1102 break; 1103 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); 1104 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa, 1105 AF_INET); 1106 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask, 1107 AF_INET); 1108 } else if (ke->pfrke_af == AF_INET6){ 1109 if (w->pfrw_dyn->pfid_acnt6++ > 0) 1110 break; 1111 
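/*
 * First IPv6 entry seen for this dynamic address: record its address
 * and the mask derived from its prefix length.
 */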
pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); 1112 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa, 1113 AF_INET6); 1114 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask, 1115 AF_INET6); 1116 } 1117 break; 1118 } 1119 case PFRW_COUNTERS: 1120 { 1121 if (w->pfrw_flags & PFR_TFLAG_COUNTERS) { 1122 if (ke->pfrke_counters.pfrkc_counters != NULL) 1123 break; 1124 ke->pfrke_counters.pfrkc_counters = 1125 uma_zalloc_pcpu(V_pfr_kentry_counter_z, 1126 M_NOWAIT | M_ZERO); 1127 } else { 1128 uma_zfree_pcpu(V_pfr_kentry_counter_z, 1129 ke->pfrke_counters.pfrkc_counters); 1130 ke->pfrke_counters.pfrkc_counters = NULL; 1131 } 1132 break; 1133 } 1134 } 1135 return (0); 1136 } 1137 1138 int 1139 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1140 { 1141 struct pfr_ktableworkq workq; 1142 struct pfr_ktable *p; 1143 int xdel = 0; 1144 1145 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS); 1146 if (pfr_fix_anchor(filter->pfrt_anchor)) 1147 return (EINVAL); 1148 if (pfr_table_count(filter, flags) < 0) 1149 return (ENOENT); 1150 1151 SLIST_INIT(&workq); 1152 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1153 if (pfr_skip_table(filter, p, flags)) 1154 continue; 1155 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) 1156 continue; 1157 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1158 continue; 1159 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1160 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1161 xdel++; 1162 } 1163 if (!(flags & PFR_FLAG_DUMMY)) 1164 pfr_setflags_ktables(&workq); 1165 if (ndel != NULL) 1166 *ndel = xdel; 1167 return (0); 1168 } 1169 1170 int 1171 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1172 { 1173 struct pfr_ktableworkq addq, changeq; 1174 struct pfr_ktable *p, *q, *r, key; 1175 int i, rv, xadd = 0; 1176 long tzero = time_second; 1177 1178 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1179 SLIST_INIT(&addq); 1180 SLIST_INIT(&changeq); 1181 for (i = 0; i < size; i++) { 1182 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1183 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, 1184 flags & PFR_FLAG_USERIOCTL)) 1185 senderr(EINVAL); 1186 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1187 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1188 if (p == NULL) { 1189 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1190 if (p == NULL) 1191 senderr(ENOMEM); 1192 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1193 if (!pfr_ktable_compare(p, q)) { 1194 pfr_destroy_ktable(p, 0); 1195 goto _skip; 1196 } 1197 } 1198 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1199 xadd++; 1200 if (!key.pfrkt_anchor[0]) 1201 goto _skip; 1202 1203 /* find or create root table */ 1204 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1205 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1206 if (r != NULL) { 1207 p->pfrkt_root = r; 1208 goto _skip; 1209 } 1210 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1211 if (!pfr_ktable_compare(&key, q)) { 1212 p->pfrkt_root = q; 1213 goto _skip; 1214 } 1215 } 1216 key.pfrkt_flags = 0; 1217 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1218 if (r == NULL) 1219 senderr(ENOMEM); 1220 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1221 p->pfrkt_root = r; 1222 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1223 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1224 if (!pfr_ktable_compare(&key, q)) 1225 goto _skip; 1226 p->pfrkt_nflags = (p->pfrkt_flags & 1227 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1228 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1229 xadd++; 1230 } 1231 _skip: 1232 ; 1233 } 1234 if (!(flags & PFR_FLAG_DUMMY)) { 1235 pfr_insert_ktables(&addq); 
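/*
 * The new tables were linked in above; now apply the requested flag
 * changes to the tables that already existed.
 */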
1236 pfr_setflags_ktables(&changeq); 1237 } else 1238 pfr_destroy_ktables(&addq, 0); 1239 if (nadd != NULL) 1240 *nadd = xadd; 1241 return (0); 1242 _bad: 1243 pfr_destroy_ktables(&addq, 0); 1244 return (rv); 1245 } 1246 1247 int 1248 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1249 { 1250 struct pfr_ktableworkq workq; 1251 struct pfr_ktable *p, *q, key; 1252 int i, xdel = 0; 1253 1254 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1255 SLIST_INIT(&workq); 1256 for (i = 0; i < size; i++) { 1257 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1258 if (pfr_validate_table(&key.pfrkt_t, 0, 1259 flags & PFR_FLAG_USERIOCTL)) 1260 return (EINVAL); 1261 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1262 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1263 SLIST_FOREACH(q, &workq, pfrkt_workq) 1264 if (!pfr_ktable_compare(p, q)) 1265 goto _skip; 1266 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1267 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1268 xdel++; 1269 } 1270 _skip: 1271 ; 1272 } 1273 1274 if (!(flags & PFR_FLAG_DUMMY)) 1275 pfr_setflags_ktables(&workq); 1276 if (ndel != NULL) 1277 *ndel = xdel; 1278 return (0); 1279 } 1280 1281 int 1282 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1283 int flags) 1284 { 1285 struct pfr_ktable *p; 1286 int n, nn; 1287 1288 PF_RULES_RASSERT(); 1289 1290 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1291 if (pfr_fix_anchor(filter->pfrt_anchor)) 1292 return (EINVAL); 1293 n = nn = pfr_table_count(filter, flags); 1294 if (n < 0) 1295 return (ENOENT); 1296 if (n > *size) { 1297 *size = n; 1298 return (0); 1299 } 1300 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1301 if (pfr_skip_table(filter, p, flags)) 1302 continue; 1303 if (n-- <= 0) 1304 continue; 1305 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl)); 1306 } 1307 1308 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1309 1310 *size = nn; 1311 return (0); 1312 } 1313 1314 int 1315 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1316 int flags) 1317 { 1318 struct pfr_ktable *p; 1319 struct pfr_ktableworkq workq; 1320 int n, nn; 1321 long tzero = time_second; 1322 int pfr_dir, pfr_op; 1323 1324 /* XXX PFR_FLAG_CLSTATS disabled */ 1325 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1326 if (pfr_fix_anchor(filter->pfrt_anchor)) 1327 return (EINVAL); 1328 n = nn = pfr_table_count(filter, flags); 1329 if (n < 0) 1330 return (ENOENT); 1331 if (n > *size) { 1332 *size = n; 1333 return (0); 1334 } 1335 SLIST_INIT(&workq); 1336 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1337 if (pfr_skip_table(filter, p, flags)) 1338 continue; 1339 if (n-- <= 0) 1340 continue; 1341 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t, 1342 sizeof(struct pfr_table)); 1343 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1344 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1345 tbl->pfrts_packets[pfr_dir][pfr_op] = 1346 pfr_kstate_counter_fetch( 1347 &p->pfrkt_packets[pfr_dir][pfr_op]); 1348 tbl->pfrts_bytes[pfr_dir][pfr_op] = 1349 pfr_kstate_counter_fetch( 1350 &p->pfrkt_bytes[pfr_dir][pfr_op]); 1351 } 1352 } 1353 tbl->pfrts_match = pfr_kstate_counter_fetch(&p->pfrkt_match); 1354 tbl->pfrts_nomatch = pfr_kstate_counter_fetch(&p->pfrkt_nomatch); 1355 tbl->pfrts_tzero = p->pfrkt_tzero; 1356 tbl->pfrts_cnt = p->pfrkt_cnt; 1357 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++) 1358 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op]; 1359 tbl++; 1360 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1361 } 1362 if (flags & PFR_FLAG_CLSTATS) 1363 
pfr_clstats_ktables(&workq, tzero, 1364 flags & PFR_FLAG_ADDRSTOO); 1365 1366 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1367 1368 *size = nn; 1369 return (0); 1370 } 1371 1372 int 1373 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1374 { 1375 struct pfr_ktableworkq workq; 1376 struct pfr_ktable *p, key; 1377 int i, xzero = 0; 1378 long tzero = time_second; 1379 1380 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1381 SLIST_INIT(&workq); 1382 for (i = 0; i < size; i++) { 1383 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1384 if (pfr_validate_table(&key.pfrkt_t, 0, 0)) 1385 return (EINVAL); 1386 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1387 if (p != NULL) { 1388 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1389 xzero++; 1390 } 1391 } 1392 if (!(flags & PFR_FLAG_DUMMY)) 1393 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1394 if (nzero != NULL) 1395 *nzero = xzero; 1396 return (0); 1397 } 1398 1399 int 1400 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1401 int *nchange, int *ndel, int flags) 1402 { 1403 struct pfr_ktableworkq workq; 1404 struct pfr_ktable *p, *q, key; 1405 int i, xchange = 0, xdel = 0; 1406 1407 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1408 if ((setflag & ~PFR_TFLAG_USRMASK) || 1409 (clrflag & ~PFR_TFLAG_USRMASK) || 1410 (setflag & clrflag)) 1411 return (EINVAL); 1412 SLIST_INIT(&workq); 1413 for (i = 0; i < size; i++) { 1414 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1415 if (pfr_validate_table(&key.pfrkt_t, 0, 1416 flags & PFR_FLAG_USERIOCTL)) 1417 return (EINVAL); 1418 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1419 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1420 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1421 ~clrflag; 1422 if (p->pfrkt_nflags == p->pfrkt_flags) 1423 goto _skip; 1424 SLIST_FOREACH(q, &workq, pfrkt_workq) 1425 if (!pfr_ktable_compare(p, q)) 1426 goto _skip; 1427 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1428 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1429 (clrflag & PFR_TFLAG_PERSIST) && 1430 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1431 xdel++; 1432 else 1433 xchange++; 1434 } 1435 _skip: 1436 ; 1437 } 1438 if (!(flags & PFR_FLAG_DUMMY)) 1439 pfr_setflags_ktables(&workq); 1440 if (nchange != NULL) 1441 *nchange = xchange; 1442 if (ndel != NULL) 1443 *ndel = xdel; 1444 return (0); 1445 } 1446 1447 int 1448 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1449 { 1450 struct pfr_ktableworkq workq; 1451 struct pfr_ktable *p; 1452 struct pf_kruleset *rs; 1453 int xdel = 0; 1454 1455 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1456 rs = pf_find_or_create_kruleset(trs->pfrt_anchor); 1457 if (rs == NULL) 1458 return (ENOMEM); 1459 SLIST_INIT(&workq); 1460 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1461 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1462 pfr_skip_table(trs, p, 0)) 1463 continue; 1464 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1465 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1466 xdel++; 1467 } 1468 if (!(flags & PFR_FLAG_DUMMY)) { 1469 pfr_setflags_ktables(&workq); 1470 if (ticket != NULL) 1471 *ticket = ++rs->tticket; 1472 rs->topen = 1; 1473 } else 1474 pf_remove_if_empty_kruleset(rs); 1475 if (ndel != NULL) 1476 *ndel = xdel; 1477 return (0); 1478 } 1479 1480 int 1481 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1482 int *nadd, int *naddr, u_int32_t ticket, int flags) 1483 { 1484 struct pfr_ktableworkq tableq; 1485 struct 
pfr_kentryworkq addrq; 1486 struct pfr_ktable *kt, *rt, *shadow, key; 1487 struct pfr_kentry *p; 1488 struct pfr_addr *ad; 1489 struct pf_kruleset *rs; 1490 int i, rv, xadd = 0, xaddr = 0; 1491 1492 PF_RULES_WASSERT(); 1493 1494 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1495 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1496 return (EINVAL); 1497 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, 1498 flags & PFR_FLAG_USERIOCTL)) 1499 return (EINVAL); 1500 rs = pf_find_kruleset(tbl->pfrt_anchor); 1501 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1502 return (EBUSY); 1503 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1504 SLIST_INIT(&tableq); 1505 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl); 1506 if (kt == NULL) { 1507 kt = pfr_create_ktable(tbl, 0, 1); 1508 if (kt == NULL) 1509 return (ENOMEM); 1510 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1511 xadd++; 1512 if (!tbl->pfrt_anchor[0]) 1513 goto _skip; 1514 1515 /* find or create root table */ 1516 bzero(&key, sizeof(key)); 1517 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1518 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1519 if (rt != NULL) { 1520 kt->pfrkt_root = rt; 1521 goto _skip; 1522 } 1523 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1524 if (rt == NULL) { 1525 pfr_destroy_ktables(&tableq, 0); 1526 return (ENOMEM); 1527 } 1528 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1529 kt->pfrkt_root = rt; 1530 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) 1531 xadd++; 1532 _skip: 1533 shadow = pfr_create_ktable(tbl, 0, 0); 1534 if (shadow == NULL) { 1535 pfr_destroy_ktables(&tableq, 0); 1536 return (ENOMEM); 1537 } 1538 SLIST_INIT(&addrq); 1539 for (i = 0, ad = addr; i < size; i++, ad++) { 1540 if (pfr_validate_addr(ad)) 1541 senderr(EINVAL); 1542 if (pfr_lookup_addr(shadow, ad, 1) != NULL) 1543 continue; 1544 p = pfr_create_kentry(ad, 1545 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 1546 if (p == NULL) 1547 senderr(ENOMEM); 1548 if (pfr_route_kentry(shadow, p)) { 1549 pfr_destroy_kentry(p); 1550 continue; 1551 } 1552 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1553 xaddr++; 1554 } 1555 if (!(flags & PFR_FLAG_DUMMY)) { 1556 if (kt->pfrkt_shadow != NULL) 1557 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1558 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1559 pfr_insert_ktables(&tableq); 1560 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 
1561 xaddr : NO_ADDRESSES; 1562 kt->pfrkt_shadow = shadow; 1563 } else { 1564 pfr_clean_node_mask(shadow, &addrq); 1565 pfr_destroy_ktable(shadow, 0); 1566 pfr_destroy_ktables(&tableq, 0); 1567 pfr_destroy_kentries(&addrq); 1568 } 1569 if (nadd != NULL) 1570 *nadd = xadd; 1571 if (naddr != NULL) 1572 *naddr = xaddr; 1573 return (0); 1574 _bad: 1575 pfr_destroy_ktable(shadow, 0); 1576 pfr_destroy_ktables(&tableq, 0); 1577 pfr_destroy_kentries(&addrq); 1578 return (rv); 1579 } 1580 1581 int 1582 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) 1583 { 1584 struct pfr_ktableworkq workq; 1585 struct pfr_ktable *p; 1586 struct pf_kruleset *rs; 1587 int xdel = 0; 1588 1589 PF_RULES_WASSERT(); 1590 1591 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1592 rs = pf_find_kruleset(trs->pfrt_anchor); 1593 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1594 return (0); 1595 SLIST_INIT(&workq); 1596 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1597 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1598 pfr_skip_table(trs, p, 0)) 1599 continue; 1600 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1601 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1602 xdel++; 1603 } 1604 if (!(flags & PFR_FLAG_DUMMY)) { 1605 pfr_setflags_ktables(&workq); 1606 rs->topen = 0; 1607 pf_remove_if_empty_kruleset(rs); 1608 } 1609 if (ndel != NULL) 1610 *ndel = xdel; 1611 return (0); 1612 } 1613 1614 int 1615 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1616 int *nchange, int flags) 1617 { 1618 struct pfr_ktable *p, *q; 1619 struct pfr_ktableworkq workq; 1620 struct pf_kruleset *rs; 1621 int xadd = 0, xchange = 0; 1622 long tzero = time_second; 1623 1624 PF_RULES_WASSERT(); 1625 1626 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1627 rs = pf_find_kruleset(trs->pfrt_anchor); 1628 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1629 return (EBUSY); 1630 1631 SLIST_INIT(&workq); 1632 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1633 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1634 pfr_skip_table(trs, p, 0)) 1635 continue; 1636 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1637 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1638 xchange++; 1639 else 1640 xadd++; 1641 } 1642 1643 if (!(flags & PFR_FLAG_DUMMY)) { 1644 for (p = SLIST_FIRST(&workq); p != NULL; p = q) { 1645 q = SLIST_NEXT(p, pfrkt_workq); 1646 pfr_commit_ktable(p, tzero); 1647 } 1648 rs->topen = 0; 1649 pf_remove_if_empty_kruleset(rs); 1650 } 1651 if (nadd != NULL) 1652 *nadd = xadd; 1653 if (nchange != NULL) 1654 *nchange = xchange; 1655 1656 return (0); 1657 } 1658 1659 static void 1660 pfr_commit_ktable(struct pfr_ktable *kt, long tzero) 1661 { 1662 counter_u64_t *pkc, *qkc; 1663 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1664 int nflags; 1665 1666 PF_RULES_WASSERT(); 1667 1668 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1669 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1670 pfr_clstats_ktable(kt, tzero, 1); 1671 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1672 /* kt might contain addresses */ 1673 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1674 struct pfr_kentry *p, *q, *next; 1675 struct pfr_addr ad; 1676 1677 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1678 pfr_mark_addrs(kt); 1679 SLIST_INIT(&addq); 1680 SLIST_INIT(&changeq); 1681 SLIST_INIT(&delq); 1682 SLIST_INIT(&garbageq); 1683 pfr_clean_node_mask(shadow, &addrq); 1684 SLIST_FOREACH_SAFE(p, &addrq, pfrke_workq, next) { 1685 pfr_copyout_addr(&ad, p); 1686 q = pfr_lookup_addr(kt, &ad, 1); 1687 if (q != NULL) { 1688 if (q->pfrke_not != 
p->pfrke_not) 1689 SLIST_INSERT_HEAD(&changeq, q, 1690 pfrke_workq); 1691 pkc = &p->pfrke_counters.pfrkc_counters; 1692 qkc = &q->pfrke_counters.pfrkc_counters; 1693 if ((*pkc == NULL) != (*qkc == NULL)) 1694 SWAP(counter_u64_t, *pkc, *qkc); 1695 q->pfrke_mark = 1; 1696 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1697 } else { 1698 p->pfrke_counters.pfrkc_tzero = tzero; 1699 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1700 } 1701 } 1702 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1703 pfr_insert_kentries(kt, &addq, tzero); 1704 pfr_remove_kentries(kt, &delq); 1705 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 1706 pfr_destroy_kentries(&garbageq); 1707 } else { 1708 /* kt cannot contain addresses */ 1709 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1710 shadow->pfrkt_ip4); 1711 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1712 shadow->pfrkt_ip6); 1713 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1714 pfr_clstats_ktable(kt, tzero, 1); 1715 } 1716 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1717 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1718 & ~PFR_TFLAG_INACTIVE; 1719 pfr_destroy_ktable(shadow, 0); 1720 kt->pfrkt_shadow = NULL; 1721 pfr_setflags_ktable(kt, nflags); 1722 } 1723 1724 static int 1725 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) 1726 { 1727 int i; 1728 1729 if (!tbl->pfrt_name[0]) 1730 return (-1); 1731 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) 1732 return (-1); 1733 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1734 return (-1); 1735 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1736 if (tbl->pfrt_name[i]) 1737 return (-1); 1738 if (pfr_fix_anchor(tbl->pfrt_anchor)) 1739 return (-1); 1740 if (tbl->pfrt_flags & ~allowedflags) 1741 return (-1); 1742 return (0); 1743 } 1744 1745 /* 1746 * Rewrite anchors referenced by tables to remove slashes 1747 * and check for validity. 1748 */ 1749 static int 1750 pfr_fix_anchor(char *anchor) 1751 { 1752 size_t siz = MAXPATHLEN; 1753 int i; 1754 1755 if (anchor[0] == '/') { 1756 char *path; 1757 int off; 1758 1759 path = anchor; 1760 off = 1; 1761 while (*++path == '/') 1762 off++; 1763 bcopy(path, anchor, siz - off); 1764 memset(anchor + siz - off, 0, off); 1765 } 1766 if (anchor[siz - 1]) 1767 return (-1); 1768 for (i = strlen(anchor); i < siz; i++) 1769 if (anchor[i]) 1770 return (-1); 1771 return (0); 1772 } 1773 1774 int 1775 pfr_table_count(struct pfr_table *filter, int flags) 1776 { 1777 struct pf_kruleset *rs; 1778 1779 PF_RULES_ASSERT(); 1780 1781 if (flags & PFR_FLAG_ALLRSETS) 1782 return (V_pfr_ktable_cnt); 1783 if (filter->pfrt_anchor[0]) { 1784 rs = pf_find_kruleset(filter->pfrt_anchor); 1785 return ((rs != NULL) ? 
rs->tables : -1); 1786 } 1787 return (pf_main_ruleset.tables); 1788 } 1789 1790 static int 1791 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1792 { 1793 if (flags & PFR_FLAG_ALLRSETS) 1794 return (0); 1795 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) 1796 return (1); 1797 return (0); 1798 } 1799 1800 static void 1801 pfr_insert_ktables(struct pfr_ktableworkq *workq) 1802 { 1803 struct pfr_ktable *p; 1804 1805 SLIST_FOREACH(p, workq, pfrkt_workq) 1806 pfr_insert_ktable(p); 1807 } 1808 1809 static void 1810 pfr_insert_ktable(struct pfr_ktable *kt) 1811 { 1812 1813 PF_RULES_WASSERT(); 1814 1815 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt); 1816 V_pfr_ktable_cnt++; 1817 if (kt->pfrkt_root != NULL) 1818 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1819 pfr_setflags_ktable(kt->pfrkt_root, 1820 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1821 } 1822 1823 static void 1824 pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1825 { 1826 struct pfr_ktable *p, *q; 1827 1828 for (p = SLIST_FIRST(workq); p; p = q) { 1829 q = SLIST_NEXT(p, pfrkt_workq); 1830 pfr_setflags_ktable(p, p->pfrkt_nflags); 1831 } 1832 } 1833 1834 static void 1835 pfr_setflags_ktable(struct pfr_ktable *kt, int newf) 1836 { 1837 struct pfr_kentryworkq addrq; 1838 struct pfr_walktree w; 1839 1840 PF_RULES_WASSERT(); 1841 1842 if (!(newf & PFR_TFLAG_REFERENCED) && 1843 !(newf & PFR_TFLAG_REFDANCHOR) && 1844 !(newf & PFR_TFLAG_PERSIST)) 1845 newf &= ~PFR_TFLAG_ACTIVE; 1846 if (!(newf & PFR_TFLAG_ACTIVE)) 1847 newf &= ~PFR_TFLAG_USRMASK; 1848 if (!(newf & PFR_TFLAG_SETMASK)) { 1849 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt); 1850 if (kt->pfrkt_root != NULL) 1851 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1852 pfr_setflags_ktable(kt->pfrkt_root, 1853 kt->pfrkt_root->pfrkt_flags & 1854 ~PFR_TFLAG_REFDANCHOR); 1855 pfr_destroy_ktable(kt, 1); 1856 V_pfr_ktable_cnt--; 1857 return; 1858 } 1859 if (newf & PFR_TFLAG_COUNTERS && ! (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 1860 bzero(&w, sizeof(w)); 1861 w.pfrw_op = PFRW_COUNTERS; 1862 w.pfrw_flags |= PFR_TFLAG_COUNTERS; 1863 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 1864 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 1865 } 1866 if (! 
(newf & PFR_TFLAG_COUNTERS) && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 1867 bzero(&w, sizeof(w)); 1868 w.pfrw_op = PFRW_COUNTERS; 1869 w.pfrw_flags |= 0; 1870 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 1871 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 1872 } 1873 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1874 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1875 pfr_remove_kentries(kt, &addrq); 1876 } 1877 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1878 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1879 kt->pfrkt_shadow = NULL; 1880 } 1881 kt->pfrkt_flags = newf; 1882 } 1883 1884 static void 1885 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1886 { 1887 struct pfr_ktable *p; 1888 1889 SLIST_FOREACH(p, workq, pfrkt_workq) 1890 pfr_clstats_ktable(p, tzero, recurse); 1891 } 1892 1893 static void 1894 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1895 { 1896 struct pfr_kentryworkq addrq; 1897 int pfr_dir, pfr_op; 1898 1899 MPASS(PF_TABLE_STATS_OWNED() || PF_RULES_WOWNED()); 1900 1901 if (recurse) { 1902 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1903 pfr_clstats_kentries(kt, &addrq, tzero, 0); 1904 } 1905 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1906 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1907 pfr_kstate_counter_zero(&kt->pfrkt_packets[pfr_dir][pfr_op]); 1908 pfr_kstate_counter_zero(&kt->pfrkt_bytes[pfr_dir][pfr_op]); 1909 } 1910 } 1911 pfr_kstate_counter_zero(&kt->pfrkt_match); 1912 pfr_kstate_counter_zero(&kt->pfrkt_nomatch); 1913 kt->pfrkt_tzero = tzero; 1914 } 1915 1916 static struct pfr_ktable * 1917 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 1918 { 1919 struct pfr_ktable *kt; 1920 struct pf_kruleset *rs; 1921 int pfr_dir, pfr_op; 1922 1923 PF_RULES_WASSERT(); 1924 1925 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO); 1926 if (kt == NULL) 1927 return (NULL); 1928 kt->pfrkt_t = *tbl; 1929 1930 if (attachruleset) { 1931 rs = pf_find_or_create_kruleset(tbl->pfrt_anchor); 1932 if (!rs) { 1933 pfr_destroy_ktable(kt, 0); 1934 return (NULL); 1935 } 1936 kt->pfrkt_rs = rs; 1937 rs->tables++; 1938 } 1939 1940 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1941 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1942 if (pfr_kstate_counter_init( 1943 &kt->pfrkt_packets[pfr_dir][pfr_op], M_NOWAIT) != 0) { 1944 pfr_destroy_ktable(kt, 0); 1945 return (NULL); 1946 } 1947 if (pfr_kstate_counter_init( 1948 &kt->pfrkt_bytes[pfr_dir][pfr_op], M_NOWAIT) != 0) { 1949 pfr_destroy_ktable(kt, 0); 1950 return (NULL); 1951 } 1952 } 1953 } 1954 if (pfr_kstate_counter_init(&kt->pfrkt_match, M_NOWAIT) != 0) { 1955 pfr_destroy_ktable(kt, 0); 1956 return (NULL); 1957 } 1958 1959 if (pfr_kstate_counter_init(&kt->pfrkt_nomatch, M_NOWAIT) != 0) { 1960 pfr_destroy_ktable(kt, 0); 1961 return (NULL); 1962 } 1963 1964 if (!rn_inithead((void **)&kt->pfrkt_ip4, 1965 offsetof(struct sockaddr_in, sin_addr) * 8) || 1966 !rn_inithead((void **)&kt->pfrkt_ip6, 1967 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 1968 pfr_destroy_ktable(kt, 0); 1969 return (NULL); 1970 } 1971 kt->pfrkt_tzero = tzero; 1972 1973 return (kt); 1974 } 1975 1976 static void 1977 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 1978 { 1979 struct pfr_ktable *p, *q; 1980 1981 for (p = SLIST_FIRST(workq); p; p = q) { 1982 q = SLIST_NEXT(p, pfrkt_workq); 1983 pfr_destroy_ktable(p, flushaddr); 1984 } 1985 } 1986 1987 static void 1988 
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 1989 { 1990 struct pfr_kentryworkq addrq; 1991 int pfr_dir, pfr_op; 1992 1993 if (flushaddr) { 1994 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1995 pfr_clean_node_mask(kt, &addrq); 1996 pfr_destroy_kentries(&addrq); 1997 } 1998 if (kt->pfrkt_ip4 != NULL) 1999 rn_detachhead((void **)&kt->pfrkt_ip4); 2000 if (kt->pfrkt_ip6 != NULL) 2001 rn_detachhead((void **)&kt->pfrkt_ip6); 2002 if (kt->pfrkt_shadow != NULL) 2003 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 2004 if (kt->pfrkt_rs != NULL) { 2005 kt->pfrkt_rs->tables--; 2006 pf_remove_if_empty_kruleset(kt->pfrkt_rs); 2007 } 2008 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 2009 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 2010 pfr_kstate_counter_deinit(&kt->pfrkt_packets[pfr_dir][pfr_op]); 2011 pfr_kstate_counter_deinit(&kt->pfrkt_bytes[pfr_dir][pfr_op]); 2012 } 2013 } 2014 pfr_kstate_counter_deinit(&kt->pfrkt_match); 2015 pfr_kstate_counter_deinit(&kt->pfrkt_nomatch); 2016 2017 free(kt, M_PFTABLE); 2018 } 2019 2020 static int 2021 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) 2022 { 2023 int d; 2024 2025 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 2026 return (d); 2027 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); 2028 } 2029 2030 static struct pfr_ktable * 2031 pfr_lookup_table(struct pfr_table *tbl) 2032 { 2033 /* struct pfr_ktable start like a struct pfr_table */ 2034 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables, 2035 (struct pfr_ktable *)tbl)); 2036 } 2037 2038 int 2039 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 2040 { 2041 struct pfr_kentry *ke = NULL; 2042 int match; 2043 2044 PF_RULES_RASSERT(); 2045 2046 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2047 kt = kt->pfrkt_root; 2048 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2049 return (0); 2050 2051 switch (af) { 2052 #ifdef INET 2053 case AF_INET: 2054 { 2055 struct sockaddr_in sin; 2056 2057 bzero(&sin, sizeof(sin)); 2058 sin.sin_len = sizeof(sin); 2059 sin.sin_family = AF_INET; 2060 sin.sin_addr.s_addr = a->addr32[0]; 2061 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 2062 if (ke && KENTRY_RNF_ROOT(ke)) 2063 ke = NULL; 2064 break; 2065 } 2066 #endif /* INET */ 2067 #ifdef INET6 2068 case AF_INET6: 2069 { 2070 struct sockaddr_in6 sin6; 2071 2072 bzero(&sin6, sizeof(sin6)); 2073 sin6.sin6_len = sizeof(sin6); 2074 sin6.sin6_family = AF_INET6; 2075 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 2076 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 2077 if (ke && KENTRY_RNF_ROOT(ke)) 2078 ke = NULL; 2079 break; 2080 } 2081 #endif /* INET6 */ 2082 } 2083 match = (ke && !ke->pfrke_not); 2084 if (match) 2085 pfr_kstate_counter_add(&kt->pfrkt_match, 1); 2086 else 2087 pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1); 2088 return (match); 2089 } 2090 2091 void 2092 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 2093 u_int64_t len, int dir_out, int op_pass, int notrule) 2094 { 2095 struct pfr_kentry *ke = NULL; 2096 2097 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2098 kt = kt->pfrkt_root; 2099 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2100 return; 2101 2102 switch (af) { 2103 #ifdef INET 2104 case AF_INET: 2105 { 2106 struct sockaddr_in sin; 2107 2108 bzero(&sin, sizeof(sin)); 2109 sin.sin_len = sizeof(sin); 2110 sin.sin_family = AF_INET; 2111 sin.sin_addr.s_addr = a->addr32[0]; 2112 ke = (struct pfr_kentry *)rn_match(&sin, 
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	pfr_kstate_counter_add(&kt->pfrkt_packets[dir_out][op_pass], 1);
	pfr_kstate_counter_add(&kt->pfrkt_bytes[dir_out][op_pass], len);
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_BYTES), len);
	}
}

/*
 * Find or create the table named 'name' in ruleset 'rs', attaching it to
 * its root table in the main ruleset when 'rs' lives in an anchor, and
 * take a rule reference on it.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_kruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_kanchor *ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

/*
 * Select an address from table 'kt' for a round-robin style pool.  On
 * success the chosen address is copied into '*counter', the index of the
 * block it came from into '*pidx', and 0 is returned.  Returns 1 when the
 * table has no further usable block and -1 when the table is not active.
 */
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr *addr, *cur, *mask;
	union sockaddr_union uaddr, umask;
	struct pfr_kentry *ke, *ke2 = NULL;
	int idx = -1, use_counter = 0;

	MPASS(pidx != NULL);

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
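	/*
	 * A negative index means the caller supplied no cursor; start the
	 * scan from the first block in the table.
	 */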
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		pfr_kstate_counter_add(&kt->pfrkt_nomatch, 1);
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		pfr_kstate_counter_add(&kt->pfrkt_match, 1);
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			pfr_kstate_counter_add(&kt->pfrkt_match, 1);
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

/*
 * Return the idx'th entry of the given address family, found by walking
 * the corresponding radix tree.
 */
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

/*
 * Recompute the per-family address counts and associated dynamic address
 * state for 'dyn' from the current contents of table 'kt'.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}