1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002 Cedric Berger 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * - Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $ 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 40 #include <sys/param.h> 41 #include <sys/kernel.h> 42 #include <sys/lock.h> 43 #include <sys/malloc.h> 44 #include <sys/mbuf.h> 45 #include <sys/mutex.h> 46 #include <sys/refcount.h> 47 #include <sys/socket.h> 48 #include <vm/uma.h> 49 50 #include <net/if.h> 51 #include <net/vnet.h> 52 #include <net/pfvar.h> 53 54 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 55 56 #define ACCEPT_FLAGS(flags, oklist) \ 57 do { \ 58 if ((flags & ~(oklist)) & \ 59 PFR_FLAG_ALLMASK) \ 60 return (EINVAL); \ 61 } while (0) 62 63 #define FILLIN_SIN(sin, addr) \ 64 do { \ 65 (sin).sin_len = sizeof(sin); \ 66 (sin).sin_family = AF_INET; \ 67 (sin).sin_addr = (addr); \ 68 } while (0) 69 70 #define FILLIN_SIN6(sin6, addr) \ 71 do { \ 72 (sin6).sin6_len = sizeof(sin6); \ 73 (sin6).sin6_family = AF_INET6; \ 74 (sin6).sin6_addr = (addr); \ 75 } while (0) 76 77 #define SWAP(type, a1, a2) \ 78 do { \ 79 type tmp = a1; \ 80 a1 = a2; \ 81 a2 = tmp; \ 82 } while (0) 83 84 #define SUNION2PF(su, af) (((af)==AF_INET) ? \ 85 (struct pf_addr *)&(su)->sin.sin_addr : \ 86 (struct pf_addr *)&(su)->sin6.sin6_addr) 87 88 #define AF_BITS(af) (((af)==AF_INET)?32:128) 89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 91 #define KENTRY_RNF_ROOT(ke) \ 92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 93 94 #define NO_ADDRESSES (-1) 95 #define ENQUEUE_UNMARKED_ONLY (1) 96 #define INVERT_NEG_FLAG (1) 97 98 struct pfr_walktree { 99 enum pfrw_op { 100 PFRW_MARK, 101 PFRW_SWEEP, 102 PFRW_ENQUEUE, 103 PFRW_GET_ADDRS, 104 PFRW_GET_ASTATS, 105 PFRW_POOL_GET, 106 PFRW_DYNADDR_UPDATE 107 } pfrw_op; 108 union { 109 struct pfr_addr *pfrw1_addr; 110 struct pfr_astats *pfrw1_astats; 111 struct pfr_kentryworkq *pfrw1_workq; 112 struct pfr_kentry *pfrw1_kentry; 113 struct pfi_dynaddr *pfrw1_dyn; 114 } pfrw_1; 115 int pfrw_free; 116 }; 117 #define pfrw_addr pfrw_1.pfrw1_addr 118 #define pfrw_astats pfrw_1.pfrw1_astats 119 #define pfrw_workq pfrw_1.pfrw1_workq 120 #define pfrw_kentry pfrw_1.pfrw1_kentry 121 #define pfrw_dyn pfrw_1.pfrw1_dyn 122 #define pfrw_cnt pfrw_free 123 124 #define senderr(e) do { rv = (e); goto _bad; } while (0) 125 126 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures"); 127 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z); 128 #define V_pfr_kentry_z VNET(pfr_kentry_z) 129 VNET_DEFINE_STATIC(uma_zone_t, pfr_kcounters_z); 130 #define V_pfr_kcounters_z VNET(pfr_kcounters_z) 131 132 static struct pf_addr pfr_ffaddr = { 133 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } 134 }; 135 136 static void pfr_copyout_addr(struct pfr_addr *, 137 struct pfr_kentry *ke); 138 static int pfr_validate_addr(struct pfr_addr *); 139 static void pfr_enqueue_addrs(struct pfr_ktable *, 140 struct pfr_kentryworkq *, int *, int); 141 static void pfr_mark_addrs(struct pfr_ktable *); 142 static struct pfr_kentry 143 *pfr_lookup_addr(struct pfr_ktable *, 144 struct pfr_addr *, int); 145 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *); 146 static void pfr_destroy_kentries(struct pfr_kentryworkq *); 147 static void pfr_destroy_kentry(struct pfr_kentry *); 148 static void pfr_insert_kentries(struct pfr_ktable *, 149 struct pfr_kentryworkq *, long); 150 static void pfr_remove_kentries(struct pfr_ktable *, 151 struct pfr_kentryworkq *); 152 static void pfr_clstats_kentries(struct pfr_kentryworkq *, long, 153 int); 154 static void pfr_reset_feedback(struct pfr_addr *, int); 155 static void pfr_prepare_network(union sockaddr_union *, int, int); 156 static int pfr_route_kentry(struct pfr_ktable *, 157 struct pfr_kentry *); 158 static int pfr_unroute_kentry(struct pfr_ktable *, 159 struct pfr_kentry *); 160 static int pfr_walktree(struct radix_node *, void *); 161 static int pfr_validate_table(struct pfr_table *, int, int); 162 static int pfr_fix_anchor(char *); 163 static void pfr_commit_ktable(struct pfr_ktable *, long); 164 static void pfr_insert_ktables(struct pfr_ktableworkq *); 165 static void pfr_insert_ktable(struct pfr_ktable *); 166 static void pfr_setflags_ktables(struct pfr_ktableworkq *); 167 static void pfr_setflags_ktable(struct pfr_ktable *, int); 168 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 169 int); 170 static void pfr_clstats_ktable(struct pfr_ktable *, long, int); 171 static struct pfr_ktable 172 *pfr_create_ktable(struct pfr_table *, long, int); 173 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 174 static void pfr_destroy_ktable(struct pfr_ktable *, int); 175 static int pfr_ktable_compare(struct pfr_ktable *, 176 struct pfr_ktable *); 177 static struct pfr_ktable 178 *pfr_lookup_table(struct pfr_table *); 179 static void pfr_clean_node_mask(struct pfr_ktable *, 180 struct pfr_kentryworkq *); 181 static int pfr_skip_table(struct pfr_table *, 182 struct pfr_ktable *, int); 183 static struct pfr_kentry 184 *pfr_kentry_byidx(struct pfr_ktable *, int, int); 185 186 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 187 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 188 189 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables); 190 #define V_pfr_ktables VNET(pfr_ktables) 191 192 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable); 193 #define V_pfr_nulltable VNET(pfr_nulltable) 194 195 VNET_DEFINE_STATIC(int, pfr_ktable_cnt); 196 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt) 197 198 void 199 pfr_initialize(void) 200 { 201 202 V_pfr_kentry_z = uma_zcreate("pf table entries", 203 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 204 0); 205 V_pfr_kcounters_z = uma_zcreate("pf table counters", 206 sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL, 207 UMA_ALIGN_PTR, 0); 208 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z; 209 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; 210 } 211 212 void 213 pfr_cleanup(void) 214 { 215 216 uma_zdestroy(V_pfr_kentry_z); 217 uma_zdestroy(V_pfr_kcounters_z); 218 } 219 220 int 221 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 222 { 223 struct pfr_ktable *kt; 224 struct pfr_kentryworkq workq; 225 226 PF_RULES_WASSERT(); 227 228 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 229 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 230 return (EINVAL); 231 kt = pfr_lookup_table(tbl); 232 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 233 return (ESRCH); 234 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 235 return (EPERM); 236 pfr_enqueue_addrs(kt, &workq, ndel, 0); 237 238 if (!(flags & PFR_FLAG_DUMMY)) { 239 pfr_remove_kentries(kt, &workq); 240 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__)); 241 } 242 return (0); 243 } 244 245 int 246 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 247 int *nadd, int flags) 248 { 249 struct pfr_ktable *kt, *tmpkt; 250 struct pfr_kentryworkq workq; 251 struct pfr_kentry *p, *q; 252 struct pfr_addr *ad; 253 int i, rv, xadd = 0; 254 long tzero = time_second; 255 256 PF_RULES_WASSERT(); 257 258 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 259 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 260 return (EINVAL); 261 kt = pfr_lookup_table(tbl); 262 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 263 return (ESRCH); 264 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 265 return (EPERM); 266 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 267 if (tmpkt == NULL) 268 return (ENOMEM); 269 SLIST_INIT(&workq); 270 for (i = 0, ad = addr; i < size; i++, ad++) { 271 if (pfr_validate_addr(ad)) 272 senderr(EINVAL); 273 p = pfr_lookup_addr(kt, ad, 1); 274 q = pfr_lookup_addr(tmpkt, ad, 1); 275 if (flags & PFR_FLAG_FEEDBACK) { 276 if (q != NULL) 277 ad->pfra_fback = PFR_FB_DUPLICATE; 278 else if (p == NULL) 279 ad->pfra_fback = PFR_FB_ADDED; 280 else if (p->pfrke_not != ad->pfra_not) 281 ad->pfra_fback = PFR_FB_CONFLICT; 282 else 283 ad->pfra_fback = PFR_FB_NONE; 284 } 285 if (p == NULL && q == NULL) { 286 p = pfr_create_kentry(ad); 287 if (p == NULL) 288 senderr(ENOMEM); 289 if (pfr_route_kentry(tmpkt, p)) { 290 pfr_destroy_kentry(p); 291 ad->pfra_fback = PFR_FB_NONE; 292 } else { 293 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 294 xadd++; 295 } 296 } 297 } 298 pfr_clean_node_mask(tmpkt, &workq); 299 if (!(flags & PFR_FLAG_DUMMY)) 300 pfr_insert_kentries(kt, &workq, tzero); 301 else 302 pfr_destroy_kentries(&workq); 303 if (nadd != NULL) 304 *nadd = xadd; 305 pfr_destroy_ktable(tmpkt, 0); 306 return (0); 307 _bad: 308 pfr_clean_node_mask(tmpkt, &workq); 309 pfr_destroy_kentries(&workq); 310 if (flags & PFR_FLAG_FEEDBACK) 311 pfr_reset_feedback(addr, size); 312 pfr_destroy_ktable(tmpkt, 0); 313 return (rv); 314 } 315 316 int 317 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 318 int *ndel, int flags) 319 { 320 struct pfr_ktable *kt; 321 struct pfr_kentryworkq workq; 322 struct pfr_kentry *p; 323 struct pfr_addr *ad; 324 int i, rv, xdel = 0, log = 1; 325 326 PF_RULES_WASSERT(); 327 328 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 329 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 330 return (EINVAL); 331 kt = pfr_lookup_table(tbl); 332 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 333 return (ESRCH); 334 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 335 return (EPERM); 336 /* 337 * there are two algorithms to choose from here. 338 * with: 339 * n: number of addresses to delete 340 * N: number of addresses in the table 341 * 342 * one is O(N) and is better for large 'n' 343 * one is O(n*LOG(N)) and is better for small 'n' 344 * 345 * following code try to decide which one is best. 346 */ 347 for (i = kt->pfrkt_cnt; i > 0; i >>= 1) 348 log++; 349 if (size > kt->pfrkt_cnt/log) { 350 /* full table scan */ 351 pfr_mark_addrs(kt); 352 } else { 353 /* iterate over addresses to delete */ 354 for (i = 0, ad = addr; i < size; i++, ad++) { 355 if (pfr_validate_addr(ad)) 356 return (EINVAL); 357 p = pfr_lookup_addr(kt, ad, 1); 358 if (p != NULL) 359 p->pfrke_mark = 0; 360 } 361 } 362 SLIST_INIT(&workq); 363 for (i = 0, ad = addr; i < size; i++, ad++) { 364 if (pfr_validate_addr(ad)) 365 senderr(EINVAL); 366 p = pfr_lookup_addr(kt, ad, 1); 367 if (flags & PFR_FLAG_FEEDBACK) { 368 if (p == NULL) 369 ad->pfra_fback = PFR_FB_NONE; 370 else if (p->pfrke_not != ad->pfra_not) 371 ad->pfra_fback = PFR_FB_CONFLICT; 372 else if (p->pfrke_mark) 373 ad->pfra_fback = PFR_FB_DUPLICATE; 374 else 375 ad->pfra_fback = PFR_FB_DELETED; 376 } 377 if (p != NULL && p->pfrke_not == ad->pfra_not && 378 !p->pfrke_mark) { 379 p->pfrke_mark = 1; 380 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 381 xdel++; 382 } 383 } 384 if (!(flags & PFR_FLAG_DUMMY)) 385 pfr_remove_kentries(kt, &workq); 386 if (ndel != NULL) 387 *ndel = xdel; 388 return (0); 389 _bad: 390 if (flags & PFR_FLAG_FEEDBACK) 391 pfr_reset_feedback(addr, size); 392 return (rv); 393 } 394 395 int 396 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 397 int *size2, int *nadd, int *ndel, int *nchange, int flags, 398 u_int32_t ignore_pfrt_flags) 399 { 400 struct pfr_ktable *kt, *tmpkt; 401 struct pfr_kentryworkq addq, delq, changeq; 402 struct pfr_kentry *p, *q; 403 struct pfr_addr ad; 404 int i, rv, xadd = 0, xdel = 0, xchange = 0; 405 long tzero = time_second; 406 407 PF_RULES_WASSERT(); 408 409 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 410 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags & 411 PFR_FLAG_USERIOCTL)) 412 return (EINVAL); 413 kt = pfr_lookup_table(tbl); 414 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 415 return (ESRCH); 416 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 417 return (EPERM); 418 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 419 if (tmpkt == NULL) 420 return (ENOMEM); 421 pfr_mark_addrs(kt); 422 SLIST_INIT(&addq); 423 SLIST_INIT(&delq); 424 SLIST_INIT(&changeq); 425 for (i = 0; i < size; i++) { 426 /* 427 * XXXGL: undertand pf_if usage of this function 428 * and make ad a moving pointer 429 */ 430 bcopy(addr + i, &ad, sizeof(ad)); 431 if (pfr_validate_addr(&ad)) 432 senderr(EINVAL); 433 ad.pfra_fback = PFR_FB_NONE; 434 p = pfr_lookup_addr(kt, &ad, 1); 435 if (p != NULL) { 436 if (p->pfrke_mark) { 437 ad.pfra_fback = PFR_FB_DUPLICATE; 438 goto _skip; 439 } 440 p->pfrke_mark = 1; 441 if (p->pfrke_not != ad.pfra_not) { 442 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq); 443 ad.pfra_fback = PFR_FB_CHANGED; 444 xchange++; 445 } 446 } else { 447 q = pfr_lookup_addr(tmpkt, &ad, 1); 448 if (q != NULL) { 449 ad.pfra_fback = PFR_FB_DUPLICATE; 450 goto _skip; 451 } 452 p = pfr_create_kentry(&ad); 453 if (p == NULL) 454 senderr(ENOMEM); 455 if (pfr_route_kentry(tmpkt, p)) { 456 pfr_destroy_kentry(p); 457 ad.pfra_fback = PFR_FB_NONE; 458 } else { 459 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 460 ad.pfra_fback = PFR_FB_ADDED; 461 xadd++; 462 } 463 } 464 _skip: 465 if (flags & PFR_FLAG_FEEDBACK) 466 bcopy(&ad, addr + i, sizeof(ad)); 467 } 468 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); 469 if ((flags & PFR_FLAG_FEEDBACK) && *size2) { 470 if (*size2 < size+xdel) { 471 *size2 = size+xdel; 472 senderr(0); 473 } 474 i = 0; 475 SLIST_FOREACH(p, &delq, pfrke_workq) { 476 pfr_copyout_addr(&ad, p); 477 ad.pfra_fback = PFR_FB_DELETED; 478 bcopy(&ad, addr + size + i, sizeof(ad)); 479 i++; 480 } 481 } 482 pfr_clean_node_mask(tmpkt, &addq); 483 if (!(flags & PFR_FLAG_DUMMY)) { 484 pfr_insert_kentries(kt, &addq, tzero); 485 pfr_remove_kentries(kt, &delq); 486 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); 487 } else 488 pfr_destroy_kentries(&addq); 489 if (nadd != NULL) 490 *nadd = xadd; 491 if (ndel != NULL) 492 *ndel = xdel; 493 if (nchange != NULL) 494 *nchange = xchange; 495 if ((flags & PFR_FLAG_FEEDBACK) && size2) 496 *size2 = size+xdel; 497 pfr_destroy_ktable(tmpkt, 0); 498 return (0); 499 _bad: 500 pfr_clean_node_mask(tmpkt, &addq); 501 pfr_destroy_kentries(&addq); 502 if (flags & PFR_FLAG_FEEDBACK) 503 pfr_reset_feedback(addr, size); 504 pfr_destroy_ktable(tmpkt, 0); 505 return (rv); 506 } 507 508 int 509 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 510 int *nmatch, int flags) 511 { 512 struct pfr_ktable *kt; 513 struct pfr_kentry *p; 514 struct pfr_addr *ad; 515 int i, xmatch = 0; 516 517 PF_RULES_RASSERT(); 518 519 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE); 520 if (pfr_validate_table(tbl, 0, 0)) 521 return (EINVAL); 522 kt = pfr_lookup_table(tbl); 523 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 524 return (ESRCH); 525 526 for (i = 0, ad = addr; i < size; i++, ad++) { 527 if (pfr_validate_addr(ad)) 528 return (EINVAL); 529 if (ADDR_NETWORK(ad)) 530 return (EINVAL); 531 p = pfr_lookup_addr(kt, ad, 0); 532 if (flags & PFR_FLAG_REPLACE) 533 pfr_copyout_addr(ad, p); 534 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE : 535 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); 536 if (p != NULL && !p->pfrke_not) 537 xmatch++; 538 } 539 if (nmatch != NULL) 540 *nmatch = xmatch; 541 return (0); 542 } 543 544 int 545 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 546 int flags) 547 { 548 struct pfr_ktable *kt; 549 struct pfr_walktree w; 550 int rv; 551 552 PF_RULES_RASSERT(); 553 554 ACCEPT_FLAGS(flags, 0); 555 if (pfr_validate_table(tbl, 0, 0)) 556 return (EINVAL); 557 kt = pfr_lookup_table(tbl); 558 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 559 return (ESRCH); 560 if (kt->pfrkt_cnt > *size) { 561 *size = kt->pfrkt_cnt; 562 return (0); 563 } 564 565 bzero(&w, sizeof(w)); 566 w.pfrw_op = PFRW_GET_ADDRS; 567 w.pfrw_addr = addr; 568 w.pfrw_free = kt->pfrkt_cnt; 569 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 570 if (!rv) 571 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 572 pfr_walktree, &w); 573 if (rv) 574 return (rv); 575 576 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__, 577 w.pfrw_free)); 578 579 *size = kt->pfrkt_cnt; 580 return (0); 581 } 582 583 int 584 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 585 int flags) 586 { 587 struct pfr_ktable *kt; 588 struct pfr_walktree w; 589 struct pfr_kentryworkq workq; 590 int rv; 591 long tzero = time_second; 592 593 PF_RULES_RASSERT(); 594 595 /* XXX PFR_FLAG_CLSTATS disabled */ 596 ACCEPT_FLAGS(flags, 0); 597 if (pfr_validate_table(tbl, 0, 0)) 598 return (EINVAL); 599 kt = pfr_lookup_table(tbl); 600 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 601 return (ESRCH); 602 if (kt->pfrkt_cnt > *size) { 603 *size = kt->pfrkt_cnt; 604 return (0); 605 } 606 607 bzero(&w, sizeof(w)); 608 w.pfrw_op = PFRW_GET_ASTATS; 609 w.pfrw_astats = addr; 610 w.pfrw_free = kt->pfrkt_cnt; 611 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 612 if (!rv) 613 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 614 pfr_walktree, &w); 615 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 616 pfr_enqueue_addrs(kt, &workq, NULL, 0); 617 pfr_clstats_kentries(&workq, tzero, 0); 618 } 619 if (rv) 620 return (rv); 621 622 if (w.pfrw_free) { 623 printf("pfr_get_astats: corruption detected (%d).\n", 624 w.pfrw_free); 625 return (ENOTTY); 626 } 627 *size = kt->pfrkt_cnt; 628 return (0); 629 } 630 631 int 632 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 633 int *nzero, int flags) 634 { 635 struct pfr_ktable *kt; 636 struct pfr_kentryworkq workq; 637 struct pfr_kentry *p; 638 struct pfr_addr *ad; 639 int i, rv, xzero = 0; 640 641 PF_RULES_WASSERT(); 642 643 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 644 if (pfr_validate_table(tbl, 0, 0)) 645 return (EINVAL); 646 kt = pfr_lookup_table(tbl); 647 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 648 return (ESRCH); 649 SLIST_INIT(&workq); 650 for (i = 0, ad = addr; i < size; i++, ad++) { 651 if (pfr_validate_addr(ad)) 652 senderr(EINVAL); 653 p = pfr_lookup_addr(kt, ad, 1); 654 if (flags & PFR_FLAG_FEEDBACK) { 655 ad->pfra_fback = (p != NULL) ? 656 PFR_FB_CLEARED : PFR_FB_NONE; 657 } 658 if (p != NULL) { 659 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 660 xzero++; 661 } 662 } 663 664 if (!(flags & PFR_FLAG_DUMMY)) 665 pfr_clstats_kentries(&workq, 0, 0); 666 if (nzero != NULL) 667 *nzero = xzero; 668 return (0); 669 _bad: 670 if (flags & PFR_FLAG_FEEDBACK) 671 pfr_reset_feedback(addr, size); 672 return (rv); 673 } 674 675 static int 676 pfr_validate_addr(struct pfr_addr *ad) 677 { 678 int i; 679 680 switch (ad->pfra_af) { 681 #ifdef INET 682 case AF_INET: 683 if (ad->pfra_net > 32) 684 return (-1); 685 break; 686 #endif /* INET */ 687 #ifdef INET6 688 case AF_INET6: 689 if (ad->pfra_net > 128) 690 return (-1); 691 break; 692 #endif /* INET6 */ 693 default: 694 return (-1); 695 } 696 if (ad->pfra_net < 128 && 697 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 698 return (-1); 699 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 700 if (((caddr_t)ad)[i]) 701 return (-1); 702 if (ad->pfra_not && ad->pfra_not != 1) 703 return (-1); 704 if (ad->pfra_fback) 705 return (-1); 706 return (0); 707 } 708 709 static void 710 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 711 int *naddr, int sweep) 712 { 713 struct pfr_walktree w; 714 715 SLIST_INIT(workq); 716 bzero(&w, sizeof(w)); 717 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE; 718 w.pfrw_workq = workq; 719 if (kt->pfrkt_ip4 != NULL) 720 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, 721 pfr_walktree, &w)) 722 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 723 if (kt->pfrkt_ip6 != NULL) 724 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 725 pfr_walktree, &w)) 726 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 727 if (naddr != NULL) 728 *naddr = w.pfrw_cnt; 729 } 730 731 static void 732 pfr_mark_addrs(struct pfr_ktable *kt) 733 { 734 struct pfr_walktree w; 735 736 bzero(&w, sizeof(w)); 737 w.pfrw_op = PFRW_MARK; 738 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w)) 739 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 740 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w)) 741 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 742 } 743 744 745 static struct pfr_kentry * 746 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 747 { 748 union sockaddr_union sa, mask; 749 struct radix_head *head = NULL; 750 struct pfr_kentry *ke; 751 752 PF_RULES_ASSERT(); 753 754 bzero(&sa, sizeof(sa)); 755 if (ad->pfra_af == AF_INET) { 756 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 757 head = &kt->pfrkt_ip4->rh; 758 } else if ( ad->pfra_af == AF_INET6 ) { 759 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 760 head = &kt->pfrkt_ip6->rh; 761 } 762 if (ADDR_NETWORK(ad)) { 763 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 764 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 765 if (ke && KENTRY_RNF_ROOT(ke)) 766 ke = NULL; 767 } else { 768 ke = (struct pfr_kentry *)rn_match(&sa, head); 769 if (ke && KENTRY_RNF_ROOT(ke)) 770 ke = NULL; 771 if (exact && ke && KENTRY_NETWORK(ke)) 772 ke = NULL; 773 } 774 return (ke); 775 } 776 777 static struct pfr_kentry * 778 pfr_create_kentry(struct pfr_addr *ad) 779 { 780 struct pfr_kentry *ke; 781 782 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); 783 if (ke == NULL) 784 return (NULL); 785 786 if (ad->pfra_af == AF_INET) 787 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 788 else if (ad->pfra_af == AF_INET6) 789 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 790 ke->pfrke_af = ad->pfra_af; 791 ke->pfrke_net = ad->pfra_net; 792 ke->pfrke_not = ad->pfra_not; 793 return (ke); 794 } 795 796 static void 797 pfr_destroy_kentries(struct pfr_kentryworkq *workq) 798 { 799 struct pfr_kentry *p, *q; 800 801 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 802 q = SLIST_NEXT(p, pfrke_workq); 803 pfr_destroy_kentry(p); 804 } 805 } 806 807 static void 808 pfr_destroy_kentry(struct pfr_kentry *ke) 809 { 810 if (ke->pfrke_counters) 811 uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters); 812 uma_zfree(V_pfr_kentry_z, ke); 813 } 814 815 static void 816 pfr_insert_kentries(struct pfr_ktable *kt, 817 struct pfr_kentryworkq *workq, long tzero) 818 { 819 struct pfr_kentry *p; 820 int rv, n = 0; 821 822 SLIST_FOREACH(p, workq, pfrke_workq) { 823 rv = pfr_route_kentry(kt, p); 824 if (rv) { 825 printf("pfr_insert_kentries: cannot route entry " 826 "(code=%d).\n", rv); 827 break; 828 } 829 p->pfrke_tzero = tzero; 830 n++; 831 } 832 kt->pfrkt_cnt += n; 833 } 834 835 int 836 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero) 837 { 838 struct pfr_kentry *p; 839 int rv; 840 841 p = pfr_lookup_addr(kt, ad, 1); 842 if (p != NULL) 843 return (0); 844 p = pfr_create_kentry(ad); 845 if (p == NULL) 846 return (ENOMEM); 847 848 rv = pfr_route_kentry(kt, p); 849 if (rv) 850 return (rv); 851 852 p->pfrke_tzero = tzero; 853 kt->pfrkt_cnt++; 854 855 return (0); 856 } 857 858 static void 859 pfr_remove_kentries(struct pfr_ktable *kt, 860 struct pfr_kentryworkq *workq) 861 { 862 struct pfr_kentry *p; 863 int n = 0; 864 865 SLIST_FOREACH(p, workq, pfrke_workq) { 866 pfr_unroute_kentry(kt, p); 867 n++; 868 } 869 kt->pfrkt_cnt -= n; 870 pfr_destroy_kentries(workq); 871 } 872 873 static void 874 pfr_clean_node_mask(struct pfr_ktable *kt, 875 struct pfr_kentryworkq *workq) 876 { 877 struct pfr_kentry *p; 878 879 SLIST_FOREACH(p, workq, pfrke_workq) 880 pfr_unroute_kentry(kt, p); 881 } 882 883 static void 884 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange) 885 { 886 struct pfr_kentry *p; 887 888 SLIST_FOREACH(p, workq, pfrke_workq) { 889 if (negchange) 890 p->pfrke_not = !p->pfrke_not; 891 if (p->pfrke_counters) { 892 uma_zfree(V_pfr_kcounters_z, p->pfrke_counters); 893 p->pfrke_counters = NULL; 894 } 895 p->pfrke_tzero = tzero; 896 } 897 } 898 899 static void 900 pfr_reset_feedback(struct pfr_addr *addr, int size) 901 { 902 struct pfr_addr *ad; 903 int i; 904 905 for (i = 0, ad = addr; i < size; i++, ad++) 906 ad->pfra_fback = PFR_FB_NONE; 907 } 908 909 static void 910 pfr_prepare_network(union sockaddr_union *sa, int af, int net) 911 { 912 int i; 913 914 bzero(sa, sizeof(*sa)); 915 if (af == AF_INET) { 916 sa->sin.sin_len = sizeof(sa->sin); 917 sa->sin.sin_family = AF_INET; 918 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; 919 } else if (af == AF_INET6) { 920 sa->sin6.sin6_len = sizeof(sa->sin6); 921 sa->sin6.sin6_family = AF_INET6; 922 for (i = 0; i < 4; i++) { 923 if (net <= 32) { 924 sa->sin6.sin6_addr.s6_addr32[i] = 925 net ? htonl(-1 << (32-net)) : 0; 926 break; 927 } 928 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 929 net -= 32; 930 } 931 } 932 } 933 934 static int 935 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 936 { 937 union sockaddr_union mask; 938 struct radix_node *rn; 939 struct radix_head *head = NULL; 940 941 PF_RULES_WASSERT(); 942 943 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 944 if (ke->pfrke_af == AF_INET) 945 head = &kt->pfrkt_ip4->rh; 946 else if (ke->pfrke_af == AF_INET6) 947 head = &kt->pfrkt_ip6->rh; 948 949 if (KENTRY_NETWORK(ke)) { 950 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 951 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 952 } else 953 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 954 955 return (rn == NULL ? -1 : 0); 956 } 957 958 static int 959 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 960 { 961 union sockaddr_union mask; 962 struct radix_node *rn; 963 struct radix_head *head = NULL; 964 965 if (ke->pfrke_af == AF_INET) 966 head = &kt->pfrkt_ip4->rh; 967 else if (ke->pfrke_af == AF_INET6) 968 head = &kt->pfrkt_ip6->rh; 969 970 if (KENTRY_NETWORK(ke)) { 971 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 972 rn = rn_delete(&ke->pfrke_sa, &mask, head); 973 } else 974 rn = rn_delete(&ke->pfrke_sa, NULL, head); 975 976 if (rn == NULL) { 977 printf("pfr_unroute_kentry: delete failed.\n"); 978 return (-1); 979 } 980 return (0); 981 } 982 983 static void 984 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) 985 { 986 bzero(ad, sizeof(*ad)); 987 if (ke == NULL) 988 return; 989 ad->pfra_af = ke->pfrke_af; 990 ad->pfra_net = ke->pfrke_net; 991 ad->pfra_not = ke->pfrke_not; 992 if (ad->pfra_af == AF_INET) 993 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 994 else if (ad->pfra_af == AF_INET6) 995 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 996 } 997 998 static int 999 pfr_walktree(struct radix_node *rn, void *arg) 1000 { 1001 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1002 struct pfr_walktree *w = arg; 1003 1004 switch (w->pfrw_op) { 1005 case PFRW_MARK: 1006 ke->pfrke_mark = 0; 1007 break; 1008 case PFRW_SWEEP: 1009 if (ke->pfrke_mark) 1010 break; 1011 /* FALLTHROUGH */ 1012 case PFRW_ENQUEUE: 1013 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1014 w->pfrw_cnt++; 1015 break; 1016 case PFRW_GET_ADDRS: 1017 if (w->pfrw_free-- > 0) { 1018 pfr_copyout_addr(w->pfrw_addr, ke); 1019 w->pfrw_addr++; 1020 } 1021 break; 1022 case PFRW_GET_ASTATS: 1023 if (w->pfrw_free-- > 0) { 1024 struct pfr_astats as; 1025 1026 pfr_copyout_addr(&as.pfras_a, ke); 1027 1028 if (ke->pfrke_counters) { 1029 bcopy(ke->pfrke_counters->pfrkc_packets, 1030 as.pfras_packets, sizeof(as.pfras_packets)); 1031 bcopy(ke->pfrke_counters->pfrkc_bytes, 1032 as.pfras_bytes, sizeof(as.pfras_bytes)); 1033 } else { 1034 bzero(as.pfras_packets, sizeof(as.pfras_packets)); 1035 bzero(as.pfras_bytes, sizeof(as.pfras_bytes)); 1036 as.pfras_a.pfra_fback = PFR_FB_NOCOUNT; 1037 } 1038 as.pfras_tzero = ke->pfrke_tzero; 1039 1040 bcopy(&as, w->pfrw_astats, sizeof(as)); 1041 w->pfrw_astats++; 1042 } 1043 break; 1044 case PFRW_POOL_GET: 1045 if (ke->pfrke_not) 1046 break; /* negative entries are ignored */ 1047 if (!w->pfrw_cnt--) { 1048 w->pfrw_kentry = ke; 1049 return (1); /* finish search */ 1050 } 1051 break; 1052 case PFRW_DYNADDR_UPDATE: 1053 { 1054 union sockaddr_union pfr_mask; 1055 1056 if (ke->pfrke_af == AF_INET) { 1057 if (w->pfrw_dyn->pfid_acnt4++ > 0) 1058 break; 1059 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); 1060 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa, 1061 AF_INET); 1062 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask, 1063 AF_INET); 1064 } else if (ke->pfrke_af == AF_INET6){ 1065 if (w->pfrw_dyn->pfid_acnt6++ > 0) 1066 break; 1067 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); 1068 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa, 1069 AF_INET6); 1070 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask, 1071 AF_INET6); 1072 } 1073 break; 1074 } 1075 } 1076 return (0); 1077 } 1078 1079 int 1080 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1081 { 1082 struct pfr_ktableworkq workq; 1083 struct pfr_ktable *p; 1084 int xdel = 0; 1085 1086 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS); 1087 if (pfr_fix_anchor(filter->pfrt_anchor)) 1088 return (EINVAL); 1089 if (pfr_table_count(filter, flags) < 0) 1090 return (ENOENT); 1091 1092 SLIST_INIT(&workq); 1093 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1094 if (pfr_skip_table(filter, p, flags)) 1095 continue; 1096 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) 1097 continue; 1098 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1099 continue; 1100 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1101 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1102 xdel++; 1103 } 1104 if (!(flags & PFR_FLAG_DUMMY)) 1105 pfr_setflags_ktables(&workq); 1106 if (ndel != NULL) 1107 *ndel = xdel; 1108 return (0); 1109 } 1110 1111 int 1112 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1113 { 1114 struct pfr_ktableworkq addq, changeq; 1115 struct pfr_ktable *p, *q, *r, key; 1116 int i, rv, xadd = 0; 1117 long tzero = time_second; 1118 1119 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1120 SLIST_INIT(&addq); 1121 SLIST_INIT(&changeq); 1122 for (i = 0; i < size; i++) { 1123 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1124 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, 1125 flags & PFR_FLAG_USERIOCTL)) 1126 senderr(EINVAL); 1127 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1128 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1129 if (p == NULL) { 1130 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1131 if (p == NULL) 1132 senderr(ENOMEM); 1133 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1134 if (!pfr_ktable_compare(p, q)) { 1135 pfr_destroy_ktable(p, 0); 1136 goto _skip; 1137 } 1138 } 1139 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1140 xadd++; 1141 if (!key.pfrkt_anchor[0]) 1142 goto _skip; 1143 1144 /* find or create root table */ 1145 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1146 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1147 if (r != NULL) { 1148 p->pfrkt_root = r; 1149 goto _skip; 1150 } 1151 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1152 if (!pfr_ktable_compare(&key, q)) { 1153 p->pfrkt_root = q; 1154 goto _skip; 1155 } 1156 } 1157 key.pfrkt_flags = 0; 1158 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1159 if (r == NULL) 1160 senderr(ENOMEM); 1161 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1162 p->pfrkt_root = r; 1163 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1164 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1165 if (!pfr_ktable_compare(&key, q)) 1166 goto _skip; 1167 p->pfrkt_nflags = (p->pfrkt_flags & 1168 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1169 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1170 xadd++; 1171 } 1172 _skip: 1173 ; 1174 } 1175 if (!(flags & PFR_FLAG_DUMMY)) { 1176 pfr_insert_ktables(&addq); 1177 pfr_setflags_ktables(&changeq); 1178 } else 1179 pfr_destroy_ktables(&addq, 0); 1180 if (nadd != NULL) 1181 *nadd = xadd; 1182 return (0); 1183 _bad: 1184 pfr_destroy_ktables(&addq, 0); 1185 return (rv); 1186 } 1187 1188 int 1189 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1190 { 1191 struct pfr_ktableworkq workq; 1192 struct pfr_ktable *p, *q, key; 1193 int i, xdel = 0; 1194 1195 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1196 SLIST_INIT(&workq); 1197 for (i = 0; i < size; i++) { 1198 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1199 if (pfr_validate_table(&key.pfrkt_t, 0, 1200 flags & PFR_FLAG_USERIOCTL)) 1201 return (EINVAL); 1202 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1203 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1204 SLIST_FOREACH(q, &workq, pfrkt_workq) 1205 if (!pfr_ktable_compare(p, q)) 1206 goto _skip; 1207 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1208 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1209 xdel++; 1210 } 1211 _skip: 1212 ; 1213 } 1214 1215 if (!(flags & PFR_FLAG_DUMMY)) 1216 pfr_setflags_ktables(&workq); 1217 if (ndel != NULL) 1218 *ndel = xdel; 1219 return (0); 1220 } 1221 1222 int 1223 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1224 int flags) 1225 { 1226 struct pfr_ktable *p; 1227 int n, nn; 1228 1229 PF_RULES_RASSERT(); 1230 1231 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1232 if (pfr_fix_anchor(filter->pfrt_anchor)) 1233 return (EINVAL); 1234 n = nn = pfr_table_count(filter, flags); 1235 if (n < 0) 1236 return (ENOENT); 1237 if (n > *size) { 1238 *size = n; 1239 return (0); 1240 } 1241 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1242 if (pfr_skip_table(filter, p, flags)) 1243 continue; 1244 if (n-- <= 0) 1245 continue; 1246 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl)); 1247 } 1248 1249 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1250 1251 *size = nn; 1252 return (0); 1253 } 1254 1255 int 1256 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1257 int flags) 1258 { 1259 struct pfr_ktable *p; 1260 struct pfr_ktableworkq workq; 1261 int n, nn; 1262 long tzero = time_second; 1263 1264 /* XXX PFR_FLAG_CLSTATS disabled */ 1265 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1266 if (pfr_fix_anchor(filter->pfrt_anchor)) 1267 return (EINVAL); 1268 n = nn = pfr_table_count(filter, flags); 1269 if (n < 0) 1270 return (ENOENT); 1271 if (n > *size) { 1272 *size = n; 1273 return (0); 1274 } 1275 SLIST_INIT(&workq); 1276 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1277 if (pfr_skip_table(filter, p, flags)) 1278 continue; 1279 if (n-- <= 0) 1280 continue; 1281 bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl)); 1282 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1283 } 1284 if (flags & PFR_FLAG_CLSTATS) 1285 pfr_clstats_ktables(&workq, tzero, 1286 flags & PFR_FLAG_ADDRSTOO); 1287 1288 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1289 1290 *size = nn; 1291 return (0); 1292 } 1293 1294 int 1295 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1296 { 1297 struct pfr_ktableworkq workq; 1298 struct pfr_ktable *p, key; 1299 int i, xzero = 0; 1300 long tzero = time_second; 1301 1302 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1303 SLIST_INIT(&workq); 1304 for (i = 0; i < size; i++) { 1305 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1306 if (pfr_validate_table(&key.pfrkt_t, 0, 0)) 1307 return (EINVAL); 1308 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1309 if (p != NULL) { 1310 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1311 xzero++; 1312 } 1313 } 1314 if (!(flags & PFR_FLAG_DUMMY)) 1315 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1316 if (nzero != NULL) 1317 *nzero = xzero; 1318 return (0); 1319 } 1320 1321 int 1322 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1323 int *nchange, int *ndel, int flags) 1324 { 1325 struct pfr_ktableworkq workq; 1326 struct pfr_ktable *p, *q, key; 1327 int i, xchange = 0, xdel = 0; 1328 1329 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1330 if ((setflag & ~PFR_TFLAG_USRMASK) || 1331 (clrflag & ~PFR_TFLAG_USRMASK) || 1332 (setflag & clrflag)) 1333 return (EINVAL); 1334 SLIST_INIT(&workq); 1335 for (i = 0; i < size; i++) { 1336 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1337 if (pfr_validate_table(&key.pfrkt_t, 0, 1338 flags & PFR_FLAG_USERIOCTL)) 1339 return (EINVAL); 1340 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1341 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1342 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1343 ~clrflag; 1344 if (p->pfrkt_nflags == p->pfrkt_flags) 1345 goto _skip; 1346 SLIST_FOREACH(q, &workq, pfrkt_workq) 1347 if (!pfr_ktable_compare(p, q)) 1348 goto _skip; 1349 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1350 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1351 (clrflag & PFR_TFLAG_PERSIST) && 1352 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1353 xdel++; 1354 else 1355 xchange++; 1356 } 1357 _skip: 1358 ; 1359 } 1360 if (!(flags & PFR_FLAG_DUMMY)) 1361 pfr_setflags_ktables(&workq); 1362 if (nchange != NULL) 1363 *nchange = xchange; 1364 if (ndel != NULL) 1365 *ndel = xdel; 1366 return (0); 1367 } 1368 1369 int 1370 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1371 { 1372 struct pfr_ktableworkq workq; 1373 struct pfr_ktable *p; 1374 struct pf_ruleset *rs; 1375 int xdel = 0; 1376 1377 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1378 rs = pf_find_or_create_ruleset(trs->pfrt_anchor); 1379 if (rs == NULL) 1380 return (ENOMEM); 1381 SLIST_INIT(&workq); 1382 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1383 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1384 pfr_skip_table(trs, p, 0)) 1385 continue; 1386 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1387 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1388 xdel++; 1389 } 1390 if (!(flags & PFR_FLAG_DUMMY)) { 1391 pfr_setflags_ktables(&workq); 1392 if (ticket != NULL) 1393 *ticket = ++rs->tticket; 1394 rs->topen = 1; 1395 } else 1396 pf_remove_if_empty_ruleset(rs); 1397 if (ndel != NULL) 1398 *ndel = xdel; 1399 return (0); 1400 } 1401 1402 int 1403 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1404 int *nadd, int *naddr, u_int32_t ticket, int flags) 1405 { 1406 struct pfr_ktableworkq tableq; 1407 struct pfr_kentryworkq addrq; 1408 struct pfr_ktable *kt, *rt, *shadow, key; 1409 struct pfr_kentry *p; 1410 struct pfr_addr *ad; 1411 struct pf_ruleset *rs; 1412 int i, rv, xadd = 0, xaddr = 0; 1413 1414 PF_RULES_WASSERT(); 1415 1416 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1417 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1418 return (EINVAL); 1419 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, 1420 flags & PFR_FLAG_USERIOCTL)) 1421 return (EINVAL); 1422 rs = pf_find_ruleset(tbl->pfrt_anchor); 1423 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1424 return (EBUSY); 1425 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1426 SLIST_INIT(&tableq); 1427 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl); 1428 if (kt == NULL) { 1429 kt = pfr_create_ktable(tbl, 0, 1); 1430 if (kt == NULL) 1431 return (ENOMEM); 1432 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1433 xadd++; 1434 if (!tbl->pfrt_anchor[0]) 1435 goto _skip; 1436 1437 /* find or create root table */ 1438 bzero(&key, sizeof(key)); 1439 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1440 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1441 if (rt != NULL) { 1442 kt->pfrkt_root = rt; 1443 goto _skip; 1444 } 1445 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1446 if (rt == NULL) { 1447 pfr_destroy_ktables(&tableq, 0); 1448 return (ENOMEM); 1449 } 1450 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1451 kt->pfrkt_root = rt; 1452 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) 1453 xadd++; 1454 _skip: 1455 shadow = pfr_create_ktable(tbl, 0, 0); 1456 if (shadow == NULL) { 1457 pfr_destroy_ktables(&tableq, 0); 1458 return (ENOMEM); 1459 } 1460 SLIST_INIT(&addrq); 1461 for (i = 0, ad = addr; i < size; i++, ad++) { 1462 if (pfr_validate_addr(ad)) 1463 senderr(EINVAL); 1464 if (pfr_lookup_addr(shadow, ad, 1) != NULL) 1465 continue; 1466 p = pfr_create_kentry(ad); 1467 if (p == NULL) 1468 senderr(ENOMEM); 1469 if (pfr_route_kentry(shadow, p)) { 1470 pfr_destroy_kentry(p); 1471 continue; 1472 } 1473 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1474 xaddr++; 1475 } 1476 if (!(flags & PFR_FLAG_DUMMY)) { 1477 if (kt->pfrkt_shadow != NULL) 1478 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1479 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1480 pfr_insert_ktables(&tableq); 1481 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 1482 xaddr : NO_ADDRESSES; 1483 kt->pfrkt_shadow = shadow; 1484 } else { 1485 pfr_clean_node_mask(shadow, &addrq); 1486 pfr_destroy_ktable(shadow, 0); 1487 pfr_destroy_ktables(&tableq, 0); 1488 pfr_destroy_kentries(&addrq); 1489 } 1490 if (nadd != NULL) 1491 *nadd = xadd; 1492 if (naddr != NULL) 1493 *naddr = xaddr; 1494 return (0); 1495 _bad: 1496 pfr_destroy_ktable(shadow, 0); 1497 pfr_destroy_ktables(&tableq, 0); 1498 pfr_destroy_kentries(&addrq); 1499 return (rv); 1500 } 1501 1502 int 1503 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) 1504 { 1505 struct pfr_ktableworkq workq; 1506 struct pfr_ktable *p; 1507 struct pf_ruleset *rs; 1508 int xdel = 0; 1509 1510 PF_RULES_WASSERT(); 1511 1512 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1513 rs = pf_find_ruleset(trs->pfrt_anchor); 1514 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1515 return (0); 1516 SLIST_INIT(&workq); 1517 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1518 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1519 pfr_skip_table(trs, p, 0)) 1520 continue; 1521 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1522 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1523 xdel++; 1524 } 1525 if (!(flags & PFR_FLAG_DUMMY)) { 1526 pfr_setflags_ktables(&workq); 1527 rs->topen = 0; 1528 pf_remove_if_empty_ruleset(rs); 1529 } 1530 if (ndel != NULL) 1531 *ndel = xdel; 1532 return (0); 1533 } 1534 1535 int 1536 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1537 int *nchange, int flags) 1538 { 1539 struct pfr_ktable *p, *q; 1540 struct pfr_ktableworkq workq; 1541 struct pf_ruleset *rs; 1542 int xadd = 0, xchange = 0; 1543 long tzero = time_second; 1544 1545 PF_RULES_WASSERT(); 1546 1547 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1548 rs = pf_find_ruleset(trs->pfrt_anchor); 1549 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1550 return (EBUSY); 1551 1552 SLIST_INIT(&workq); 1553 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1554 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1555 pfr_skip_table(trs, p, 0)) 1556 continue; 1557 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1558 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1559 xchange++; 1560 else 1561 xadd++; 1562 } 1563 1564 if (!(flags & PFR_FLAG_DUMMY)) { 1565 for (p = SLIST_FIRST(&workq); p != NULL; p = q) { 1566 q = SLIST_NEXT(p, pfrkt_workq); 1567 pfr_commit_ktable(p, tzero); 1568 } 1569 rs->topen = 0; 1570 pf_remove_if_empty_ruleset(rs); 1571 } 1572 if (nadd != NULL) 1573 *nadd = xadd; 1574 if (nchange != NULL) 1575 *nchange = xchange; 1576 1577 return (0); 1578 } 1579 1580 static void 1581 pfr_commit_ktable(struct pfr_ktable *kt, long tzero) 1582 { 1583 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1584 int nflags; 1585 1586 PF_RULES_WASSERT(); 1587 1588 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1589 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1590 pfr_clstats_ktable(kt, tzero, 1); 1591 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1592 /* kt might contain addresses */ 1593 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1594 struct pfr_kentry *p, *q, *next; 1595 struct pfr_addr ad; 1596 1597 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1598 pfr_mark_addrs(kt); 1599 SLIST_INIT(&addq); 1600 SLIST_INIT(&changeq); 1601 SLIST_INIT(&delq); 1602 SLIST_INIT(&garbageq); 1603 pfr_clean_node_mask(shadow, &addrq); 1604 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) { 1605 next = SLIST_NEXT(p, pfrke_workq); /* XXX */ 1606 pfr_copyout_addr(&ad, p); 1607 q = pfr_lookup_addr(kt, &ad, 1); 1608 if (q != NULL) { 1609 if (q->pfrke_not != p->pfrke_not) 1610 SLIST_INSERT_HEAD(&changeq, q, 1611 pfrke_workq); 1612 q->pfrke_mark = 1; 1613 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1614 } else { 1615 p->pfrke_tzero = tzero; 1616 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1617 } 1618 } 1619 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1620 pfr_insert_kentries(kt, &addq, tzero); 1621 pfr_remove_kentries(kt, &delq); 1622 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); 1623 pfr_destroy_kentries(&garbageq); 1624 } else { 1625 /* kt cannot contain addresses */ 1626 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1627 shadow->pfrkt_ip4); 1628 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1629 shadow->pfrkt_ip6); 1630 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1631 pfr_clstats_ktable(kt, tzero, 1); 1632 } 1633 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1634 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1635 & ~PFR_TFLAG_INACTIVE; 1636 pfr_destroy_ktable(shadow, 0); 1637 kt->pfrkt_shadow = NULL; 1638 pfr_setflags_ktable(kt, nflags); 1639 } 1640 1641 static int 1642 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) 1643 { 1644 int i; 1645 1646 if (!tbl->pfrt_name[0]) 1647 return (-1); 1648 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) 1649 return (-1); 1650 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1651 return (-1); 1652 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1653 if (tbl->pfrt_name[i]) 1654 return (-1); 1655 if (pfr_fix_anchor(tbl->pfrt_anchor)) 1656 return (-1); 1657 if (tbl->pfrt_flags & ~allowedflags) 1658 return (-1); 1659 return (0); 1660 } 1661 1662 /* 1663 * Rewrite anchors referenced by tables to remove slashes 1664 * and check for validity. 1665 */ 1666 static int 1667 pfr_fix_anchor(char *anchor) 1668 { 1669 size_t siz = MAXPATHLEN; 1670 int i; 1671 1672 if (anchor[0] == '/') { 1673 char *path; 1674 int off; 1675 1676 path = anchor; 1677 off = 1; 1678 while (*++path == '/') 1679 off++; 1680 bcopy(path, anchor, siz - off); 1681 memset(anchor + siz - off, 0, off); 1682 } 1683 if (anchor[siz - 1]) 1684 return (-1); 1685 for (i = strlen(anchor); i < siz; i++) 1686 if (anchor[i]) 1687 return (-1); 1688 return (0); 1689 } 1690 1691 int 1692 pfr_table_count(struct pfr_table *filter, int flags) 1693 { 1694 struct pf_ruleset *rs; 1695 1696 PF_RULES_ASSERT(); 1697 1698 if (flags & PFR_FLAG_ALLRSETS) 1699 return (V_pfr_ktable_cnt); 1700 if (filter->pfrt_anchor[0]) { 1701 rs = pf_find_ruleset(filter->pfrt_anchor); 1702 return ((rs != NULL) ? rs->tables : -1); 1703 } 1704 return (pf_main_ruleset.tables); 1705 } 1706 1707 static int 1708 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1709 { 1710 if (flags & PFR_FLAG_ALLRSETS) 1711 return (0); 1712 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) 1713 return (1); 1714 return (0); 1715 } 1716 1717 static void 1718 pfr_insert_ktables(struct pfr_ktableworkq *workq) 1719 { 1720 struct pfr_ktable *p; 1721 1722 SLIST_FOREACH(p, workq, pfrkt_workq) 1723 pfr_insert_ktable(p); 1724 } 1725 1726 static void 1727 pfr_insert_ktable(struct pfr_ktable *kt) 1728 { 1729 1730 PF_RULES_WASSERT(); 1731 1732 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt); 1733 V_pfr_ktable_cnt++; 1734 if (kt->pfrkt_root != NULL) 1735 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1736 pfr_setflags_ktable(kt->pfrkt_root, 1737 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1738 } 1739 1740 static void 1741 pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1742 { 1743 struct pfr_ktable *p, *q; 1744 1745 for (p = SLIST_FIRST(workq); p; p = q) { 1746 q = SLIST_NEXT(p, pfrkt_workq); 1747 pfr_setflags_ktable(p, p->pfrkt_nflags); 1748 } 1749 } 1750 1751 static void 1752 pfr_setflags_ktable(struct pfr_ktable *kt, int newf) 1753 { 1754 struct pfr_kentryworkq addrq; 1755 1756 PF_RULES_WASSERT(); 1757 1758 if (!(newf & PFR_TFLAG_REFERENCED) && 1759 !(newf & PFR_TFLAG_REFDANCHOR) && 1760 !(newf & PFR_TFLAG_PERSIST)) 1761 newf &= ~PFR_TFLAG_ACTIVE; 1762 if (!(newf & PFR_TFLAG_ACTIVE)) 1763 newf &= ~PFR_TFLAG_USRMASK; 1764 if (!(newf & PFR_TFLAG_SETMASK)) { 1765 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt); 1766 if (kt->pfrkt_root != NULL) 1767 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1768 pfr_setflags_ktable(kt->pfrkt_root, 1769 kt->pfrkt_root->pfrkt_flags & 1770 ~PFR_TFLAG_REFDANCHOR); 1771 pfr_destroy_ktable(kt, 1); 1772 V_pfr_ktable_cnt--; 1773 return; 1774 } 1775 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1776 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1777 pfr_remove_kentries(kt, &addrq); 1778 } 1779 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1780 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1781 kt->pfrkt_shadow = NULL; 1782 } 1783 kt->pfrkt_flags = newf; 1784 } 1785 1786 static void 1787 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1788 { 1789 struct pfr_ktable *p; 1790 1791 SLIST_FOREACH(p, workq, pfrkt_workq) 1792 pfr_clstats_ktable(p, tzero, recurse); 1793 } 1794 1795 static void 1796 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1797 { 1798 struct pfr_kentryworkq addrq; 1799 1800 if (recurse) { 1801 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1802 pfr_clstats_kentries(&addrq, tzero, 0); 1803 } 1804 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets)); 1805 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes)); 1806 kt->pfrkt_match = kt->pfrkt_nomatch = 0; 1807 kt->pfrkt_tzero = tzero; 1808 } 1809 1810 static struct pfr_ktable * 1811 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 1812 { 1813 struct pfr_ktable *kt; 1814 struct pf_ruleset *rs; 1815 1816 PF_RULES_WASSERT(); 1817 1818 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO); 1819 if (kt == NULL) 1820 return (NULL); 1821 kt->pfrkt_t = *tbl; 1822 1823 if (attachruleset) { 1824 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor); 1825 if (!rs) { 1826 pfr_destroy_ktable(kt, 0); 1827 return (NULL); 1828 } 1829 kt->pfrkt_rs = rs; 1830 rs->tables++; 1831 } 1832 1833 if (!rn_inithead((void **)&kt->pfrkt_ip4, 1834 offsetof(struct sockaddr_in, sin_addr) * 8) || 1835 !rn_inithead((void **)&kt->pfrkt_ip6, 1836 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 1837 pfr_destroy_ktable(kt, 0); 1838 return (NULL); 1839 } 1840 kt->pfrkt_tzero = tzero; 1841 1842 return (kt); 1843 } 1844 1845 static void 1846 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 1847 { 1848 struct pfr_ktable *p, *q; 1849 1850 for (p = SLIST_FIRST(workq); p; p = q) { 1851 q = SLIST_NEXT(p, pfrkt_workq); 1852 pfr_destroy_ktable(p, flushaddr); 1853 } 1854 } 1855 1856 static void 1857 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 1858 { 1859 struct pfr_kentryworkq addrq; 1860 1861 if (flushaddr) { 1862 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1863 pfr_clean_node_mask(kt, &addrq); 1864 pfr_destroy_kentries(&addrq); 1865 } 1866 if (kt->pfrkt_ip4 != NULL) 1867 rn_detachhead((void **)&kt->pfrkt_ip4); 1868 if (kt->pfrkt_ip6 != NULL) 1869 rn_detachhead((void **)&kt->pfrkt_ip6); 1870 if (kt->pfrkt_shadow != NULL) 1871 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 1872 if (kt->pfrkt_rs != NULL) { 1873 kt->pfrkt_rs->tables--; 1874 pf_remove_if_empty_ruleset(kt->pfrkt_rs); 1875 } 1876 free(kt, M_PFTABLE); 1877 } 1878 1879 static int 1880 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) 1881 { 1882 int d; 1883 1884 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 1885 return (d); 1886 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); 1887 } 1888 1889 static struct pfr_ktable * 1890 pfr_lookup_table(struct pfr_table *tbl) 1891 { 1892 /* struct pfr_ktable start like a struct pfr_table */ 1893 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables, 1894 (struct pfr_ktable *)tbl)); 1895 } 1896 1897 int 1898 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 1899 { 1900 struct pfr_kentry *ke = NULL; 1901 int match; 1902 1903 PF_RULES_RASSERT(); 1904 1905 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 1906 kt = kt->pfrkt_root; 1907 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1908 return (0); 1909 1910 switch (af) { 1911 #ifdef INET 1912 case AF_INET: 1913 { 1914 struct sockaddr_in sin; 1915 1916 bzero(&sin, sizeof(sin)); 1917 sin.sin_len = sizeof(sin); 1918 sin.sin_family = AF_INET; 1919 sin.sin_addr.s_addr = a->addr32[0]; 1920 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 1921 if (ke && KENTRY_RNF_ROOT(ke)) 1922 ke = NULL; 1923 break; 1924 } 1925 #endif /* INET */ 1926 #ifdef INET6 1927 case AF_INET6: 1928 { 1929 struct sockaddr_in6 sin6; 1930 1931 bzero(&sin6, sizeof(sin6)); 1932 sin6.sin6_len = sizeof(sin6); 1933 sin6.sin6_family = AF_INET6; 1934 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 1935 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 1936 if (ke && KENTRY_RNF_ROOT(ke)) 1937 ke = NULL; 1938 break; 1939 } 1940 #endif /* INET6 */ 1941 } 1942 match = (ke && !ke->pfrke_not); 1943 if (match) 1944 kt->pfrkt_match++; 1945 else 1946 kt->pfrkt_nomatch++; 1947 return (match); 1948 } 1949 1950 void 1951 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 1952 u_int64_t len, int dir_out, int op_pass, int notrule) 1953 { 1954 struct pfr_kentry *ke = NULL; 1955 1956 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 1957 kt = kt->pfrkt_root; 1958 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1959 return; 1960 1961 switch (af) { 1962 #ifdef INET 1963 case AF_INET: 1964 { 1965 struct sockaddr_in sin; 1966 1967 bzero(&sin, sizeof(sin)); 1968 sin.sin_len = sizeof(sin); 1969 sin.sin_family = AF_INET; 1970 sin.sin_addr.s_addr = a->addr32[0]; 1971 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 1972 if (ke && KENTRY_RNF_ROOT(ke)) 1973 ke = NULL; 1974 break; 1975 } 1976 #endif /* INET */ 1977 #ifdef INET6 1978 case AF_INET6: 1979 { 1980 struct sockaddr_in6 sin6; 1981 1982 bzero(&sin6, sizeof(sin6)); 1983 sin6.sin6_len = sizeof(sin6); 1984 sin6.sin6_family = AF_INET6; 1985 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 1986 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 1987 if (ke && KENTRY_RNF_ROOT(ke)) 1988 ke = NULL; 1989 break; 1990 } 1991 #endif /* INET6 */ 1992 default: 1993 panic("%s: unknown address family %u", __func__, af); 1994 } 1995 if ((ke == NULL || ke->pfrke_not) != notrule) { 1996 if (op_pass != PFR_OP_PASS) 1997 DPFPRINTF(PF_DEBUG_URGENT, 1998 ("pfr_update_stats: assertion failed.\n")); 1999 op_pass = PFR_OP_XPASS; 2000 } 2001 kt->pfrkt_packets[dir_out][op_pass]++; 2002 kt->pfrkt_bytes[dir_out][op_pass] += len; 2003 if (ke != NULL && op_pass != PFR_OP_XPASS && 2004 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 2005 if (ke->pfrke_counters == NULL) 2006 ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z, 2007 M_NOWAIT | M_ZERO); 2008 if (ke->pfrke_counters != NULL) { 2009 ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++; 2010 ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len; 2011 } 2012 } 2013 } 2014 2015 struct pfr_ktable * 2016 pfr_attach_table(struct pf_ruleset *rs, char *name) 2017 { 2018 struct pfr_ktable *kt, *rt; 2019 struct pfr_table tbl; 2020 struct pf_anchor *ac = rs->anchor; 2021 2022 PF_RULES_WASSERT(); 2023 2024 bzero(&tbl, sizeof(tbl)); 2025 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); 2026 if (ac != NULL) 2027 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor)); 2028 kt = pfr_lookup_table(&tbl); 2029 if (kt == NULL) { 2030 kt = pfr_create_ktable(&tbl, time_second, 1); 2031 if (kt == NULL) 2032 return (NULL); 2033 if (ac != NULL) { 2034 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); 2035 rt = pfr_lookup_table(&tbl); 2036 if (rt == NULL) { 2037 rt = pfr_create_ktable(&tbl, 0, 1); 2038 if (rt == NULL) { 2039 pfr_destroy_ktable(kt, 0); 2040 return (NULL); 2041 } 2042 pfr_insert_ktable(rt); 2043 } 2044 kt->pfrkt_root = rt; 2045 } 2046 pfr_insert_ktable(kt); 2047 } 2048 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) 2049 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED); 2050 return (kt); 2051 } 2052 2053 void 2054 pfr_detach_table(struct pfr_ktable *kt) 2055 { 2056 2057 PF_RULES_WASSERT(); 2058 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n", 2059 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE])); 2060 2061 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2062 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2063 } 2064 2065 int 2066 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, 2067 sa_family_t af) 2068 { 2069 struct pf_addr *addr, *cur, *mask; 2070 union sockaddr_union uaddr, umask; 2071 struct pfr_kentry *ke, *ke2 = NULL; 2072 int idx = -1, use_counter = 0; 2073 2074 switch (af) { 2075 case AF_INET: 2076 uaddr.sin.sin_len = sizeof(struct sockaddr_in); 2077 uaddr.sin.sin_family = AF_INET; 2078 break; 2079 case AF_INET6: 2080 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6); 2081 uaddr.sin6.sin6_family = AF_INET6; 2082 break; 2083 } 2084 addr = SUNION2PF(&uaddr, af); 2085 2086 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2087 kt = kt->pfrkt_root; 2088 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2089 return (-1); 2090 2091 if (pidx != NULL) 2092 idx = *pidx; 2093 if (counter != NULL && idx >= 0) 2094 use_counter = 1; 2095 if (idx < 0) 2096 idx = 0; 2097 2098 _next_block: 2099 ke = pfr_kentry_byidx(kt, idx, af); 2100 if (ke == NULL) { 2101 kt->pfrkt_nomatch++; 2102 return (1); 2103 } 2104 pfr_prepare_network(&umask, af, ke->pfrke_net); 2105 cur = SUNION2PF(&ke->pfrke_sa, af); 2106 mask = SUNION2PF(&umask, af); 2107 2108 if (use_counter) { 2109 /* is supplied address within block? */ 2110 if (!PF_MATCHA(0, cur, mask, counter, af)) { 2111 /* no, go to next block in table */ 2112 idx++; 2113 use_counter = 0; 2114 goto _next_block; 2115 } 2116 PF_ACPY(addr, counter, af); 2117 } else { 2118 /* use first address of block */ 2119 PF_ACPY(addr, cur, af); 2120 } 2121 2122 if (!KENTRY_NETWORK(ke)) { 2123 /* this is a single IP address - no possible nested block */ 2124 PF_ACPY(counter, addr, af); 2125 *pidx = idx; 2126 kt->pfrkt_match++; 2127 return (0); 2128 } 2129 for (;;) { 2130 /* we don't want to use a nested block */ 2131 switch (af) { 2132 case AF_INET: 2133 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2134 &kt->pfrkt_ip4->rh); 2135 break; 2136 case AF_INET6: 2137 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2138 &kt->pfrkt_ip6->rh); 2139 break; 2140 } 2141 /* no need to check KENTRY_RNF_ROOT() here */ 2142 if (ke2 == ke) { 2143 /* lookup return the same block - perfect */ 2144 PF_ACPY(counter, addr, af); 2145 *pidx = idx; 2146 kt->pfrkt_match++; 2147 return (0); 2148 } 2149 2150 /* we need to increase the counter past the nested block */ 2151 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net); 2152 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af); 2153 PF_AINC(addr, af); 2154 if (!PF_MATCHA(0, cur, mask, addr, af)) { 2155 /* ok, we reached the end of our main block */ 2156 /* go to next block in table */ 2157 idx++; 2158 use_counter = 0; 2159 goto _next_block; 2160 } 2161 } 2162 } 2163 2164 static struct pfr_kentry * 2165 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) 2166 { 2167 struct pfr_walktree w; 2168 2169 bzero(&w, sizeof(w)); 2170 w.pfrw_op = PFRW_POOL_GET; 2171 w.pfrw_cnt = idx; 2172 2173 switch (af) { 2174 #ifdef INET 2175 case AF_INET: 2176 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2177 return (w.pfrw_kentry); 2178 #endif /* INET */ 2179 #ifdef INET6 2180 case AF_INET6: 2181 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2182 return (w.pfrw_kentry); 2183 #endif /* INET6 */ 2184 default: 2185 return (NULL); 2186 } 2187 } 2188 2189 void 2190 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn) 2191 { 2192 struct pfr_walktree w; 2193 2194 bzero(&w, sizeof(w)); 2195 w.pfrw_op = PFRW_DYNADDR_UPDATE; 2196 w.pfrw_dyn = dyn; 2197 2198 dyn->pfid_acnt4 = 0; 2199 dyn->pfid_acnt6 = 0; 2200 if (!dyn->pfid_af || dyn->pfid_af == AF_INET) 2201 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2202 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) 2203 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2204 } 2205