/*-
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

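/*
 * SUNION2PF() returns a struct pf_addr pointer into a sockaddr_union,
 * selecting sin_addr or sin6_addr by address family; e.g. (illustrative)
 * "PF_ACPY(dst, SUNION2PF(&ke->pfrke_sa, af), af)".  It relies on the
 * in_addr/in6_addr members overlaying the pf_addr layout.
 */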
#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke)	\
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

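/*
 * A single radix-tree walker serves every traversal in this file: the
 * caller fills in pfrw_op to select what pfr_walktree() should do with
 * each visited entry (mark, sweep, enqueue, copy out, ...) and sets one
 * member of the union as that operation's argument.
 */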
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, long);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, long, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_table_count(struct pfr_table *, int);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

struct pfr_ktablehead	 pfr_ktables;
struct pfr_table	 pfr_nulltable;
int			 pfr_ktable_cnt;

void
pfr_initialize(void)
{

	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pfr_kcounters_z = uma_zcreate("pf table counters",
	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kcounters_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

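/*
 * Add the given addresses to the table.  A scratch table (tmpkt) is
 * populated in parallel so that duplicates within the request itself
 * are caught: an address already routed into the scratch table is
 * reported as PFR_FB_DUPLICATE instead of being added twice.
 */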
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

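/*
 * Illustrative example of the scan-vs-lookup trade-off inside
 * pfr_del_addrs() below: with N = 65536 entries the computed log is 18,
 * so a request deleting more than about 65536/18 ~ 3640 addresses is
 * served by the O(N) full-table mark, while a smaller request does one
 * radix lookup per address instead.
 */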
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

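/*
 * Replace the table contents with the given address list: existing
 * entries are marked, entries present in both old and new sets keep
 * their slot (with the negation flag toggled via 'changeq' if it
 * differs), new ones are staged in 'addq', and everything left unmarked
 * is swept into 'delq'.  With PFR_FLAG_FEEDBACK the deleted addresses
 * are also copied back to userland after the first 'size' slots.
 */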
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(&workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

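/*
 * Sanity-check an address from userland: the prefix length must fit the
 * address family and every bit past the prefix must be zero.  For
 * example (illustrative), 10.0.0.0/8 with pfra_net = 8 is accepted,
 * while 10.0.0.1/8 is rejected because host bits are set beyond the
 * prefix.
 */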
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

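/*
 * Look up one address in the table.  Network entries are found with an
 * exact rn_lookup() on address and mask; host entries use best-match
 * rn_match(), and with 'exact' set a covering network entry is not
 * accepted as a match for a host address.
 */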
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

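/*
 * Build the sockaddr mask for a given prefix length.  Illustratively,
 * net = 24 on AF_INET yields htonl(-1 << 8), i.e. 255.255.255.0; for
 * AF_INET6 the mask is built 32 bits at a time, so net = 80 produces
 * two all-ones words, one htonl(-1 << 16) word and a zero word.
 */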
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

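/*
 * Callback invoked by rnh_walktree() for every radix node; dispatches
 * on the pfrw_op selected by the caller (see struct pfr_walktree
 * above).  Returning nonzero, as PFRW_POOL_GET does once the requested
 * index is reached, stops the walk early.
 */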
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

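/*
 * Create the requested tables.  A table defined inside an anchor also
 * needs a root table of the same name in the main ruleset; that root is
 * looked up in the tree, found in the pending queue, or created here,
 * and linked through pfrkt_root.
 */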
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

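/*
 * Table transactions: pfr_ina_begin() hands out a ticket and clears any
 * stale INACTIVE tables, pfr_ina_define() stages addresses into shadow
 * tables under that ticket, and pfr_ina_commit()/pfr_ina_rollback()
 * atomically install or discard the staged contents.
 */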
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

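/*
 * Commit the transaction: every INACTIVE table in the anchor has its
 * shadow contents folded into the live table by pfr_commit_ktable(),
 * then the ruleset's transaction is closed.
 */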
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

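/*
 * Allocate a table and its two radix heads.  The rn_inithead() offset
 * arguments are bit offsets of the address within the sockaddr, e.g.
 * offsetof(struct sockaddr_in, sin_addr) * 8 makes the radix code start
 * comparing keys at the IPv4 address field.
 */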
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

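/*
 * Data-path lookup: if this is an anchor-local table that is not active
 * itself, fall back to its root table.  A hit on an entry with the
 * negation flag set counts as a non-match, and the per-table
 * match/nomatch counters are updated either way.
 */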
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
			    M_NOWAIT | M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

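/*
 * Round-robin address selection for address pools: *pidx names the
 * block to use and 'counter' the previous address within it; the walk
 * skips negated entries and steps over nested (more specific) blocks
 * so that each top-level block is cycled through in turn.
 */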
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr		*addr, *cur, *mask;
	union sockaddr_union	 uaddr, umask;
	struct pfr_kentry	*ke, *ke2 = NULL;
	int			 idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}