/*-
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

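/*
 * Return the address embedded in a sockaddr_union as a generic
 * struct pf_addr pointer, selected by address family.
 */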
#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, long);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, long, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_table_count(struct pfr_table *, int);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)

static VNET_DEFINE(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)

static VNET_DEFINE(int, pfr_ktable_cnt);
#define	V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)

void
pfr_initialize(void)
{

	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pfr_kcounters_z = uma_zcreate("pf table counters",
	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kcounters_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

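/*
 * Add addresses to a table.  A scratch table (tmpkt) collects the
 * entries as they are accepted, so that a duplicate address within
 * the request itself is caught (PFR_FB_DUPLICATE) before the work
 * queue is committed to the real table.
 */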
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * One is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

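/*
 * Replace the table contents with the given address list: unlisted
 * entries are deleted, new ones added, and entries whose negation
 * flag differs are put on a change queue.  With PFR_FLAG_FEEDBACK,
 * deleted addresses are appended to the caller's buffer, which is
 * why *size2 may have to grow to size+xdel.
 */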
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

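/*
 * Copy the table's addresses out to the caller.  As with the other
 * "get" operations here, a too-small buffer is not an error: the
 * required count is returned in *size and the caller can retry.
 */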
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(&workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

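/*
 * Sanity-check a pfr_addr: the prefix length must fit the address
 * family, all host bits beyond the prefix (and any padding beyond
 * the address itself) must be zero, and no feedback value may be
 * set on input.
 */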
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

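/*
 * Find the table entry for an address.  Network addresses are looked
 * up with their exact mask; host addresses use best-match, and with
 * 'exact' set a covering network entry is not accepted as a match.
 */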
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

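/*
 * Build a netmask sockaddr for the given prefix length, e.g. for
 * AF_INET and net=24 this yields 255.255.255.0.
 */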
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

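/*
 * Radix tree walker callback.  The operation in pfrw_op selects what
 * is done per entry: clearing marks, collecting entries on a work
 * queue, copying addresses or statistics out, picking the n-th entry
 * for pfr_pool_get(), or refreshing a dynamic address.
 */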
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

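/*
 * Create the requested tables.  A table defined inside an anchor is
 * linked to a root table of the same name in the main ruleset, which
 * is created here on demand.
 */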
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

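/*
 * Table transactions (pfr_ina_*): "begin" clears any previous
 * inactive set and hands out a ticket, "define" loads shadow tables
 * into the inactive set, and the ticket then either commits the
 * shadow contents into the active tables or rolls them back.
 */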
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

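/*
 * Swap a table's shadow into place.  Three cases: the shadow carries
 * no addresses (only flags and stats are touched), the active table
 * already has addresses (the two sets are merged via add/delete/change
 * queues), or it has none and the radix heads can simply be swapped.
 */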
static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (V_pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
	V_pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

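/*
 * Apply new flags to a table.  Dropping the last reference clears
 * PFR_TFLAG_ACTIVE; once no flags in PFR_TFLAG_SETMASK remain, the
 * table is removed from the tree and destroyed.
 */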
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		V_pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

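/*
 * Match an address against a table and update the match/nomatch
 * counters.  An inactive table defers to its root table in the main
 * ruleset, and a hit on a negated entry counts as no match.
 */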
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
			    M_NOWAIT | M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

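/*
 * Round-robin address selection for pools backed by a table: *pidx
 * names the current block and *counter the position inside it.  When
 * the counter runs past the block (or a nested block would be hit),
 * the search advances to the next block in the table.
 */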
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr		*addr, *cur, *mask;
	union sockaddr_union	 uaddr, umask;
	struct pfr_kentry	*ke, *ke2 = NULL;
	int			 idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

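/*
 * Recompute the cached addresses of a table-backed dynamic address by
 * walking the radix trees for the requested address families.
 */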
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}