/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
    (struct pf_addr *)&(su)->sin.sin_addr :	\
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
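
/*
 * Example (illustrative, not part of the original source): for a
 * pfr_addr with pfra_af = AF_INET, AF_BITS() is 32, so ADDR_NETWORK()
 * is true for 192.0.2.0/24 (pfra_net = 24 < 32) and false for the host
 * entry 192.0.2.1/32. ACCEPT_FLAGS() makes each ioctl handler below
 * reject a request carrying any flag outside its ok-list.
 */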

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	 pfrw_free;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_addr(struct pfr_addr *,
			    struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
			    int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, long);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, long, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)

static VNET_DEFINE(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)

static VNET_DEFINE(int, pfr_ktable_cnt);
#define	V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)

void
pfr_initialize(void)
{

	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pfr_kcounters_z = uma_zcreate("pf table counters",
	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kcounters_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
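
/*
 * Note on tmpkt above (illustrative, not part of the original source):
 * pfr_add_addrs() routes each new entry into a scratch table first, so
 * a duplicate inside the submitted batch itself (e.g. 192.0.2.1 listed
 * twice) is caught by the tmpkt lookup and reported as PFR_FB_DUPLICATE
 * instead of being inserted twice into the real table.
 */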

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here,
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}
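
/*
 * Worked example for the heuristic above (illustrative, not part of
 * the original source): for a table with pfrkt_cnt = 1000 the loop
 * leaves log = 11 (floor(log2(1000)) + 2), so a request deleting more
 * than 1000/11 = 90 addresses walks the whole table once (O(N)) while
 * a smaller request does one radix lookup per address (O(n*LOG(N))).
 */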

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
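
/*
 * Sizing note (illustrative, not part of the original source): with
 * PFR_FLAG_FEEDBACK the caller's buffer must hold the 'size' submitted
 * addresses plus one pfr_addr per deleted entry. Replacing a 3-entry
 * table with 2 disjoint addresses therefore needs *size2 >= 2 + 3 = 5;
 * if the buffer is smaller, the required count is stored in *size2 and
 * the function bails out without committing anything.
 */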

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
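
/*
 * Usage sketch (illustrative, not part of the original source): the
 * getters above implement a two-step size negotiation. A caller may
 * pass *size = 0 to learn the entry count, allocate, and retry
 * (locking elided):
 */
#if 0
	struct pfr_addr *buf;
	int n = 0;

	pfr_get_addrs(tbl, NULL, &n, 0);	/* n = entry count */
	buf = malloc(n * sizeof(*buf), M_PFTABLE, M_WAITOK);
	pfr_get_addrs(tbl, buf, &n, 0);		/* fills buf */
#endif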

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(&workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
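
/*
 * Worked example for the host-bit checks above (illustrative, not part
 * of the original source): pfra_u is the first member of struct
 * pfr_addr, so ((caddr_t)ad)[i] indexes the raw address bytes. For
 * 10.0.0.1/24 (pfra_net = 24), byte 24/8 = 3 is 0x01 and the
 * partial-byte mask is 0xFF >> 0 = 0xFF, so the AND is nonzero and the
 * address is rejected: a /24 entry must have all 8 host bits clear.
 */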

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry	*ke;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if (p->pfrke_counters) {
			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
			p->pfrke_counters = NULL;
		}
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
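
/*
 * Worked example for pfr_prepare_network() (illustrative, not part of
 * the original source): for AF_INET and net = 20 the mask is
 * htonl(-1 << 12), i.e. 255.255.240.0. For AF_INET6 and net = 56 the
 * loop fills s6_addr32[0] with all ones (net drops to 24), then stores
 * htonl(-1 << 8) in s6_addr32[1], yielding ffff:ffff:ffff:ff00::.
 */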

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				bzero(as.pfras_packets, sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}
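
/*
 * Usage sketch for the walker above (illustrative, not part of the
 * original source): every traversal in this file follows the same
 * pattern - zero a pfr_walktree, pick an op, then walk both radix
 * heads. For instance, collecting all entries of a table:
 */
#if 0
	struct pfr_walktree	w;
	struct pfr_kentryworkq	workq;

	SLIST_INIT(&workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_ENQUEUE;
	w.pfrw_workq = &workq;
	kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
	/* w.pfrw_cnt now holds the number of enqueued entries. */
#endif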

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
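
/*
 * Note on root tables (illustrative, not part of the original source):
 * a table defined inside an anchor keeps a pfrkt_root link to a table
 * of the same name in the main ruleset. Adding a hypothetical table
 * "spammers" in anchor "ftp-proxy" therefore also creates an inactive
 * root table "spammers" with an empty anchor, so lookups and reference
 * counting can fall back to it.
 */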

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
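
/*
 * Transaction sketch (illustrative, not part of the original source):
 * table definitions inside an anchor are loaded atomically through the
 * ticket protocol of the three pfr_ina_*() functions:
 */
#if 0
	u_int32_t ticket;
	int ndel, nadd, naddr, nchange;

	pfr_ina_begin(&trs, &ticket, &ndel, 0);	/* open transaction */
	pfr_ina_define(&tbl, addrs, naddrs, &nadd, &naddr, ticket,
	    PFR_FLAG_ADDRSTOO);			/* stage shadow table(s) */
	pfr_ina_commit(&trs, ticket, &nadd, &nchange, 0); /* swap in */
	/* ... or pfr_ina_rollback(&trs, ticket, &ndel, 0) to abort. */
#endif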

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove leading slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
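
/*
 * Example (illustrative, not part of the original source): an anchor
 * spelled "/foo/bar" is rewritten in place to "foo/bar", and the bytes
 * freed at the tail of the buffer are zeroed so the trailing-garbage
 * check above still passes. Interior slashes, as in "foo/bar", are
 * left alone.
 */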

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (V_pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
	V_pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		V_pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
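
/*
 * Layout note (illustrative, not part of the original source): the
 * cast above relies on struct pfr_ktable beginning with its embedded
 * struct pfr_table (pfrkt_t, the first member of pfrkt_ts), so
 * pfr_ktable_compare() reads the same name/anchor bytes through either
 * pointer type.
 */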

int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
			    M_NOWAIT | M_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
		}
	}
}
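
/*
 * Negation example (illustrative, not part of the original source):
 * with a table holding { 10.0.0.0/8, !10.1.2.3 }, rn_match() on
 * 10.1.2.3 returns the more specific host entry, whose pfrke_not is
 * set, so pfr_match_addr() counts a nomatch and returns 0; any other
 * 10/8 address matches the /8 entry and returns 1.
 */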

struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr		*addr, *cur, *mask;
	union sockaddr_union	 uaddr, umask;
	struct pfr_kentry	*ke, *ke2 = NULL;
	int			 idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
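
/*
 * Walkthrough (illustrative, not part of the original source): for a
 * round-robin pool backed by table { 192.0.2.0/30 } the caller keeps
 * *pidx and *counter across calls. The first call returns 192.0.2.0
 * and stores it in *counter; as pf advances the counter, later calls
 * hand back .1, .2 and .3, and once the counter walks past the block
 * the code moves on to the next entry (restarting at the first entry
 * is the caller's job).
 */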

static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}