1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002 Cedric Berger 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * - Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * $OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $ 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 40 #include <sys/param.h> 41 #include <sys/kernel.h> 42 #include <sys/lock.h> 43 #include <sys/malloc.h> 44 #include <sys/mbuf.h> 45 #include <sys/mutex.h> 46 #include <sys/refcount.h> 47 #include <sys/socket.h> 48 #include <vm/uma.h> 49 50 #include <net/if.h> 51 #include <net/vnet.h> 52 #include <net/pfvar.h> 53 54 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 55 56 #define ACCEPT_FLAGS(flags, oklist) \ 57 do { \ 58 if ((flags & ~(oklist)) & \ 59 PFR_FLAG_ALLMASK) \ 60 return (EINVAL); \ 61 } while (0) 62 63 #define FILLIN_SIN(sin, addr) \ 64 do { \ 65 (sin).sin_len = sizeof(sin); \ 66 (sin).sin_family = AF_INET; \ 67 (sin).sin_addr = (addr); \ 68 } while (0) 69 70 #define FILLIN_SIN6(sin6, addr) \ 71 do { \ 72 (sin6).sin6_len = sizeof(sin6); \ 73 (sin6).sin6_family = AF_INET6; \ 74 (sin6).sin6_addr = (addr); \ 75 } while (0) 76 77 #define SWAP(type, a1, a2) \ 78 do { \ 79 type tmp = a1; \ 80 a1 = a2; \ 81 a2 = tmp; \ 82 } while (0) 83 84 #define SUNION2PF(su, af) (((af)==AF_INET) ? \ 85 (struct pf_addr *)&(su)->sin.sin_addr : \ 86 (struct pf_addr *)&(su)->sin6.sin6_addr) 87 88 #define AF_BITS(af) (((af)==AF_INET)?32:128) 89 #define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) 90 #define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) 91 #define KENTRY_RNF_ROOT(ke) \ 92 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) 93 94 #define NO_ADDRESSES (-1) 95 #define ENQUEUE_UNMARKED_ONLY (1) 96 #define INVERT_NEG_FLAG (1) 97 98 struct pfr_walktree { 99 enum pfrw_op { 100 PFRW_MARK, 101 PFRW_SWEEP, 102 PFRW_ENQUEUE, 103 PFRW_GET_ADDRS, 104 PFRW_GET_ASTATS, 105 PFRW_POOL_GET, 106 PFRW_DYNADDR_UPDATE 107 } pfrw_op; 108 union { 109 struct pfr_addr *pfrw1_addr; 110 struct pfr_astats *pfrw1_astats; 111 struct pfr_kentryworkq *pfrw1_workq; 112 struct pfr_kentry *pfrw1_kentry; 113 struct pfi_dynaddr *pfrw1_dyn; 114 } pfrw_1; 115 int pfrw_free; 116 int pfrw_flags; 117 }; 118 #define pfrw_addr pfrw_1.pfrw1_addr 119 #define pfrw_astats pfrw_1.pfrw1_astats 120 #define pfrw_workq pfrw_1.pfrw1_workq 121 #define pfrw_kentry pfrw_1.pfrw1_kentry 122 #define pfrw_dyn pfrw_1.pfrw1_dyn 123 #define pfrw_cnt pfrw_free 124 125 #define senderr(e) do { rv = (e); goto _bad; } while (0) 126 127 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures"); 128 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z); 129 #define V_pfr_kentry_z VNET(pfr_kentry_z) 130 VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z); 131 #define V_pfr_kentry_counter_z VNET(pfr_kentry_counter_z) 132 133 static struct pf_addr pfr_ffaddr = { 134 .addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } 135 }; 136 137 static void pfr_copyout_astats(struct pfr_astats *, 138 const struct pfr_kentry *, 139 const struct pfr_walktree *); 140 static void pfr_copyout_addr(struct pfr_addr *, 141 const struct pfr_kentry *ke); 142 static int pfr_validate_addr(struct pfr_addr *); 143 static void pfr_enqueue_addrs(struct pfr_ktable *, 144 struct pfr_kentryworkq *, int *, int); 145 static void pfr_mark_addrs(struct pfr_ktable *); 146 static struct pfr_kentry 147 *pfr_lookup_addr(struct pfr_ktable *, 148 struct pfr_addr *, int); 149 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool); 150 static void pfr_destroy_kentries(struct pfr_kentryworkq *); 151 static void pfr_destroy_kentry(struct pfr_kentry *); 152 static void pfr_insert_kentries(struct pfr_ktable *, 153 struct pfr_kentryworkq *, long); 154 static void pfr_remove_kentries(struct pfr_ktable *, 155 struct pfr_kentryworkq *); 156 static void pfr_clstats_kentries(struct pfr_ktable *, 157 struct pfr_kentryworkq *, long, int); 158 static void pfr_reset_feedback(struct pfr_addr *, int); 159 static void pfr_prepare_network(union sockaddr_union *, int, int); 160 static int pfr_route_kentry(struct pfr_ktable *, 161 struct pfr_kentry *); 162 static int pfr_unroute_kentry(struct pfr_ktable *, 163 struct pfr_kentry *); 164 static int pfr_walktree(struct radix_node *, void *); 165 static int pfr_validate_table(struct pfr_table *, int, int); 166 static int pfr_fix_anchor(char *); 167 static void pfr_commit_ktable(struct pfr_ktable *, long); 168 static void pfr_insert_ktables(struct pfr_ktableworkq *); 169 static void pfr_insert_ktable(struct pfr_ktable *); 170 static void pfr_setflags_ktables(struct pfr_ktableworkq *); 171 static void pfr_setflags_ktable(struct pfr_ktable *, int); 172 static void pfr_clstats_ktables(struct pfr_ktableworkq *, long, 173 int); 174 static void pfr_clstats_ktable(struct pfr_ktable *, long, int); 175 static struct pfr_ktable 176 *pfr_create_ktable(struct pfr_table *, long, int); 177 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int); 178 static void pfr_destroy_ktable(struct pfr_ktable *, int); 179 static int pfr_ktable_compare(struct pfr_ktable *, 180 struct pfr_ktable *); 181 static struct pfr_ktable 182 *pfr_lookup_table(struct pfr_table *); 183 static void pfr_clean_node_mask(struct pfr_ktable *, 184 struct pfr_kentryworkq *); 185 static int pfr_skip_table(struct pfr_table *, 186 struct pfr_ktable *, int); 187 static struct pfr_kentry 188 *pfr_kentry_byidx(struct pfr_ktable *, int, int); 189 190 static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 191 static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); 192 193 VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables); 194 #define V_pfr_ktables VNET(pfr_ktables) 195 196 VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable); 197 #define V_pfr_nulltable VNET(pfr_nulltable) 198 199 VNET_DEFINE_STATIC(int, pfr_ktable_cnt); 200 #define V_pfr_ktable_cnt VNET(pfr_ktable_cnt) 201 202 void 203 pfr_initialize(void) 204 { 205 206 V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters", 207 PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL, 208 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 209 V_pfr_kentry_z = uma_zcreate("pf table entries", 210 sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 211 0); 212 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z; 213 V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; 214 } 215 216 void 217 pfr_cleanup(void) 218 { 219 220 uma_zdestroy(V_pfr_kentry_z); 221 uma_zdestroy(V_pfr_kentry_counter_z); 222 } 223 224 int 225 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) 226 { 227 struct pfr_ktable *kt; 228 struct pfr_kentryworkq workq; 229 230 PF_RULES_WASSERT(); 231 232 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 233 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 234 return (EINVAL); 235 kt = pfr_lookup_table(tbl); 236 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 237 return (ESRCH); 238 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 239 return (EPERM); 240 pfr_enqueue_addrs(kt, &workq, ndel, 0); 241 242 if (!(flags & PFR_FLAG_DUMMY)) { 243 pfr_remove_kentries(kt, &workq); 244 KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__)); 245 } 246 return (0); 247 } 248 249 int 250 pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 251 int *nadd, int flags) 252 { 253 struct pfr_ktable *kt, *tmpkt; 254 struct pfr_kentryworkq workq; 255 struct pfr_kentry *p, *q; 256 struct pfr_addr *ad; 257 int i, rv, xadd = 0; 258 long tzero = time_second; 259 260 PF_RULES_WASSERT(); 261 262 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 263 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 264 return (EINVAL); 265 kt = pfr_lookup_table(tbl); 266 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 267 return (ESRCH); 268 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 269 return (EPERM); 270 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 271 if (tmpkt == NULL) 272 return (ENOMEM); 273 SLIST_INIT(&workq); 274 for (i = 0, ad = addr; i < size; i++, ad++) { 275 if (pfr_validate_addr(ad)) 276 senderr(EINVAL); 277 p = pfr_lookup_addr(kt, ad, 1); 278 q = pfr_lookup_addr(tmpkt, ad, 1); 279 if (flags & PFR_FLAG_FEEDBACK) { 280 if (q != NULL) 281 ad->pfra_fback = PFR_FB_DUPLICATE; 282 else if (p == NULL) 283 ad->pfra_fback = PFR_FB_ADDED; 284 else if (p->pfrke_not != ad->pfra_not) 285 ad->pfra_fback = PFR_FB_CONFLICT; 286 else 287 ad->pfra_fback = PFR_FB_NONE; 288 } 289 if (p == NULL && q == NULL) { 290 p = pfr_create_kentry(ad, 291 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 292 if (p == NULL) 293 senderr(ENOMEM); 294 if (pfr_route_kentry(tmpkt, p)) { 295 pfr_destroy_kentry(p); 296 ad->pfra_fback = PFR_FB_NONE; 297 } else { 298 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 299 xadd++; 300 } 301 } 302 } 303 pfr_clean_node_mask(tmpkt, &workq); 304 if (!(flags & PFR_FLAG_DUMMY)) 305 pfr_insert_kentries(kt, &workq, tzero); 306 else 307 pfr_destroy_kentries(&workq); 308 if (nadd != NULL) 309 *nadd = xadd; 310 pfr_destroy_ktable(tmpkt, 0); 311 return (0); 312 _bad: 313 pfr_clean_node_mask(tmpkt, &workq); 314 pfr_destroy_kentries(&workq); 315 if (flags & PFR_FLAG_FEEDBACK) 316 pfr_reset_feedback(addr, size); 317 pfr_destroy_ktable(tmpkt, 0); 318 return (rv); 319 } 320 321 int 322 pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 323 int *ndel, int flags) 324 { 325 struct pfr_ktable *kt; 326 struct pfr_kentryworkq workq; 327 struct pfr_kentry *p; 328 struct pfr_addr *ad; 329 int i, rv, xdel = 0, log = 1; 330 331 PF_RULES_WASSERT(); 332 333 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 334 if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) 335 return (EINVAL); 336 kt = pfr_lookup_table(tbl); 337 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 338 return (ESRCH); 339 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 340 return (EPERM); 341 /* 342 * there are two algorithms to choose from here. 343 * with: 344 * n: number of addresses to delete 345 * N: number of addresses in the table 346 * 347 * one is O(N) and is better for large 'n' 348 * one is O(n*LOG(N)) and is better for small 'n' 349 * 350 * following code try to decide which one is best. 351 */ 352 for (i = kt->pfrkt_cnt; i > 0; i >>= 1) 353 log++; 354 if (size > kt->pfrkt_cnt/log) { 355 /* full table scan */ 356 pfr_mark_addrs(kt); 357 } else { 358 /* iterate over addresses to delete */ 359 for (i = 0, ad = addr; i < size; i++, ad++) { 360 if (pfr_validate_addr(ad)) 361 return (EINVAL); 362 p = pfr_lookup_addr(kt, ad, 1); 363 if (p != NULL) 364 p->pfrke_mark = 0; 365 } 366 } 367 SLIST_INIT(&workq); 368 for (i = 0, ad = addr; i < size; i++, ad++) { 369 if (pfr_validate_addr(ad)) 370 senderr(EINVAL); 371 p = pfr_lookup_addr(kt, ad, 1); 372 if (flags & PFR_FLAG_FEEDBACK) { 373 if (p == NULL) 374 ad->pfra_fback = PFR_FB_NONE; 375 else if (p->pfrke_not != ad->pfra_not) 376 ad->pfra_fback = PFR_FB_CONFLICT; 377 else if (p->pfrke_mark) 378 ad->pfra_fback = PFR_FB_DUPLICATE; 379 else 380 ad->pfra_fback = PFR_FB_DELETED; 381 } 382 if (p != NULL && p->pfrke_not == ad->pfra_not && 383 !p->pfrke_mark) { 384 p->pfrke_mark = 1; 385 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 386 xdel++; 387 } 388 } 389 if (!(flags & PFR_FLAG_DUMMY)) 390 pfr_remove_kentries(kt, &workq); 391 if (ndel != NULL) 392 *ndel = xdel; 393 return (0); 394 _bad: 395 if (flags & PFR_FLAG_FEEDBACK) 396 pfr_reset_feedback(addr, size); 397 return (rv); 398 } 399 400 int 401 pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 402 int *size2, int *nadd, int *ndel, int *nchange, int flags, 403 u_int32_t ignore_pfrt_flags) 404 { 405 struct pfr_ktable *kt, *tmpkt; 406 struct pfr_kentryworkq addq, delq, changeq; 407 struct pfr_kentry *p, *q; 408 struct pfr_addr ad; 409 int i, rv, xadd = 0, xdel = 0, xchange = 0; 410 long tzero = time_second; 411 412 PF_RULES_WASSERT(); 413 414 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 415 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags & 416 PFR_FLAG_USERIOCTL)) 417 return (EINVAL); 418 kt = pfr_lookup_table(tbl); 419 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 420 return (ESRCH); 421 if (kt->pfrkt_flags & PFR_TFLAG_CONST) 422 return (EPERM); 423 tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0); 424 if (tmpkt == NULL) 425 return (ENOMEM); 426 pfr_mark_addrs(kt); 427 SLIST_INIT(&addq); 428 SLIST_INIT(&delq); 429 SLIST_INIT(&changeq); 430 for (i = 0; i < size; i++) { 431 /* 432 * XXXGL: undertand pf_if usage of this function 433 * and make ad a moving pointer 434 */ 435 bcopy(addr + i, &ad, sizeof(ad)); 436 if (pfr_validate_addr(&ad)) 437 senderr(EINVAL); 438 ad.pfra_fback = PFR_FB_NONE; 439 p = pfr_lookup_addr(kt, &ad, 1); 440 if (p != NULL) { 441 if (p->pfrke_mark) { 442 ad.pfra_fback = PFR_FB_DUPLICATE; 443 goto _skip; 444 } 445 p->pfrke_mark = 1; 446 if (p->pfrke_not != ad.pfra_not) { 447 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq); 448 ad.pfra_fback = PFR_FB_CHANGED; 449 xchange++; 450 } 451 } else { 452 q = pfr_lookup_addr(tmpkt, &ad, 1); 453 if (q != NULL) { 454 ad.pfra_fback = PFR_FB_DUPLICATE; 455 goto _skip; 456 } 457 p = pfr_create_kentry(&ad, 458 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 459 if (p == NULL) 460 senderr(ENOMEM); 461 if (pfr_route_kentry(tmpkt, p)) { 462 pfr_destroy_kentry(p); 463 ad.pfra_fback = PFR_FB_NONE; 464 } else { 465 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 466 ad.pfra_fback = PFR_FB_ADDED; 467 xadd++; 468 } 469 } 470 _skip: 471 if (flags & PFR_FLAG_FEEDBACK) 472 bcopy(&ad, addr + i, sizeof(ad)); 473 } 474 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); 475 if ((flags & PFR_FLAG_FEEDBACK) && *size2) { 476 if (*size2 < size+xdel) { 477 *size2 = size+xdel; 478 senderr(0); 479 } 480 i = 0; 481 SLIST_FOREACH(p, &delq, pfrke_workq) { 482 pfr_copyout_addr(&ad, p); 483 ad.pfra_fback = PFR_FB_DELETED; 484 bcopy(&ad, addr + size + i, sizeof(ad)); 485 i++; 486 } 487 } 488 pfr_clean_node_mask(tmpkt, &addq); 489 if (!(flags & PFR_FLAG_DUMMY)) { 490 pfr_insert_kentries(kt, &addq, tzero); 491 pfr_remove_kentries(kt, &delq); 492 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 493 } else 494 pfr_destroy_kentries(&addq); 495 if (nadd != NULL) 496 *nadd = xadd; 497 if (ndel != NULL) 498 *ndel = xdel; 499 if (nchange != NULL) 500 *nchange = xchange; 501 if ((flags & PFR_FLAG_FEEDBACK) && size2) 502 *size2 = size+xdel; 503 pfr_destroy_ktable(tmpkt, 0); 504 return (0); 505 _bad: 506 pfr_clean_node_mask(tmpkt, &addq); 507 pfr_destroy_kentries(&addq); 508 if (flags & PFR_FLAG_FEEDBACK) 509 pfr_reset_feedback(addr, size); 510 pfr_destroy_ktable(tmpkt, 0); 511 return (rv); 512 } 513 514 int 515 pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size, 516 int *nmatch, int flags) 517 { 518 struct pfr_ktable *kt; 519 struct pfr_kentry *p; 520 struct pfr_addr *ad; 521 int i, xmatch = 0; 522 523 PF_RULES_RASSERT(); 524 525 ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE); 526 if (pfr_validate_table(tbl, 0, 0)) 527 return (EINVAL); 528 kt = pfr_lookup_table(tbl); 529 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 530 return (ESRCH); 531 532 for (i = 0, ad = addr; i < size; i++, ad++) { 533 if (pfr_validate_addr(ad)) 534 return (EINVAL); 535 if (ADDR_NETWORK(ad)) 536 return (EINVAL); 537 p = pfr_lookup_addr(kt, ad, 0); 538 if (flags & PFR_FLAG_REPLACE) 539 pfr_copyout_addr(ad, p); 540 ad->pfra_fback = (p == NULL) ? PFR_FB_NONE : 541 (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); 542 if (p != NULL && !p->pfrke_not) 543 xmatch++; 544 } 545 if (nmatch != NULL) 546 *nmatch = xmatch; 547 return (0); 548 } 549 550 int 551 pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size, 552 int flags) 553 { 554 struct pfr_ktable *kt; 555 struct pfr_walktree w; 556 int rv; 557 558 PF_RULES_RASSERT(); 559 560 ACCEPT_FLAGS(flags, 0); 561 if (pfr_validate_table(tbl, 0, 0)) 562 return (EINVAL); 563 kt = pfr_lookup_table(tbl); 564 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 565 return (ESRCH); 566 if (kt->pfrkt_cnt > *size) { 567 *size = kt->pfrkt_cnt; 568 return (0); 569 } 570 571 bzero(&w, sizeof(w)); 572 w.pfrw_op = PFRW_GET_ADDRS; 573 w.pfrw_addr = addr; 574 w.pfrw_free = kt->pfrkt_cnt; 575 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 576 if (!rv) 577 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 578 pfr_walktree, &w); 579 if (rv) 580 return (rv); 581 582 KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__, 583 w.pfrw_free)); 584 585 *size = kt->pfrkt_cnt; 586 return (0); 587 } 588 589 int 590 pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size, 591 int flags) 592 { 593 struct pfr_ktable *kt; 594 struct pfr_walktree w; 595 struct pfr_kentryworkq workq; 596 int rv; 597 long tzero = time_second; 598 599 PF_RULES_RASSERT(); 600 601 /* XXX PFR_FLAG_CLSTATS disabled */ 602 ACCEPT_FLAGS(flags, 0); 603 if (pfr_validate_table(tbl, 0, 0)) 604 return (EINVAL); 605 kt = pfr_lookup_table(tbl); 606 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 607 return (ESRCH); 608 if (kt->pfrkt_cnt > *size) { 609 *size = kt->pfrkt_cnt; 610 return (0); 611 } 612 613 bzero(&w, sizeof(w)); 614 w.pfrw_op = PFRW_GET_ASTATS; 615 w.pfrw_astats = addr; 616 w.pfrw_free = kt->pfrkt_cnt; 617 /* 618 * Flags below are for backward compatibility. It was possible to have 619 * a table without per-entry counters. Now they are always allocated, 620 * we just discard data when reading it if table is not configured to 621 * have counters. 622 */ 623 w.pfrw_flags = kt->pfrkt_flags; 624 rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 625 if (!rv) 626 rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 627 pfr_walktree, &w); 628 if (!rv && (flags & PFR_FLAG_CLSTATS)) { 629 pfr_enqueue_addrs(kt, &workq, NULL, 0); 630 pfr_clstats_kentries(kt, &workq, tzero, 0); 631 } 632 if (rv) 633 return (rv); 634 635 if (w.pfrw_free) { 636 printf("pfr_get_astats: corruption detected (%d).\n", 637 w.pfrw_free); 638 return (ENOTTY); 639 } 640 *size = kt->pfrkt_cnt; 641 return (0); 642 } 643 644 int 645 pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size, 646 int *nzero, int flags) 647 { 648 struct pfr_ktable *kt; 649 struct pfr_kentryworkq workq; 650 struct pfr_kentry *p; 651 struct pfr_addr *ad; 652 int i, rv, xzero = 0; 653 654 PF_RULES_WASSERT(); 655 656 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); 657 if (pfr_validate_table(tbl, 0, 0)) 658 return (EINVAL); 659 kt = pfr_lookup_table(tbl); 660 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 661 return (ESRCH); 662 SLIST_INIT(&workq); 663 for (i = 0, ad = addr; i < size; i++, ad++) { 664 if (pfr_validate_addr(ad)) 665 senderr(EINVAL); 666 p = pfr_lookup_addr(kt, ad, 1); 667 if (flags & PFR_FLAG_FEEDBACK) { 668 ad->pfra_fback = (p != NULL) ? 669 PFR_FB_CLEARED : PFR_FB_NONE; 670 } 671 if (p != NULL) { 672 SLIST_INSERT_HEAD(&workq, p, pfrke_workq); 673 xzero++; 674 } 675 } 676 677 if (!(flags & PFR_FLAG_DUMMY)) 678 pfr_clstats_kentries(kt, &workq, 0, 0); 679 if (nzero != NULL) 680 *nzero = xzero; 681 return (0); 682 _bad: 683 if (flags & PFR_FLAG_FEEDBACK) 684 pfr_reset_feedback(addr, size); 685 return (rv); 686 } 687 688 static int 689 pfr_validate_addr(struct pfr_addr *ad) 690 { 691 int i; 692 693 switch (ad->pfra_af) { 694 #ifdef INET 695 case AF_INET: 696 if (ad->pfra_net > 32) 697 return (-1); 698 break; 699 #endif /* INET */ 700 #ifdef INET6 701 case AF_INET6: 702 if (ad->pfra_net > 128) 703 return (-1); 704 break; 705 #endif /* INET6 */ 706 default: 707 return (-1); 708 } 709 if (ad->pfra_net < 128 && 710 (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) 711 return (-1); 712 for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++) 713 if (((caddr_t)ad)[i]) 714 return (-1); 715 if (ad->pfra_not && ad->pfra_not != 1) 716 return (-1); 717 if (ad->pfra_fback) 718 return (-1); 719 return (0); 720 } 721 722 static void 723 pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 724 int *naddr, int sweep) 725 { 726 struct pfr_walktree w; 727 728 SLIST_INIT(workq); 729 bzero(&w, sizeof(w)); 730 w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE; 731 w.pfrw_workq = workq; 732 if (kt->pfrkt_ip4 != NULL) 733 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, 734 pfr_walktree, &w)) 735 printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); 736 if (kt->pfrkt_ip6 != NULL) 737 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, 738 pfr_walktree, &w)) 739 printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); 740 if (naddr != NULL) 741 *naddr = w.pfrw_cnt; 742 } 743 744 static void 745 pfr_mark_addrs(struct pfr_ktable *kt) 746 { 747 struct pfr_walktree w; 748 749 bzero(&w, sizeof(w)); 750 w.pfrw_op = PFRW_MARK; 751 if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w)) 752 printf("pfr_mark_addrs: IPv4 walktree failed.\n"); 753 if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w)) 754 printf("pfr_mark_addrs: IPv6 walktree failed.\n"); 755 } 756 757 758 static struct pfr_kentry * 759 pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) 760 { 761 union sockaddr_union sa, mask; 762 struct radix_head *head = NULL; 763 struct pfr_kentry *ke; 764 765 PF_RULES_ASSERT(); 766 767 bzero(&sa, sizeof(sa)); 768 if (ad->pfra_af == AF_INET) { 769 FILLIN_SIN(sa.sin, ad->pfra_ip4addr); 770 head = &kt->pfrkt_ip4->rh; 771 } else if ( ad->pfra_af == AF_INET6 ) { 772 FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); 773 head = &kt->pfrkt_ip6->rh; 774 } 775 if (ADDR_NETWORK(ad)) { 776 pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); 777 ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); 778 if (ke && KENTRY_RNF_ROOT(ke)) 779 ke = NULL; 780 } else { 781 ke = (struct pfr_kentry *)rn_match(&sa, head); 782 if (ke && KENTRY_RNF_ROOT(ke)) 783 ke = NULL; 784 if (exact && ke && KENTRY_NETWORK(ke)) 785 ke = NULL; 786 } 787 return (ke); 788 } 789 790 static struct pfr_kentry * 791 pfr_create_kentry(struct pfr_addr *ad, bool counters) 792 { 793 struct pfr_kentry *ke; 794 counter_u64_t c; 795 796 ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); 797 if (ke == NULL) 798 return (NULL); 799 800 if (ad->pfra_af == AF_INET) 801 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); 802 else if (ad->pfra_af == AF_INET6) 803 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); 804 ke->pfrke_af = ad->pfra_af; 805 ke->pfrke_net = ad->pfra_net; 806 ke->pfrke_not = ad->pfra_not; 807 ke->pfrke_counters.pfrkc_tzero = 0; 808 if (counters) { 809 c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO); 810 if (c == NULL) { 811 pfr_destroy_kentry(ke); 812 return (NULL); 813 } 814 ke->pfrke_counters.pfrkc_counters = c; 815 } 816 return (ke); 817 } 818 819 static void 820 pfr_destroy_kentries(struct pfr_kentryworkq *workq) 821 { 822 struct pfr_kentry *p, *q; 823 824 for (p = SLIST_FIRST(workq); p != NULL; p = q) { 825 q = SLIST_NEXT(p, pfrke_workq); 826 pfr_destroy_kentry(p); 827 } 828 } 829 830 static void 831 pfr_destroy_kentry(struct pfr_kentry *ke) 832 { 833 counter_u64_t c; 834 835 if ((c = ke->pfrke_counters.pfrkc_counters) != NULL) 836 uma_zfree_pcpu(V_pfr_kentry_counter_z, c); 837 uma_zfree(V_pfr_kentry_z, ke); 838 } 839 840 static void 841 pfr_insert_kentries(struct pfr_ktable *kt, 842 struct pfr_kentryworkq *workq, long tzero) 843 { 844 struct pfr_kentry *p; 845 int rv, n = 0; 846 847 SLIST_FOREACH(p, workq, pfrke_workq) { 848 rv = pfr_route_kentry(kt, p); 849 if (rv) { 850 printf("pfr_insert_kentries: cannot route entry " 851 "(code=%d).\n", rv); 852 break; 853 } 854 p->pfrke_counters.pfrkc_tzero = tzero; 855 n++; 856 } 857 kt->pfrkt_cnt += n; 858 } 859 860 int 861 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero) 862 { 863 struct pfr_kentry *p; 864 int rv; 865 866 p = pfr_lookup_addr(kt, ad, 1); 867 if (p != NULL) 868 return (0); 869 p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 870 if (p == NULL) 871 return (ENOMEM); 872 873 rv = pfr_route_kentry(kt, p); 874 if (rv) 875 return (rv); 876 877 p->pfrke_counters.pfrkc_tzero = tzero; 878 kt->pfrkt_cnt++; 879 880 return (0); 881 } 882 883 static void 884 pfr_remove_kentries(struct pfr_ktable *kt, 885 struct pfr_kentryworkq *workq) 886 { 887 struct pfr_kentry *p; 888 int n = 0; 889 890 SLIST_FOREACH(p, workq, pfrke_workq) { 891 pfr_unroute_kentry(kt, p); 892 n++; 893 } 894 kt->pfrkt_cnt -= n; 895 pfr_destroy_kentries(workq); 896 } 897 898 static void 899 pfr_clean_node_mask(struct pfr_ktable *kt, 900 struct pfr_kentryworkq *workq) 901 { 902 struct pfr_kentry *p; 903 904 SLIST_FOREACH(p, workq, pfrke_workq) 905 pfr_unroute_kentry(kt, p); 906 } 907 908 static void 909 pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, 910 long tzero, int negchange) 911 { 912 struct pfr_kentry *p; 913 int i; 914 915 SLIST_FOREACH(p, workq, pfrke_workq) { 916 if (negchange) 917 p->pfrke_not = !p->pfrke_not; 918 if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0) 919 for (i = 0; i < PFR_NUM_COUNTERS; i++) 920 counter_u64_zero( 921 p->pfrke_counters.pfrkc_counters + i); 922 p->pfrke_counters.pfrkc_tzero = tzero; 923 } 924 } 925 926 static void 927 pfr_reset_feedback(struct pfr_addr *addr, int size) 928 { 929 struct pfr_addr *ad; 930 int i; 931 932 for (i = 0, ad = addr; i < size; i++, ad++) 933 ad->pfra_fback = PFR_FB_NONE; 934 } 935 936 static void 937 pfr_prepare_network(union sockaddr_union *sa, int af, int net) 938 { 939 int i; 940 941 bzero(sa, sizeof(*sa)); 942 if (af == AF_INET) { 943 sa->sin.sin_len = sizeof(sa->sin); 944 sa->sin.sin_family = AF_INET; 945 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; 946 } else if (af == AF_INET6) { 947 sa->sin6.sin6_len = sizeof(sa->sin6); 948 sa->sin6.sin6_family = AF_INET6; 949 for (i = 0; i < 4; i++) { 950 if (net <= 32) { 951 sa->sin6.sin6_addr.s6_addr32[i] = 952 net ? htonl(-1 << (32-net)) : 0; 953 break; 954 } 955 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; 956 net -= 32; 957 } 958 } 959 } 960 961 static int 962 pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 963 { 964 union sockaddr_union mask; 965 struct radix_node *rn; 966 struct radix_head *head = NULL; 967 968 PF_RULES_WASSERT(); 969 970 bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); 971 if (ke->pfrke_af == AF_INET) 972 head = &kt->pfrkt_ip4->rh; 973 else if (ke->pfrke_af == AF_INET6) 974 head = &kt->pfrkt_ip6->rh; 975 976 if (KENTRY_NETWORK(ke)) { 977 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 978 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); 979 } else 980 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); 981 982 return (rn == NULL ? -1 : 0); 983 } 984 985 static int 986 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) 987 { 988 union sockaddr_union mask; 989 struct radix_node *rn; 990 struct radix_head *head = NULL; 991 992 if (ke->pfrke_af == AF_INET) 993 head = &kt->pfrkt_ip4->rh; 994 else if (ke->pfrke_af == AF_INET6) 995 head = &kt->pfrkt_ip6->rh; 996 997 if (KENTRY_NETWORK(ke)) { 998 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); 999 rn = rn_delete(&ke->pfrke_sa, &mask, head); 1000 } else 1001 rn = rn_delete(&ke->pfrke_sa, NULL, head); 1002 1003 if (rn == NULL) { 1004 printf("pfr_unroute_kentry: delete failed.\n"); 1005 return (-1); 1006 } 1007 return (0); 1008 } 1009 1010 static void 1011 pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke) 1012 { 1013 bzero(ad, sizeof(*ad)); 1014 if (ke == NULL) 1015 return; 1016 ad->pfra_af = ke->pfrke_af; 1017 ad->pfra_net = ke->pfrke_net; 1018 ad->pfra_not = ke->pfrke_not; 1019 if (ad->pfra_af == AF_INET) 1020 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; 1021 else if (ad->pfra_af == AF_INET6) 1022 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; 1023 } 1024 1025 static void 1026 pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke, 1027 const struct pfr_walktree *w) 1028 { 1029 int dir, op; 1030 const struct pfr_kcounters *kc = &ke->pfrke_counters; 1031 1032 pfr_copyout_addr(&as->pfras_a, ke); 1033 as->pfras_tzero = kc->pfrkc_tzero; 1034 1035 if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) { 1036 bzero(as->pfras_packets, sizeof(as->pfras_packets)); 1037 bzero(as->pfras_bytes, sizeof(as->pfras_bytes)); 1038 as->pfras_a.pfra_fback = PFR_FB_NOCOUNT; 1039 return; 1040 } 1041 1042 for (dir = 0; dir < PFR_DIR_MAX; dir++) { 1043 for (op = 0; op < PFR_OP_ADDR_MAX; op ++) { 1044 as->pfras_packets[dir][op] = counter_u64_fetch( 1045 pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS)); 1046 as->pfras_bytes[dir][op] = counter_u64_fetch( 1047 pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES)); 1048 } 1049 } 1050 } 1051 1052 static int 1053 pfr_walktree(struct radix_node *rn, void *arg) 1054 { 1055 struct pfr_kentry *ke = (struct pfr_kentry *)rn; 1056 struct pfr_walktree *w = arg; 1057 1058 switch (w->pfrw_op) { 1059 case PFRW_MARK: 1060 ke->pfrke_mark = 0; 1061 break; 1062 case PFRW_SWEEP: 1063 if (ke->pfrke_mark) 1064 break; 1065 /* FALLTHROUGH */ 1066 case PFRW_ENQUEUE: 1067 SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); 1068 w->pfrw_cnt++; 1069 break; 1070 case PFRW_GET_ADDRS: 1071 if (w->pfrw_free-- > 0) { 1072 pfr_copyout_addr(w->pfrw_addr, ke); 1073 w->pfrw_addr++; 1074 } 1075 break; 1076 case PFRW_GET_ASTATS: 1077 if (w->pfrw_free-- > 0) { 1078 struct pfr_astats as; 1079 1080 pfr_copyout_astats(&as, ke, w); 1081 1082 bcopy(&as, w->pfrw_astats, sizeof(as)); 1083 w->pfrw_astats++; 1084 } 1085 break; 1086 case PFRW_POOL_GET: 1087 if (ke->pfrke_not) 1088 break; /* negative entries are ignored */ 1089 if (!w->pfrw_cnt--) { 1090 w->pfrw_kentry = ke; 1091 return (1); /* finish search */ 1092 } 1093 break; 1094 case PFRW_DYNADDR_UPDATE: 1095 { 1096 union sockaddr_union pfr_mask; 1097 1098 if (ke->pfrke_af == AF_INET) { 1099 if (w->pfrw_dyn->pfid_acnt4++ > 0) 1100 break; 1101 pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); 1102 w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa, 1103 AF_INET); 1104 w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask, 1105 AF_INET); 1106 } else if (ke->pfrke_af == AF_INET6){ 1107 if (w->pfrw_dyn->pfid_acnt6++ > 0) 1108 break; 1109 pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); 1110 w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa, 1111 AF_INET6); 1112 w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask, 1113 AF_INET6); 1114 } 1115 break; 1116 } 1117 } 1118 return (0); 1119 } 1120 1121 int 1122 pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) 1123 { 1124 struct pfr_ktableworkq workq; 1125 struct pfr_ktable *p; 1126 int xdel = 0; 1127 1128 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS); 1129 if (pfr_fix_anchor(filter->pfrt_anchor)) 1130 return (EINVAL); 1131 if (pfr_table_count(filter, flags) < 0) 1132 return (ENOENT); 1133 1134 SLIST_INIT(&workq); 1135 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1136 if (pfr_skip_table(filter, p, flags)) 1137 continue; 1138 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) 1139 continue; 1140 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1141 continue; 1142 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1143 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1144 xdel++; 1145 } 1146 if (!(flags & PFR_FLAG_DUMMY)) 1147 pfr_setflags_ktables(&workq); 1148 if (ndel != NULL) 1149 *ndel = xdel; 1150 return (0); 1151 } 1152 1153 int 1154 pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags) 1155 { 1156 struct pfr_ktableworkq addq, changeq; 1157 struct pfr_ktable *p, *q, *r, key; 1158 int i, rv, xadd = 0; 1159 long tzero = time_second; 1160 1161 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1162 SLIST_INIT(&addq); 1163 SLIST_INIT(&changeq); 1164 for (i = 0; i < size; i++) { 1165 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1166 if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, 1167 flags & PFR_FLAG_USERIOCTL)) 1168 senderr(EINVAL); 1169 key.pfrkt_flags |= PFR_TFLAG_ACTIVE; 1170 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1171 if (p == NULL) { 1172 p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); 1173 if (p == NULL) 1174 senderr(ENOMEM); 1175 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1176 if (!pfr_ktable_compare(p, q)) { 1177 pfr_destroy_ktable(p, 0); 1178 goto _skip; 1179 } 1180 } 1181 SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); 1182 xadd++; 1183 if (!key.pfrkt_anchor[0]) 1184 goto _skip; 1185 1186 /* find or create root table */ 1187 bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); 1188 r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1189 if (r != NULL) { 1190 p->pfrkt_root = r; 1191 goto _skip; 1192 } 1193 SLIST_FOREACH(q, &addq, pfrkt_workq) { 1194 if (!pfr_ktable_compare(&key, q)) { 1195 p->pfrkt_root = q; 1196 goto _skip; 1197 } 1198 } 1199 key.pfrkt_flags = 0; 1200 r = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1201 if (r == NULL) 1202 senderr(ENOMEM); 1203 SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); 1204 p->pfrkt_root = r; 1205 } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1206 SLIST_FOREACH(q, &changeq, pfrkt_workq) 1207 if (!pfr_ktable_compare(&key, q)) 1208 goto _skip; 1209 p->pfrkt_nflags = (p->pfrkt_flags & 1210 ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; 1211 SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); 1212 xadd++; 1213 } 1214 _skip: 1215 ; 1216 } 1217 if (!(flags & PFR_FLAG_DUMMY)) { 1218 pfr_insert_ktables(&addq); 1219 pfr_setflags_ktables(&changeq); 1220 } else 1221 pfr_destroy_ktables(&addq, 0); 1222 if (nadd != NULL) 1223 *nadd = xadd; 1224 return (0); 1225 _bad: 1226 pfr_destroy_ktables(&addq, 0); 1227 return (rv); 1228 } 1229 1230 int 1231 pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags) 1232 { 1233 struct pfr_ktableworkq workq; 1234 struct pfr_ktable *p, *q, key; 1235 int i, xdel = 0; 1236 1237 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1238 SLIST_INIT(&workq); 1239 for (i = 0; i < size; i++) { 1240 bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1241 if (pfr_validate_table(&key.pfrkt_t, 0, 1242 flags & PFR_FLAG_USERIOCTL)) 1243 return (EINVAL); 1244 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1245 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1246 SLIST_FOREACH(q, &workq, pfrkt_workq) 1247 if (!pfr_ktable_compare(p, q)) 1248 goto _skip; 1249 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; 1250 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1251 xdel++; 1252 } 1253 _skip: 1254 ; 1255 } 1256 1257 if (!(flags & PFR_FLAG_DUMMY)) 1258 pfr_setflags_ktables(&workq); 1259 if (ndel != NULL) 1260 *ndel = xdel; 1261 return (0); 1262 } 1263 1264 int 1265 pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size, 1266 int flags) 1267 { 1268 struct pfr_ktable *p; 1269 int n, nn; 1270 1271 PF_RULES_RASSERT(); 1272 1273 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1274 if (pfr_fix_anchor(filter->pfrt_anchor)) 1275 return (EINVAL); 1276 n = nn = pfr_table_count(filter, flags); 1277 if (n < 0) 1278 return (ENOENT); 1279 if (n > *size) { 1280 *size = n; 1281 return (0); 1282 } 1283 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1284 if (pfr_skip_table(filter, p, flags)) 1285 continue; 1286 if (n-- <= 0) 1287 continue; 1288 bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl)); 1289 } 1290 1291 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1292 1293 *size = nn; 1294 return (0); 1295 } 1296 1297 int 1298 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size, 1299 int flags) 1300 { 1301 struct pfr_ktable *p; 1302 struct pfr_ktableworkq workq; 1303 int n, nn; 1304 long tzero = time_second; 1305 int pfr_dir, pfr_op; 1306 1307 /* XXX PFR_FLAG_CLSTATS disabled */ 1308 ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); 1309 if (pfr_fix_anchor(filter->pfrt_anchor)) 1310 return (EINVAL); 1311 n = nn = pfr_table_count(filter, flags); 1312 if (n < 0) 1313 return (ENOENT); 1314 if (n > *size) { 1315 *size = n; 1316 return (0); 1317 } 1318 SLIST_INIT(&workq); 1319 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1320 if (pfr_skip_table(filter, p, flags)) 1321 continue; 1322 if (n-- <= 0) 1323 continue; 1324 bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t, 1325 sizeof(struct pfr_table)); 1326 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1327 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1328 tbl->pfrts_packets[pfr_dir][pfr_op] = 1329 counter_u64_fetch( 1330 p->pfrkt_packets[pfr_dir][pfr_op]); 1331 tbl->pfrts_bytes[pfr_dir][pfr_op] = 1332 counter_u64_fetch( 1333 p->pfrkt_bytes[pfr_dir][pfr_op]); 1334 } 1335 } 1336 tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match); 1337 tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch); 1338 tbl->pfrts_tzero = p->pfrkt_tzero; 1339 tbl->pfrts_cnt = p->pfrkt_cnt; 1340 for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++) 1341 tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op]; 1342 tbl++; 1343 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1344 } 1345 if (flags & PFR_FLAG_CLSTATS) 1346 pfr_clstats_ktables(&workq, tzero, 1347 flags & PFR_FLAG_ADDRSTOO); 1348 1349 KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n)); 1350 1351 *size = nn; 1352 return (0); 1353 } 1354 1355 int 1356 pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags) 1357 { 1358 struct pfr_ktableworkq workq; 1359 struct pfr_ktable *p, key; 1360 int i, xzero = 0; 1361 long tzero = time_second; 1362 1363 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1364 SLIST_INIT(&workq); 1365 for (i = 0; i < size; i++) { 1366 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1367 if (pfr_validate_table(&key.pfrkt_t, 0, 0)) 1368 return (EINVAL); 1369 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1370 if (p != NULL) { 1371 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1372 xzero++; 1373 } 1374 } 1375 if (!(flags & PFR_FLAG_DUMMY)) 1376 pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); 1377 if (nzero != NULL) 1378 *nzero = xzero; 1379 return (0); 1380 } 1381 1382 int 1383 pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag, 1384 int *nchange, int *ndel, int flags) 1385 { 1386 struct pfr_ktableworkq workq; 1387 struct pfr_ktable *p, *q, key; 1388 int i, xchange = 0, xdel = 0; 1389 1390 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1391 if ((setflag & ~PFR_TFLAG_USRMASK) || 1392 (clrflag & ~PFR_TFLAG_USRMASK) || 1393 (setflag & clrflag)) 1394 return (EINVAL); 1395 SLIST_INIT(&workq); 1396 for (i = 0; i < size; i++) { 1397 bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); 1398 if (pfr_validate_table(&key.pfrkt_t, 0, 1399 flags & PFR_FLAG_USERIOCTL)) 1400 return (EINVAL); 1401 p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1402 if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { 1403 p->pfrkt_nflags = (p->pfrkt_flags | setflag) & 1404 ~clrflag; 1405 if (p->pfrkt_nflags == p->pfrkt_flags) 1406 goto _skip; 1407 SLIST_FOREACH(q, &workq, pfrkt_workq) 1408 if (!pfr_ktable_compare(p, q)) 1409 goto _skip; 1410 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1411 if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && 1412 (clrflag & PFR_TFLAG_PERSIST) && 1413 !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) 1414 xdel++; 1415 else 1416 xchange++; 1417 } 1418 _skip: 1419 ; 1420 } 1421 if (!(flags & PFR_FLAG_DUMMY)) 1422 pfr_setflags_ktables(&workq); 1423 if (nchange != NULL) 1424 *nchange = xchange; 1425 if (ndel != NULL) 1426 *ndel = xdel; 1427 return (0); 1428 } 1429 1430 int 1431 pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) 1432 { 1433 struct pfr_ktableworkq workq; 1434 struct pfr_ktable *p; 1435 struct pf_ruleset *rs; 1436 int xdel = 0; 1437 1438 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1439 rs = pf_find_or_create_ruleset(trs->pfrt_anchor); 1440 if (rs == NULL) 1441 return (ENOMEM); 1442 SLIST_INIT(&workq); 1443 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1444 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1445 pfr_skip_table(trs, p, 0)) 1446 continue; 1447 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1448 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1449 xdel++; 1450 } 1451 if (!(flags & PFR_FLAG_DUMMY)) { 1452 pfr_setflags_ktables(&workq); 1453 if (ticket != NULL) 1454 *ticket = ++rs->tticket; 1455 rs->topen = 1; 1456 } else 1457 pf_remove_if_empty_ruleset(rs); 1458 if (ndel != NULL) 1459 *ndel = xdel; 1460 return (0); 1461 } 1462 1463 int 1464 pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size, 1465 int *nadd, int *naddr, u_int32_t ticket, int flags) 1466 { 1467 struct pfr_ktableworkq tableq; 1468 struct pfr_kentryworkq addrq; 1469 struct pfr_ktable *kt, *rt, *shadow, key; 1470 struct pfr_kentry *p; 1471 struct pfr_addr *ad; 1472 struct pf_ruleset *rs; 1473 int i, rv, xadd = 0, xaddr = 0; 1474 1475 PF_RULES_WASSERT(); 1476 1477 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); 1478 if (size && !(flags & PFR_FLAG_ADDRSTOO)) 1479 return (EINVAL); 1480 if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, 1481 flags & PFR_FLAG_USERIOCTL)) 1482 return (EINVAL); 1483 rs = pf_find_ruleset(tbl->pfrt_anchor); 1484 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1485 return (EBUSY); 1486 tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; 1487 SLIST_INIT(&tableq); 1488 kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl); 1489 if (kt == NULL) { 1490 kt = pfr_create_ktable(tbl, 0, 1); 1491 if (kt == NULL) 1492 return (ENOMEM); 1493 SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); 1494 xadd++; 1495 if (!tbl->pfrt_anchor[0]) 1496 goto _skip; 1497 1498 /* find or create root table */ 1499 bzero(&key, sizeof(key)); 1500 strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); 1501 rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); 1502 if (rt != NULL) { 1503 kt->pfrkt_root = rt; 1504 goto _skip; 1505 } 1506 rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); 1507 if (rt == NULL) { 1508 pfr_destroy_ktables(&tableq, 0); 1509 return (ENOMEM); 1510 } 1511 SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); 1512 kt->pfrkt_root = rt; 1513 } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) 1514 xadd++; 1515 _skip: 1516 shadow = pfr_create_ktable(tbl, 0, 0); 1517 if (shadow == NULL) { 1518 pfr_destroy_ktables(&tableq, 0); 1519 return (ENOMEM); 1520 } 1521 SLIST_INIT(&addrq); 1522 for (i = 0, ad = addr; i < size; i++, ad++) { 1523 if (pfr_validate_addr(ad)) 1524 senderr(EINVAL); 1525 if (pfr_lookup_addr(shadow, ad, 1) != NULL) 1526 continue; 1527 p = pfr_create_kentry(ad, 1528 (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0); 1529 if (p == NULL) 1530 senderr(ENOMEM); 1531 if (pfr_route_kentry(shadow, p)) { 1532 pfr_destroy_kentry(p); 1533 continue; 1534 } 1535 SLIST_INSERT_HEAD(&addrq, p, pfrke_workq); 1536 xaddr++; 1537 } 1538 if (!(flags & PFR_FLAG_DUMMY)) { 1539 if (kt->pfrkt_shadow != NULL) 1540 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1541 kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; 1542 pfr_insert_ktables(&tableq); 1543 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 1544 xaddr : NO_ADDRESSES; 1545 kt->pfrkt_shadow = shadow; 1546 } else { 1547 pfr_clean_node_mask(shadow, &addrq); 1548 pfr_destroy_ktable(shadow, 0); 1549 pfr_destroy_ktables(&tableq, 0); 1550 pfr_destroy_kentries(&addrq); 1551 } 1552 if (nadd != NULL) 1553 *nadd = xadd; 1554 if (naddr != NULL) 1555 *naddr = xaddr; 1556 return (0); 1557 _bad: 1558 pfr_destroy_ktable(shadow, 0); 1559 pfr_destroy_ktables(&tableq, 0); 1560 pfr_destroy_kentries(&addrq); 1561 return (rv); 1562 } 1563 1564 int 1565 pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) 1566 { 1567 struct pfr_ktableworkq workq; 1568 struct pfr_ktable *p; 1569 struct pf_ruleset *rs; 1570 int xdel = 0; 1571 1572 PF_RULES_WASSERT(); 1573 1574 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1575 rs = pf_find_ruleset(trs->pfrt_anchor); 1576 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1577 return (0); 1578 SLIST_INIT(&workq); 1579 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1580 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1581 pfr_skip_table(trs, p, 0)) 1582 continue; 1583 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; 1584 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1585 xdel++; 1586 } 1587 if (!(flags & PFR_FLAG_DUMMY)) { 1588 pfr_setflags_ktables(&workq); 1589 rs->topen = 0; 1590 pf_remove_if_empty_ruleset(rs); 1591 } 1592 if (ndel != NULL) 1593 *ndel = xdel; 1594 return (0); 1595 } 1596 1597 int 1598 pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, 1599 int *nchange, int flags) 1600 { 1601 struct pfr_ktable *p, *q; 1602 struct pfr_ktableworkq workq; 1603 struct pf_ruleset *rs; 1604 int xadd = 0, xchange = 0; 1605 long tzero = time_second; 1606 1607 PF_RULES_WASSERT(); 1608 1609 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); 1610 rs = pf_find_ruleset(trs->pfrt_anchor); 1611 if (rs == NULL || !rs->topen || ticket != rs->tticket) 1612 return (EBUSY); 1613 1614 SLIST_INIT(&workq); 1615 RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { 1616 if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || 1617 pfr_skip_table(trs, p, 0)) 1618 continue; 1619 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); 1620 if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) 1621 xchange++; 1622 else 1623 xadd++; 1624 } 1625 1626 if (!(flags & PFR_FLAG_DUMMY)) { 1627 for (p = SLIST_FIRST(&workq); p != NULL; p = q) { 1628 q = SLIST_NEXT(p, pfrkt_workq); 1629 pfr_commit_ktable(p, tzero); 1630 } 1631 rs->topen = 0; 1632 pf_remove_if_empty_ruleset(rs); 1633 } 1634 if (nadd != NULL) 1635 *nadd = xadd; 1636 if (nchange != NULL) 1637 *nchange = xchange; 1638 1639 return (0); 1640 } 1641 1642 static void 1643 pfr_commit_ktable(struct pfr_ktable *kt, long tzero) 1644 { 1645 struct pfr_ktable *shadow = kt->pfrkt_shadow; 1646 int nflags; 1647 1648 PF_RULES_WASSERT(); 1649 1650 if (shadow->pfrkt_cnt == NO_ADDRESSES) { 1651 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 1652 pfr_clstats_ktable(kt, tzero, 1); 1653 } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { 1654 /* kt might contain addresses */ 1655 struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; 1656 struct pfr_kentry *p, *q, *next; 1657 struct pfr_addr ad; 1658 1659 pfr_enqueue_addrs(shadow, &addrq, NULL, 0); 1660 pfr_mark_addrs(kt); 1661 SLIST_INIT(&addq); 1662 SLIST_INIT(&changeq); 1663 SLIST_INIT(&delq); 1664 SLIST_INIT(&garbageq); 1665 pfr_clean_node_mask(shadow, &addrq); 1666 for (p = SLIST_FIRST(&addrq); p != NULL; p = next) { 1667 next = SLIST_NEXT(p, pfrke_workq); /* XXX */ 1668 pfr_copyout_addr(&ad, p); 1669 q = pfr_lookup_addr(kt, &ad, 1); 1670 if (q != NULL) { 1671 if (q->pfrke_not != p->pfrke_not) 1672 SLIST_INSERT_HEAD(&changeq, q, 1673 pfrke_workq); 1674 q->pfrke_mark = 1; 1675 SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); 1676 } else { 1677 p->pfrke_counters.pfrkc_tzero = tzero; 1678 SLIST_INSERT_HEAD(&addq, p, pfrke_workq); 1679 } 1680 } 1681 pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY); 1682 pfr_insert_kentries(kt, &addq, tzero); 1683 pfr_remove_kentries(kt, &delq); 1684 pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG); 1685 pfr_destroy_kentries(&garbageq); 1686 } else { 1687 /* kt cannot contain addresses */ 1688 SWAP(struct radix_node_head *, kt->pfrkt_ip4, 1689 shadow->pfrkt_ip4); 1690 SWAP(struct radix_node_head *, kt->pfrkt_ip6, 1691 shadow->pfrkt_ip6); 1692 SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt); 1693 pfr_clstats_ktable(kt, tzero, 1); 1694 } 1695 nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) | 1696 (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) 1697 & ~PFR_TFLAG_INACTIVE; 1698 pfr_destroy_ktable(shadow, 0); 1699 kt->pfrkt_shadow = NULL; 1700 pfr_setflags_ktable(kt, nflags); 1701 } 1702 1703 static int 1704 pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) 1705 { 1706 int i; 1707 1708 if (!tbl->pfrt_name[0]) 1709 return (-1); 1710 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR)) 1711 return (-1); 1712 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) 1713 return (-1); 1714 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) 1715 if (tbl->pfrt_name[i]) 1716 return (-1); 1717 if (pfr_fix_anchor(tbl->pfrt_anchor)) 1718 return (-1); 1719 if (tbl->pfrt_flags & ~allowedflags) 1720 return (-1); 1721 return (0); 1722 } 1723 1724 /* 1725 * Rewrite anchors referenced by tables to remove slashes 1726 * and check for validity. 1727 */ 1728 static int 1729 pfr_fix_anchor(char *anchor) 1730 { 1731 size_t siz = MAXPATHLEN; 1732 int i; 1733 1734 if (anchor[0] == '/') { 1735 char *path; 1736 int off; 1737 1738 path = anchor; 1739 off = 1; 1740 while (*++path == '/') 1741 off++; 1742 bcopy(path, anchor, siz - off); 1743 memset(anchor + siz - off, 0, off); 1744 } 1745 if (anchor[siz - 1]) 1746 return (-1); 1747 for (i = strlen(anchor); i < siz; i++) 1748 if (anchor[i]) 1749 return (-1); 1750 return (0); 1751 } 1752 1753 int 1754 pfr_table_count(struct pfr_table *filter, int flags) 1755 { 1756 struct pf_ruleset *rs; 1757 1758 PF_RULES_ASSERT(); 1759 1760 if (flags & PFR_FLAG_ALLRSETS) 1761 return (V_pfr_ktable_cnt); 1762 if (filter->pfrt_anchor[0]) { 1763 rs = pf_find_ruleset(filter->pfrt_anchor); 1764 return ((rs != NULL) ? rs->tables : -1); 1765 } 1766 return (pf_main_ruleset.tables); 1767 } 1768 1769 static int 1770 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) 1771 { 1772 if (flags & PFR_FLAG_ALLRSETS) 1773 return (0); 1774 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) 1775 return (1); 1776 return (0); 1777 } 1778 1779 static void 1780 pfr_insert_ktables(struct pfr_ktableworkq *workq) 1781 { 1782 struct pfr_ktable *p; 1783 1784 SLIST_FOREACH(p, workq, pfrkt_workq) 1785 pfr_insert_ktable(p); 1786 } 1787 1788 static void 1789 pfr_insert_ktable(struct pfr_ktable *kt) 1790 { 1791 1792 PF_RULES_WASSERT(); 1793 1794 RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt); 1795 V_pfr_ktable_cnt++; 1796 if (kt->pfrkt_root != NULL) 1797 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) 1798 pfr_setflags_ktable(kt->pfrkt_root, 1799 kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); 1800 } 1801 1802 static void 1803 pfr_setflags_ktables(struct pfr_ktableworkq *workq) 1804 { 1805 struct pfr_ktable *p, *q; 1806 1807 for (p = SLIST_FIRST(workq); p; p = q) { 1808 q = SLIST_NEXT(p, pfrkt_workq); 1809 pfr_setflags_ktable(p, p->pfrkt_nflags); 1810 } 1811 } 1812 1813 static void 1814 pfr_setflags_ktable(struct pfr_ktable *kt, int newf) 1815 { 1816 struct pfr_kentryworkq addrq; 1817 1818 PF_RULES_WASSERT(); 1819 1820 if (!(newf & PFR_TFLAG_REFERENCED) && 1821 !(newf & PFR_TFLAG_REFDANCHOR) && 1822 !(newf & PFR_TFLAG_PERSIST)) 1823 newf &= ~PFR_TFLAG_ACTIVE; 1824 if (!(newf & PFR_TFLAG_ACTIVE)) 1825 newf &= ~PFR_TFLAG_USRMASK; 1826 if (!(newf & PFR_TFLAG_SETMASK)) { 1827 RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt); 1828 if (kt->pfrkt_root != NULL) 1829 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) 1830 pfr_setflags_ktable(kt->pfrkt_root, 1831 kt->pfrkt_root->pfrkt_flags & 1832 ~PFR_TFLAG_REFDANCHOR); 1833 pfr_destroy_ktable(kt, 1); 1834 V_pfr_ktable_cnt--; 1835 return; 1836 } 1837 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { 1838 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1839 pfr_remove_kentries(kt, &addrq); 1840 } 1841 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) { 1842 pfr_destroy_ktable(kt->pfrkt_shadow, 1); 1843 kt->pfrkt_shadow = NULL; 1844 } 1845 kt->pfrkt_flags = newf; 1846 } 1847 1848 static void 1849 pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse) 1850 { 1851 struct pfr_ktable *p; 1852 1853 SLIST_FOREACH(p, workq, pfrkt_workq) 1854 pfr_clstats_ktable(p, tzero, recurse); 1855 } 1856 1857 static void 1858 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse) 1859 { 1860 struct pfr_kentryworkq addrq; 1861 int pfr_dir, pfr_op; 1862 1863 if (recurse) { 1864 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1865 pfr_clstats_kentries(kt, &addrq, tzero, 0); 1866 } 1867 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1868 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1869 counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]); 1870 counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]); 1871 } 1872 } 1873 counter_u64_zero(kt->pfrkt_match); 1874 counter_u64_zero(kt->pfrkt_nomatch); 1875 kt->pfrkt_tzero = tzero; 1876 } 1877 1878 static struct pfr_ktable * 1879 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) 1880 { 1881 struct pfr_ktable *kt; 1882 struct pf_ruleset *rs; 1883 int pfr_dir, pfr_op; 1884 1885 PF_RULES_WASSERT(); 1886 1887 kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO); 1888 if (kt == NULL) 1889 return (NULL); 1890 kt->pfrkt_t = *tbl; 1891 1892 if (attachruleset) { 1893 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor); 1894 if (!rs) { 1895 pfr_destroy_ktable(kt, 0); 1896 return (NULL); 1897 } 1898 kt->pfrkt_rs = rs; 1899 rs->tables++; 1900 } 1901 1902 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1903 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1904 kt->pfrkt_packets[pfr_dir][pfr_op] = 1905 counter_u64_alloc(M_NOWAIT); 1906 if (! kt->pfrkt_packets[pfr_dir][pfr_op]) { 1907 pfr_destroy_ktable(kt, 0); 1908 return (NULL); 1909 } 1910 kt->pfrkt_bytes[pfr_dir][pfr_op] = 1911 counter_u64_alloc(M_NOWAIT); 1912 if (! kt->pfrkt_bytes[pfr_dir][pfr_op]) { 1913 pfr_destroy_ktable(kt, 0); 1914 return (NULL); 1915 } 1916 } 1917 } 1918 kt->pfrkt_match = counter_u64_alloc(M_NOWAIT); 1919 if (! kt->pfrkt_match) { 1920 pfr_destroy_ktable(kt, 0); 1921 return (NULL); 1922 } 1923 1924 kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT); 1925 if (! kt->pfrkt_nomatch) { 1926 pfr_destroy_ktable(kt, 0); 1927 return (NULL); 1928 } 1929 1930 if (!rn_inithead((void **)&kt->pfrkt_ip4, 1931 offsetof(struct sockaddr_in, sin_addr) * 8) || 1932 !rn_inithead((void **)&kt->pfrkt_ip6, 1933 offsetof(struct sockaddr_in6, sin6_addr) * 8)) { 1934 pfr_destroy_ktable(kt, 0); 1935 return (NULL); 1936 } 1937 kt->pfrkt_tzero = tzero; 1938 1939 return (kt); 1940 } 1941 1942 static void 1943 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) 1944 { 1945 struct pfr_ktable *p, *q; 1946 1947 for (p = SLIST_FIRST(workq); p; p = q) { 1948 q = SLIST_NEXT(p, pfrkt_workq); 1949 pfr_destroy_ktable(p, flushaddr); 1950 } 1951 } 1952 1953 static void 1954 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) 1955 { 1956 struct pfr_kentryworkq addrq; 1957 int pfr_dir, pfr_op; 1958 1959 if (flushaddr) { 1960 pfr_enqueue_addrs(kt, &addrq, NULL, 0); 1961 pfr_clean_node_mask(kt, &addrq); 1962 pfr_destroy_kentries(&addrq); 1963 } 1964 if (kt->pfrkt_ip4 != NULL) 1965 rn_detachhead((void **)&kt->pfrkt_ip4); 1966 if (kt->pfrkt_ip6 != NULL) 1967 rn_detachhead((void **)&kt->pfrkt_ip6); 1968 if (kt->pfrkt_shadow != NULL) 1969 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); 1970 if (kt->pfrkt_rs != NULL) { 1971 kt->pfrkt_rs->tables--; 1972 pf_remove_if_empty_ruleset(kt->pfrkt_rs); 1973 } 1974 for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) { 1975 for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) { 1976 counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]); 1977 counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]); 1978 } 1979 } 1980 counter_u64_free(kt->pfrkt_match); 1981 counter_u64_free(kt->pfrkt_nomatch); 1982 1983 free(kt, M_PFTABLE); 1984 } 1985 1986 static int 1987 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) 1988 { 1989 int d; 1990 1991 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) 1992 return (d); 1993 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); 1994 } 1995 1996 static struct pfr_ktable * 1997 pfr_lookup_table(struct pfr_table *tbl) 1998 { 1999 /* struct pfr_ktable start like a struct pfr_table */ 2000 return (RB_FIND(pfr_ktablehead, &V_pfr_ktables, 2001 (struct pfr_ktable *)tbl)); 2002 } 2003 2004 int 2005 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) 2006 { 2007 struct pfr_kentry *ke = NULL; 2008 int match; 2009 2010 PF_RULES_RASSERT(); 2011 2012 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2013 kt = kt->pfrkt_root; 2014 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2015 return (0); 2016 2017 switch (af) { 2018 #ifdef INET 2019 case AF_INET: 2020 { 2021 struct sockaddr_in sin; 2022 2023 bzero(&sin, sizeof(sin)); 2024 sin.sin_len = sizeof(sin); 2025 sin.sin_family = AF_INET; 2026 sin.sin_addr.s_addr = a->addr32[0]; 2027 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 2028 if (ke && KENTRY_RNF_ROOT(ke)) 2029 ke = NULL; 2030 break; 2031 } 2032 #endif /* INET */ 2033 #ifdef INET6 2034 case AF_INET6: 2035 { 2036 struct sockaddr_in6 sin6; 2037 2038 bzero(&sin6, sizeof(sin6)); 2039 sin6.sin6_len = sizeof(sin6); 2040 sin6.sin6_family = AF_INET6; 2041 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 2042 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 2043 if (ke && KENTRY_RNF_ROOT(ke)) 2044 ke = NULL; 2045 break; 2046 } 2047 #endif /* INET6 */ 2048 } 2049 match = (ke && !ke->pfrke_not); 2050 if (match) 2051 counter_u64_add(kt->pfrkt_match, 1); 2052 else 2053 counter_u64_add(kt->pfrkt_nomatch, 1); 2054 return (match); 2055 } 2056 2057 void 2058 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, 2059 u_int64_t len, int dir_out, int op_pass, int notrule) 2060 { 2061 struct pfr_kentry *ke = NULL; 2062 2063 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2064 kt = kt->pfrkt_root; 2065 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2066 return; 2067 2068 switch (af) { 2069 #ifdef INET 2070 case AF_INET: 2071 { 2072 struct sockaddr_in sin; 2073 2074 bzero(&sin, sizeof(sin)); 2075 sin.sin_len = sizeof(sin); 2076 sin.sin_family = AF_INET; 2077 sin.sin_addr.s_addr = a->addr32[0]; 2078 ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh); 2079 if (ke && KENTRY_RNF_ROOT(ke)) 2080 ke = NULL; 2081 break; 2082 } 2083 #endif /* INET */ 2084 #ifdef INET6 2085 case AF_INET6: 2086 { 2087 struct sockaddr_in6 sin6; 2088 2089 bzero(&sin6, sizeof(sin6)); 2090 sin6.sin6_len = sizeof(sin6); 2091 sin6.sin6_family = AF_INET6; 2092 bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr)); 2093 ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh); 2094 if (ke && KENTRY_RNF_ROOT(ke)) 2095 ke = NULL; 2096 break; 2097 } 2098 #endif /* INET6 */ 2099 default: 2100 panic("%s: unknown address family %u", __func__, af); 2101 } 2102 if ((ke == NULL || ke->pfrke_not) != notrule) { 2103 if (op_pass != PFR_OP_PASS) 2104 DPFPRINTF(PF_DEBUG_URGENT, 2105 ("pfr_update_stats: assertion failed.\n")); 2106 op_pass = PFR_OP_XPASS; 2107 } 2108 counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1); 2109 counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len); 2110 if (ke != NULL && op_pass != PFR_OP_XPASS && 2111 (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { 2112 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters, 2113 dir_out, op_pass, PFR_TYPE_PACKETS), 1); 2114 counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters, 2115 dir_out, op_pass, PFR_TYPE_BYTES), len); 2116 } 2117 } 2118 2119 struct pfr_ktable * 2120 pfr_attach_table(struct pf_ruleset *rs, char *name) 2121 { 2122 struct pfr_ktable *kt, *rt; 2123 struct pfr_table tbl; 2124 struct pf_anchor *ac = rs->anchor; 2125 2126 PF_RULES_WASSERT(); 2127 2128 bzero(&tbl, sizeof(tbl)); 2129 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); 2130 if (ac != NULL) 2131 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor)); 2132 kt = pfr_lookup_table(&tbl); 2133 if (kt == NULL) { 2134 kt = pfr_create_ktable(&tbl, time_second, 1); 2135 if (kt == NULL) 2136 return (NULL); 2137 if (ac != NULL) { 2138 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); 2139 rt = pfr_lookup_table(&tbl); 2140 if (rt == NULL) { 2141 rt = pfr_create_ktable(&tbl, 0, 1); 2142 if (rt == NULL) { 2143 pfr_destroy_ktable(kt, 0); 2144 return (NULL); 2145 } 2146 pfr_insert_ktable(rt); 2147 } 2148 kt->pfrkt_root = rt; 2149 } 2150 pfr_insert_ktable(kt); 2151 } 2152 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) 2153 pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED); 2154 return (kt); 2155 } 2156 2157 void 2158 pfr_detach_table(struct pfr_ktable *kt) 2159 { 2160 2161 PF_RULES_WASSERT(); 2162 KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n", 2163 __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE])); 2164 2165 if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) 2166 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); 2167 } 2168 2169 int 2170 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, 2171 sa_family_t af) 2172 { 2173 struct pf_addr *addr, *cur, *mask; 2174 union sockaddr_union uaddr, umask; 2175 struct pfr_kentry *ke, *ke2 = NULL; 2176 int idx = -1, use_counter = 0; 2177 2178 switch (af) { 2179 case AF_INET: 2180 uaddr.sin.sin_len = sizeof(struct sockaddr_in); 2181 uaddr.sin.sin_family = AF_INET; 2182 break; 2183 case AF_INET6: 2184 uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6); 2185 uaddr.sin6.sin6_family = AF_INET6; 2186 break; 2187 } 2188 addr = SUNION2PF(&uaddr, af); 2189 2190 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 2191 kt = kt->pfrkt_root; 2192 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) 2193 return (-1); 2194 2195 if (pidx != NULL) 2196 idx = *pidx; 2197 if (counter != NULL && idx >= 0) 2198 use_counter = 1; 2199 if (idx < 0) 2200 idx = 0; 2201 2202 _next_block: 2203 ke = pfr_kentry_byidx(kt, idx, af); 2204 if (ke == NULL) { 2205 counter_u64_add(kt->pfrkt_nomatch, 1); 2206 return (1); 2207 } 2208 pfr_prepare_network(&umask, af, ke->pfrke_net); 2209 cur = SUNION2PF(&ke->pfrke_sa, af); 2210 mask = SUNION2PF(&umask, af); 2211 2212 if (use_counter) { 2213 /* is supplied address within block? */ 2214 if (!PF_MATCHA(0, cur, mask, counter, af)) { 2215 /* no, go to next block in table */ 2216 idx++; 2217 use_counter = 0; 2218 goto _next_block; 2219 } 2220 PF_ACPY(addr, counter, af); 2221 } else { 2222 /* use first address of block */ 2223 PF_ACPY(addr, cur, af); 2224 } 2225 2226 if (!KENTRY_NETWORK(ke)) { 2227 /* this is a single IP address - no possible nested block */ 2228 PF_ACPY(counter, addr, af); 2229 *pidx = idx; 2230 counter_u64_add(kt->pfrkt_match, 1); 2231 return (0); 2232 } 2233 for (;;) { 2234 /* we don't want to use a nested block */ 2235 switch (af) { 2236 case AF_INET: 2237 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2238 &kt->pfrkt_ip4->rh); 2239 break; 2240 case AF_INET6: 2241 ke2 = (struct pfr_kentry *)rn_match(&uaddr, 2242 &kt->pfrkt_ip6->rh); 2243 break; 2244 } 2245 /* no need to check KENTRY_RNF_ROOT() here */ 2246 if (ke2 == ke) { 2247 /* lookup return the same block - perfect */ 2248 PF_ACPY(counter, addr, af); 2249 *pidx = idx; 2250 counter_u64_add(kt->pfrkt_match, 1); 2251 return (0); 2252 } 2253 2254 /* we need to increase the counter past the nested block */ 2255 pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net); 2256 PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af); 2257 PF_AINC(addr, af); 2258 if (!PF_MATCHA(0, cur, mask, addr, af)) { 2259 /* ok, we reached the end of our main block */ 2260 /* go to next block in table */ 2261 idx++; 2262 use_counter = 0; 2263 goto _next_block; 2264 } 2265 } 2266 } 2267 2268 static struct pfr_kentry * 2269 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) 2270 { 2271 struct pfr_walktree w; 2272 2273 bzero(&w, sizeof(w)); 2274 w.pfrw_op = PFRW_POOL_GET; 2275 w.pfrw_cnt = idx; 2276 2277 switch (af) { 2278 #ifdef INET 2279 case AF_INET: 2280 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2281 return (w.pfrw_kentry); 2282 #endif /* INET */ 2283 #ifdef INET6 2284 case AF_INET6: 2285 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2286 return (w.pfrw_kentry); 2287 #endif /* INET6 */ 2288 default: 2289 return (NULL); 2290 } 2291 } 2292 2293 void 2294 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn) 2295 { 2296 struct pfr_walktree w; 2297 2298 bzero(&w, sizeof(w)); 2299 w.pfrw_op = PFRW_DYNADDR_UPDATE; 2300 w.pfrw_dyn = dyn; 2301 2302 dyn->pfid_acnt4 = 0; 2303 dyn->pfid_acnt6 = 0; 2304 if (!dyn->pfid_af || dyn->pfid_af == AF_INET) 2305 kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w); 2306 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) 2307 kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w); 2308 } 2309