/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_table.c,v 1.79 2008/10/08 06:24:50 mcbride Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof(sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof(sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af)==AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af)==AF_INET)?32:128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

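/*
 * Context for pfr_walktree(): pfrw_op selects what the radix tree walk
 * does with each entry, and the pfrw_1 union carries the corresponding
 * output cursor (address buffer, stats buffer, work queue, ...).
 */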
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	pfrw_free;
	int	pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_z);
#define	V_pfr_kentry_z		VNET(pfr_kentry_z)
VNET_DEFINE_STATIC(uma_zone_t, pfr_kentry_counter_z);
#define	V_pfr_kentry_counter_z	VNET(pfr_kentry_counter_z)

static struct pf_addr	 pfr_ffaddr = {
	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
};

static void		 pfr_copyout_astats(struct pfr_astats *,
			    const struct pfr_kentry *,
			    const struct pfr_walktree *);
static void		 pfr_copyout_addr(struct pfr_addr *,
			    const struct pfr_kentry *ke);
static int		 pfr_validate_addr(struct pfr_addr *);
static void		 pfr_enqueue_addrs(struct pfr_ktable *,
			    struct pfr_kentryworkq *, int *, int);
static void		 pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry
			*pfr_lookup_addr(struct pfr_ktable *,
			    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
static void		 pfr_destroy_kentries(struct pfr_kentryworkq *);
static void		 pfr_destroy_kentry(struct pfr_kentry *);
static void		 pfr_insert_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long);
static void		 pfr_remove_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static void		 pfr_clstats_kentries(struct pfr_ktable *,
			    struct pfr_kentryworkq *, long, int);
static void		 pfr_reset_feedback(struct pfr_addr *, int);
static void		 pfr_prepare_network(union sockaddr_union *, int, int);
static int		 pfr_route_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_unroute_kentry(struct pfr_ktable *,
			    struct pfr_kentry *);
static int		 pfr_walktree(struct radix_node *, void *);
static int		 pfr_validate_table(struct pfr_table *, int, int);
static int		 pfr_fix_anchor(char *);
static void		 pfr_commit_ktable(struct pfr_ktable *, long);
static void		 pfr_insert_ktables(struct pfr_ktableworkq *);
static void		 pfr_insert_ktable(struct pfr_ktable *);
static void		 pfr_setflags_ktables(struct pfr_ktableworkq *);
static void		 pfr_setflags_ktable(struct pfr_ktable *, int);
static void		 pfr_clstats_ktables(struct pfr_ktableworkq *, long,
			    int);
static void		 pfr_clstats_ktable(struct pfr_ktable *, long, int);
static struct pfr_ktable
			*pfr_create_ktable(struct pfr_table *, long, int);
static void		 pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void		 pfr_destroy_ktable(struct pfr_ktable *, int);
static int		 pfr_ktable_compare(struct pfr_ktable *,
			    struct pfr_ktable *);
static struct pfr_ktable
			*pfr_lookup_table(struct pfr_table *);
static void		 pfr_clean_node_mask(struct pfr_ktable *,
			    struct pfr_kentryworkq *);
static int		 pfr_skip_table(struct pfr_table *,
			    struct pfr_ktable *, int);
static struct pfr_kentry
			*pfr_kentry_byidx(struct pfr_ktable *, int, int);

static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

VNET_DEFINE_STATIC(struct pfr_ktablehead, pfr_ktables);
#define	V_pfr_ktables	VNET(pfr_ktables)

VNET_DEFINE_STATIC(struct pfr_table, pfr_nulltable);
#define	V_pfr_nulltable	VNET(pfr_nulltable)

VNET_DEFINE_STATIC(int, pfr_ktable_cnt);
#define	V_pfr_ktable_cnt	VNET(pfr_ktable_cnt)

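/*
 * Set up the per-vnet UMA zones used for table entries and their
 * per-CPU counters, and wire the entry zone into the
 * PF_LIMIT_TABLE_ENTRIES resource limit.
 */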
void
pfr_initialize(void)
{

	V_pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	V_pfr_kentry_z = uma_zcreate("pf table entries",
	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
}

void
pfr_cleanup(void)
{

	uma_zdestroy(V_pfr_kentry_z);
	uma_zdestroy(V_pfr_kentry_counter_z);
}

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		KASSERT(kt->pfrkt_cnt == 0, ("%s: non-null pfrkt_cnt", __func__));
	}
	return (0);
}

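/*
 * Add a list of addresses to a table.  A scratch table (tmpkt) is used
 * to catch duplicates within the request itself; with PFR_FLAG_FEEDBACK
 * each pfr_addr is annotated with the per-address result.
 */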
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		*ad;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		q = pfr_lookup_addr(tmpkt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad->pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else
				ad->pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad->pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);
	else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

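/*
 * Delete a list of addresses from a table.  Duplicate requests are
 * detected via the pfrke_mark flag; the comment below explains how the
 * initial mark-clearing pass is chosen.
 */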
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xdel = 0, log = 1;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0, ad = addr; i < size; i++, ad++) {
			if (pfr_validate_addr(ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad->pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad->pfra_not)
				ad->pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad->pfra_fback = PFR_FB_DUPLICATE;
			else
				ad->pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad->pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_remove_kentries(kt, &workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

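/*
 * Replace the contents of a table with the given address list: unknown
 * addresses are added, addresses missing from the list are deleted, and
 * entries whose negation flag changed are queued on changeq.  With
 * PFR_FLAG_FEEDBACK the deleted addresses are appended to the user
 * buffer (up to *size2 slots).
 */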
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&V_pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		/*
		 * XXXGL: understand pf_if usage of this function
		 * and make ad a moving pointer
		 */
		bcopy(addr + i, &ad, sizeof(ad));
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			bcopy(&ad, addr + i, sizeof(ad));
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			bcopy(&ad, addr + size + i, sizeof(ad));
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, xmatch = 0;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			return (EINVAL);
		if (ADDR_NETWORK(ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(ad, p);
		ad->pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	KASSERT(w.pfrw_free == 0, ("%s: corruption detected (%d)", __func__,
	    w.pfrw_free));

	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	long			 tzero = time_second;

	PF_RULES_RASSERT();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	/*
	 * The flags below are for backward compatibility.  It was possible
	 * to have a table without per-entry counters.  Now they are always
	 * allocated; we just discard the data when reading it if the table
	 * is not configured to have counters.
	 */
	w.pfrw_flags = kt->pfrkt_flags;
	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(kt, &workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	int			 i, rv, xzero = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad->pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_kentries(kt, &workq, 0, 0);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size);
	return (rv);
}

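/*
 * Sanity-check a pfr_addr coming from userland: the address family must
 * be known, the prefix length must fit the family, no address bits may
 * be set beyond the prefix, and the negation and feedback fields must
 * hold valid values.
 */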
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#ifdef INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_head	*head = NULL;
	struct pfr_kentry	*ke;

	PF_RULES_ASSERT();

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = &kt->pfrkt_ip4->rh;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = &kt->pfrkt_ip6->rh;
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, bool counters)
{
	struct pfr_kentry	*ke;
	counter_u64_t		 c;

	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
	if (ke == NULL)
		return (NULL);

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_counters.pfrkc_tzero = 0;
	if (counters) {
		c = uma_zalloc_pcpu(V_pfr_kentry_counter_z, M_NOWAIT | M_ZERO);
		if (c == NULL) {
			pfr_destroy_kentry(ke);
			return (NULL);
		}
		ke->pfrke_counters.pfrkc_counters = c;
	}
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	counter_u64_t c;

	if ((c = ke->pfrke_counters.pfrkc_counters) != NULL)
		uma_zfree_pcpu(V_pfr_kentry_counter_z, c);
	uma_zfree(V_pfr_kentry_z, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_counters.pfrkc_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

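/*
 * Insert a single address into a table: allocate the entry, route it
 * into the radix tree and bump the table's entry count.  Silently
 * succeeds if the address is already present.
 */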
int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
	if (p == NULL)
		return (ENOMEM);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_counters.pfrkc_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    long tzero, int negchange)
{
	struct pfr_kentry	*p;
	int			 i;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
			for (i = 0; i < PFR_NUM_COUNTERS; i++)
				counter_u64_zero(
				    p->pfrke_counters.pfrkc_counters + i);
		p->pfrke_counters.pfrkc_tzero = tzero;
	}
}

static void
pfr_reset_feedback(struct pfr_addr *addr, int size)
{
	struct pfr_addr	*ad;
	int		 i;

	for (i = 0, ad = addr; i < size; i++, ad++)
		ad->pfra_fback = PFR_FB_NONE;
}

static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof(*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof(sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof(sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

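/*
 * Link a kentry into the table's address-family specific radix tree.
 * Network entries are inserted with their prefix mask, host entries
 * without one.  Returns -1 if the radix code refuses the insertion.
 */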
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	PF_RULES_WASSERT();

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_head	*head = NULL;

	if (ke->pfrke_af == AF_INET)
		head = &kt->pfrkt_ip4->rh;
	else if (ke->pfrke_af == AF_INET6)
		head = &kt->pfrkt_ip6->rh;

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, const struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

static void
pfr_copyout_astats(struct pfr_astats *as, const struct pfr_kentry *ke,
    const struct pfr_walktree *w)
{
	int dir, op;
	const struct pfr_kcounters *kc = &ke->pfrke_counters;

	pfr_copyout_addr(&as->pfras_a, ke);
	as->pfras_tzero = kc->pfrkc_tzero;

	if (! (w->pfrw_flags & PFR_TFLAG_COUNTERS)) {
		bzero(as->pfras_packets, sizeof(as->pfras_packets));
		bzero(as->pfras_bytes, sizeof(as->pfras_bytes));
		as->pfras_a.pfra_fback = PFR_FB_NOCOUNT;
		return;
	}

	for (dir = 0; dir < PFR_DIR_MAX; dir++) {
		for (op = 0; op < PFR_OP_ADDR_MAX; op ++) {
			as->pfras_packets[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_PACKETS));
			as->pfras_bytes[dir][op] = counter_u64_fetch(
			    pfr_kentry_counter(kc, dir, op, PFR_TYPE_BYTES));
		}
	}
}

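/*
 * Callback invoked for every radix node during rnh_walktree(); the
 * action taken for each entry is selected by w->pfrw_op (mark, sweep,
 * enqueue, copy out, pick an entry by index, or update a dynaddr).
 */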
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			pfr_copyout_addr(w->pfrw_addr, ke);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_astats(&as, ke, w);

			bcopy(&as, w->pfrw_astats, sizeof(as));
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
	    {
		union sockaddr_union	pfr_mask;

		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(&pfr_mask,
			    AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(&ke->pfrke_sa,
			    AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(&pfr_mask,
			    AF_INET6);
		}
		break;
	    }
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	PF_RULES_RASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_t, tbl++, sizeof(*tbl));
	}

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	long			 tzero = time_second;
	int			 pfr_dir, pfr_op;

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
		    sizeof(struct pfr_table));
		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
				tbl->pfrts_packets[pfr_dir][pfr_op] =
				    counter_u64_fetch(
					p->pfrkt_packets[pfr_dir][pfr_op]);
				tbl->pfrts_bytes[pfr_dir][pfr_op] =
				    counter_u64_fetch(
					p->pfrkt_bytes[pfr_dir][pfr_op]);
			}
		}
		tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
		tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
		tbl->pfrts_tzero = p->pfrkt_tzero;
		tbl->pfrts_cnt = p->pfrkt_cnt;
		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
		tbl++;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);

	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));

	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	long			 tzero = time_second;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_setflags_ktables(&workq);
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

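/*
 * Load a table definition (and optionally its addresses) into the
 * inactive set of a ruleset transaction.  The addresses are collected
 * in a shadow ktable that is attached to the existing table and only
 * swapped in at commit time.
 */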
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		*ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0, ad = addr; i < size; i++, ad++) {
		if (pfr_validate_addr(ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(ad,
		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	long			 tzero = time_second;

	PF_RULES_WASSERT();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

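/*
 * Fold a committed shadow table into its active table: if the active
 * table already holds addresses the two sets are merged entry by entry,
 * otherwise the radix heads are simply swapped.  The shadow is then
 * destroyed and the table flags updated.
 */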
static void
pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	PF_RULES_WASSERT();

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_counters.pfrkc_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	PF_RULES_ASSERT();

	if (flags & PFR_FLAG_ALLRSETS)
		return (V_pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();

	RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
	V_pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	addrq;

	PF_RULES_WASSERT();

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		V_pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
{
	struct pfr_ktable	*p;

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;
	int			 pfr_dir, pfr_op;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(kt, &addrq, tzero, 0);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
			counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
			counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	counter_u64_zero(kt->pfrkt_match);
	counter_u64_zero(kt->pfrkt_nomatch);
	kt->pfrkt_tzero = tzero;
}

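/*
 * Allocate and initialize a ktable: optionally attach it to its
 * ruleset, allocate the per-table counters and set up the IPv4 and
 * IPv6 radix heads.  Returns NULL on any allocation failure.
 */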
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;
	int			 pfr_dir, pfr_op;

	PF_RULES_WASSERT();

	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
			kt->pfrkt_packets[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (! kt->pfrkt_packets[pfr_dir][pfr_op]) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
			kt->pfrkt_bytes[pfr_dir][pfr_op] =
			    counter_u64_alloc(M_NOWAIT);
			if (! kt->pfrkt_bytes[pfr_dir][pfr_op]) {
				pfr_destroy_ktable(kt, 0);
				return (NULL);
			}
		}
	}
	kt->pfrkt_match = counter_u64_alloc(M_NOWAIT);
	if (! kt->pfrkt_match) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	kt->pfrkt_nomatch = counter_u64_alloc(M_NOWAIT);
	if (! kt->pfrkt_nomatch) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;
	int			 pfr_dir, pfr_op;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip4);
	if (kt->pfrkt_ip6 != NULL)
		rn_detachhead((void **)&kt->pfrkt_ip6);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
			counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
			counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
		}
	}
	counter_u64_free(kt->pfrkt_match);
	counter_u64_free(kt->pfrkt_nomatch);

	free(kt, M_PFTABLE);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
	    (struct pfr_ktable *)tbl));
}

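/*
 * Look up an address in a table from the packet-processing path:
 * returns non-zero if it matches a non-negated entry, and updates the
 * table's match/nomatch counters.
 */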
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *ke = NULL;
	int match;

	PF_RULES_RASSERT();

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		counter_u64_add(kt->pfrkt_match, 1);
	else
		counter_u64_add(kt->pfrkt_nomatch, 1);
	return (match);
}

void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#ifdef INET
	case AF_INET:
	    {
		struct sockaddr_in sin;

		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&sin, &kt->pfrkt_ip4->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET */
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 sin6;

		bzero(&sin6, sizeof(sin6));
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_family = AF_INET6;
		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
	    }
#endif /* INET6 */
	default:
		panic("%s: unknown address family %u", __func__, af);
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pfr_update_stats: assertion failed.\n"));
		op_pass = PFR_OP_XPASS;
	}
	counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
	counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
	if (ke != NULL && op_pass != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_PACKETS), 1);
		counter_u64_add(pfr_kentry_counter(&ke->pfrke_counters,
		    dir_out, op_pass, PFR_TYPE_BYTES), len);
	}
}
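
/*
 * Attach a table reference for a rule: look the table up by name in the
 * rule's anchor, creating it if needed.  When the rule sits inside an
 * anchor, a root table of the same name is additionally looked up or
 * created in the main ruleset and linked via pfrkt_root.  On the first
 * rule reference the table is marked PFR_TFLAG_REFERENCED;
 * pfr_detach_table() drops that reference again.
 */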
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_anchor *ac = rs->anchor;

	PF_RULES_WASSERT();

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, time_second, 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{

	PF_RULES_WASSERT();
	KASSERT(kt->pfrkt_refcnt[PFR_REFCNT_RULE] > 0, ("%s: refcount %d\n",
	    __func__, kt->pfrkt_refcnt[PFR_REFCNT_RULE]));

	if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
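
/*
 * Round-robin address selection from a table used as an address pool.
 * *pidx holds the index of the current block within the table and
 * *counter the last address handed out; nested (more specific) blocks
 * are skipped by advancing the counter past them.  Returns 0 on
 * success, 1 when no entry exists at the given index, and -1 when the
 * table is not active.
 */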
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    sa_family_t af)
{
	struct pf_addr *addr, *cur, *mask;
	union sockaddr_union uaddr, umask;
	struct pfr_kentry *ke, *ke2 = NULL;
	int idx = -1, use_counter = 0;

	switch (af) {
	case AF_INET:
		uaddr.sin.sin_len = sizeof(struct sockaddr_in);
		uaddr.sin.sin_family = AF_INET;
		break;
	case AF_INET6:
		uaddr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		uaddr.sin6.sin6_family = AF_INET6;
		break;
	}
	addr = SUNION2PF(&uaddr, af);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		counter_u64_add(kt->pfrkt_nomatch, 1);
		return (1);
	}
	pfr_prepare_network(&umask, af, ke->pfrke_net);
	cur = SUNION2PF(&ke->pfrke_sa, af);
	mask = SUNION2PF(&umask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, cur, mask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, cur, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		counter_u64_add(kt->pfrkt_match, 1);
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		switch (af) {
		case AF_INET:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip4->rh);
			break;
		case AF_INET6:
			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
			    &kt->pfrkt_ip6->rh);
			break;
		}
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			counter_u64_add(kt->pfrkt_match, 1);
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, cur, mask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#ifdef INET
	case AF_INET:
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh, pfr_walktree, &w);
}