/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pfvar.h,v 1.282 2009/01/29 15:12:28 pyr Exp $
 *	$FreeBSD$
 */

#ifndef _NET_PFVAR_H_
#define _NET_PFVAR_H_

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/counter.h>
#include <sys/cpuset.h>
#include <sys/malloc.h>
#include <sys/nv.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/tree.h>
#include <vm/uma.h>

#include <net/radix.h>
#include <netinet/in.h>
#ifdef _KERNEL
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>
#endif

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>

#ifdef _KERNEL

SYSCTL_DECL(_net_pf);
MALLOC_DECLARE(M_PFHASH);

SDT_PROVIDER_DECLARE(pf);

struct pfi_dynaddr {
    TAILQ_ENTRY(pfi_dynaddr) entry;
    struct pf_addr     pfid_addr4;
    struct pf_addr     pfid_mask4;
    struct pf_addr     pfid_addr6;
    struct pf_addr     pfid_mask6;
    struct pfr_ktable *pfid_kt;
    struct pfi_kkif   *pfid_kif;
    int                pfid_net;      /* mask or 128 */
    int                pfid_acnt4;    /* address count IPv4 */
    int                pfid_acnt6;    /* address count IPv6 */
    sa_family_t        pfid_af;       /* rule af */
    u_int8_t           pfid_iflags;   /* PFI_AFLAG_* */
};

/*
 * Address manipulation macros
 */
#define HTONL(x)    (x) = htonl((__uint32_t)(x))
#define HTONS(x)    (x) = htons((__uint16_t)(x))
#define NTOHL(x)    (x) = ntohl((__uint32_t)(x))
#define NTOHS(x)    (x) = ntohs((__uint16_t)(x))

#define PF_NAME     "pf"

#define PF_HASHROW_ASSERT(h)    mtx_assert(&(h)->lock, MA_OWNED)
#define PF_HASHROW_LOCK(h)      mtx_lock(&(h)->lock)
#define PF_HASHROW_UNLOCK(h)    mtx_unlock(&(h)->lock)

#define PF_STATE_LOCK(s)                                                \
    do {                                                                \
        struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(s)];             \
        PF_HASHROW_LOCK(_ih);                                           \
    } while (0)

#define PF_STATE_UNLOCK(s)                                              \
    do {                                                                \
        struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH((s))];           \
        PF_HASHROW_UNLOCK(_ih);                                         \
    } while (0)

#ifdef INVARIANTS
#define PF_STATE_LOCK_ASSERT(s)                                         \
    do {                                                                \
        struct pf_idhash *_ih = &V_pf_idhash[PF_IDHASH(s)];             \
        PF_HASHROW_ASSERT(_ih);                                         \
    } while (0)
#else /* !INVARIANTS */
#define PF_STATE_LOCK_ASSERT(s)    do {} while (0)
#endif /* INVARIANTS */
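
/*
 * Illustrative sketch (not part of the API): a caller that examines or
 * modifies a state is expected to hold the mutex of the ID-hash row the
 * state hashes into, which is what PF_STATE_LOCK()/PF_STATE_UNLOCK()
 * expand to.  The function name and the field access below are
 * placeholders showing only the locking pattern.
 *
 *	static void
 *	pf_touch_state_example(struct pf_state *s)
 *	{
 *		PF_STATE_LOCK(s);
 *		PF_STATE_LOCK_ASSERT(s);
 *		s->expire = time_uptime;	// some row-protected field
 *		PF_STATE_UNLOCK(s);
 *	}
 */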

extern struct mtx_padalign pf_unlnkdrules_mtx;
#define PF_UNLNKDRULES_LOCK()    mtx_lock(&pf_unlnkdrules_mtx)
#define PF_UNLNKDRULES_UNLOCK()  mtx_unlock(&pf_unlnkdrules_mtx)

extern struct rmlock pf_rules_lock;
#define PF_RULES_RLOCK_TRACKER   struct rm_priotracker _pf_rules_tracker
#define PF_RULES_RLOCK()    rm_rlock(&pf_rules_lock, &_pf_rules_tracker)
#define PF_RULES_RUNLOCK()  rm_runlock(&pf_rules_lock, &_pf_rules_tracker)
#define PF_RULES_WLOCK()    rm_wlock(&pf_rules_lock)
#define PF_RULES_WUNLOCK()  rm_wunlock(&pf_rules_lock)
#define PF_RULES_WOWNED()   rm_wowned(&pf_rules_lock)
#define PF_RULES_ASSERT()   rm_assert(&pf_rules_lock, RA_LOCKED)
#define PF_RULES_RASSERT()  rm_assert(&pf_rules_lock, RA_RLOCKED)
#define PF_RULES_WASSERT()  rm_assert(&pf_rules_lock, RA_WLOCKED)
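
/*
 * Illustrative sketch (not part of the API): readers of the rule set must
 * declare a tracker in the same scope before taking the read lock, because
 * PF_RULES_RLOCK()/PF_RULES_RUNLOCK() expand to uses of the hidden
 * _pf_rules_tracker variable.  The rule walk below is a placeholder.
 *
 *	static void
 *	pf_walk_rules_example(struct pf_krulequeue *rules)
 *	{
 *		struct pf_krule *r;
 *		PF_RULES_RLOCK_TRACKER;
 *
 *		PF_RULES_RLOCK();
 *		TAILQ_FOREACH(r, rules, entries)
 *			;	// inspect r under the read lock
 *		PF_RULES_RUNLOCK();
 *	}
 */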

extern struct mtx_padalign pf_table_stats_lock;
#define PF_TABLE_STATS_LOCK()    mtx_lock(&pf_table_stats_lock)
#define PF_TABLE_STATS_UNLOCK()  mtx_unlock(&pf_table_stats_lock)
#define PF_TABLE_STATS_OWNED()   mtx_owned(&pf_table_stats_lock)
#define PF_TABLE_STATS_ASSERT()  mtx_assert(&pf_table_stats_lock, MA_OWNED)

extern struct sx pf_end_lock;

#define PF_MODVER      1
#define PFLOG_MODVER   1
#define PFSYNC_MODVER  1

#define PFLOG_MINVER   1
#define PFLOG_PREFVER  PFLOG_MODVER
#define PFLOG_MAXVER   1
#define PFSYNC_MINVER  1
#define PFSYNC_PREFVER PFSYNC_MODVER
#define PFSYNC_MAXVER  1

#ifdef INET
#ifndef INET6
#define PF_INET_ONLY
#endif /* ! INET6 */
#endif /* INET */

#ifdef INET6
#ifndef INET
#define PF_INET6_ONLY
#endif /* ! INET */
#endif /* INET6 */

#ifdef INET
#ifdef INET6
#define PF_INET_INET6
#endif /* INET6 */
#endif /* INET */

#else

#define PF_INET_INET6

#endif /* _KERNEL */

/* Both IPv4 and IPv6 */
#ifdef PF_INET_INET6

#define PF_AEQ(a, b, c) \
    ((c == AF_INET && (a)->addr32[0] == (b)->addr32[0]) || \
    (c == AF_INET6 && (a)->addr32[3] == (b)->addr32[3] && \
    (a)->addr32[2] == (b)->addr32[2] && \
    (a)->addr32[1] == (b)->addr32[1] && \
    (a)->addr32[0] == (b)->addr32[0])) \

#define PF_ANEQ(a, b, c) \
    ((c == AF_INET && (a)->addr32[0] != (b)->addr32[0]) || \
    (c == AF_INET6 && ((a)->addr32[0] != (b)->addr32[0] || \
    (a)->addr32[1] != (b)->addr32[1] || \
    (a)->addr32[2] != (b)->addr32[2] || \
    (a)->addr32[3] != (b)->addr32[3]))) \

#define PF_AZERO(a, c) \
    ((c == AF_INET && !(a)->addr32[0]) || \
    (c == AF_INET6 && !(a)->addr32[0] && !(a)->addr32[1] && \
    !(a)->addr32[2] && !(a)->addr32[3] )) \

#define PF_MATCHA(n, a, m, b, f) \
    pf_match_addr(n, a, m, b, f)

#define PF_ACPY(a, b, f) \
    pf_addrcpy(a, b, f)

#define PF_AINC(a, f) \
    pf_addr_inc(a, f)

#define PF_POOLMASK(a, b, c, d, f) \
    pf_poolmask(a, b, c, d, f)

#else

/* Just IPv6 */

#ifdef PF_INET6_ONLY

#define PF_AEQ(a, b, c) \
    ((a)->addr32[3] == (b)->addr32[3] && \
    (a)->addr32[2] == (b)->addr32[2] && \
    (a)->addr32[1] == (b)->addr32[1] && \
    (a)->addr32[0] == (b)->addr32[0]) \

#define PF_ANEQ(a, b, c) \
    ((a)->addr32[3] != (b)->addr32[3] || \
    (a)->addr32[2] != (b)->addr32[2] || \
    (a)->addr32[1] != (b)->addr32[1] || \
    (a)->addr32[0] != (b)->addr32[0]) \

#define PF_AZERO(a, c) \
    (!(a)->addr32[0] && \
    !(a)->addr32[1] && \
    !(a)->addr32[2] && \
    !(a)->addr32[3] ) \

#define PF_MATCHA(n, a, m, b, f) \
    pf_match_addr(n, a, m, b, f)

#define PF_ACPY(a, b, f) \
    pf_addrcpy(a, b, f)

#define PF_AINC(a, f) \
    pf_addr_inc(a, f)

#define PF_POOLMASK(a, b, c, d, f) \
    pf_poolmask(a, b, c, d, f)

#else

/* Just IPv4 */
#ifdef PF_INET_ONLY

#define PF_AEQ(a, b, c) \
    ((a)->addr32[0] == (b)->addr32[0])

#define PF_ANEQ(a, b, c) \
    ((a)->addr32[0] != (b)->addr32[0])

#define PF_AZERO(a, c) \
    (!(a)->addr32[0])

#define PF_MATCHA(n, a, m, b, f) \
    pf_match_addr(n, a, m, b, f)

#define PF_ACPY(a, b, f) \
    (a)->v4.s_addr = (b)->v4.s_addr

#define PF_AINC(a, f) \
    do { \
        (a)->addr32[0] = htonl(ntohl((a)->addr32[0]) + 1); \
    } while (0)

#define PF_POOLMASK(a, b, c, d, f) \
    do { \
        (a)->addr32[0] = ((b)->addr32[0] & (c)->addr32[0]) | \
        (((c)->addr32[0] ^ 0xffffffff ) & (d)->addr32[0]); \
    } while (0)

#endif /* PF_INET_ONLY */
#endif /* PF_INET6_ONLY */
#endif /* PF_INET_INET6 */

/*
 * XXX callers not FIB-aware in our version of pf yet.
 * OpenBSD fixed it later it seems, 2010/05/07 13:33:16 claudio.
 */
#define PF_MISMATCHAW(aw, x, af, neg, ifp, rtid)                        \
    (                                                                   \
        (((aw)->type == PF_ADDR_NOROUTE &&                              \
            pf_routable((x), (af), NULL, (rtid))) ||                    \
        (((aw)->type == PF_ADDR_URPFFAILED && (ifp) != NULL &&          \
            pf_routable((x), (af), (ifp), (rtid))) ||                   \
        ((aw)->type == PF_ADDR_TABLE &&                                 \
            !pfr_match_addr((aw)->p.tbl, (x), (af))) ||                 \
        ((aw)->type == PF_ADDR_DYNIFTL &&                               \
            !pfi_match_addr((aw)->p.dyn, (x), (af))) ||                 \
        ((aw)->type == PF_ADDR_RANGE &&                                 \
            !pf_match_addr_range(&(aw)->v.a.addr,                       \
            &(aw)->v.a.mask, (x), (af))) ||                             \
        ((aw)->type == PF_ADDR_ADDRMASK &&                              \
            !PF_AZERO(&(aw)->v.a.mask, (af)) &&                         \
            !PF_MATCHA(0, &(aw)->v.a.addr,                              \
            &(aw)->v.a.mask, (x), (af))))) !=                           \
        (neg)                                                           \
    )

#define PF_ALGNMNT(off) (((off) % 2) == 0)
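
/*
 * Illustrative sketch (not part of the API): the PF_A* macros take the
 * address family as the trailing comparison argument, so one code path can
 * handle both IPv4 and IPv6 pf_addr values.  The helper below is a
 * placeholder.
 *
 *	static int
 *	pf_addrs_differ_example(struct pf_addr *a, struct pf_addr *b,
 *	    sa_family_t af)
 *	{
 *		if (PF_AZERO(a, af))	// unspecified address
 *			return (0);
 *		return (PF_ANEQ(a, b, af));
 *	}
 */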

#ifdef _KERNEL

struct pf_kpooladdr {
    struct pf_addr_wrap        addr;
    TAILQ_ENTRY(pf_kpooladdr)  entries;
    char                       ifname[IFNAMSIZ];
    struct pfi_kkif           *kif;
};

TAILQ_HEAD(pf_kpalist, pf_kpooladdr);

struct pf_kpool {
    struct pf_kpalist       list;
    struct pf_kpooladdr    *cur;
    struct pf_poolhashkey   key;
    struct pf_addr          counter;
    struct pf_mape_portset  mape;
    int                     tblidx;
    u_int16_t               proxy_port[2];
    u_int8_t                opts;
};

union pf_krule_ptr {
    struct pf_krule  *ptr;
    u_int32_t         nr;
};

struct pf_krule {
    struct pf_rule_addr  src;
    struct pf_rule_addr  dst;
    union pf_krule_ptr   skip[PF_SKIP_COUNT];
    char                 label[PF_RULE_MAX_LABEL_COUNT][PF_RULE_LABEL_SIZE];
    char                 ifname[IFNAMSIZ];
    char                 qname[PF_QNAME_SIZE];
    char                 pqname[PF_QNAME_SIZE];
    char                 tagname[PF_TAG_NAME_SIZE];
    char                 match_tagname[PF_TAG_NAME_SIZE];

    char                 overload_tblname[PF_TABLE_NAME_SIZE];

    TAILQ_ENTRY(pf_krule) entries;
    struct pf_kpool      rpool;

    counter_u64_t        evaluations;
    counter_u64_t        packets[2];
    counter_u64_t        bytes[2];

    struct pfi_kkif     *kif;
    struct pf_kanchor   *anchor;
    struct pfr_ktable   *overload_tbl;

    pf_osfp_t            os_fingerprint;

    int                  rtableid;
    u_int32_t            timeout[PFTM_MAX];
    u_int32_t            max_states;
    u_int32_t            max_src_nodes;
    u_int32_t            max_src_states;
    u_int32_t            max_src_conn;
    struct {
        u_int32_t            limit;
        u_int32_t            seconds;
    }                    max_src_conn_rate;
    u_int32_t            qid;
    u_int32_t            pqid;
    u_int32_t            nr;
    u_int32_t            prob;
    uid_t                cuid;
    pid_t                cpid;

    counter_u64_t        states_cur;
    counter_u64_t        states_tot;
    counter_u64_t        src_nodes;

    u_int16_t            return_icmp;
    u_int16_t            return_icmp6;
    u_int16_t            max_mss;
    u_int16_t            tag;
    u_int16_t            match_tag;
    u_int16_t            scrub_flags;

    struct pf_rule_uid   uid;
    struct pf_rule_gid   gid;

    u_int32_t            rule_flag;
    uint32_t             rule_ref;
    u_int8_t             action;
    u_int8_t             direction;
    u_int8_t             log;
    u_int8_t             logif;
    u_int8_t             quick;
    u_int8_t             ifnot;
    u_int8_t             match_tag_not;
    u_int8_t             natpass;

    u_int8_t             keep_state;
    sa_family_t          af;
    u_int8_t             proto;
    u_int8_t             type;
    u_int8_t             code;
    u_int8_t             flags;
    u_int8_t             flagset;
    u_int8_t             min_ttl;
    u_int8_t             allow_opts;
    u_int8_t             rt;
    u_int8_t             return_ttl;
    u_int8_t             tos;
    u_int8_t             set_tos;
    u_int8_t             anchor_relative;
    u_int8_t             anchor_wildcard;

    u_int8_t             flush;
    u_int8_t             prio;
    u_int8_t             set_prio[2];

    struct {
        struct pf_addr       addr;
        u_int16_t            port;
    }                    divert;
};

struct pf_ksrc_node {
    LIST_ENTRY(pf_ksrc_node) entry;
    struct pf_addr       addr;
    struct pf_addr       raddr;
    union pf_krule_ptr   rule;
    struct pfi_kkif     *kif;
    counter_u64_t        bytes[2];
    counter_u64_t        packets[2];
    u_int32_t            states;
    u_int32_t            conn;
    struct pf_threshold  conn_rate;
    u_int32_t            creation;
    u_int32_t            expire;
    sa_family_t          af;
    u_int8_t             ruletype;
};
#endif

struct pf_state_scrub {
    struct timeval  pfss_last;   /* time received last packet */
    u_int32_t       pfss_tsecr;  /* last echoed timestamp */
    u_int32_t       pfss_tsval;  /* largest timestamp */
    u_int32_t       pfss_tsval0; /* original timestamp */
    u_int16_t       pfss_flags;
#define PFSS_TIMESTAMP  0x0001   /* modulate timestamp */
#define PFSS_PAWS       0x0010   /* stricter PAWS checks */
#define PFSS_PAWS_IDLED 0x0020   /* was idle too long.  no PAWS */
#define PFSS_DATA_TS    0x0040   /* timestamp on data packets */
#define PFSS_DATA_NOTS  0x0080   /* no timestamp on data packets */
    u_int8_t        pfss_ttl;    /* stashed TTL */
    u_int8_t        pad;
    u_int32_t       pfss_ts_mod; /* timestamp modulation */
};

struct pf_state_host {
    struct pf_addr  addr;
    u_int16_t       port;
    u_int16_t       pad;
};

struct pf_state_peer {
    struct pf_state_scrub *scrub; /* state is scrubbed */
    u_int32_t       seqlo;        /* Max sequence number sent */
    u_int32_t       seqhi;        /* Max the other end ACKd + win */
    u_int32_t       seqdiff;      /* Sequence number modulator */
    u_int16_t       max_win;      /* largest window (pre scaling) */
    u_int16_t       mss;          /* Maximum segment size option */
    u_int8_t        state;        /* active state level */
    u_int8_t        wscale;       /* window scaling factor */
    u_int8_t        tcp_est;      /* Did we reach TCPS_ESTABLISHED */
    u_int8_t        pad[1];
};

/* Keep synced with struct pf_state_key. */
struct pf_state_key_cmp {
    struct pf_addr  addr[2];
    u_int16_t       port[2];
    sa_family_t     af;
    u_int8_t        proto;
    u_int8_t        pad[2];
};

struct pf_state_key {
    struct pf_addr  addr[2];
    u_int16_t       port[2];
    sa_family_t     af;
    u_int8_t        proto;
    u_int8_t        pad[2];

    LIST_ENTRY(pf_state_key) entry;
    TAILQ_HEAD(, pf_state)   states[2];
};
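
/*
 * Illustrative sketch (not part of the API): state lookups build a
 * pf_state_key_cmp on the stack and hand it to a lookup routine such as
 * pf_find_state_all() (declared further below), which is why the layout
 * must stay in sync with the start of struct pf_state_key.  Which endpoint
 * goes into slot 0 or 1 depends on the caller (wire vs. stack side); the
 * values below are placeholders.
 *
 *	struct pf_state_key_cmp key;
 *
 *	bzero(&key, sizeof(key));
 *	key.af = AF_INET;
 *	key.proto = IPPROTO_TCP;
 *	PF_ACPY(&key.addr[0], &addr_a, key.af);
 *	PF_ACPY(&key.addr[1], &addr_b, key.af);
 *	key.port[0] = port_a;
 *	key.port[1] = port_b;
 *	// e.g. pf_find_state_all(&key, PF_IN, NULL)
 */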

/* Keep synced with struct pf_state. */
struct pf_state_cmp {
    u_int64_t   id;
    u_int32_t   creatorid;
    u_int8_t    direction;
    u_int8_t    pad[3];
};

#define PFSTATE_ALLOWOPTS  0x01
#define PFSTATE_SLOPPY     0x02
/*      was PFSTATE_PFLOW  0x04 */
#define PFSTATE_NOSYNC     0x08
#define PFSTATE_ACK        0x10
#define PFSTATE_SETPRIO    0x0200
#define PFSTATE_SETMASK    (PFSTATE_SETPRIO)

#ifdef _KERNEL
struct pf_state {
    u_int64_t             id;
    u_int32_t             creatorid;
    u_int8_t              direction;
    u_int8_t              pad[3];

    u_int                 refs;
    TAILQ_ENTRY(pf_state) sync_list;
    TAILQ_ENTRY(pf_state) key_list[2];
    LIST_ENTRY(pf_state)  entry;
    struct pf_state_peer  src;
    struct pf_state_peer  dst;
    union pf_krule_ptr    rule;
    union pf_krule_ptr    anchor;
    union pf_krule_ptr    nat_rule;
    struct pf_addr        rt_addr;
    struct pf_state_key  *key[2];    /* addresses stack and wire */
    struct pfi_kkif      *kif;
    struct pfi_kkif      *orig_kif;  /* The real kif, even if we're a
                                      * floating state (i.e. if == V_pfi_all). */
    struct pfi_kkif      *rt_kif;
    struct pf_ksrc_node  *src_node;
    struct pf_ksrc_node  *nat_src_node;
    u_int64_t             packets[2];
    u_int64_t             bytes[2];
    u_int32_t             creation;
    u_int32_t             expire;
    u_int32_t             pfsync_time;
    u_int16_t             tag;
    u_int8_t              log;
    u_int8_t              state_flags;
    u_int8_t              timeout;
    u_int8_t              sync_state;  /* PFSYNC_S_x */

    /* XXX */
    u_int8_t              sync_updates;
    u_int8_t              _tail[3];
};

/*
 * A size of at most 312 bytes fits 13 objects per page on LP64.
 * Try not to grow the struct beyond that.
 */
_Static_assert(sizeof(struct pf_state) <= 312, "pf_state size crosses 312 bytes");
#endif

/*
 * Unified state structures for pulling states out of the kernel
 * used by pfsync(4) and the pf(4) ioctl.
 */
struct pfsync_state_scrub {
    u_int16_t   pfss_flags;
    u_int8_t    pfss_ttl;     /* stashed TTL */
#define PFSYNC_SCRUB_FLAG_VALID 0x01
    u_int8_t    scrub_flag;
    u_int32_t   pfss_ts_mod;  /* timestamp modulation */
} __packed;

struct pfsync_state_peer {
    struct pfsync_state_scrub scrub; /* state is scrubbed */
    u_int32_t   seqlo;        /* Max sequence number sent */
    u_int32_t   seqhi;        /* Max the other end ACKd + win */
    u_int32_t   seqdiff;      /* Sequence number modulator */
    u_int16_t   max_win;      /* largest window (pre scaling) */
    u_int16_t   mss;          /* Maximum segment size option */
    u_int8_t    state;        /* active state level */
    u_int8_t    wscale;       /* window scaling factor */
    u_int8_t    pad[6];
} __packed;

struct pfsync_state_key {
    struct pf_addr  addr[2];
    u_int16_t       port[2];
};

struct pfsync_state {
    u_int64_t   id;
    char        ifname[IFNAMSIZ];
    struct pfsync_state_key  key[2];
    struct pfsync_state_peer src;
    struct pfsync_state_peer dst;
    struct pf_addr  rt_addr;
    u_int32_t   rule;
    u_int32_t   anchor;
    u_int32_t   nat_rule;
    u_int32_t   creation;
    u_int32_t   expire;
    u_int32_t   packets[2][2];
    u_int32_t   bytes[2][2];
    u_int32_t   creatorid;
    sa_family_t af;
    u_int8_t    proto;
    u_int8_t    direction;
    u_int8_t    __spare[2];
    u_int8_t    log;
    u_int8_t    state_flags;
    u_int8_t    timeout;
    u_int8_t    sync_flags;
    u_int8_t    updates;
} __packed;

#ifdef _KERNEL
/* pfsync */
typedef int pfsync_state_import_t(struct pfsync_state *, u_int8_t);
typedef void pfsync_insert_state_t(struct pf_state *);
typedef void pfsync_update_state_t(struct pf_state *);
typedef void pfsync_delete_state_t(struct pf_state *);
typedef void pfsync_clear_states_t(u_int32_t, const char *);
typedef int pfsync_defer_t(struct pf_state *, struct mbuf *);
typedef void pfsync_detach_ifnet_t(struct ifnet *);

VNET_DECLARE(pfsync_state_import_t *, pfsync_state_import_ptr);
#define V_pfsync_state_import_ptr  VNET(pfsync_state_import_ptr)
VNET_DECLARE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
#define V_pfsync_insert_state_ptr  VNET(pfsync_insert_state_ptr)
VNET_DECLARE(pfsync_update_state_t *, pfsync_update_state_ptr);
#define V_pfsync_update_state_ptr  VNET(pfsync_update_state_ptr)
VNET_DECLARE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
#define V_pfsync_delete_state_ptr  VNET(pfsync_delete_state_ptr)
VNET_DECLARE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
#define V_pfsync_clear_states_ptr  VNET(pfsync_clear_states_ptr)
VNET_DECLARE(pfsync_defer_t *, pfsync_defer_ptr);
#define V_pfsync_defer_ptr         VNET(pfsync_defer_ptr)
extern pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
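
/*
 * Illustrative sketch (not part of the API): pf reaches pfsync(4) only
 * through these per-VNET function pointers, which remain NULL until the
 * pfsync module attaches.  Callers are therefore expected to test the
 * pointer before calling through it, roughly like this placeholder site:
 *
 *	if (V_pfsync_update_state_ptr != NULL)
 *		V_pfsync_update_state_ptr(s);
 */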

void    pfsync_state_export(struct pfsync_state *,
            struct pf_state *);

/* pflog */
struct pf_kruleset;
struct pf_pdesc;
typedef int pflog_packet_t(struct pfi_kkif *, struct mbuf *, sa_family_t,
    u_int8_t, u_int8_t, struct pf_krule *, struct pf_krule *,
    struct pf_kruleset *, struct pf_pdesc *, int);
extern pflog_packet_t *pflog_packet_ptr;

#endif /* _KERNEL */

#define PFSYNC_FLAG_SRCNODE     0x04
#define PFSYNC_FLAG_NATSRCNODE  0x08

/* for copies to/from network byte order */
/* ioctl interface also uses network byte order */
#define pf_state_peer_hton(s,d) do {                                    \
    (d)->seqlo = htonl((s)->seqlo);                                     \
    (d)->seqhi = htonl((s)->seqhi);                                     \
    (d)->seqdiff = htonl((s)->seqdiff);                                 \
    (d)->max_win = htons((s)->max_win);                                 \
    (d)->mss = htons((s)->mss);                                         \
    (d)->state = (s)->state;                                            \
    (d)->wscale = (s)->wscale;                                          \
    if ((s)->scrub) {                                                   \
        (d)->scrub.pfss_flags =                                         \
            htons((s)->scrub->pfss_flags & PFSS_TIMESTAMP);             \
        (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl;                     \
        (d)->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);        \
        (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID;                \
    }                                                                   \
} while (0)

#define pf_state_peer_ntoh(s,d) do {                                    \
    (d)->seqlo = ntohl((s)->seqlo);                                     \
    (d)->seqhi = ntohl((s)->seqhi);                                     \
    (d)->seqdiff = ntohl((s)->seqdiff);                                 \
    (d)->max_win = ntohs((s)->max_win);                                 \
    (d)->mss = ntohs((s)->mss);                                         \
    (d)->state = (s)->state;                                            \
    (d)->wscale = (s)->wscale;                                          \
    if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID &&             \
        (d)->scrub != NULL) {                                           \
        (d)->scrub->pfss_flags =                                        \
            ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP;              \
        (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl;                     \
        (d)->scrub->pfss_ts_mod = ntohl((s)->scrub.pfss_ts_mod);        \
    }                                                                   \
} while (0)

#define pf_state_counter_hton(s,d) do {                                 \
    d[0] = htonl((s>>32)&0xffffffff);                                   \
    d[1] = htonl(s&0xffffffff);                                         \
} while (0)

#define pf_state_counter_from_pfsync(s)                                 \
    (((u_int64_t)(s[0])<<32) | (u_int64_t)(s[1]))

#define pf_state_counter_ntoh(s,d) do {                                 \
    d = ntohl(s[0]);                                                    \
    d = d<<32;                                                          \
    d += ntohl(s[1]);                                                   \
} while (0)
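
/*
 * Illustrative sketch (not part of the API): exporting a kernel state into
 * the pfsync/ioctl representation converts peer sequence numbers and the
 * 64-bit counters to network byte order, roughly like this (placeholder
 * names: "st" is a struct pf_state *, "sp" a struct pfsync_state *):
 *
 *	pf_state_peer_hton(&st->src, &sp->src);
 *	pf_state_peer_hton(&st->dst, &sp->dst);
 *	pf_state_counter_hton(st->packets[0], sp->packets[0]);
 *	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
 */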

TAILQ_HEAD(pf_krulequeue, pf_krule);

struct pf_kanchor;

struct pf_kruleset {
    struct {
        struct pf_krulequeue  queues[2];
        struct {
            struct pf_krulequeue *ptr;
            struct pf_krule     **ptr_array;
            u_int32_t             rcount;
            u_int32_t             ticket;
            int                   open;
        } active, inactive;
    }                   rules[PF_RULESET_MAX];
    struct pf_kanchor  *anchor;
    u_int32_t           tticket;
    int                 tables;
    int                 topen;
};

RB_HEAD(pf_kanchor_global, pf_kanchor);
RB_HEAD(pf_kanchor_node, pf_kanchor);
struct pf_kanchor {
    RB_ENTRY(pf_kanchor)    entry_global;
    RB_ENTRY(pf_kanchor)    entry_node;
    struct pf_kanchor      *parent;
    struct pf_kanchor_node  children;
    char                    name[PF_ANCHOR_NAME_SIZE];
    char                    path[MAXPATHLEN];
    struct pf_kruleset      ruleset;
    int                     refcnt;  /* anchor rules */
    int                     match;   /* XXX: used for pfctl black magic */
};
RB_PROTOTYPE(pf_kanchor_global, pf_kanchor, entry_global, pf_anchor_compare);
RB_PROTOTYPE(pf_kanchor_node, pf_kanchor, entry_node, pf_kanchor_compare);

#define PF_RESERVED_ANCHOR      "_pf"

#define PFR_TFLAG_PERSIST       0x00000001
#define PFR_TFLAG_CONST         0x00000002
#define PFR_TFLAG_ACTIVE        0x00000004
#define PFR_TFLAG_INACTIVE      0x00000008
#define PFR_TFLAG_REFERENCED    0x00000010
#define PFR_TFLAG_REFDANCHOR    0x00000020
#define PFR_TFLAG_COUNTERS      0x00000040
/* Adjust masks below when adding flags. */
#define PFR_TFLAG_USRMASK       (PFR_TFLAG_PERSIST | \
                                 PFR_TFLAG_CONST | \
                                 PFR_TFLAG_COUNTERS)
#define PFR_TFLAG_SETMASK       (PFR_TFLAG_ACTIVE | \
                                 PFR_TFLAG_INACTIVE | \
                                 PFR_TFLAG_REFERENCED | \
                                 PFR_TFLAG_REFDANCHOR)
#define PFR_TFLAG_ALLMASK       (PFR_TFLAG_PERSIST | \
                                 PFR_TFLAG_CONST | \
                                 PFR_TFLAG_ACTIVE | \
                                 PFR_TFLAG_INACTIVE | \
                                 PFR_TFLAG_REFERENCED | \
                                 PFR_TFLAG_REFDANCHOR | \
                                 PFR_TFLAG_COUNTERS)

struct pf_kanchor_stackframe;

struct pfr_table {
    char        pfrt_anchor[MAXPATHLEN];
    char        pfrt_name[PF_TABLE_NAME_SIZE];
    u_int32_t   pfrt_flags;
    u_int8_t    pfrt_fback;
};

enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED,
    PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_DUPLICATE,
    PFR_FB_NOTMATCH, PFR_FB_CONFLICT, PFR_FB_NOCOUNT, PFR_FB_MAX };

struct pfr_addr {
    union {
        struct in_addr   _pfra_ip4addr;
        struct in6_addr  _pfra_ip6addr;
    }           pfra_u;
    u_int8_t    pfra_af;
    u_int8_t    pfra_net;
    u_int8_t    pfra_not;
    u_int8_t    pfra_fback;
};
#define pfra_ip4addr    pfra_u._pfra_ip4addr
#define pfra_ip6addr    pfra_u._pfra_ip6addr

enum { PFR_DIR_IN, PFR_DIR_OUT, PFR_DIR_MAX };
enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX };
enum { PFR_TYPE_PACKETS, PFR_TYPE_BYTES, PFR_TYPE_MAX };
#define PFR_NUM_COUNTERS    (PFR_DIR_MAX * PFR_OP_ADDR_MAX * PFR_TYPE_MAX)
#define PFR_OP_XPASS        PFR_OP_ADDR_MAX

struct pfr_astats {
    struct pfr_addr pfras_a;
    u_int64_t       pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
    u_int64_t       pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
    long            pfras_tzero;
};

enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX };

struct pfr_tstats {
    struct pfr_table pfrts_t;
    u_int64_t       pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
    u_int64_t       pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
    u_int64_t       pfrts_match;
    u_int64_t       pfrts_nomatch;
    long            pfrts_tzero;
    int             pfrts_cnt;
    int             pfrts_refcnt[PFR_REFCNT_MAX];
};

#ifdef _KERNEL

struct pfr_kstate_counter {
    counter_u64_t   pkc_pcpu;
    u_int64_t       pkc_zero;
};

static inline int
pfr_kstate_counter_init(struct pfr_kstate_counter *pfrc, int flags)
{

    pfrc->pkc_zero = 0;
    pfrc->pkc_pcpu = counter_u64_alloc(flags);
    if (pfrc->pkc_pcpu == NULL)
        return (ENOMEM);
    return (0);
}

static inline void
pfr_kstate_counter_deinit(struct pfr_kstate_counter *pfrc)
{

    counter_u64_free(pfrc->pkc_pcpu);
}

static inline u_int64_t
pfr_kstate_counter_fetch(struct pfr_kstate_counter *pfrc)
{
    u_int64_t c;

    c = counter_u64_fetch(pfrc->pkc_pcpu);
    c -= pfrc->pkc_zero;
    return (c);
}

static inline void
pfr_kstate_counter_zero(struct pfr_kstate_counter *pfrc)
{
    u_int64_t c;

    c = counter_u64_fetch(pfrc->pkc_pcpu);
    pfrc->pkc_zero = c;
}

static inline void
pfr_kstate_counter_add(struct pfr_kstate_counter *pfrc, int64_t n)
{

    counter_u64_add(pfrc->pkc_pcpu, n);
}
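
/*
 * Illustrative sketch (not part of the API): a pfr_kstate_counter pairs a
 * per-CPU counter(9) with a "zero point", so clearing statistics does not
 * have to touch every CPU.  A typical lifecycle (error handling and the
 * owning object are placeholders):
 *
 *	struct pfr_kstate_counter c;
 *
 *	if (pfr_kstate_counter_init(&c, M_WAITOK) != 0)
 *		return (ENOMEM);
 *	pfr_kstate_counter_add(&c, 1);		// account an event
 *	value = pfr_kstate_counter_fetch(&c);	// current minus zero point
 *	pfr_kstate_counter_zero(&c);		// logical reset
 *	pfr_kstate_counter_deinit(&c);
 */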

struct pfr_ktstats {
    struct pfr_table pfrts_t;
    struct pfr_kstate_counter pfrkts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
    struct pfr_kstate_counter pfrkts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
    struct pfr_kstate_counter pfrkts_match;
    struct pfr_kstate_counter pfrkts_nomatch;
    long            pfrkts_tzero;
    int             pfrkts_cnt;
    int             pfrkts_refcnt[PFR_REFCNT_MAX];
};

#endif /* _KERNEL */

#define pfrts_name   pfrts_t.pfrt_name
#define pfrts_flags  pfrts_t.pfrt_flags

#ifndef _SOCKADDR_UNION_DEFINED
#define _SOCKADDR_UNION_DEFINED
union sockaddr_union {
    struct sockaddr      sa;
    struct sockaddr_in   sin;
    struct sockaddr_in6  sin6;
};
#endif /* _SOCKADDR_UNION_DEFINED */

struct pfr_kcounters {
    counter_u64_t   pfrkc_counters;
    long            pfrkc_tzero;
};
#define pfr_kentry_counter(kc, dir, op, t)                              \
    ((kc)->pfrkc_counters +                                             \
        (dir) * PFR_OP_ADDR_MAX * PFR_TYPE_MAX + (op) * PFR_TYPE_MAX + (t))

#ifdef _KERNEL
SLIST_HEAD(pfr_kentryworkq, pfr_kentry);
struct pfr_kentry {
    struct radix_node       pfrke_node[2];
    union sockaddr_union    pfrke_sa;
    SLIST_ENTRY(pfr_kentry) pfrke_workq;
    struct pfr_kcounters    pfrke_counters;
    u_int8_t                pfrke_af;
    u_int8_t                pfrke_net;
    u_int8_t                pfrke_not;
    u_int8_t                pfrke_mark;
};

SLIST_HEAD(pfr_ktableworkq, pfr_ktable);
RB_HEAD(pfr_ktablehead, pfr_ktable);
struct pfr_ktable {
    struct pfr_ktstats      pfrkt_kts;
    RB_ENTRY(pfr_ktable)    pfrkt_tree;
    SLIST_ENTRY(pfr_ktable) pfrkt_workq;
    struct radix_node_head *pfrkt_ip4;
    struct radix_node_head *pfrkt_ip6;
    struct pfr_ktable      *pfrkt_shadow;
    struct pfr_ktable      *pfrkt_root;
    struct pf_kruleset     *pfrkt_rs;
    long                    pfrkt_larg;
    int                     pfrkt_nflags;
};
#define pfrkt_t        pfrkt_kts.pfrts_t
#define pfrkt_name     pfrkt_t.pfrt_name
#define pfrkt_anchor   pfrkt_t.pfrt_anchor
#define pfrkt_ruleset  pfrkt_t.pfrt_ruleset
#define pfrkt_flags    pfrkt_t.pfrt_flags
#define pfrkt_cnt      pfrkt_kts.pfrkts_cnt
#define pfrkt_refcnt   pfrkt_kts.pfrkts_refcnt
#define pfrkt_packets  pfrkt_kts.pfrkts_packets
#define pfrkt_bytes    pfrkt_kts.pfrkts_bytes
#define pfrkt_match    pfrkt_kts.pfrkts_match
#define pfrkt_nomatch  pfrkt_kts.pfrkts_nomatch
#define pfrkt_tzero    pfrkt_kts.pfrkts_tzero
#endif

#ifdef _KERNEL
struct pfi_kkif {
    char        pfik_name[IFNAMSIZ];
    union {
        RB_ENTRY(pfi_kkif)    _pfik_tree;
        LIST_ENTRY(pfi_kkif)  _pfik_list;
    }           _pfik_glue;
#define pfik_tree   _pfik_glue._pfik_tree
#define pfik_list   _pfik_glue._pfik_list
    counter_u64_t       pfik_packets[2][2][2];
    counter_u64_t       pfik_bytes[2][2][2];
    u_int32_t           pfik_tzero;
    u_int               pfik_flags;
    struct ifnet       *pfik_ifp;
    struct ifg_group   *pfik_group;
    u_int               pfik_rulerefs;
    TAILQ_HEAD(, pfi_dynaddr)  pfik_dynaddrs;
};
#endif

#define PFI_IFLAG_REFS   0x0001  /* has state references */
#define PFI_IFLAG_SKIP   0x0100  /* skip filtering on interface */

#ifdef _KERNEL
struct pf_pdesc {
    struct {
        int     done;
        uid_t   uid;
        gid_t   gid;
    }               lookup;
    u_int64_t       tot_len;     /* Make Mickey money */
    union pf_headers {
        struct tcphdr       tcp;
        struct udphdr       udp;
        struct icmp         icmp;
#ifdef INET6
        struct icmp6_hdr    icmp6;
#endif /* INET6 */
        char any[0];
    } hdr;

    struct pf_krule *nat_rule;   /* nat/rdr rule applied to packet */
    struct pf_addr  *src;        /* src address */
    struct pf_addr  *dst;        /* dst address */
    u_int16_t       *sport;
    u_int16_t       *dport;
    struct pf_mtag  *pf_mtag;

    u_int32_t        p_len;      /* total length of payload */

    u_int16_t       *ip_sum;
    u_int16_t       *proto_sum;
    u_int16_t        flags;      /* Let SCRUB trigger behavior in
                                  * state code.  Easier than tags */
#define PFDESC_TCP_NORM 0x0001   /* TCP shall be statefully scrubbed */
#define PFDESC_IP_REAS  0x0002   /* IP frags would've been reassembled */
    sa_family_t      af;
    u_int8_t         proto;
    u_int8_t         tos;
    u_int8_t         dir;        /* direction */
    u_int8_t         sidx;       /* key index for source */
    u_int8_t         didx;       /* key index for destination */
};
#endif

/* flags for RDR options */
#define PF_DPORT_RANGE  0x01     /* Dest port uses range */
#define PF_RPORT_RANGE  0x02     /* RDR'ed port uses range */

/* UDP state enumeration */
#define PFUDPS_NO_TRAFFIC   0
#define PFUDPS_SINGLE       1
#define PFUDPS_MULTIPLE     2

#define PFUDPS_NSTATES      3    /* number of state levels */

#define PFUDPS_NAMES { \
    "NO_TRAFFIC", \
    "SINGLE", \
    "MULTIPLE", \
    NULL \
}

/* Other protocol state enumeration */
#define PFOTHERS_NO_TRAFFIC 0
#define PFOTHERS_SINGLE     1
#define PFOTHERS_MULTIPLE   2

#define PFOTHERS_NSTATES    3    /* number of state levels */

#define PFOTHERS_NAMES { \
    "NO_TRAFFIC", \
    "SINGLE", \
    "MULTIPLE", \
    NULL \
}

#define ACTION_SET(a, x) \
    do { \
        if ((a) != NULL) \
            *(a) = (x); \
    } while (0)

#define REASON_SET(a, x) \
    do { \
        if ((a) != NULL) \
            *(a) = (x); \
        if (x < PFRES_MAX) \
            counter_u64_add(V_pf_status.counters[x], 1); \
    } while (0)
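
/*
 * Illustrative sketch (not part of the API): verdict paths typically report
 * the action and the drop reason through these macros so that the
 * per-reason counter in V_pf_status is bumped exactly once.  Placeholder
 * error path:
 *
 *	ACTION_SET(&action, PF_DROP);
 *	REASON_SET(&reason, PFRES_MEMORY);
 *	return (PF_DROP);
 */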

struct pf_kstatus {
    counter_u64_t   counters[PFRES_MAX];   /* reason for passing/dropping */
    counter_u64_t   lcounters[LCNT_MAX];   /* limit counters */
    counter_u64_t   fcounters[FCNT_MAX];   /* state operation counters */
    counter_u64_t   scounters[SCNT_MAX];   /* src_node operation counters */
    uint32_t        states;
    uint32_t        src_nodes;
    uint32_t        running;
    uint32_t        since;
    uint32_t        debug;
    uint32_t        hostid;
    char            ifname[IFNAMSIZ];
    uint8_t         pf_chksum[PF_MD5_DIGEST_LENGTH];
    bool            keep_counters;
};

struct pf_divert {
    union {
        struct in_addr   ipv4;
        struct in6_addr  ipv6;
    }           addr;
    u_int16_t   port;
};

#define PFFRAG_FRENT_HIWAT  5000    /* Number of fragment entries */
#define PFR_KENTRY_HIWAT    200000  /* Number of table entries */

/*
 * Limit the length of the fragment queue traversal.  Remember
 * search entry points based on the fragment offset.
 */
#define PF_FRAG_ENTRY_POINTS    16

/*
 * The number of entries in the fragment queue must be limited
 * to avoid DoS by linear searching.  Instead of a global limit,
 * use a limit per entry point.  For large packets these sum up.
 */
#define PF_FRAG_ENTRY_LIMIT     64

/*
 * ioctl parameter structures
 */

struct pfioc_pooladdr {
    u_int32_t           action;
    u_int32_t           ticket;
    u_int32_t           nr;
    u_int32_t           r_num;
    u_int8_t            r_action;
    u_int8_t            r_last;
    u_int8_t            af;
    char                anchor[MAXPATHLEN];
    struct pf_pooladdr  addr;
};

struct pfioc_rule {
    u_int32_t       action;
    u_int32_t       ticket;
    u_int32_t       pool_ticket;
    u_int32_t       nr;
    char            anchor[MAXPATHLEN];
    char            anchor_call[MAXPATHLEN];
    struct pf_rule  rule;
};

struct pfioc_natlook {
    struct pf_addr  saddr;
    struct pf_addr  daddr;
    struct pf_addr  rsaddr;
    struct pf_addr  rdaddr;
    u_int16_t       sport;
    u_int16_t       dport;
    u_int16_t       rsport;
    u_int16_t       rdport;
    sa_family_t     af;
    u_int8_t        proto;
    u_int8_t        direction;
};
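
/*
 * Illustrative sketch (not part of the API): userland consumers such as
 * transparent proxies fill in the tuple of the connection they accepted and
 * ask pf for the pre-translation addresses with the DIOCNATLOOK ioctl
 * defined further below.  Device path, error handling and the sockaddr
 * variables are placeholders.
 *
 *	struct pfioc_natlook nl;
 *
 *	memset(&nl, 0, sizeof(nl));
 *	nl.af = AF_INET;
 *	nl.proto = IPPROTO_TCP;
 *	nl.direction = PF_OUT;
 *	nl.saddr.v4 = client_sin.sin_addr;
 *	nl.daddr.v4 = local_sin.sin_addr;
 *	nl.sport = client_sin.sin_port;
 *	nl.dport = local_sin.sin_port;
 *	if (ioctl(dev_fd, DIOCNATLOOK, &nl) == 0)
 *		;	// nl.rdaddr/nl.rdport hold the original destination
 */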

struct pfioc_state {
    struct pfsync_state state;
};

struct pfioc_src_node_kill {
    sa_family_t         psnk_af;
    struct pf_rule_addr psnk_src;
    struct pf_rule_addr psnk_dst;
    u_int               psnk_killed;
};

#ifdef _KERNEL
struct pf_kstate_kill {
    struct pf_state_cmp psk_pfcmp;
    sa_family_t         psk_af;
    int                 psk_proto;
    struct pf_rule_addr psk_src;
    struct pf_rule_addr psk_dst;
    struct pf_rule_addr psk_rt_addr;
    char                psk_ifname[IFNAMSIZ];
    char                psk_label[PF_RULE_LABEL_SIZE];
    u_int               psk_killed;
    bool                psk_kill_match;
};
#endif

struct pfioc_state_kill {
    struct pf_state_cmp psk_pfcmp;
    sa_family_t         psk_af;
    int                 psk_proto;
    struct pf_rule_addr psk_src;
    struct pf_rule_addr psk_dst;
    char                psk_ifname[IFNAMSIZ];
    char                psk_label[PF_RULE_LABEL_SIZE];
    u_int               psk_killed;
};

struct pfioc_states {
    int     ps_len;
    union {
        caddr_t              psu_buf;
        struct pfsync_state *psu_states;
    } ps_u;
#define ps_buf      ps_u.psu_buf
#define ps_states   ps_u.psu_states
};

struct pfioc_src_nodes {
    int     psn_len;
    union {
        caddr_t              psu_buf;
        struct pf_src_node  *psu_src_nodes;
    } psn_u;
#define psn_buf         psn_u.psu_buf
#define psn_src_nodes   psn_u.psu_src_nodes
};

struct pfioc_if {
    char        ifname[IFNAMSIZ];
};

struct pfioc_tm {
    int         timeout;
    int         seconds;
};

struct pfioc_limit {
    int         index;
    unsigned    limit;
};

struct pfioc_altq_v0 {
    u_int32_t   action;
    u_int32_t   ticket;
    u_int32_t   nr;
    struct pf_altq_v0 altq;
};

struct pfioc_altq_v1 {
    u_int32_t   action;
    u_int32_t   ticket;
    u_int32_t   nr;
    /*
     * Placed here so code that only uses the above parameters can be
     * written entirely in terms of the v0 or v1 type.
     */
    u_int32_t   version;
    struct pf_altq_v1 altq;
};

/*
 * Latest version of struct pfioc_altq_vX.  This must move in lock-step with
 * the latest version of struct pf_altq_vX as it has that struct as a
 * member.
 */
#define PFIOC_ALTQ_VERSION  PF_ALTQ_VERSION

struct pfioc_qstats_v0 {
    u_int32_t   ticket;
    u_int32_t   nr;
    void       *buf;
    int         nbytes;
    u_int8_t    scheduler;
};

struct pfioc_qstats_v1 {
    u_int32_t   ticket;
    u_int32_t   nr;
    void       *buf;
    int         nbytes;
    u_int8_t    scheduler;
    /*
     * Placed here so code that only uses the above parameters can be
     * written entirely in terms of the v0 or v1 type.
     */
    u_int32_t   version;    /* Requested version of stats struct */
};

/* Latest version of struct pfioc_qstats_vX */
#define PFIOC_QSTATS_VERSION    1

struct pfioc_ruleset {
    u_int32_t   nr;
    char        path[MAXPATHLEN];
    char        name[PF_ANCHOR_NAME_SIZE];
};

#define PF_RULESET_ALTQ     (PF_RULESET_MAX)
#define PF_RULESET_TABLE    (PF_RULESET_MAX+1)
struct pfioc_trans {
    int         size;   /* number of elements */
    int         esize;  /* size of each element in bytes */
    struct pfioc_trans_e {
        int         rs_num;
        char        anchor[MAXPATHLEN];
        u_int32_t   ticket;
    }          *array;
};

#define PFR_FLAG_ATOMIC     0x00000001  /* unused */
#define PFR_FLAG_DUMMY      0x00000002
#define PFR_FLAG_FEEDBACK   0x00000004
#define PFR_FLAG_CLSTATS    0x00000008
#define PFR_FLAG_ADDRSTOO   0x00000010
#define PFR_FLAG_REPLACE    0x00000020
#define PFR_FLAG_ALLRSETS   0x00000040
#define PFR_FLAG_ALLMASK    0x0000007F
#ifdef _KERNEL
#define PFR_FLAG_USERIOCTL  0x10000000
#endif

struct pfioc_table {
    struct pfr_table    pfrio_table;
    void               *pfrio_buffer;
    int                 pfrio_esize;
    int                 pfrio_size;
    int                 pfrio_size2;
    int                 pfrio_nadd;
    int                 pfrio_ndel;
    int                 pfrio_nchange;
    int                 pfrio_flags;
    u_int32_t           pfrio_ticket;
};
#define pfrio_exists    pfrio_nadd
#define pfrio_nzero     pfrio_nadd
#define pfrio_nmatch    pfrio_nadd
#define pfrio_naddr     pfrio_size2
#define pfrio_setflag   pfrio_size2
#define pfrio_clrflag   pfrio_nadd
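
/*
 * Illustrative sketch (not part of the API): most table ioctls take a
 * struct pfioc_table that names the target table and points at an array of
 * struct pfr_addr elements.  Adding one IPv4 address could look roughly
 * like this (device path, error handling and the table name are
 * placeholders; DIOCRADDADDRS is defined below):
 *
 *	struct pfioc_table io;
 *	struct pfr_addr a;
 *
 *	memset(&io, 0, sizeof(io));
 *	memset(&a, 0, sizeof(a));
 *	strlcpy(io.pfrio_table.pfrt_name, "badhosts",
 *	    sizeof(io.pfrio_table.pfrt_name));
 *	a.pfra_af = AF_INET;
 *	a.pfra_net = 32;
 *	inet_pton(AF_INET, "192.0.2.1", &a.pfra_ip4addr);
 *	io.pfrio_buffer = &a;
 *	io.pfrio_esize = sizeof(a);
 *	io.pfrio_size = 1;
 *	if (ioctl(dev_fd, DIOCRADDADDRS, &io) == 0)
 *		;	// io.pfrio_nadd reports how many addresses were added
 */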

struct pfioc_iface {
    char     pfiio_name[IFNAMSIZ];
    void    *pfiio_buffer;
    int      pfiio_esize;
    int      pfiio_size;
    int      pfiio_nzero;
    int      pfiio_flags;
};

/*
 * ioctl operations
 */

#define DIOCSTART           _IO  ('D',  1)
#define DIOCSTOP            _IO  ('D',  2)
#define DIOCADDRULE         _IOWR('D',  4, struct pfioc_rule)
#define DIOCADDRULENV       _IOWR('D',  4, struct pfioc_nv)
#define DIOCGETRULES        _IOWR('D',  6, struct pfioc_rule)
#define DIOCGETRULE         _IOWR('D',  7, struct pfioc_rule)
#define DIOCGETRULENV       _IOWR('D',  7, struct pfioc_nv)
/* XXX cut 8 - 17 */
#define DIOCCLRSTATES       _IOWR('D', 18, struct pfioc_state_kill)
#define DIOCCLRSTATESNV     _IOWR('D', 18, struct pfioc_nv)
#define DIOCGETSTATE        _IOWR('D', 19, struct pfioc_state)
#define DIOCGETSTATENV      _IOWR('D', 19, struct pfioc_nv)
#define DIOCSETSTATUSIF     _IOWR('D', 20, struct pfioc_if)
#define DIOCGETSTATUS       _IOWR('D', 21, struct pf_status)
#define DIOCCLRSTATUS       _IO  ('D', 22)
#define DIOCNATLOOK         _IOWR('D', 23, struct pfioc_natlook)
#define DIOCSETDEBUG        _IOWR('D', 24, u_int32_t)
#define DIOCGETSTATES       _IOWR('D', 25, struct pfioc_states)
#define DIOCGETSTATESNV     _IOWR('D', 25, struct pfioc_nv)
#define DIOCCHANGERULE      _IOWR('D', 26, struct pfioc_rule)
/* XXX cut 26 - 28 */
#define DIOCSETTIMEOUT      _IOWR('D', 29, struct pfioc_tm)
#define DIOCGETTIMEOUT      _IOWR('D', 30, struct pfioc_tm)
#define DIOCADDSTATE        _IOWR('D', 37, struct pfioc_state)
#define DIOCCLRRULECTRS     _IO  ('D', 38)
#define DIOCGETLIMIT        _IOWR('D', 39, struct pfioc_limit)
#define DIOCSETLIMIT        _IOWR('D', 40, struct pfioc_limit)
#define DIOCKILLSTATES      _IOWR('D', 41, struct pfioc_state_kill)
#define DIOCKILLSTATESNV    _IOWR('D', 41, struct pfioc_nv)
#define DIOCSTARTALTQ       _IO  ('D', 42)
#define DIOCSTOPALTQ        _IO  ('D', 43)
#define DIOCADDALTQV0       _IOWR('D', 45, struct pfioc_altq_v0)
#define DIOCADDALTQV1       _IOWR('D', 45, struct pfioc_altq_v1)
#define DIOCGETALTQSV0      _IOWR('D', 47, struct pfioc_altq_v0)
#define DIOCGETALTQSV1      _IOWR('D', 47, struct pfioc_altq_v1)
#define DIOCGETALTQV0       _IOWR('D', 48, struct pfioc_altq_v0)
#define DIOCGETALTQV1       _IOWR('D', 48, struct pfioc_altq_v1)
#define DIOCCHANGEALTQV0    _IOWR('D', 49, struct pfioc_altq_v0)
#define DIOCCHANGEALTQV1    _IOWR('D', 49, struct pfioc_altq_v1)
#define DIOCGETQSTATSV0     _IOWR('D', 50, struct pfioc_qstats_v0)
#define DIOCGETQSTATSV1     _IOWR('D', 50, struct pfioc_qstats_v1)
#define DIOCBEGINADDRS      _IOWR('D', 51, struct pfioc_pooladdr)
#define DIOCADDADDR         _IOWR('D', 52, struct pfioc_pooladdr)
#define DIOCGETADDRS        _IOWR('D', 53, struct pfioc_pooladdr)
#define DIOCGETADDR         _IOWR('D', 54, struct pfioc_pooladdr)
#define DIOCCHANGEADDR      _IOWR('D', 55, struct pfioc_pooladdr)
/* XXX cut 55 - 57 */
#define DIOCGETRULESETS     _IOWR('D', 58, struct pfioc_ruleset)
#define DIOCGETRULESET      _IOWR('D', 59, struct pfioc_ruleset)
#define DIOCRCLRTABLES      _IOWR('D', 60, struct pfioc_table)
#define DIOCRADDTABLES      _IOWR('D', 61, struct pfioc_table)
#define DIOCRDELTABLES      _IOWR('D', 62, struct pfioc_table)
#define DIOCRGETTABLES      _IOWR('D', 63, struct pfioc_table)
#define DIOCRGETTSTATS      _IOWR('D', 64, struct pfioc_table)
#define DIOCRCLRTSTATS      _IOWR('D', 65, struct pfioc_table)
#define DIOCRCLRADDRS       _IOWR('D', 66, struct pfioc_table)
#define DIOCRADDADDRS       _IOWR('D', 67, struct pfioc_table)
#define DIOCRDELADDRS       _IOWR('D', 68, struct pfioc_table)
#define DIOCRSETADDRS       _IOWR('D', 69, struct pfioc_table)
#define DIOCRGETADDRS       _IOWR('D', 70, struct pfioc_table)
#define DIOCRGETASTATS      _IOWR('D', 71, struct pfioc_table)
#define DIOCRCLRASTATS      _IOWR('D', 72, struct pfioc_table)
#define DIOCRTSTADDRS       _IOWR('D', 73, struct pfioc_table)
#define DIOCRSETTFLAGS      _IOWR('D', 74, struct pfioc_table)
#define DIOCRINADEFINE      _IOWR('D', 77, struct pfioc_table)
#define DIOCOSFPFLUSH       _IO('D', 78)
#define DIOCOSFPADD         _IOWR('D', 79, struct pf_osfp_ioctl)
#define DIOCOSFPGET         _IOWR('D', 80, struct pf_osfp_ioctl)
#define DIOCXBEGIN          _IOWR('D', 81, struct pfioc_trans)
#define DIOCXCOMMIT         _IOWR('D', 82, struct pfioc_trans)
#define DIOCXROLLBACK       _IOWR('D', 83, struct pfioc_trans)
#define DIOCGETSRCNODES     _IOWR('D', 84, struct pfioc_src_nodes)
#define DIOCCLRSRCNODES     _IO('D', 85)
#define DIOCSETHOSTID       _IOWR('D', 86, u_int32_t)
#define DIOCIGETIFACES      _IOWR('D', 87, struct pfioc_iface)
#define DIOCSETIFFLAG       _IOWR('D', 89, struct pfioc_iface)
#define DIOCCLRIFFLAG       _IOWR('D', 90, struct pfioc_iface)
#define DIOCKILLSRCNODES    _IOWR('D', 91, struct pfioc_src_node_kill)
#define DIOCKEEPCOUNTERS    _IOWR('D', 92, struct pfioc_nv)

struct pf_ifspeed_v0 {
    char        ifname[IFNAMSIZ];
    u_int32_t   baudrate;
};

struct pf_ifspeed_v1 {
    char        ifname[IFNAMSIZ];
    u_int32_t   baudrate32;
    /* layout identical to struct pf_ifspeed_v0 up to this point */
    u_int64_t   baudrate;
};

/* Latest version of struct pf_ifspeed_vX */
#define PF_IFSPEED_VERSION  1

#define DIOCGIFSPEEDV0  _IOWR('D', 92, struct pf_ifspeed_v0)
#define DIOCGIFSPEEDV1  _IOWR('D', 92, struct pf_ifspeed_v1)

/*
 * Compatibility and convenience macros
 */
#ifndef _KERNEL
#ifdef PFIOC_USE_LATEST
/*
 * Maintaining in-tree consumers of the ioctl interface is easier when that
 * code can be written in terms of old names that refer to the latest
 * interface version, as that reduces the required changes in the consumers
 * to those that are functionally necessary to accommodate a new interface
 * version.
 */
#define pfioc_altq      __CONCAT(pfioc_altq_v, PFIOC_ALTQ_VERSION)
#define pfioc_qstats    __CONCAT(pfioc_qstats_v, PFIOC_QSTATS_VERSION)
#define pf_ifspeed      __CONCAT(pf_ifspeed_v, PF_IFSPEED_VERSION)

#define DIOCADDALTQ     __CONCAT(DIOCADDALTQV, PFIOC_ALTQ_VERSION)
#define DIOCGETALTQS    __CONCAT(DIOCGETALTQSV, PFIOC_ALTQ_VERSION)
#define DIOCGETALTQ     __CONCAT(DIOCGETALTQV, PFIOC_ALTQ_VERSION)
#define DIOCCHANGEALTQ  __CONCAT(DIOCCHANGEALTQV, PFIOC_ALTQ_VERSION)
#define DIOCGETQSTATS   __CONCAT(DIOCGETQSTATSV, PFIOC_QSTATS_VERSION)
#define DIOCGIFSPEED    __CONCAT(DIOCGIFSPEEDV, PF_IFSPEED_VERSION)
#else
/*
 * When building out-of-tree code that is written for the old interface,
 * such as may exist in ports for example, resolve the old struct tags and
 * ioctl command names to the v0 versions.
 */
#define pfioc_altq      __CONCAT(pfioc_altq_v, 0)
#define pfioc_qstats    __CONCAT(pfioc_qstats_v, 0)
#define pf_ifspeed      __CONCAT(pf_ifspeed_v, 0)

#define DIOCADDALTQ     __CONCAT(DIOCADDALTQV, 0)
#define DIOCGETALTQS    __CONCAT(DIOCGETALTQSV, 0)
#define DIOCGETALTQ     __CONCAT(DIOCGETALTQV, 0)
#define DIOCCHANGEALTQ  __CONCAT(DIOCCHANGEALTQV, 0)
#define DIOCGETQSTATS   __CONCAT(DIOCGETQSTATSV, 0)
#define DIOCGIFSPEED    __CONCAT(DIOCGIFSPEEDV, 0)
#endif /* PFIOC_USE_LATEST */
#endif /* _KERNEL */
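
/*
 * Illustrative sketch (not part of the API): a userland consumer that
 * always wants the newest layout defines PFIOC_USE_LATEST before including
 * this header and keeps using the unversioned names.  Device path, error
 * handling and the interface name are placeholders.
 *
 *	#define PFIOC_USE_LATEST
 *	#include <net/pfvar.h>
 *
 *	struct pf_ifspeed ifsp;		// resolves to pf_ifspeed_v1
 *
 *	memset(&ifsp, 0, sizeof(ifsp));
 *	strlcpy(ifsp.ifname, "em0", sizeof(ifsp.ifname));
 *	if (ioctl(dev_fd, DIOCGIFSPEED, &ifsp) == 0)
 *		;	// ifsp.baudrate holds the 64-bit link speed
 */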

#ifdef _KERNEL
LIST_HEAD(pf_ksrc_node_list, pf_ksrc_node);
struct pf_srchash {
    struct pf_ksrc_node_list    nodes;
    struct mtx                  lock;
};

struct pf_keyhash {
    LIST_HEAD(, pf_state_key)   keys;
    struct mtx                  lock;
};

struct pf_idhash {
    LIST_HEAD(, pf_state)       states;
    struct mtx                  lock;
};

extern u_long   pf_hashmask;
extern u_long   pf_srchashmask;
#define PF_HASHSIZ      (131072)
#define PF_SRCHASHSIZ   (PF_HASHSIZ/4)
VNET_DECLARE(struct pf_keyhash *, pf_keyhash);
VNET_DECLARE(struct pf_idhash *, pf_idhash);
#define V_pf_keyhash    VNET(pf_keyhash)
#define V_pf_idhash     VNET(pf_idhash)
VNET_DECLARE(struct pf_srchash *, pf_srchash);
#define V_pf_srchash    VNET(pf_srchash)

#define PF_IDHASH(s)    (be64toh((s)->id) % (pf_hashmask + 1))

VNET_DECLARE(void *, pf_swi_cookie);
#define V_pf_swi_cookie VNET(pf_swi_cookie)
VNET_DECLARE(struct intr_event *, pf_swi_ie);
#define V_pf_swi_ie     VNET(pf_swi_ie)

VNET_DECLARE(uint64_t, pf_stateid[MAXCPU]);
#define V_pf_stateid    VNET(pf_stateid)

TAILQ_HEAD(pf_altqqueue, pf_altq);
VNET_DECLARE(struct pf_altqqueue, pf_altqs[4]);
#define V_pf_altqs              VNET(pf_altqs)
VNET_DECLARE(struct pf_kpalist, pf_pabuf);
#define V_pf_pabuf              VNET(pf_pabuf)

VNET_DECLARE(u_int32_t, ticket_altqs_active);
#define V_ticket_altqs_active   VNET(ticket_altqs_active)
VNET_DECLARE(u_int32_t, ticket_altqs_inactive);
#define V_ticket_altqs_inactive VNET(ticket_altqs_inactive)
VNET_DECLARE(int, altqs_inactive_open);
#define V_altqs_inactive_open   VNET(altqs_inactive_open)
VNET_DECLARE(u_int32_t, ticket_pabuf);
#define V_ticket_pabuf          VNET(ticket_pabuf)
VNET_DECLARE(struct pf_altqqueue *, pf_altqs_active);
#define V_pf_altqs_active       VNET(pf_altqs_active)
VNET_DECLARE(struct pf_altqqueue *, pf_altq_ifs_active);
#define V_pf_altq_ifs_active    VNET(pf_altq_ifs_active)
VNET_DECLARE(struct pf_altqqueue *, pf_altqs_inactive);
#define V_pf_altqs_inactive     VNET(pf_altqs_inactive)
VNET_DECLARE(struct pf_altqqueue *, pf_altq_ifs_inactive);
#define V_pf_altq_ifs_inactive  VNET(pf_altq_ifs_inactive)

VNET_DECLARE(struct pf_krulequeue, pf_unlinked_rules);
#define V_pf_unlinked_rules     VNET(pf_unlinked_rules)

void    pf_initialize(void);
void    pf_mtag_initialize(void);
void    pf_mtag_cleanup(void);
void    pf_cleanup(void);

struct pf_mtag  *pf_get_mtag(struct mbuf *);

extern void pf_calc_skip_steps(struct pf_krulequeue *);
#ifdef ALTQ
extern void pf_altq_ifnet_event(struct ifnet *, int);
#endif
VNET_DECLARE(uma_zone_t, pf_state_z);
#define V_pf_state_z        VNET(pf_state_z)
VNET_DECLARE(uma_zone_t, pf_state_key_z);
#define V_pf_state_key_z    VNET(pf_state_key_z)
VNET_DECLARE(uma_zone_t, pf_state_scrub_z);
#define V_pf_state_scrub_z  VNET(pf_state_scrub_z)

extern void pf_purge_thread(void *);
extern void pf_unload_vnet_purge(void);
extern void pf_intr(void *);
extern void pf_purge_expired_src_nodes(void);

extern int  pf_unlink_state(struct pf_state *, u_int);
#define PF_ENTER_LOCKED     0x00000001
#define PF_RETURN_LOCKED    0x00000002
extern int  pf_state_insert(struct pfi_kkif *,
                struct pfi_kkif *,
                struct pf_state_key *,
                struct pf_state_key *,
                struct pf_state *);
extern struct pf_state  *pf_alloc_state(int);
extern void pf_free_state(struct pf_state *);

static __inline void
pf_ref_state(struct pf_state *s)
{

    refcount_acquire(&s->refs);
}

static __inline int
pf_release_state(struct pf_state *s)
{

    if (refcount_release(&s->refs)) {
        pf_free_state(s);
        return (1);
    } else
        return (0);
}

static __inline int
pf_release_staten(struct pf_state *s, u_int n)
{

    if (refcount_releasen(&s->refs, n)) {
        pf_free_state(s);
        return (1);
    } else
        return (0);
}
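
/*
 * Illustrative sketch (not part of the API): code that keeps a state
 * pointer beyond the scope in which it was looked up takes a reference and
 * drops it when done; the last release frees the state.  The surrounding
 * locking is a placeholder.
 *
 *	pf_ref_state(s);	// keep s alive after unlocking the row
 *	PF_STATE_UNLOCK(s);
 *	...
 *	if (pf_release_state(s))
 *		;	// that was the last reference; s has been freed
 */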

extern struct pf_state      *pf_find_state_byid(uint64_t, uint32_t);
extern struct pf_state      *pf_find_state_all(struct pf_state_key_cmp *,
                                u_int, int *);
extern struct pf_ksrc_node  *pf_find_src_node(struct pf_addr *,
                                struct pf_krule *, sa_family_t, int);
extern void     pf_unlink_src_node(struct pf_ksrc_node *);
extern u_int    pf_free_src_nodes(struct pf_ksrc_node_list *);
extern void     pf_print_state(struct pf_state *);
extern void     pf_print_flags(u_int8_t);
extern u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t,
                    u_int8_t);
extern u_int16_t pf_proto_cksum_fixup(struct mbuf *, u_int16_t,
                    u_int16_t, u_int16_t, u_int8_t);

VNET_DECLARE(struct ifnet *, sync_ifp);
#define V_sync_ifp          VNET(sync_ifp)
VNET_DECLARE(struct pf_krule, pf_default_rule);
#define V_pf_default_rule   VNET(pf_default_rule)
extern void pf_addrcpy(struct pf_addr *, struct pf_addr *,
                u_int8_t);
void        pf_free_rule(struct pf_krule *);

#ifdef INET
int pf_test(int, int, struct ifnet *, struct mbuf **, struct inpcb *);
int pf_normalize_ip(struct mbuf **, int, struct pfi_kkif *, u_short *,
        struct pf_pdesc *);
#endif /* INET */

#ifdef INET6
int pf_test6(int, int, struct ifnet *, struct mbuf **, struct inpcb *);
int pf_normalize_ip6(struct mbuf **, int, struct pfi_kkif *, u_short *,
        struct pf_pdesc *);
void pf_poolmask(struct pf_addr *, struct pf_addr*,
        struct pf_addr *, struct pf_addr *, u_int8_t);
void pf_addr_inc(struct pf_addr *, sa_family_t);
int pf_refragment6(struct ifnet *, struct mbuf **, struct m_tag *);
#endif /* INET6 */

u_int32_t pf_new_isn(struct pf_state *);
void   *pf_pull_hdr(struct mbuf *, int, void *, int, u_short *, u_short *,
        sa_family_t);
void    pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t);
void    pf_change_proto_a(struct mbuf *, void *, u_int16_t *, u_int32_t,
        u_int8_t);
void    pf_change_tcp_a(struct mbuf *, void *, u_int16_t *, u_int32_t);
void    pf_patch_16_unaligned(struct mbuf *, u_int16_t *, void *, u_int16_t,
        bool, u_int8_t);
void    pf_patch_32_unaligned(struct mbuf *, u_int16_t *, void *, u_int32_t,
        bool, u_int8_t);
void    pf_send_deferred_syn(struct pf_state *);
int     pf_match_addr(u_int8_t, struct pf_addr *, struct pf_addr *,
        struct pf_addr *, sa_family_t);
int     pf_match_addr_range(struct pf_addr *, struct pf_addr *,
        struct pf_addr *, sa_family_t);
int     pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);

void    pf_normalize_init(void);
void    pf_normalize_cleanup(void);
int     pf_normalize_tcp(int, struct pfi_kkif *, struct mbuf *, int, int, void *,
        struct pf_pdesc *);
void    pf_normalize_tcp_cleanup(struct pf_state *);
int     pf_normalize_tcp_init(struct mbuf *, int, struct pf_pdesc *,
        struct tcphdr *, struct pf_state_peer *, struct pf_state_peer *);
int     pf_normalize_tcp_stateful(struct mbuf *, int, struct pf_pdesc *,
        u_short *, struct tcphdr *, struct pf_state *,
        struct pf_state_peer *, struct pf_state_peer *, int *);
u_int32_t
        pf_state_expires(const struct pf_state *);
void    pf_purge_expired_fragments(void);
void    pf_purge_fragments(uint32_t);
int     pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *,
        int);
int     pf_socket_lookup(int, struct pf_pdesc *, struct mbuf *);
struct pf_state_key *pf_alloc_state_key(int);
void    pfr_initialize(void);
void    pfr_cleanup(void);
int     pfr_match_addr(struct pfr_ktable *, struct pf_addr *, sa_family_t);
void    pfr_update_stats(struct pfr_ktable *, struct pf_addr *, sa_family_t,
        u_int64_t, int, int, int);
int     pfr_pool_get(struct pfr_ktable *, int *, struct pf_addr *, sa_family_t);
void    pfr_dynaddr_update(struct pfr_ktable *, struct pfi_dynaddr *);
struct pfr_ktable *
        pfr_attach_table(struct pf_kruleset *, char *);
void    pfr_detach_table(struct pfr_ktable *);
int     pfr_clr_tables(struct pfr_table *, int *, int);
int     pfr_add_tables(struct pfr_table *, int, int *, int);
int     pfr_del_tables(struct pfr_table *, int, int *, int);
int     pfr_table_count(struct pfr_table *, int);
int     pfr_get_tables(struct pfr_table *, struct pfr_table *, int *, int);
int     pfr_get_tstats(struct pfr_table *, struct pfr_tstats *, int *, int);
int     pfr_clr_tstats(struct pfr_table *, int, int *, int);
int     pfr_set_tflags(struct pfr_table *, int, int, int, int *, int *, int);
int     pfr_clr_addrs(struct pfr_table *, int *, int);
int     pfr_insert_kentry(struct pfr_ktable *, struct pfr_addr *, long);
int     pfr_add_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
        int);
int     pfr_del_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
        int);
int     pfr_set_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
        int *, int *, int *, int, u_int32_t);
int     pfr_get_addrs(struct pfr_table *, struct pfr_addr *, int *, int);
int     pfr_get_astats(struct pfr_table *, struct pfr_astats *, int *, int);
int     pfr_clr_astats(struct pfr_table *, struct pfr_addr *, int, int *,
        int);
int     pfr_tst_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
        int);
int     pfr_ina_begin(struct pfr_table *, u_int32_t *, int *, int);
int     pfr_ina_rollback(struct pfr_table *, u_int32_t, int *, int);
int     pfr_ina_commit(struct pfr_table *, u_int32_t, int *, int *, int);
int     pfr_ina_define(struct pfr_table *, struct pfr_addr *, int, int *,
        int *, u_int32_t, int);

MALLOC_DECLARE(PFI_MTYPE);
VNET_DECLARE(struct pfi_kkif *, pfi_all);
#define V_pfi_all   VNET(pfi_all)

void    pfi_initialize(void);
void    pfi_initialize_vnet(void);
void    pfi_cleanup(void);
void    pfi_cleanup_vnet(void);
void    pfi_kkif_ref(struct pfi_kkif *);
void    pfi_kkif_unref(struct pfi_kkif *);
struct pfi_kkif *pfi_kkif_find(const char *);
struct pfi_kkif *pfi_kkif_attach(struct pfi_kkif *, const char *);
int     pfi_kkif_match(struct pfi_kkif *, struct pfi_kkif *);
void    pfi_kkif_purge(void);
int     pfi_match_addr(struct pfi_dynaddr *, struct pf_addr *,
        sa_family_t);
int     pfi_dynaddr_setup(struct pf_addr_wrap *, sa_family_t);
void    pfi_dynaddr_remove(struct pfi_dynaddr *);
void    pfi_dynaddr_copyout(struct pf_addr_wrap *);
void    pfi_update_status(const char *, struct pf_status *);
void    pfi_get_ifaces(const char *, struct pfi_kif *, int *);
int     pfi_set_flags(const char *, int);
int     pfi_clear_flags(const char *, int);

int     pf_match_tag(struct mbuf *, struct pf_krule *, int *, int);
int     pf_tag_packet(struct mbuf *, struct pf_pdesc *, int);
int     pf_addr_cmp(struct pf_addr *, struct pf_addr *,
        sa_family_t);
void    pf_qid2qname(u_int32_t, char *);

VNET_DECLARE(struct pf_kstatus, pf_status);
#define V_pf_status VNET(pf_status)

struct pf_limit {
    uma_zone_t  zone;
    u_int       limit;
};
VNET_DECLARE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
#define V_pf_limits VNET(pf_limits)

#endif /* _KERNEL */

#ifdef _KERNEL
VNET_DECLARE(struct pf_kanchor_global, pf_anchors);
#define V_pf_anchors        VNET(pf_anchors)
VNET_DECLARE(struct pf_kanchor, pf_main_anchor);
#define V_pf_main_anchor    VNET(pf_main_anchor)
#define pf_main_ruleset     V_pf_main_anchor.ruleset

void    pf_init_kruleset(struct pf_kruleset *);
int     pf_kanchor_setup(struct pf_krule *,
            const struct pf_kruleset *, const char *);
int     pf_kanchor_nvcopyout(const struct pf_kruleset *,
            const struct pf_krule *, nvlist_t *);
int     pf_kanchor_copyout(const struct pf_kruleset *,
            const struct pf_krule *, struct pfioc_rule *);
void    pf_kanchor_remove(struct pf_krule *);
void    pf_remove_if_empty_kruleset(struct pf_kruleset *);
struct pf_kruleset  *pf_find_kruleset(const char *);
struct pf_kruleset  *pf_find_or_create_kruleset(const char *);
void    pf_rs_initialize(void);

void    pf_krule_free(struct pf_krule *);
#endif

/* The fingerprint functions can be linked into userland programs (tcpdump) */
int     pf_osfp_add(struct pf_osfp_ioctl *);
#ifdef _KERNEL
struct pf_osfp_enlist *
        pf_osfp_fingerprint(struct pf_pdesc *, struct mbuf *, int,
            const struct tcphdr *);
#endif /* _KERNEL */
void    pf_osfp_flush(void);
int     pf_osfp_get(struct pf_osfp_ioctl *);
int     pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t);

#ifdef _KERNEL
void    pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

void    pf_step_into_anchor(struct pf_kanchor_stackframe *, int *,
            struct pf_kruleset **, int, struct pf_krule **,
            struct pf_krule **, int *);
int     pf_step_out_of_anchor(struct pf_kanchor_stackframe *, int *,
            struct pf_kruleset **, int, struct pf_krule **,
            struct pf_krule **, int *);

int     pf_map_addr(u_int8_t, struct pf_krule *,
            struct pf_addr *, struct pf_addr *,
            struct pf_addr *, struct pf_ksrc_node **);
struct pf_krule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
            int, int, struct pfi_kkif *, struct pf_ksrc_node **,
            struct pf_state_key **, struct pf_state_key **,
            struct pf_addr *, struct pf_addr *,
            uint16_t, uint16_t, struct pf_kanchor_stackframe *);

struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct pf_addr *,
            struct pf_addr *, u_int16_t, u_int16_t);
struct pf_state_key *pf_state_key_clone(struct pf_state_key *);

struct pfi_kkif *pf_kkif_create(int);
void    pf_kkif_free(struct pfi_kkif *);
void    pf_kkif_zero(struct pfi_kkif *);
#endif /* _KERNEL */

#endif /* _NET_PFVAR_H_ */