/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};
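
/*
 * Tags live in two hash tables at once: pf_tagset.namehash is keyed by
 * the tag name (for tagname2tag() lookups when rules are loaded) and
 * pf_tagset.taghash by the numeric tag (for tag_unref() when rules are
 * freed), so both directions are O(1) on average.  The "avail" bitset
 * tracks which of the TAGID_MAX tag values are still free.
 */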

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		 hook_pf(void);
static void		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static void		 pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock			pf_rules_lock;
struct sx			pf_ioctl_lock;
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif
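
	/*
	 * The PFTM_*_VAL defaults below come from pfvar.h; each one can
	 * later be overridden per-ruleset with "set timeout" (which
	 * reaches the kernel via DIOCSETTIMEOUT) and inspected with
	 * "pfctl -st".
	 */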
	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
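
/*
 * Keeping the hash sizes a power of two lets the index computations
 * below reduce to "hash & ts->mask" instead of a modulo; a bogus
 * loader tunable (zero or not a power of two) silently falls back to
 * the compiled-in default rather than failing the attach.
 */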

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}
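
/*
 * ALTQ configuration changes are transactional: userland first gets a
 * ticket from pf_begin_altq(), stages the new queues on the "inactive"
 * lists, and then either pf_commit_altq() swaps them with the active
 * lists or pf_rollback_altq() throws them away.  A stale ticket means
 * another transaction raced us, so the operation is refused.
 */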

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * When the discipline is no longer referenced, it was overridden
	 * by a new one.  If so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
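
/*
 * Rule loading uses the same begin/rollback/commit shape, per ruleset
 * and per rs_num (scrub/filter/nat/...).  Roughly, a full "pfctl -f"
 * is: DIOCXBEGIN (-> pf_begin_rules, new ticket), DIOCADDRULE for each
 * rule against that ticket, then DIOCXCOMMIT (-> pf_commit_rules) or
 * DIOCXROLLBACK on error.
 */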

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
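
/*
 * Multi-byte rule fields are folded into the MD5 state in network byte
 * order so that the resulting ruleset checksum is comparable between
 * pfsync peers of different endianness; strings and single bytes can
 * be hashed as-is.
 */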

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

static bool
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{
	MD5_CTX		ctx[2];
	u_int8_t	digest[2][PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx[0]);
	MD5Init(&ctx[1]);
	pf_hash_rule(&ctx[0], a);
	pf_hash_rule(&ctx[1], b);
	MD5Final(digest[0], &ctx[0]);
	MD5Final(digest[1], &ctx[1]);

	return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
}
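
/*
 * pf_krule_compare() treats two rules as equal when their MD5 digests
 * match.  pf_commit_rules() uses this below to carry evaluation,
 * packet and byte counters over from an old rule to its unchanged
 * replacement when "keep_counters" is set; a hash collision would
 * merely migrate counters to the wrong rule, not affect filtering.
 */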

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) && !pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				pf_counter_u64_critical_enter();
				pf_counter_u64_add_protected(&rule->evaluations,
				    pf_counter_u64_fetch(&tail->evaluations));
				pf_counter_u64_add_protected(&rule->packets[0],
				    pf_counter_u64_fetch(&tail->packets[0]));
				pf_counter_u64_add_protected(&rule->packets[1],
				    pf_counter_u64_fetch(&tail->packets[1]));
				pf_counter_u64_add_protected(&rule->bytes[0],
				    pf_counter_u64_fetch(&tail->bytes[0]));
				pf_counter_u64_add_protected(&rule->bytes[1],
				    pf_counter_u64_fetch(&tail->bytes[1]));
				pf_counter_u64_critical_exit();
			}
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}
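
/*
 * When exporting a source node the connection-rate counter has to be
 * rescaled: of a rate window of conn_rate.seconds, "diff" seconds have
 * already elapsed, so the count is reduced by count * diff / seconds,
 * i.e. linearly decayed toward zero over the window.
 */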

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
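
/*
 * Version detection relies on the fact that struct pfioc_altq_v0
 * predates the "version" field: a v0-sized argument is implicitly
 * version 0, anything larger carries its version in pa->version.
 * Fields that shrank between versions (64-bit bandwidths into v0's
 * 32-bit slots) are saturated rather than truncated on export.
 */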

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq		*altq;
	u_int32_t		 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static int
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bzero(kpool, sizeof(*kpool));

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;

	return (0);
}

static void
pf_krule_to_rule(struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (rule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}
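
/*
 * struct pf_rule is the stable user-visible ABI; struct pf_krule is the
 * in-kernel representation with pointers, per-CPU counters and multiple
 * labels.  The converse helper below therefore validates everything it
 * copies in: strings with pf_user_strcpy() and addresses with
 * pf_check_rule_addr(), since the source is untrusted userland memory.
 */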

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bzero(krule, sizeof(*krule));

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
	if (ret != 0)
		return (ret);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}

static int
pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
    struct pf_kstate_kill *kill)
{
	int ret;

	bzero(kill, sizeof(*kill));

	bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
	kill->psk_af = psk->psk_af;
	kill->psk_proto = psk->psk_proto;
	bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
	bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
	ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
	    sizeof(kill->psk_ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
	    sizeof(kill->psk_label));
	if (ret != 0)
		return (ret);

	return (0);
}
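
/*
 * Worker for DIOCADDRULE/DIOCADDRULENV: takes an already-converted
 * pf_krule, allocates its counters with M_WAITOK before taking the
 * rules write lock, then validates tickets, resolves queue/tag names
 * and attaches tables under the lock.  On any failure the rule and
 * the pre-allocated kif are freed via the errout paths.
 */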

static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
    struct thread *td)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*tail;
	struct pf_kpooladdr	*pa;
	struct pfi_kkif		*kif = NULL;
	int			 rs_num;
	int			 error = 0;

	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
		error = EINVAL;
		goto errout_unlocked;
	}

#define	ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (rule->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
	}
	rule->states_cur = counter_u64_alloc(M_WAITOK);
	rule->states_tot = counter_u64_alloc(M_WAITOK);
	rule->src_nodes = counter_u64_alloc(M_WAITOK);
	rule->cuid = td->td_ucred->cr_ruid;
	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
	TAILQ_INIT(&rule->rpool.list);

	PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
	MPASS(!rule->allrulelinked);
	rule->allrulelinked = true;
	V_pf_allrulecount++;
#endif
	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		ERROUT(EINVAL);
	rs_num = pf_get_ruleset_number(rule->action);
	if (rs_num >= PF_RULESET_MAX)
		ERROUT(EINVAL);
	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
		    ruleset->rules[rs_num].inactive.ticket));
		ERROUT(EBUSY);
	}
	if (pool_ticket != V_ticket_pabuf) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pool_ticket: %d != %d\n", pool_ticket,
		    V_ticket_pabuf));
		ERROUT(EBUSY);
	}

	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
	    pf_krulequeue);
	if (tail)
		rule->nr = tail->nr + 1;
	else
		rule->nr = 0;
	if (rule->ifname[0]) {
		rule->kif = pfi_kkif_attach(kif, rule->ifname);
		kif = NULL;
		pfi_kkif_ref(rule->kif);
	} else
		rule->kif = NULL;

	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
		error = EBUSY;

#ifdef ALTQ
	/* set queue IDs */
	if (rule->qname[0] != 0) {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != 0) {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			rule->pqid = rule->qid;
	}
#endif
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	if (rule->rt && !rule->direction)
		error = EINVAL;
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
		error = ENOMEM;
	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
		error = ENOMEM;
	if (pf_kanchor_setup(rule, ruleset, anchor_call))
		error = EINVAL;
	if (rule->scrub_flags & PFSTATE_SETPRIO &&
	    (rule->set_prio[0] > PF_PRIO_MAX ||
	    rule->set_prio[1] > PF_PRIO_MAX))
		error = EINVAL;
	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
		if (pa->addr.type == PF_ADDR_TABLE) {
			pa->addr.p.tbl = pfr_attach_table(ruleset,
			    pa->addr.v.tblname);
			if (pa->addr.p.tbl == NULL)
				error = ENOMEM;
		}
	rule->overload_tbl = NULL;
	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_NOPFROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		pf_free_rule(rule);
		rule = NULL;
		ERROUT(error);
	}

	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	pf_counter_u64_zero(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_zero(&rule->packets[i]);
		pf_counter_u64_zero(&rule->bytes[i]);
	}
	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
	    rule, entries);
	ruleset->rules[rs_num].inactive.rcount++;
	PF_RULES_WUNLOCK();

	return (0);

#undef ERROUT
errout:
	PF_RULES_WUNLOCK();
errout_unlocked:
	pf_kkif_free(kif);
	pf_krule_free(rule);
	return (error);
}

/* Return true if any of the rule's labels matches 'label' exactly. */
static bool
pf_label_match(const struct pf_krule *rule, const char *label)
{
	int i = 0;

	while (*rule->label[i]) {
		if (strcmp(rule->label[i], label) == 0)
			return (true);
		i++;
	}

	return (false);
}

static unsigned int
pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
{
	struct pf_kstate *match;
	int more = 0;
	unsigned int killed = 0;

	/* Call with unlocked hashrow */

	match = pf_find_state_all(key, dir, &more);
	if (match && !more) {
		pf_unlink_state(match, 0);
		killed++;
	}

	return (killed);
}

/*
 * Kill all states in the given idhash row that match the criteria
 * in 'psk'; returns the number of states removed.
 */
static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_kstate *s;
	struct pf_state_key *sk;
	struct pf_addr *srcaddr, *dstaddr;
	struct pf_state_key_cmp match_key;
	int idx, killed = 0;
	unsigned int dir;
	u_int16_t srcport, dstport;
	struct pfi_kkif *kif;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/* For floating states look at the original kif. */
		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (!PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (!PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (!PF_MATCHA(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->rt_addr, sk->af))
			continue;
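
		/*
		 * Port filters carry a comparison operator from the
		 * userland request; pf_match_port() applies it to the
		 * port bounds against the wire-side port resolved above.
		 */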
		if (psk->psk_src.port_op != 0 &&
		    !pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    !pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    !pf_label_match(s->rule.ptr, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/*
			 * Create the key to find matching states, with
			 * lock held.
			 */

			bzero(&match_key, sizeof(match_key));

			if (s->direction == PF_OUT) {
				dir = PF_IN;
				idx = PF_SK_STACK;
			} else {
				dir = PF_OUT;
				idx = PF_SK_WIRE;
			}

			match_key.af = s->key[idx]->af;
			match_key.proto = s->key[idx]->proto;
			PF_ACPY(&match_key.addr[0],
			    &s->key[idx]->addr[1], match_key.af);
			match_key.port[0] = s->key[idx]->port[1];
			PF_ACPY(&match_key.addr[1],
			    &s->key[idx]->addr[0], match_key.af);
			match_key.port[1] = s->key[idx]->port[0];
		}

		pf_unlink_state(s, PF_ENTER_LOCKED);
		killed++;

		if (psk->psk_kill_match)
			killed += pf_kill_matching_state(&match_key, dir);

		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}

static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error = 0;
	PF_RULES_RLOCK_TRACKER;

#define	ERROUT_IOCTL(target, x)						\
    do {								\
	    error = (x);						\
	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
	    goto target;						\
    } while (0)

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETRULENV:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATENV:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCGETSTATUSNV:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETSTATESV2:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETALTQV0:
		case DIOCGETALTQV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCGETSYNCOOKIES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV0:
		case DIOCGIFSPEEDV1:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATENV:
		case DIOCGETSTATUS:
		case DIOCGETSTATUSNV:
		case DIOCGETSTATES:
		case DIOCGETSTATESV2:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETALTQV0:
		case DIOCGETALTQV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCGETSYNCOOKIES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV1:
		case DIOCGIFSPEEDV0:
		case DIOCGETRULENV:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
	case DIOCSTART:
		sx_xlock(&pf_ioctl_lock);
		if (V_pf_status.running)
			error = EEXIST;
		else {
			int cpu;

			hook_pf();
			V_pf_status.running = 1;
			V_pf_status.since = time_second;

			CPU_FOREACH(cpu)
				V_pf_stateid[cpu] = time_second;

			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		sx_xlock(&pf_ioctl_lock);
		if (!V_pf_status.running)
			error = ENOENT;
		else {
			V_pf_status.running = 0;
			dehook_pf();
			V_pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;

	case DIOCADDRULENV: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_krule *rule = NULL;
		const char *anchor = "", *anchor_call = "";
		uint32_t ticket = 0, pool_ticket = 0;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (!nvlist_exists_number(nvl, "ticket"))
			ERROUT(EINVAL);
		ticket = nvlist_get_number(nvl, "ticket");

		if (!nvlist_exists_number(nvl, "pool_ticket"))
			ERROUT(EINVAL);
		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
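
		/*
		 * Sketch of the userland side (illustrative only, not taken
		 * from pfctl): the request is a packed nvlist carrying both
		 * tickets and the rule itself:
		 *
		 *	nvlist_t *nvl = nvlist_create(0);
		 *	struct pfioc_nv nv;
		 *
		 *	nvlist_add_number(nvl, "ticket", ticket);
		 *	nvlist_add_number(nvl, "pool_ticket", pool_ticket);
		 *	nvlist_add_nvlist(nvl, "rule", nvrule);
		 *	nv.data = nvlist_pack(nvl, &nv.len);
		 *	nv.size = nv.len;
		 *	ioctl(dev, DIOCADDRULENV, &nv);
		 */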
		if (!nvlist_exists_nvlist(nvl, "rule"))
			ERROUT(EINVAL);

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
		    rule);
		if (error)
			ERROUT(error);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		if ((error = nvlist_error(nvl)))
			ERROUT(error);

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
		    anchor_call, td);

		nvlist_destroy(nvl);
		free(nvlpacked, M_TEMP);
		break;
#undef ERROUT
DIOCADDRULENV_error:
		pf_krule_free(rule);
		nvlist_destroy(nvl);
		free(nvlpacked, M_TEMP);

		break;
	}
	case DIOCADDRULE: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_krule *rule;

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
		error = pf_rule_to_krule(&pr->rule, rule);
		if (error != 0) {
			free(rule, M_PFRULE);
			break;
		}

		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
		    pr->anchor, pr->anchor_call, td);
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_kruleset *ruleset;
		struct pf_krule *tail;
		int rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_krulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_kruleset *ruleset;
		struct pf_krule *rule;
		int rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}

		pf_krule_to_rule(rule, &pr->rule);

		if (pf_kanchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);

		if (pr->action == PF_GET_CLR_CNTR) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETRULENV: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvrule = NULL;
		nvlist_t *nvl = NULL;
		struct pf_kruleset *ruleset;
		struct pf_krule *rule;
		void *nvlpacked = NULL;
		int rs_num, nr;
		bool clear_counter = false;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		/* Copy the request in */
		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (!nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		if (!nvlist_exists_number(nvl, "ruleset"))
			ERROUT(EBADMSG);
		if (!nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		if (!nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);

		if (nvlist_exists_bool(nvl, "clear_counter"))
			clear_counter = nvlist_get_bool(nvl, "clear_counter");

		if (clear_counter && !(flags & FWRITE))
			ERROUT(EACCES);

		nr = nvlist_get_number(nvl, "nr");

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOENT);
		}

		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl,
		    "ruleset"));
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			ERROUT(EINVAL);
		}

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		if ((error = nvlist_error(nvl))) {
			PF_RULES_WUNLOCK();
			ERROUT(error);
		}

		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		nvrule = pf_krule_to_nvrule(rule);

		nvlist_destroy(nvl);
		nvl = nvlist_create(0);
		if (nvl == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOMEM);
		}
		nvlist_add_number(nvl, "nr", nr);
		nvlist_add_nvlist(nvl, "rule", nvrule);
		nvlist_destroy(nvrule);
		nvrule = NULL;
		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		free(nvlpacked, M_NVLIST);
		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOMEM);
		}

		if (nv->size == 0) {
			PF_RULES_WUNLOCK();
			ERROUT(0);
		} else if (nv->size < nv->len) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOSPC);
		}

		if (clear_counter) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();

		error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
DIOCGETRULENV_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvrule);
		nvlist_destroy(nvl);

		break;
	}
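
	/*
	 * DIOCCHANGERULE edits the *active* ruleset in place: callers first
	 * obtain a ticket with PF_CHANGE_GET_TICKET, then issue one of the
	 * PF_CHANGE_ADD_HEAD/ADD_TAIL/ADD_BEFORE/ADD_AFTER/REMOVE actions
	 * against that ticket.  Illustrative userland sketch (field names
	 * per struct pfioc_rule; not taken from pfctl):
	 *
	 *	struct pfioc_rule pcr = { .action = PF_CHANGE_GET_TICKET };
	 *	ioctl(dev, DIOCCHANGERULE, &pcr);
	 *	pcr.action = PF_CHANGE_REMOVE;
	 *	pcr.nr = 3;			// rule number to delete
	 *	ioctl(dev, DIOCCHANGERULE, &pcr);
	 */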
	case DIOCCHANGERULE: {
		struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
		struct pf_kruleset *ruleset;
		struct pf_krule *oldrule = NULL, *newrule = NULL;
		struct pfi_kkif *kif = NULL;
		struct pf_kpooladdr *pa;
		u_int32_t nr = 0;
		int rs_num;

		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = malloc(sizeof(*newrule), M_PFRULE,
			    M_WAITOK | M_ZERO);
			error = pf_rule_to_krule(&pcr->rule, newrule);
			if (error != 0) {
				free(newrule, M_PFRULE);
				break;
			}

			if (newrule->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_init(&newrule->packets[i],
				    M_WAITOK);
				pf_counter_u64_init(&newrule->bytes[i],
				    M_WAITOK);
			}
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->rpool.list);
		}
#define	ERROUT(x)	{ error = (x); goto DIOCCHANGERULE_error; }

		PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
		if (newrule != NULL) {
			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule,
			    allrulelist);
			newrule->allrulelinked = true;
			V_pf_allrulecount++;
		}
#endif

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_kruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kkif_attach(kif,
				    newrule->ifname);
				kif = NULL;
				pfi_kkif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr,
			    newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr,
			    newrule->af))
				error = ENOMEM;
			if (pf_kanchor_setup(newrule, ruleset,
			    pcr->anchor_call))
				error = EINVAL;
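
			/*
			 * Translation addresses staged earlier through
			 * DIOCBEGINADDRS/DIOCADDADDR sit on V_pf_pabuf under
			 * pool_ticket; resolve any table references now,
			 * while the rules write lock is held.
			 */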
			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
				if (pa->addr.type == PF_ADDR_TABLE) {
					pa->addr.p.tbl =
					    pfr_attach_table(ruleset,
					    pa->addr.v.tblname);
					if (pa->addr.p.tbl == NULL)
						error = ENOMEM;
				}

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_NOPFROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				break;
			}

			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
		}
		pf_empty_kpool(&V_pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_kruleset(ruleset);

		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		PF_RULES_WUNLOCK();
		pf_krule_free(newrule);
		pf_kkif_free(kif);
		break;
	}

	case DIOCCLRSTATES: {
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		struct pf_kstate_kill kill;

		error = pf_state_kill_to_kstate_kill(psk, &kill);
		if (error)
			break;

		psk->psk_killed = pf_clear_states(&kill);
		break;
	}

	case DIOCCLRSTATESNV: {
		error = pf_clearstates_nv((struct pfioc_nv *)addr);
		break;
	}

	case DIOCKILLSTATES: {
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		struct pf_kstate_kill kill;

		error = pf_state_kill_to_kstate_kill(psk, &kill);
		if (error)
			break;

		psk->psk_killed = 0;
		pf_killstates(&kill, &psk->psk_killed);
		break;
	}

	case DIOCKILLSTATESNV: {
		error = pf_killstates_nv((struct pfioc_nv *)addr);
		break;
	}
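
	/*
	 * DIOCADDSTATE injects a fully formed state, reusing the pfsync
	 * import path; V_pfsync_state_import_ptr is only populated while
	 * the pfsync(4) module is loaded, hence EOPNOTSUPP otherwise.
	 */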
	case DIOCADDSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pfsync_state *sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		if (V_pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pf_kstate *s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}

	case DIOCGETSTATENV: {
		error = pf_getstate((struct pfioc_nv *)addr);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states *ps = (struct pfioc_states *)addr;
		struct pf_kstate *s;
		struct pfsync_state *pstore, *p;
		int i, nr;
		size_t slice_count = 16, count;
		void *out;

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		out = ps->ps_states;
		pstore = mallocarray(slice_count,
		    sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATES_retry:
			p = pstore;

			if (LIST_EMPTY(&ih->states))
				continue;

			PF_HASHROW_LOCK(ih);
			count = 0;
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;
				count++;
			}

			if (count > slice_count) {
				PF_HASHROW_UNLOCK(ih);
				free(pstore, M_TEMP);
				slice_count = count * 2;
				pstore = mallocarray(slice_count,
				    sizeof(struct pfsync_state), M_TEMP,
				    M_WAITOK | M_ZERO);
				goto DIOCGETSTATES_retry;
			}

			if ((nr + count) * sizeof(*p) > ps->ps_len) {
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATES_full;
			}

			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
			error = copyout(pstore, out,
			    sizeof(struct pfsync_state) * count);
			if (error)
				break;
			out = ps->ps_states + nr;
		}
DIOCGETSTATES_full:
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}
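
	/*
	 * Like DIOCGETSTATES, DIOCGETSTATESV2 uses a two-call convention:
	 * a zero ps_len reports the space needed, and a second call fills
	 * the buffer.  Illustrative userland sketch (not taken from pfctl):
	 *
	 *	struct pfioc_states_v2 ps = { 0 };
	 *	ps.ps_req_version = PF_STATE_VERSION;
	 *	ioctl(dev, DIOCGETSTATESV2, &ps);	// sets ps.ps_len
	 *	ps.ps_states = malloc(ps.ps_len);
	 *	ioctl(dev, DIOCGETSTATESV2, &ps);	// exports the states
	 */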
	case DIOCGETSTATESV2: {
		struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr;
		struct pf_kstate *s;
		struct pf_state_export *pstore, *p;
		int i, nr;
		size_t slice_count = 16, count;
		void *out;

		if (ps->ps_req_version > PF_STATE_VERSION) {
			error = ENOTSUP;
			break;
		}

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pf_state_export) * nr;
			break;
		}

		out = ps->ps_states;
		pstore = mallocarray(slice_count,
		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATESV2_retry:
			p = pstore;

			if (LIST_EMPTY(&ih->states))
				continue;

			PF_HASHROW_LOCK(ih);
			count = 0;
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;
				count++;
			}

			if (count > slice_count) {
				PF_HASHROW_UNLOCK(ih);
				free(pstore, M_TEMP);
				slice_count = count * 2;
				pstore = mallocarray(slice_count,
				    sizeof(struct pf_state_export), M_TEMP,
				    M_WAITOK | M_ZERO);
				goto DIOCGETSTATESV2_retry;
			}

			if ((nr + count) * sizeof(*p) > ps->ps_len) {
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATESV2_full;
			}

			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				pf_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
			error = copyout(pstore, out,
			    sizeof(struct pf_state_export) * count);
			if (error)
				break;
			out = ps->ps_states + nr;
		}
DIOCGETSTATESV2_full:
		ps->ps_len = nr * sizeof(struct pf_state_export);
		free(pstore, M_TEMP);

		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		PF_RULES_RLOCK();
		s->running = V_pf_status.running;
		s->since = V_pf_status.since;
		s->debug = V_pf_status.debug;
		s->hostid = V_pf_status.hostid;
		s->states = V_pf_status.states;
		s->src_nodes = V_pf_status.src_nodes;

		for (int i = 0; i < PFRES_MAX; i++)
			s->counters[i] =
			    counter_u64_fetch(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			s->lcounters[i] =
			    counter_u64_fetch(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			s->fcounters[i] =
			    pf_counter_u64_fetch(&V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			s->scounters[i] =
			    counter_u64_fetch(V_pf_status.scounters[i]);

		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETSTATUSNV: {
		error = pf_getstatus((struct pfioc_nv *)addr);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if *pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(V_pf_status.ifname, IFNAMSIZ);
			break;
		}
		PF_RULES_WLOCK();
		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname,
		    IFNAMSIZ);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		PF_RULES_WLOCK();
		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_zero(V_pf_status.counters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_zero(V_pf_status.scounters[i]);
		for (int i = 0; i < KLCNT_MAX; i++)
			counter_u64_zero(V_pf_status.lcounters[i]);
		V_pf_status.since = time_second;
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}
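
	/*
	 * DIOCNATLOOK resolves the translated endpoints of an existing
	 * state; transparent proxies (e.g. ftp-proxy(8)) use it to recover
	 * a connection's pre-NAT destination.
	 */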
	case DIOCNATLOOK: {
		struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key *sk;
		struct pf_kstate *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;
		int sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* XXXGL: not locked read */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
		int old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pl->limit = V_pf_limits[pl->index].limit;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
		int old_limit;

		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t *level = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		V_pf_status.debug = *level;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_kruleset *ruleset = &pf_main_ruleset;
		struct pf_krule *rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
		}
		PF_RULES_WUNLOCK();
		break;
	}
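
	/*
	 * Two ABI versions: v0 predates 64-bit interface speeds, so it only
	 * carries baudrate32 and the value is clamped to UINT_MAX below;
	 * v1 additionally returns the full 64-bit baudrate.
	 */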
	case DIOCGIFSPEEDV0:
	case DIOCGIFSPEEDV1: {
		struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
		struct pf_ifspeed_v1 ps;
		struct ifnet *ifp;

		if (psp->ifname[0] == '\0') {
			error = EINVAL;
			break;
		}

		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
		if (error != 0)
			break;
		ifp = ifunit(ps.ifname);
		if (ifp != NULL) {
			psp->baudrate32 =
			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
			if (cmd == DIOCGIFSPEEDV1)
				psp->baudrate = ifp->if_baudrate;
		} else {
			error = EINVAL;
		}
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq *altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq *altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq, *a;
		struct ifnet *ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error)
			break;
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq,
			    entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETALTQSV0:
	case DIOCGETALTQSV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
			pa->nr++;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}
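
	/*
	 * The remaining ALTQ ioctls are likewise versioned: V0 and V1 share
	 * one handler, and pf_import_kaltq()/pf_export_kaltq() use
	 * IOCPARM_LEN(cmd) to tell the two userland struct layouts apart.
	 */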
	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq *altq;
		int nbytes;
		u_int32_t version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;	/* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpooladdr *pa;
		struct pfi_kkif *kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
		if (error != 0) {
			/* Don't leak the partially converted pooladdr. */
			free(pa, M_PFRULE);
			break;
		}
		if (pa->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				pf_kkif_free(kif);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kkif_attach(kif, pa->ifname);
			kif = NULL;
			pfi_kkif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kkif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}
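
	/*
	 * Pool addresses are added transactionally: DIOCBEGINADDRS resets
	 * the buffer and returns a ticket, each DIOCADDADDR appends under
	 * that ticket, and a later DIOCADDRULE (or DIOCCHANGERULE) adopts
	 * the buffered list as the rule's redirection pool.
	 */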
	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_kruleset *ruleset;
		struct pfi_kkif *kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif,
				    newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset *ruleset;
		struct pf_kanchor *anchor;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset *ruleset;
		struct pf_kanchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
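
	/*
	 * The DIOCR* table ioctls below share one shape: validate that
	 * userland's element size matches the kernel's, bound the element
	 * count by pf_ioctl_maxcount, copyin(), then call into pfr_*()
	 * with PFR_FLAG_USERIOCTL to mark the request as user-initiated.
	 */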
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/*
			 * We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so.
			 */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
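
	/*
	 * With PFR_FLAG_FEEDBACK the pfr_addr array is copied back out on
	 * success so userland can read per-address result codes (the
	 * pfra_fback field) instead of just the aggregate counters.
	 */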
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}

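	/*
	 * Passive OS fingerprint table: DIOCOSFPADD inserts an entry and
	 * DIOCOSFPGET reads one back, both via pf_osfp_*() under the
	 * rules lock.
	 */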
	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioes, *ioe;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}

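	/*
	 * DIOCXROLLBACK throws away the inactive rulesets opened by a
	 * previous DIOCXBEGIN, matching them by the tickets returned there.
	 */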
	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		struct pf_kruleset *rs;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash *sh;
		struct pf_ksrc_node *n;
		struct pf_src_node *p, *pstore;
		uint32_t i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				if ((nr + 1) * sizeof(*p) >
				    (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		if (io->pfiio_size < 0 ||
		    io->pfiio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
			error = EINVAL;
			break;
		}

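		/*
		 * pfi_get_ifaces() fills the zeroed buffer and writes the
		 * number of interfaces actually copied back via pfiio_size.
		 */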
		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
		    M_TEMP, M_WAITOK | M_ZERO);

		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&pf_ioctl_lock))
		sx_xunlock(&pf_ioctl_lock);
	CURVNET_RESTORE();

#undef ERROUT_IOCTL

	return (error);
}

void
pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}

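/*
 * pf_state_export() below is the newer, versioned export format
 * (PF_STATE_VERSION): same layout as pfsync_state_export() plus
 * orig_ifname, with the 64-bit counters copied natively instead of
 * being split into 32-bit halves by pf_state_counter_hton().
 */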
void
pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));

	sp->version = PF_STATE_VERSION;

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
	    sizeof(sp->orig_ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	sp->packets[0] = st->packets[0];
	sp->packets[1] = st->packets[1];
	sp->bytes[0] = st->bytes[0];
	sp->bytes[1] = st->bytes[1];
}

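/*
 * When a rule is copied out to userspace, the kernel-only table pointer
 * in a PF_ADDR_TABLE address is replaced by the table's address count,
 * or -1 if the table is not active.
 */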
static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t *nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names",
		    names[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}

static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvc = NULL;
	void *nvlpacked = NULL;
	int error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/* fcounters */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */
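/*
 * Expire every state in the system; PFSTATE_NOSYNC keeps pfsync from
 * sending a delete message per state.
 */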
static void
pf_clear_all_states(void)
{
	struct pf_kstate *s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_kstate *s;
	int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

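/*
 * DIOCKEEPCOUNTERS: the request is a packed nvlist carrying a single
 * boolean, "keep_counters", which controls whether rule counters are
 * preserved when a ruleset is replaced.
 */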
static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (!nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_TEMP);
	return (error);
}

static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp match_key;
	struct pf_kstate *s;
	struct pfi_kkif *kif;
	int idx;
	unsigned int killed = 0, dir;

	for (unsigned int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname,
			    kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}

static void
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_kstate *s;

	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_unlink_state(s, PF_ENTER_LOCKED);
			*killed = 1;
		}
		return;
	}

	for (unsigned int i = 0; i <= pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);

	return;
}

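/*
 * nvlist front ends for the state-kill ioctls: unpack the request,
 * translate it with pf_nvstate_kill_to_kstate_kill(), run the kill and
 * return the "killed" count in a packed reply.
 */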
static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	pf_killstates(&kill, &killed);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_kstate *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
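/*
 * shutdown_pf() commits an empty ruleset of every type (scrub, filter,
 * NAT, binat, rdr), flushes the tables and any ALTQ queues, then
 * expires all states and source nodes.
 */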
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses.  In order to support stateful
	 * filtering we have changed this to lo0 as is the case in IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

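/*
 * Per-vnet pfil hook handles, registered by hook_pf() and removed by
 * dehook_pf() below.
 */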
#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret;

	if (V_pf_pfil_hooked)
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_func = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_func = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif

	V_pf_pfil_hooked = 1;
}

static void
dehook_pf(void)
{

	if (V_pf_pfil_hooked == 0)
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	V_pf_pfil_hooked = 0;
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

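/*
 * pf_load()/pf_unload() manage module-global state (locks, /dev/pf and
 * the purge thread); per-vnet setup and teardown live in pf_load_vnet()
 * and pf_unload_vnet() instead.
 */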
static int
pf_load(void)
{
	int error;

	rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&pf_ioctl_lock, "pf ioctl");
	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

static void
pf_unload_vnet(void)
{
	int ret;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	rm_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);
	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);