/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *,
			    struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */
VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int		 pf_krule_compare(struct pf_krule *,
				    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z	 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *,
			    const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external event, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock		pf_rules_lock;
struct sx		pf_ioctl_lock;
struct sx		pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}
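
/*
 * Typical usage is copying a fixed-size string field out of a
 * user-supplied structure, e.g. the interface name handling in
 * pf_pooladdr_to_kpooladdr() below: oversized input is rejected
 * with EINVAL instead of being silently truncated.
 */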

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}
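
/*
 * Look up the redirection pool of a rule, identified by its anchor
 * path, action and rule number. Depending on "active" the rule is
 * searched in the active or inactive ruleset, and when "check_ticket"
 * is set the caller's ticket must match that ruleset's ticket.
 */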
static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}
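
/*
 * Release everything a rule holds a reference on: tag and queue
 * references, attached tables and dynamic addresses, the interface
 * reference and the redirection pool, then the rule itself.
 */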
void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}
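
/*
 * Tags live in two hash tables at once: one keyed by name, used by
 * tagname2tag(), and one keyed by tag value, used by tag_unref(), so
 * both lookup directions stay cheap.
 */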
static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set. It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}
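
/*
 * Rollback of an ethernet ruleset runs from an epoch callback so the
 * previously active rules are only freed once no net_epoch section
 * can still be walking them; see NET_EPOCH_CALL() in pf_commit_eth().
 */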
static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}
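
/*
 * Enable the queueing discipline on the interface and install a token
 * bucket regulator sized from the altq's interface bandwidth and
 * tbrsize.
 */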
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been overridden
	 * by a new one and we can just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}
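
/*
 * Compute the full MD5 digest of a rule. The rolling variant above
 * also feeds the pfsync ruleset checksum (pf_setup_pfsync_matching());
 * the per-rule digest serves as the comparison key of the global rule
 * tree, see pf_krule_compare().
 */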
static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}
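
/*
 * Swap the inactive ruleset into place. The ticket must match the one
 * handed out by pf_begin_rules(); the previously active rules are
 * moved to the unlinked list for deferred destruction.
 */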
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global	*old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}
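
/*
 * Resolve the address wrap of a rule being loaded: attach the table or
 * set up the dynamic interface address that it names.
 */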
static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq	*altq;
	u_int32_t	 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}
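
/*
 * Conversion helpers between the kernel-internal structures
 * (pf_kpooladdr, pf_kpool, pf_krule, ...) and their userspace
 * counterparts. Strings coming from userspace pass through
 * pf_user_strcpy() so that oversized input is rejected.
 */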
static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}
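
/*
 * Import a rule from userspace: validate the address family against
 * the kernel configuration, check both rule addresses and copy the
 * user-settable fields. Counters are not touched here.
 */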
static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}

static int
pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
    struct pf_kstate_kill *kill)
{
	int ret;

	bzero(kill, sizeof(*kill));

	bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
	kill->psk_af = psk->psk_af;
	kill->psk_proto = psk->psk_proto;
	bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
	bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
	ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
	    sizeof(kill->psk_ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
	    sizeof(kill->psk_label));
	if (ret != 0)
		return (ret);

	return (0);
}

static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2133 struct thread *td) 2134 { 2135 struct pf_kruleset *ruleset; 2136 struct pf_krule *tail; 2137 struct pf_kpooladdr *pa; 2138 struct pfi_kkif *kif = NULL; 2139 int rs_num; 2140 int error = 0; 2141 2142 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2143 error = EINVAL; 2144 goto errout_unlocked; 2145 } 2146 2147 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2148 2149 if (rule->ifname[0]) 2150 kif = pf_kkif_create(M_WAITOK); 2151 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2152 for (int i = 0; i < 2; i++) { 2153 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2154 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2155 } 2156 rule->states_cur = counter_u64_alloc(M_WAITOK); 2157 rule->states_tot = counter_u64_alloc(M_WAITOK); 2158 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2159 rule->cuid = td->td_ucred->cr_ruid; 2160 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2161 TAILQ_INIT(&rule->rpool.list); 2162 2163 PF_CONFIG_LOCK(); 2164 PF_RULES_WLOCK(); 2165 #ifdef PF_WANT_32_TO_64_COUNTER 2166 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2167 MPASS(!rule->allrulelinked); 2168 rule->allrulelinked = true; 2169 V_pf_allrulecount++; 2170 #endif 2171 ruleset = pf_find_kruleset(anchor); 2172 if (ruleset == NULL) 2173 ERROUT(EINVAL); 2174 rs_num = pf_get_ruleset_number(rule->action); 2175 if (rs_num >= PF_RULESET_MAX) 2176 ERROUT(EINVAL); 2177 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2178 DPFPRINTF(PF_DEBUG_MISC, 2179 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2180 ruleset->rules[rs_num].inactive.ticket)); 2181 ERROUT(EBUSY); 2182 } 2183 if (pool_ticket != V_ticket_pabuf) { 2184 DPFPRINTF(PF_DEBUG_MISC, 2185 ("pool_ticket: %d != %d\n", pool_ticket, 2186 V_ticket_pabuf)); 2187 ERROUT(EBUSY); 2188 } 2189 /* 2190 * XXXMJG hack: there is no mechanism to ensure they started the 2191 * transaction. Ticket checked above may happen to match by accident, 2192 * even if nobody called DIOCXBEGIN, let alone this process. 2193 * Partially work around it by checking if the RB tree got allocated, 2194 * see pf_begin_rules. 
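	 * The expected flow is DIOCXBEGIN (pf_begin_rules allocates the
	 * tree and hands out the inactive ticket), one or more rule
	 * additions against that ticket, then DIOCXCOMMIT.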
2195 */ 2196 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2197 ERROUT(EINVAL); 2198 } 2199 2200 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2201 pf_krulequeue); 2202 if (tail) 2203 rule->nr = tail->nr + 1; 2204 else 2205 rule->nr = 0; 2206 if (rule->ifname[0]) { 2207 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2208 kif = NULL; 2209 pfi_kkif_ref(rule->kif); 2210 } else 2211 rule->kif = NULL; 2212 2213 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2214 error = EBUSY; 2215 2216 #ifdef ALTQ 2217 /* set queue IDs */ 2218 if (rule->qname[0] != 0) { 2219 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2220 error = EBUSY; 2221 else if (rule->pqname[0] != 0) { 2222 if ((rule->pqid = 2223 pf_qname2qid(rule->pqname)) == 0) 2224 error = EBUSY; 2225 } else 2226 rule->pqid = rule->qid; 2227 } 2228 #endif 2229 if (rule->tagname[0]) 2230 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2231 error = EBUSY; 2232 if (rule->match_tagname[0]) 2233 if ((rule->match_tag = 2234 pf_tagname2tag(rule->match_tagname)) == 0) 2235 error = EBUSY; 2236 if (rule->rt && !rule->direction) 2237 error = EINVAL; 2238 if (!rule->log) 2239 rule->logif = 0; 2240 if (rule->logif >= PFLOGIFS_MAX) 2241 error = EINVAL; 2242 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2243 error = ENOMEM; 2244 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2245 error = ENOMEM; 2246 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2247 error = EINVAL; 2248 if (rule->scrub_flags & PFSTATE_SETPRIO && 2249 (rule->set_prio[0] > PF_PRIO_MAX || 2250 rule->set_prio[1] > PF_PRIO_MAX)) 2251 error = EINVAL; 2252 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2253 if (pa->addr.type == PF_ADDR_TABLE) { 2254 pa->addr.p.tbl = pfr_attach_table(ruleset, 2255 pa->addr.v.tblname); 2256 if (pa->addr.p.tbl == NULL) 2257 error = ENOMEM; 2258 } 2259 2260 rule->overload_tbl = NULL; 2261 if (rule->overload_tblname[0]) { 2262 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2263 rule->overload_tblname)) == NULL) 2264 error = EINVAL; 2265 else 2266 rule->overload_tbl->pfrkt_flags |= 2267 PFR_TFLAG_ACTIVE; 2268 } 2269 2270 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2271 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2272 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2273 (rule->rt > PF_NOPFROUTE)) && 2274 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2275 error = EINVAL; 2276 2277 if (error) { 2278 pf_free_rule(rule); 2279 rule = NULL; 2280 ERROUT(error); 2281 } 2282 2283 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2284 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2285 rule, entries); 2286 ruleset->rules[rs_num].inactive.rcount++; 2287 2288 PF_RULES_WUNLOCK(); 2289 pf_hash_rule(rule); 2290 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2291 PF_RULES_WLOCK(); 2292 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2293 ruleset->rules[rs_num].inactive.rcount--; 2294 pf_free_rule(rule); 2295 rule = NULL; 2296 ERROUT(EEXIST); 2297 } 2298 PF_CONFIG_UNLOCK(); 2299 2300 return (0); 2301 2302 #undef ERROUT 2303 errout: 2304 PF_RULES_WUNLOCK(); 2305 PF_CONFIG_UNLOCK(); 2306 errout_unlocked: 2307 pf_kkif_free(kif); 2308 pf_krule_free(rule); 2309 return (error); 2310 } 2311 2312 static bool 2313 pf_label_match(const struct pf_krule *rule, const char *label) 2314 { 2315 int i = 0; 2316 2317 while (*rule->label[i]) { 2318 if (strcmp(rule->label[i], label) == 0) 2319 return (true); 2320 i++; 2321 } 2322 2323 return (false); 2324 } 2325 2326 
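/*
 * Kill the state that exactly matches the given key in the given
 * direction.  If the lookup is ambiguous (more than one state
 * matches), nothing is killed.  Returns the number of states killed,
 * i.e. 0 or 1.
 */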
static unsigned int 2327 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2328 { 2329 struct pf_kstate *s; 2330 int more = 0; 2331 2332 s = pf_find_state_all(key, dir, &more); 2333 if (s == NULL) 2334 return (0); 2335 2336 if (more) { 2337 PF_STATE_UNLOCK(s); 2338 return (0); 2339 } 2340 2341 pf_unlink_state(s); 2342 return (1); 2343 } 2344 2345 static int 2346 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2347 { 2348 struct pf_kstate *s; 2349 struct pf_state_key *sk; 2350 struct pf_addr *srcaddr, *dstaddr; 2351 struct pf_state_key_cmp match_key; 2352 int idx, killed = 0; 2353 unsigned int dir; 2354 u_int16_t srcport, dstport; 2355 struct pfi_kkif *kif; 2356 2357 relock_DIOCKILLSTATES: 2358 PF_HASHROW_LOCK(ih); 2359 LIST_FOREACH(s, &ih->states, entry) { 2360 /* For floating states look at the original kif. */ 2361 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2362 2363 sk = s->key[PF_SK_WIRE]; 2364 if (s->direction == PF_OUT) { 2365 srcaddr = &sk->addr[1]; 2366 dstaddr = &sk->addr[0]; 2367 srcport = sk->port[1]; 2368 dstport = sk->port[0]; 2369 } else { 2370 srcaddr = &sk->addr[0]; 2371 dstaddr = &sk->addr[1]; 2372 srcport = sk->port[0]; 2373 dstport = sk->port[1]; 2374 } 2375 2376 if (psk->psk_af && sk->af != psk->psk_af) 2377 continue; 2378 2379 if (psk->psk_proto && psk->psk_proto != sk->proto) 2380 continue; 2381 2382 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2383 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2384 continue; 2385 2386 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2387 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2388 continue; 2389 2390 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2391 &psk->psk_rt_addr.addr.v.a.addr, 2392 &psk->psk_rt_addr.addr.v.a.mask, 2393 &s->rt_addr, sk->af)) 2394 continue; 2395 2396 if (psk->psk_src.port_op != 0 && 2397 ! pf_match_port(psk->psk_src.port_op, 2398 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2399 continue; 2400 2401 if (psk->psk_dst.port_op != 0 && 2402 ! pf_match_port(psk->psk_dst.port_op, 2403 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2404 continue; 2405 2406 if (psk->psk_label[0] && 2407 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2408 continue; 2409 2410 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2411 kif->pfik_name)) 2412 continue; 2413 2414 if (psk->psk_kill_match) { 2415 /* Create the key to find matching states, with lock 2416 * held. 
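			 * The matching state is keyed from the opposite
			 * direction, so the address and port slots are
			 * swapped below.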
*/ 2417 2418 bzero(&match_key, sizeof(match_key)); 2419 2420 if (s->direction == PF_OUT) { 2421 dir = PF_IN; 2422 idx = PF_SK_STACK; 2423 } else { 2424 dir = PF_OUT; 2425 idx = PF_SK_WIRE; 2426 } 2427 2428 match_key.af = s->key[idx]->af; 2429 match_key.proto = s->key[idx]->proto; 2430 PF_ACPY(&match_key.addr[0], 2431 &s->key[idx]->addr[1], match_key.af); 2432 match_key.port[0] = s->key[idx]->port[1]; 2433 PF_ACPY(&match_key.addr[1], 2434 &s->key[idx]->addr[0], match_key.af); 2435 match_key.port[1] = s->key[idx]->port[0]; 2436 } 2437 2438 pf_unlink_state(s); 2439 killed++; 2440 2441 if (psk->psk_kill_match) 2442 killed += pf_kill_matching_state(&match_key, dir); 2443 2444 goto relock_DIOCKILLSTATES; 2445 } 2446 PF_HASHROW_UNLOCK(ih); 2447 2448 return (killed); 2449 } 2450 2451 static int 2452 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2453 { 2454 int error = 0; 2455 PF_RULES_RLOCK_TRACKER; 2456 2457 #define ERROUT_IOCTL(target, x) \ 2458 do { \ 2459 error = (x); \ 2460 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2461 goto target; \ 2462 } while (0) 2463 2464 2465 /* XXX keep in sync with switch() below */ 2466 if (securelevel_gt(td->td_ucred, 2)) 2467 switch (cmd) { 2468 case DIOCGETRULES: 2469 case DIOCGETRULE: 2470 case DIOCGETRULENV: 2471 case DIOCGETADDRS: 2472 case DIOCGETADDR: 2473 case DIOCGETSTATE: 2474 case DIOCGETSTATENV: 2475 case DIOCSETSTATUSIF: 2476 case DIOCGETSTATUS: 2477 case DIOCGETSTATUSNV: 2478 case DIOCCLRSTATUS: 2479 case DIOCNATLOOK: 2480 case DIOCSETDEBUG: 2481 case DIOCGETSTATES: 2482 case DIOCGETSTATESV2: 2483 case DIOCGETTIMEOUT: 2484 case DIOCCLRRULECTRS: 2485 case DIOCGETLIMIT: 2486 case DIOCGETALTQSV0: 2487 case DIOCGETALTQSV1: 2488 case DIOCGETALTQV0: 2489 case DIOCGETALTQV1: 2490 case DIOCGETQSTATSV0: 2491 case DIOCGETQSTATSV1: 2492 case DIOCGETRULESETS: 2493 case DIOCGETRULESET: 2494 case DIOCRGETTABLES: 2495 case DIOCRGETTSTATS: 2496 case DIOCRCLRTSTATS: 2497 case DIOCRCLRADDRS: 2498 case DIOCRADDADDRS: 2499 case DIOCRDELADDRS: 2500 case DIOCRSETADDRS: 2501 case DIOCRGETADDRS: 2502 case DIOCRGETASTATS: 2503 case DIOCRCLRASTATS: 2504 case DIOCRTSTADDRS: 2505 case DIOCOSFPGET: 2506 case DIOCGETSRCNODES: 2507 case DIOCCLRSRCNODES: 2508 case DIOCGETSYNCOOKIES: 2509 case DIOCIGETIFACES: 2510 case DIOCGIFSPEEDV0: 2511 case DIOCGIFSPEEDV1: 2512 case DIOCSETIFFLAG: 2513 case DIOCCLRIFFLAG: 2514 case DIOCGETETHRULES: 2515 case DIOCGETETHRULE: 2516 case DIOCGETETHRULESETS: 2517 case DIOCGETETHRULESET: 2518 break; 2519 case DIOCRCLRTABLES: 2520 case DIOCRADDTABLES: 2521 case DIOCRDELTABLES: 2522 case DIOCRSETTFLAGS: 2523 if (((struct pfioc_table *)addr)->pfrio_flags & 2524 PFR_FLAG_DUMMY) 2525 break; /* dummy operation ok */ 2526 return (EPERM); 2527 default: 2528 return (EPERM); 2529 } 2530 2531 if (!(flags & FWRITE)) 2532 switch (cmd) { 2533 case DIOCGETRULES: 2534 case DIOCGETADDRS: 2535 case DIOCGETADDR: 2536 case DIOCGETSTATE: 2537 case DIOCGETSTATENV: 2538 case DIOCGETSTATUS: 2539 case DIOCGETSTATUSNV: 2540 case DIOCGETSTATES: 2541 case DIOCGETSTATESV2: 2542 case DIOCGETTIMEOUT: 2543 case DIOCGETLIMIT: 2544 case DIOCGETALTQSV0: 2545 case DIOCGETALTQSV1: 2546 case DIOCGETALTQV0: 2547 case DIOCGETALTQV1: 2548 case DIOCGETQSTATSV0: 2549 case DIOCGETQSTATSV1: 2550 case DIOCGETRULESETS: 2551 case DIOCGETRULESET: 2552 case DIOCNATLOOK: 2553 case DIOCRGETTABLES: 2554 case DIOCRGETTSTATS: 2555 case DIOCRGETADDRS: 2556 case DIOCRGETASTATS: 2557 case DIOCRTSTADDRS: 2558 case DIOCOSFPGET: 2559 case DIOCGETSRCNODES: 
2560 case DIOCGETSYNCOOKIES: 2561 case DIOCIGETIFACES: 2562 case DIOCGIFSPEEDV1: 2563 case DIOCGIFSPEEDV0: 2564 case DIOCGETRULENV: 2565 case DIOCGETETHRULES: 2566 case DIOCGETETHRULE: 2567 case DIOCGETETHRULESETS: 2568 case DIOCGETETHRULESET: 2569 break; 2570 case DIOCRCLRTABLES: 2571 case DIOCRADDTABLES: 2572 case DIOCRDELTABLES: 2573 case DIOCRCLRTSTATS: 2574 case DIOCRCLRADDRS: 2575 case DIOCRADDADDRS: 2576 case DIOCRDELADDRS: 2577 case DIOCRSETADDRS: 2578 case DIOCRSETTFLAGS: 2579 if (((struct pfioc_table *)addr)->pfrio_flags & 2580 PFR_FLAG_DUMMY) { 2581 flags |= FWRITE; /* need write lock for dummy */ 2582 break; /* dummy operation ok */ 2583 } 2584 return (EACCES); 2585 case DIOCGETRULE: 2586 if (((struct pfioc_rule *)addr)->action == 2587 PF_GET_CLR_CNTR) 2588 return (EACCES); 2589 break; 2590 default: 2591 return (EACCES); 2592 } 2593 2594 CURVNET_SET(TD_TO_VNET(td)); 2595 2596 switch (cmd) { 2597 case DIOCSTART: 2598 sx_xlock(&pf_ioctl_lock); 2599 if (V_pf_status.running) 2600 error = EEXIST; 2601 else { 2602 hook_pf(); 2603 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2604 hook_pf_eth(); 2605 V_pf_status.running = 1; 2606 V_pf_status.since = time_second; 2607 new_unrhdr64(&V_pf_stateid, time_second); 2608 2609 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2610 } 2611 break; 2612 2613 case DIOCSTOP: 2614 sx_xlock(&pf_ioctl_lock); 2615 if (!V_pf_status.running) 2616 error = ENOENT; 2617 else { 2618 V_pf_status.running = 0; 2619 dehook_pf(); 2620 dehook_pf_eth(); 2621 V_pf_status.since = time_second; 2622 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2623 } 2624 break; 2625 2626 case DIOCGETETHRULES: { 2627 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2628 nvlist_t *nvl; 2629 void *packed; 2630 struct pf_keth_rule *tail; 2631 struct pf_keth_ruleset *rs; 2632 u_int32_t ticket, nr; 2633 const char *anchor = ""; 2634 2635 nvl = NULL; 2636 packed = NULL; 2637 2638 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2639 2640 if (nv->len > pf_ioctl_maxcount) 2641 ERROUT(ENOMEM); 2642 2643 /* Copy the request in */ 2644 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2645 if (packed == NULL) 2646 ERROUT(ENOMEM); 2647 2648 error = copyin(nv->data, packed, nv->len); 2649 if (error) 2650 ERROUT(error); 2651 2652 nvl = nvlist_unpack(packed, nv->len, 0); 2653 if (nvl == NULL) 2654 ERROUT(EBADMSG); 2655 2656 if (! 
nvlist_exists_string(nvl, "anchor")) 2657 ERROUT(EBADMSG); 2658 2659 anchor = nvlist_get_string(nvl, "anchor"); 2660 2661 rs = pf_find_keth_ruleset(anchor); 2662 2663 nvlist_destroy(nvl); 2664 nvl = NULL; 2665 free(packed, M_NVLIST); 2666 packed = NULL; 2667 2668 if (rs == NULL) 2669 ERROUT(ENOENT); 2670 2671 /* Reply */ 2672 nvl = nvlist_create(0); 2673 if (nvl == NULL) 2674 ERROUT(ENOMEM); 2675 2676 PF_RULES_RLOCK(); 2677 2678 ticket = rs->active.ticket; 2679 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2680 if (tail) 2681 nr = tail->nr + 1; 2682 else 2683 nr = 0; 2684 2685 PF_RULES_RUNLOCK(); 2686 2687 nvlist_add_number(nvl, "ticket", ticket); 2688 nvlist_add_number(nvl, "nr", nr); 2689 2690 packed = nvlist_pack(nvl, &nv->len); 2691 if (packed == NULL) 2692 ERROUT(ENOMEM); 2693 2694 if (nv->size == 0) 2695 ERROUT(0); 2696 else if (nv->size < nv->len) 2697 ERROUT(ENOSPC); 2698 2699 error = copyout(packed, nv->data, nv->len); 2700 2701 #undef ERROUT 2702 DIOCGETETHRULES_error: 2703 free(packed, M_NVLIST); 2704 nvlist_destroy(nvl); 2705 break; 2706 } 2707 2708 case DIOCGETETHRULE: { 2709 struct epoch_tracker et; 2710 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2711 nvlist_t *nvl = NULL; 2712 void *nvlpacked = NULL; 2713 struct pf_keth_rule *rule = NULL; 2714 struct pf_keth_ruleset *rs; 2715 u_int32_t ticket, nr; 2716 bool clear = false; 2717 const char *anchor; 2718 2719 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2720 2721 if (nv->len > pf_ioctl_maxcount) 2722 ERROUT(ENOMEM); 2723 2724 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2725 if (nvlpacked == NULL) 2726 ERROUT(ENOMEM); 2727 2728 error = copyin(nv->data, nvlpacked, nv->len); 2729 if (error) 2730 ERROUT(error); 2731 2732 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2733 if (nvl == NULL) 2734 ERROUT(EBADMSG); 2735 if (! nvlist_exists_number(nvl, "ticket")) 2736 ERROUT(EBADMSG); 2737 ticket = nvlist_get_number(nvl, "ticket"); 2738 if (! nvlist_exists_string(nvl, "anchor")) 2739 ERROUT(EBADMSG); 2740 anchor = nvlist_get_string(nvl, "anchor"); 2741 2742 if (nvlist_exists_bool(nvl, "clear")) 2743 clear = nvlist_get_bool(nvl, "clear"); 2744 2745 if (clear && !(flags & FWRITE)) 2746 ERROUT(EACCES); 2747 2748 if (! nvlist_exists_number(nvl, "nr")) 2749 ERROUT(EBADMSG); 2750 nr = nvlist_get_number(nvl, "nr"); 2751 2752 PF_RULES_RLOCK(); 2753 rs = pf_find_keth_ruleset(anchor); 2754 if (rs == NULL) { 2755 PF_RULES_RUNLOCK(); 2756 ERROUT(ENOENT); 2757 } 2758 if (ticket != rs->active.ticket) { 2759 PF_RULES_RUNLOCK(); 2760 ERROUT(EBUSY); 2761 } 2762 2763 nvlist_destroy(nvl); 2764 nvl = NULL; 2765 free(nvlpacked, M_NVLIST); 2766 nvlpacked = NULL; 2767 2768 rule = TAILQ_FIRST(rs->active.rules); 2769 while ((rule != NULL) && (rule->nr != nr)) 2770 rule = TAILQ_NEXT(rule, entries); 2771 if (rule == NULL) { 2772 PF_RULES_RUNLOCK(); 2773 ERROUT(ENOENT); 2774 } 2775 /* Make sure rule can't go away. 
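		 * Enter the net epoch before dropping the rules lock:
		 * ethernet rules are freed via epoch callbacks, so the
		 * rule stays valid while we copy it out.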
		 */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			/* Leave the epoch before the ERROUT() jump. */
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != %d\n",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket));
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		if (rule == NULL)
			ERROUT(ENOMEM);
		rule->timestamp = NULL;

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0)
			ERROUT(error);

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		if (rule->bridge_to_name[0])
			bridge_to_kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}
		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
		    M_WAITOK | M_ZERO);

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;
		if (rule->bridge_to_name[0]) {
			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
			    rule->bridge_to_name);
			pfi_kkif_ref(rule->bridge_to);
		} else
			rule->bridge_to = NULL;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag =
pf_tagname2tag(rule->tagname)) == 0) 2903 error = EBUSY; 2904 if (rule->match_tagname[0]) 2905 if ((rule->match_tag = pf_tagname2tag( 2906 rule->match_tagname)) == 0) 2907 error = EBUSY; 2908 2909 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2910 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2911 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2912 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2913 2914 if (error) { 2915 pf_free_eth_rule(rule); 2916 PF_RULES_WUNLOCK(); 2917 ERROUT(error); 2918 } 2919 2920 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2921 pf_free_eth_rule(rule); 2922 PF_RULES_WUNLOCK(); 2923 ERROUT(EINVAL); 2924 } 2925 2926 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2927 if (tail) 2928 rule->nr = tail->nr + 1; 2929 else 2930 rule->nr = 0; 2931 2932 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2933 2934 PF_RULES_WUNLOCK(); 2935 2936 #undef ERROUT 2937 DIOCADDETHRULE_error: 2938 nvlist_destroy(nvl); 2939 free(nvlpacked, M_NVLIST); 2940 break; 2941 } 2942 2943 case DIOCGETETHRULESETS: { 2944 struct epoch_tracker et; 2945 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2946 nvlist_t *nvl = NULL; 2947 void *nvlpacked = NULL; 2948 struct pf_keth_ruleset *ruleset; 2949 struct pf_keth_anchor *anchor; 2950 int nr = 0; 2951 2952 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2953 2954 if (nv->len > pf_ioctl_maxcount) 2955 ERROUT(ENOMEM); 2956 2957 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2958 if (nvlpacked == NULL) 2959 ERROUT(ENOMEM); 2960 2961 error = copyin(nv->data, nvlpacked, nv->len); 2962 if (error) 2963 ERROUT(error); 2964 2965 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2966 if (nvl == NULL) 2967 ERROUT(EBADMSG); 2968 if (! nvlist_exists_string(nvl, "path")) 2969 ERROUT(EBADMSG); 2970 2971 NET_EPOCH_ENTER(et); 2972 2973 if ((ruleset = pf_find_keth_ruleset( 2974 nvlist_get_string(nvl, "path"))) == NULL) { 2975 NET_EPOCH_EXIT(et); 2976 ERROUT(ENOENT); 2977 } 2978 2979 if (ruleset->anchor == NULL) { 2980 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2981 if (anchor->parent == NULL) 2982 nr++; 2983 } else { 2984 RB_FOREACH(anchor, pf_keth_anchor_node, 2985 &ruleset->anchor->children) 2986 nr++; 2987 } 2988 2989 NET_EPOCH_EXIT(et); 2990 2991 nvlist_destroy(nvl); 2992 nvl = NULL; 2993 free(nvlpacked, M_NVLIST); 2994 nvlpacked = NULL; 2995 2996 nvl = nvlist_create(0); 2997 if (nvl == NULL) 2998 ERROUT(ENOMEM); 2999 3000 nvlist_add_number(nvl, "nr", nr); 3001 3002 nvlpacked = nvlist_pack(nvl, &nv->len); 3003 if (nvlpacked == NULL) 3004 ERROUT(ENOMEM); 3005 3006 if (nv->size == 0) 3007 ERROUT(0); 3008 else if (nv->size < nv->len) 3009 ERROUT(ENOSPC); 3010 3011 error = copyout(nvlpacked, nv->data, nv->len); 3012 3013 #undef ERROUT 3014 DIOCGETETHRULESETS_error: 3015 free(nvlpacked, M_NVLIST); 3016 nvlist_destroy(nvl); 3017 break; 3018 } 3019 3020 case DIOCGETETHRULESET: { 3021 struct epoch_tracker et; 3022 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3023 nvlist_t *nvl = NULL; 3024 void *nvlpacked = NULL; 3025 struct pf_keth_ruleset *ruleset; 3026 struct pf_keth_anchor *anchor; 3027 int nr = 0, req_nr = 0; 3028 bool found = false; 3029 3030 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3031 3032 if (nv->len > pf_ioctl_maxcount) 3033 ERROUT(ENOMEM); 3034 3035 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3036 if (nvlpacked == NULL) 3037 ERROUT(ENOMEM); 3038 3039 error = copyin(nv->data, nvlpacked, nv->len); 3040 if (error) 3041 ERROUT(error); 3042 3043 
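		/*
		 * The request nvlist must carry the anchor "path" and the
		 * child index "nr" to look up.
		 */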
nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3044 if (nvl == NULL) 3045 ERROUT(EBADMSG); 3046 if (! nvlist_exists_string(nvl, "path")) 3047 ERROUT(EBADMSG); 3048 if (! nvlist_exists_number(nvl, "nr")) 3049 ERROUT(EBADMSG); 3050 3051 req_nr = nvlist_get_number(nvl, "nr"); 3052 3053 NET_EPOCH_ENTER(et); 3054 3055 if ((ruleset = pf_find_keth_ruleset( 3056 nvlist_get_string(nvl, "path"))) == NULL) { 3057 NET_EPOCH_EXIT(et); 3058 ERROUT(ENOENT); 3059 } 3060 3061 nvlist_destroy(nvl); 3062 nvl = NULL; 3063 free(nvlpacked, M_NVLIST); 3064 nvlpacked = NULL; 3065 3066 nvl = nvlist_create(0); 3067 if (nvl == NULL) { 3068 NET_EPOCH_EXIT(et); 3069 ERROUT(ENOMEM); 3070 } 3071 3072 if (ruleset->anchor == NULL) { 3073 RB_FOREACH(anchor, pf_keth_anchor_global, 3074 &V_pf_keth_anchors) { 3075 if (anchor->parent == NULL && nr++ == req_nr) { 3076 found = true; 3077 break; 3078 } 3079 } 3080 } else { 3081 RB_FOREACH(anchor, pf_keth_anchor_node, 3082 &ruleset->anchor->children) { 3083 if (nr++ == req_nr) { 3084 found = true; 3085 break; 3086 } 3087 } 3088 } 3089 3090 NET_EPOCH_EXIT(et); 3091 if (found) { 3092 nvlist_add_number(nvl, "nr", nr); 3093 nvlist_add_string(nvl, "name", anchor->name); 3094 if (ruleset->anchor) 3095 nvlist_add_string(nvl, "path", 3096 ruleset->anchor->path); 3097 else 3098 nvlist_add_string(nvl, "path", ""); 3099 } else { 3100 ERROUT(EBUSY); 3101 } 3102 3103 nvlpacked = nvlist_pack(nvl, &nv->len); 3104 if (nvlpacked == NULL) 3105 ERROUT(ENOMEM); 3106 3107 if (nv->size == 0) 3108 ERROUT(0); 3109 else if (nv->size < nv->len) 3110 ERROUT(ENOSPC); 3111 3112 error = copyout(nvlpacked, nv->data, nv->len); 3113 3114 #undef ERROUT 3115 DIOCGETETHRULESET_error: 3116 free(nvlpacked, M_NVLIST); 3117 nvlist_destroy(nvl); 3118 break; 3119 } 3120 3121 case DIOCADDRULENV: { 3122 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3123 nvlist_t *nvl = NULL; 3124 void *nvlpacked = NULL; 3125 struct pf_krule *rule = NULL; 3126 const char *anchor = "", *anchor_call = ""; 3127 uint32_t ticket = 0, pool_ticket = 0; 3128 3129 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3130 3131 if (nv->len > pf_ioctl_maxcount) 3132 ERROUT(ENOMEM); 3133 3134 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3135 error = copyin(nv->data, nvlpacked, nv->len); 3136 if (error) 3137 ERROUT(error); 3138 3139 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3140 if (nvl == NULL) 3141 ERROUT(EBADMSG); 3142 3143 if (! nvlist_exists_number(nvl, "ticket")) 3144 ERROUT(EINVAL); 3145 ticket = nvlist_get_number(nvl, "ticket"); 3146 3147 if (! nvlist_exists_number(nvl, "pool_ticket")) 3148 ERROUT(EINVAL); 3149 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3150 3151 if (! 
nvlist_exists_nvlist(nvl, "rule")) 3152 ERROUT(EINVAL); 3153 3154 rule = pf_krule_alloc(); 3155 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3156 rule); 3157 if (error) 3158 ERROUT(error); 3159 3160 if (nvlist_exists_string(nvl, "anchor")) 3161 anchor = nvlist_get_string(nvl, "anchor"); 3162 if (nvlist_exists_string(nvl, "anchor_call")) 3163 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3164 3165 if ((error = nvlist_error(nvl))) 3166 ERROUT(error); 3167 3168 /* Frees rule on error */ 3169 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3170 anchor_call, td); 3171 3172 nvlist_destroy(nvl); 3173 free(nvlpacked, M_NVLIST); 3174 break; 3175 #undef ERROUT 3176 DIOCADDRULENV_error: 3177 pf_krule_free(rule); 3178 nvlist_destroy(nvl); 3179 free(nvlpacked, M_NVLIST); 3180 3181 break; 3182 } 3183 case DIOCADDRULE: { 3184 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3185 struct pf_krule *rule; 3186 3187 rule = pf_krule_alloc(); 3188 error = pf_rule_to_krule(&pr->rule, rule); 3189 if (error != 0) { 3190 pf_krule_free(rule); 3191 break; 3192 } 3193 3194 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3195 3196 /* Frees rule on error */ 3197 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3198 pr->anchor, pr->anchor_call, td); 3199 break; 3200 } 3201 3202 case DIOCGETRULES: { 3203 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3204 struct pf_kruleset *ruleset; 3205 struct pf_krule *tail; 3206 int rs_num; 3207 3208 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3209 3210 PF_RULES_WLOCK(); 3211 ruleset = pf_find_kruleset(pr->anchor); 3212 if (ruleset == NULL) { 3213 PF_RULES_WUNLOCK(); 3214 error = EINVAL; 3215 break; 3216 } 3217 rs_num = pf_get_ruleset_number(pr->rule.action); 3218 if (rs_num >= PF_RULESET_MAX) { 3219 PF_RULES_WUNLOCK(); 3220 error = EINVAL; 3221 break; 3222 } 3223 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3224 pf_krulequeue); 3225 if (tail) 3226 pr->nr = tail->nr + 1; 3227 else 3228 pr->nr = 0; 3229 pr->ticket = ruleset->rules[rs_num].active.ticket; 3230 PF_RULES_WUNLOCK(); 3231 break; 3232 } 3233 3234 case DIOCGETRULE: { 3235 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3236 struct pf_kruleset *ruleset; 3237 struct pf_krule *rule; 3238 int rs_num; 3239 3240 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3241 3242 PF_RULES_WLOCK(); 3243 ruleset = pf_find_kruleset(pr->anchor); 3244 if (ruleset == NULL) { 3245 PF_RULES_WUNLOCK(); 3246 error = EINVAL; 3247 break; 3248 } 3249 rs_num = pf_get_ruleset_number(pr->rule.action); 3250 if (rs_num >= PF_RULESET_MAX) { 3251 PF_RULES_WUNLOCK(); 3252 error = EINVAL; 3253 break; 3254 } 3255 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 3256 PF_RULES_WUNLOCK(); 3257 error = EBUSY; 3258 break; 3259 } 3260 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3261 while ((rule != NULL) && (rule->nr != pr->nr)) 3262 rule = TAILQ_NEXT(rule, entries); 3263 if (rule == NULL) { 3264 PF_RULES_WUNLOCK(); 3265 error = EBUSY; 3266 break; 3267 } 3268 3269 pf_krule_to_rule(rule, &pr->rule); 3270 3271 if (pf_kanchor_copyout(ruleset, rule, pr)) { 3272 PF_RULES_WUNLOCK(); 3273 error = EBUSY; 3274 break; 3275 } 3276 pf_addr_copyout(&pr->rule.src.addr); 3277 pf_addr_copyout(&pr->rule.dst.addr); 3278 3279 if (pr->action == PF_GET_CLR_CNTR) { 3280 pf_counter_u64_zero(&rule->evaluations); 3281 for (int i = 0; i < 2; i++) { 3282 pf_counter_u64_zero(&rule->packets[i]); 3283 pf_counter_u64_zero(&rule->bytes[i]); 3284 } 3285 counter_u64_zero(rule->states_tot); 3286 } 3287 PF_RULES_WUNLOCK(); 3288 break; 3289 } 3290 
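	/*
	 * nvlist variant of DIOCGETRULE.  The packed request carries
	 * "anchor" (string), "ruleset", "ticket" and "nr" (numbers) and
	 * may set a "clear_counter" bool; the reply carries "nr" plus the
	 * rule itself as a nested "rule" nvlist.
	 */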
3291 case DIOCGETRULENV: { 3292 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3293 nvlist_t *nvrule = NULL; 3294 nvlist_t *nvl = NULL; 3295 struct pf_kruleset *ruleset; 3296 struct pf_krule *rule; 3297 void *nvlpacked = NULL; 3298 int rs_num, nr; 3299 bool clear_counter = false; 3300 3301 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3302 3303 if (nv->len > pf_ioctl_maxcount) 3304 ERROUT(ENOMEM); 3305 3306 /* Copy the request in */ 3307 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3308 if (nvlpacked == NULL) 3309 ERROUT(ENOMEM); 3310 3311 error = copyin(nv->data, nvlpacked, nv->len); 3312 if (error) 3313 ERROUT(error); 3314 3315 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3316 if (nvl == NULL) 3317 ERROUT(EBADMSG); 3318 3319 if (! nvlist_exists_string(nvl, "anchor")) 3320 ERROUT(EBADMSG); 3321 if (! nvlist_exists_number(nvl, "ruleset")) 3322 ERROUT(EBADMSG); 3323 if (! nvlist_exists_number(nvl, "ticket")) 3324 ERROUT(EBADMSG); 3325 if (! nvlist_exists_number(nvl, "nr")) 3326 ERROUT(EBADMSG); 3327 3328 if (nvlist_exists_bool(nvl, "clear_counter")) 3329 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3330 3331 if (clear_counter && !(flags & FWRITE)) 3332 ERROUT(EACCES); 3333 3334 nr = nvlist_get_number(nvl, "nr"); 3335 3336 PF_RULES_WLOCK(); 3337 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3338 if (ruleset == NULL) { 3339 PF_RULES_WUNLOCK(); 3340 ERROUT(ENOENT); 3341 } 3342 3343 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3344 if (rs_num >= PF_RULESET_MAX) { 3345 PF_RULES_WUNLOCK(); 3346 ERROUT(EINVAL); 3347 } 3348 3349 if (nvlist_get_number(nvl, "ticket") != 3350 ruleset->rules[rs_num].active.ticket) { 3351 PF_RULES_WUNLOCK(); 3352 ERROUT(EBUSY); 3353 } 3354 3355 if ((error = nvlist_error(nvl))) { 3356 PF_RULES_WUNLOCK(); 3357 ERROUT(error); 3358 } 3359 3360 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3361 while ((rule != NULL) && (rule->nr != nr)) 3362 rule = TAILQ_NEXT(rule, entries); 3363 if (rule == NULL) { 3364 PF_RULES_WUNLOCK(); 3365 ERROUT(EBUSY); 3366 } 3367 3368 nvrule = pf_krule_to_nvrule(rule); 3369 3370 nvlist_destroy(nvl); 3371 nvl = nvlist_create(0); 3372 if (nvl == NULL) { 3373 PF_RULES_WUNLOCK(); 3374 ERROUT(ENOMEM); 3375 } 3376 nvlist_add_number(nvl, "nr", nr); 3377 nvlist_add_nvlist(nvl, "rule", nvrule); 3378 nvlist_destroy(nvrule); 3379 nvrule = NULL; 3380 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3381 PF_RULES_WUNLOCK(); 3382 ERROUT(EBUSY); 3383 } 3384 3385 free(nvlpacked, M_NVLIST); 3386 nvlpacked = nvlist_pack(nvl, &nv->len); 3387 if (nvlpacked == NULL) { 3388 PF_RULES_WUNLOCK(); 3389 ERROUT(ENOMEM); 3390 } 3391 3392 if (nv->size == 0) { 3393 PF_RULES_WUNLOCK(); 3394 ERROUT(0); 3395 } 3396 else if (nv->size < nv->len) { 3397 PF_RULES_WUNLOCK(); 3398 ERROUT(ENOSPC); 3399 } 3400 3401 if (clear_counter) { 3402 pf_counter_u64_zero(&rule->evaluations); 3403 for (int i = 0; i < 2; i++) { 3404 pf_counter_u64_zero(&rule->packets[i]); 3405 pf_counter_u64_zero(&rule->bytes[i]); 3406 } 3407 counter_u64_zero(rule->states_tot); 3408 } 3409 PF_RULES_WUNLOCK(); 3410 3411 error = copyout(nvlpacked, nv->data, nv->len); 3412 3413 #undef ERROUT 3414 DIOCGETRULENV_error: 3415 free(nvlpacked, M_NVLIST); 3416 nvlist_destroy(nvrule); 3417 nvlist_destroy(nvl); 3418 3419 break; 3420 } 3421 3422 case DIOCCHANGERULE: { 3423 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3424 struct pf_kruleset *ruleset; 3425 struct pf_krule *oldrule = NULL, *newrule = NULL; 3426 struct pfi_kkif *kif = NULL; 3427 struct 
pf_kpooladdr *pa; 3428 u_int32_t nr = 0; 3429 int rs_num; 3430 3431 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3432 3433 if (pcr->action < PF_CHANGE_ADD_HEAD || 3434 pcr->action > PF_CHANGE_GET_TICKET) { 3435 error = EINVAL; 3436 break; 3437 } 3438 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3439 error = EINVAL; 3440 break; 3441 } 3442 3443 if (pcr->action != PF_CHANGE_REMOVE) { 3444 newrule = pf_krule_alloc(); 3445 error = pf_rule_to_krule(&pcr->rule, newrule); 3446 if (error != 0) { 3447 pf_krule_free(newrule); 3448 break; 3449 } 3450 3451 if (newrule->ifname[0]) 3452 kif = pf_kkif_create(M_WAITOK); 3453 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3454 for (int i = 0; i < 2; i++) { 3455 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3456 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3457 } 3458 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3459 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3460 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3461 newrule->cuid = td->td_ucred->cr_ruid; 3462 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3463 TAILQ_INIT(&newrule->rpool.list); 3464 } 3465 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3466 3467 PF_CONFIG_LOCK(); 3468 PF_RULES_WLOCK(); 3469 #ifdef PF_WANT_32_TO_64_COUNTER 3470 if (newrule != NULL) { 3471 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3472 newrule->allrulelinked = true; 3473 V_pf_allrulecount++; 3474 } 3475 #endif 3476 3477 if (!(pcr->action == PF_CHANGE_REMOVE || 3478 pcr->action == PF_CHANGE_GET_TICKET) && 3479 pcr->pool_ticket != V_ticket_pabuf) 3480 ERROUT(EBUSY); 3481 3482 ruleset = pf_find_kruleset(pcr->anchor); 3483 if (ruleset == NULL) 3484 ERROUT(EINVAL); 3485 3486 rs_num = pf_get_ruleset_number(pcr->rule.action); 3487 if (rs_num >= PF_RULESET_MAX) 3488 ERROUT(EINVAL); 3489 3490 /* 3491 * XXXMJG: there is no guarantee that the ruleset was 3492 * created by the usual route of calling DIOCXBEGIN. 3493 * As a result it is possible the rule tree will not 3494 * be allocated yet. Hack around it by doing it here. 3495 * Note it is fine to let the tree persist in case of 3496 * error as it will be freed down the road on future 3497 * updates (if need be). 
3498 */ 3499 if (ruleset->rules[rs_num].active.tree == NULL) { 3500 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3501 if (ruleset->rules[rs_num].active.tree == NULL) { 3502 ERROUT(ENOMEM); 3503 } 3504 } 3505 3506 if (pcr->action == PF_CHANGE_GET_TICKET) { 3507 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3508 ERROUT(0); 3509 } else if (pcr->ticket != 3510 ruleset->rules[rs_num].active.ticket) 3511 ERROUT(EINVAL); 3512 3513 if (pcr->action != PF_CHANGE_REMOVE) { 3514 if (newrule->ifname[0]) { 3515 newrule->kif = pfi_kkif_attach(kif, 3516 newrule->ifname); 3517 kif = NULL; 3518 pfi_kkif_ref(newrule->kif); 3519 } else 3520 newrule->kif = NULL; 3521 3522 if (newrule->rtableid > 0 && 3523 newrule->rtableid >= rt_numfibs) 3524 error = EBUSY; 3525 3526 #ifdef ALTQ 3527 /* set queue IDs */ 3528 if (newrule->qname[0] != 0) { 3529 if ((newrule->qid = 3530 pf_qname2qid(newrule->qname)) == 0) 3531 error = EBUSY; 3532 else if (newrule->pqname[0] != 0) { 3533 if ((newrule->pqid = 3534 pf_qname2qid(newrule->pqname)) == 0) 3535 error = EBUSY; 3536 } else 3537 newrule->pqid = newrule->qid; 3538 } 3539 #endif /* ALTQ */ 3540 if (newrule->tagname[0]) 3541 if ((newrule->tag = 3542 pf_tagname2tag(newrule->tagname)) == 0) 3543 error = EBUSY; 3544 if (newrule->match_tagname[0]) 3545 if ((newrule->match_tag = pf_tagname2tag( 3546 newrule->match_tagname)) == 0) 3547 error = EBUSY; 3548 if (newrule->rt && !newrule->direction) 3549 error = EINVAL; 3550 if (!newrule->log) 3551 newrule->logif = 0; 3552 if (newrule->logif >= PFLOGIFS_MAX) 3553 error = EINVAL; 3554 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3555 error = ENOMEM; 3556 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3557 error = ENOMEM; 3558 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3559 error = EINVAL; 3560 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3561 if (pa->addr.type == PF_ADDR_TABLE) { 3562 pa->addr.p.tbl = 3563 pfr_attach_table(ruleset, 3564 pa->addr.v.tblname); 3565 if (pa->addr.p.tbl == NULL) 3566 error = ENOMEM; 3567 } 3568 3569 newrule->overload_tbl = NULL; 3570 if (newrule->overload_tblname[0]) { 3571 if ((newrule->overload_tbl = pfr_attach_table( 3572 ruleset, newrule->overload_tblname)) == 3573 NULL) 3574 error = EINVAL; 3575 else 3576 newrule->overload_tbl->pfrkt_flags |= 3577 PFR_TFLAG_ACTIVE; 3578 } 3579 3580 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3581 if (((((newrule->action == PF_NAT) || 3582 (newrule->action == PF_RDR) || 3583 (newrule->action == PF_BINAT) || 3584 (newrule->rt > PF_NOPFROUTE)) && 3585 !newrule->anchor)) && 3586 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3587 error = EINVAL; 3588 3589 if (error) { 3590 pf_free_rule(newrule); 3591 PF_RULES_WUNLOCK(); 3592 PF_CONFIG_UNLOCK(); 3593 break; 3594 } 3595 3596 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3597 } 3598 pf_empty_kpool(&V_pf_pabuf); 3599 3600 if (pcr->action == PF_CHANGE_ADD_HEAD) 3601 oldrule = TAILQ_FIRST( 3602 ruleset->rules[rs_num].active.ptr); 3603 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3604 oldrule = TAILQ_LAST( 3605 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3606 else { 3607 oldrule = TAILQ_FIRST( 3608 ruleset->rules[rs_num].active.ptr); 3609 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3610 oldrule = TAILQ_NEXT(oldrule, entries); 3611 if (oldrule == NULL) { 3612 if (newrule != NULL) 3613 pf_free_rule(newrule); 3614 PF_RULES_WUNLOCK(); 3615 PF_CONFIG_UNLOCK(); 3616 error = EINVAL; 3617 break; 3618 } 3619 } 3620 3621 if (pcr->action == 
PF_CHANGE_REMOVE) { 3622 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3623 oldrule); 3624 RB_REMOVE(pf_krule_global, 3625 ruleset->rules[rs_num].active.tree, oldrule); 3626 ruleset->rules[rs_num].active.rcount--; 3627 } else { 3628 pf_hash_rule(newrule); 3629 if (RB_INSERT(pf_krule_global, 3630 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3631 pf_free_rule(newrule); 3632 PF_RULES_WUNLOCK(); 3633 PF_CONFIG_UNLOCK(); 3634 error = EEXIST; 3635 break; 3636 } 3637 3638 if (oldrule == NULL) 3639 TAILQ_INSERT_TAIL( 3640 ruleset->rules[rs_num].active.ptr, 3641 newrule, entries); 3642 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3643 pcr->action == PF_CHANGE_ADD_BEFORE) 3644 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3645 else 3646 TAILQ_INSERT_AFTER( 3647 ruleset->rules[rs_num].active.ptr, 3648 oldrule, newrule, entries); 3649 ruleset->rules[rs_num].active.rcount++; 3650 } 3651 3652 nr = 0; 3653 TAILQ_FOREACH(oldrule, 3654 ruleset->rules[rs_num].active.ptr, entries) 3655 oldrule->nr = nr++; 3656 3657 ruleset->rules[rs_num].active.ticket++; 3658 3659 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3660 pf_remove_if_empty_kruleset(ruleset); 3661 3662 PF_RULES_WUNLOCK(); 3663 PF_CONFIG_UNLOCK(); 3664 break; 3665 3666 #undef ERROUT 3667 DIOCCHANGERULE_error: 3668 PF_RULES_WUNLOCK(); 3669 PF_CONFIG_UNLOCK(); 3670 pf_krule_free(newrule); 3671 pf_kkif_free(kif); 3672 break; 3673 } 3674 3675 case DIOCCLRSTATES: { 3676 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3677 struct pf_kstate_kill kill; 3678 3679 error = pf_state_kill_to_kstate_kill(psk, &kill); 3680 if (error) 3681 break; 3682 3683 psk->psk_killed = pf_clear_states(&kill); 3684 break; 3685 } 3686 3687 case DIOCCLRSTATESNV: { 3688 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3689 break; 3690 } 3691 3692 case DIOCKILLSTATES: { 3693 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3694 struct pf_kstate_kill kill; 3695 3696 error = pf_state_kill_to_kstate_kill(psk, &kill); 3697 if (error) 3698 break; 3699 3700 psk->psk_killed = 0; 3701 pf_killstates(&kill, &psk->psk_killed); 3702 break; 3703 } 3704 3705 case DIOCKILLSTATESNV: { 3706 error = pf_killstates_nv((struct pfioc_nv *)addr); 3707 break; 3708 } 3709 3710 case DIOCADDSTATE: { 3711 struct pfioc_state *ps = (struct pfioc_state *)addr; 3712 struct pfsync_state *sp = &ps->state; 3713 3714 if (sp->timeout >= PFTM_MAX) { 3715 error = EINVAL; 3716 break; 3717 } 3718 if (V_pfsync_state_import_ptr != NULL) { 3719 PF_RULES_RLOCK(); 3720 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 3721 PF_RULES_RUNLOCK(); 3722 } else 3723 error = EOPNOTSUPP; 3724 break; 3725 } 3726 3727 case DIOCGETSTATE: { 3728 struct pfioc_state *ps = (struct pfioc_state *)addr; 3729 struct pf_kstate *s; 3730 3731 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3732 if (s == NULL) { 3733 error = ENOENT; 3734 break; 3735 } 3736 3737 pfsync_state_export(&ps->state, s); 3738 PF_STATE_UNLOCK(s); 3739 break; 3740 } 3741 3742 case DIOCGETSTATENV: { 3743 error = pf_getstate((struct pfioc_nv *)addr); 3744 break; 3745 } 3746 3747 case DIOCGETSTATES: { 3748 struct pfioc_states *ps = (struct pfioc_states *)addr; 3749 struct pf_kstate *s; 3750 struct pfsync_state *pstore, *p; 3751 int i, nr; 3752 size_t slice_count = 16, count; 3753 void *out; 3754 3755 if (ps->ps_len <= 0) { 3756 nr = uma_zone_get_cur(V_pf_state_z); 3757 ps->ps_len = sizeof(struct pfsync_state) * nr; 3758 break; 3759 } 3760 3761 out = ps->ps_states; 3762 pstore = mallocarray(slice_count, 
3763 sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); 3764 nr = 0; 3765 3766 for (i = 0; i <= pf_hashmask; i++) { 3767 struct pf_idhash *ih = &V_pf_idhash[i]; 3768 3769 DIOCGETSTATES_retry: 3770 p = pstore; 3771 3772 if (LIST_EMPTY(&ih->states)) 3773 continue; 3774 3775 PF_HASHROW_LOCK(ih); 3776 count = 0; 3777 LIST_FOREACH(s, &ih->states, entry) { 3778 if (s->timeout == PFTM_UNLINKED) 3779 continue; 3780 count++; 3781 } 3782 3783 if (count > slice_count) { 3784 PF_HASHROW_UNLOCK(ih); 3785 free(pstore, M_TEMP); 3786 slice_count = count * 2; 3787 pstore = mallocarray(slice_count, 3788 sizeof(struct pfsync_state), M_TEMP, 3789 M_WAITOK | M_ZERO); 3790 goto DIOCGETSTATES_retry; 3791 } 3792 3793 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3794 PF_HASHROW_UNLOCK(ih); 3795 goto DIOCGETSTATES_full; 3796 } 3797 3798 LIST_FOREACH(s, &ih->states, entry) { 3799 if (s->timeout == PFTM_UNLINKED) 3800 continue; 3801 3802 pfsync_state_export(p, s); 3803 p++; 3804 nr++; 3805 } 3806 PF_HASHROW_UNLOCK(ih); 3807 error = copyout(pstore, out, 3808 sizeof(struct pfsync_state) * count); 3809 if (error) 3810 break; 3811 out = ps->ps_states + nr; 3812 } 3813 DIOCGETSTATES_full: 3814 ps->ps_len = sizeof(struct pfsync_state) * nr; 3815 free(pstore, M_TEMP); 3816 3817 break; 3818 } 3819 3820 case DIOCGETSTATESV2: { 3821 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3822 struct pf_kstate *s; 3823 struct pf_state_export *pstore, *p; 3824 int i, nr; 3825 size_t slice_count = 16, count; 3826 void *out; 3827 3828 if (ps->ps_req_version > PF_STATE_VERSION) { 3829 error = ENOTSUP; 3830 break; 3831 } 3832 3833 if (ps->ps_len <= 0) { 3834 nr = uma_zone_get_cur(V_pf_state_z); 3835 ps->ps_len = sizeof(struct pf_state_export) * nr; 3836 break; 3837 } 3838 3839 out = ps->ps_states; 3840 pstore = mallocarray(slice_count, 3841 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3842 nr = 0; 3843 3844 for (i = 0; i <= pf_hashmask; i++) { 3845 struct pf_idhash *ih = &V_pf_idhash[i]; 3846 3847 DIOCGETSTATESV2_retry: 3848 p = pstore; 3849 3850 if (LIST_EMPTY(&ih->states)) 3851 continue; 3852 3853 PF_HASHROW_LOCK(ih); 3854 count = 0; 3855 LIST_FOREACH(s, &ih->states, entry) { 3856 if (s->timeout == PFTM_UNLINKED) 3857 continue; 3858 count++; 3859 } 3860 3861 if (count > slice_count) { 3862 PF_HASHROW_UNLOCK(ih); 3863 free(pstore, M_TEMP); 3864 slice_count = count * 2; 3865 pstore = mallocarray(slice_count, 3866 sizeof(struct pf_state_export), M_TEMP, 3867 M_WAITOK | M_ZERO); 3868 goto DIOCGETSTATESV2_retry; 3869 } 3870 3871 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3872 PF_HASHROW_UNLOCK(ih); 3873 goto DIOCGETSTATESV2_full; 3874 } 3875 3876 LIST_FOREACH(s, &ih->states, entry) { 3877 if (s->timeout == PFTM_UNLINKED) 3878 continue; 3879 3880 pf_state_export(p, s); 3881 p++; 3882 nr++; 3883 } 3884 PF_HASHROW_UNLOCK(ih); 3885 error = copyout(pstore, out, 3886 sizeof(struct pf_state_export) * count); 3887 if (error) 3888 break; 3889 out = ps->ps_states + nr; 3890 } 3891 DIOCGETSTATESV2_full: 3892 ps->ps_len = nr * sizeof(struct pf_state_export); 3893 free(pstore, M_TEMP); 3894 3895 break; 3896 } 3897 3898 case DIOCGETSTATUS: { 3899 struct pf_status *s = (struct pf_status *)addr; 3900 3901 PF_RULES_RLOCK(); 3902 s->running = V_pf_status.running; 3903 s->since = V_pf_status.since; 3904 s->debug = V_pf_status.debug; 3905 s->hostid = V_pf_status.hostid; 3906 s->states = V_pf_status.states; 3907 s->src_nodes = V_pf_status.src_nodes; 3908 3909 for (int i = 0; i < PFRES_MAX; i++) 3910 s->counters[i] = 3911 
counter_u64_fetch(V_pf_status.counters[i]); 3912 for (int i = 0; i < LCNT_MAX; i++) 3913 s->lcounters[i] = 3914 counter_u64_fetch(V_pf_status.lcounters[i]); 3915 for (int i = 0; i < FCNT_MAX; i++) 3916 s->fcounters[i] = 3917 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3918 for (int i = 0; i < SCNT_MAX; i++) 3919 s->scounters[i] = 3920 counter_u64_fetch(V_pf_status.scounters[i]); 3921 3922 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3923 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3924 PF_MD5_DIGEST_LENGTH); 3925 3926 pfi_update_status(s->ifname, s); 3927 PF_RULES_RUNLOCK(); 3928 break; 3929 } 3930 3931 case DIOCGETSTATUSNV: { 3932 error = pf_getstatus((struct pfioc_nv *)addr); 3933 break; 3934 } 3935 3936 case DIOCSETSTATUSIF: { 3937 struct pfioc_if *pi = (struct pfioc_if *)addr; 3938 3939 if (pi->ifname[0] == 0) { 3940 bzero(V_pf_status.ifname, IFNAMSIZ); 3941 break; 3942 } 3943 PF_RULES_WLOCK(); 3944 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3945 PF_RULES_WUNLOCK(); 3946 break; 3947 } 3948 3949 case DIOCCLRSTATUS: { 3950 PF_RULES_WLOCK(); 3951 for (int i = 0; i < PFRES_MAX; i++) 3952 counter_u64_zero(V_pf_status.counters[i]); 3953 for (int i = 0; i < FCNT_MAX; i++) 3954 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3955 for (int i = 0; i < SCNT_MAX; i++) 3956 counter_u64_zero(V_pf_status.scounters[i]); 3957 for (int i = 0; i < KLCNT_MAX; i++) 3958 counter_u64_zero(V_pf_status.lcounters[i]); 3959 V_pf_status.since = time_second; 3960 if (*V_pf_status.ifname) 3961 pfi_update_status(V_pf_status.ifname, NULL); 3962 PF_RULES_WUNLOCK(); 3963 break; 3964 } 3965 3966 case DIOCNATLOOK: { 3967 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3968 struct pf_state_key *sk; 3969 struct pf_kstate *state; 3970 struct pf_state_key_cmp key; 3971 int m = 0, direction = pnl->direction; 3972 int sidx, didx; 3973 3974 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3975 sidx = (direction == PF_IN) ? 1 : 0; 3976 didx = (direction == PF_IN) ? 
0 : 1; 3977 3978 if (!pnl->proto || 3979 PF_AZERO(&pnl->saddr, pnl->af) || 3980 PF_AZERO(&pnl->daddr, pnl->af) || 3981 ((pnl->proto == IPPROTO_TCP || 3982 pnl->proto == IPPROTO_UDP) && 3983 (!pnl->dport || !pnl->sport))) 3984 error = EINVAL; 3985 else { 3986 bzero(&key, sizeof(key)); 3987 key.af = pnl->af; 3988 key.proto = pnl->proto; 3989 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3990 key.port[sidx] = pnl->sport; 3991 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3992 key.port[didx] = pnl->dport; 3993 3994 state = pf_find_state_all(&key, direction, &m); 3995 if (state == NULL) { 3996 error = ENOENT; 3997 } else { 3998 if (m > 1) { 3999 PF_STATE_UNLOCK(state); 4000 error = E2BIG; /* more than one state */ 4001 } else { 4002 sk = state->key[sidx]; 4003 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 4004 pnl->rsport = sk->port[sidx]; 4005 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 4006 pnl->rdport = sk->port[didx]; 4007 PF_STATE_UNLOCK(state); 4008 } 4009 } 4010 } 4011 break; 4012 } 4013 4014 case DIOCSETTIMEOUT: { 4015 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4016 int old; 4017 4018 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 4019 pt->seconds < 0) { 4020 error = EINVAL; 4021 break; 4022 } 4023 PF_RULES_WLOCK(); 4024 old = V_pf_default_rule.timeout[pt->timeout]; 4025 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 4026 pt->seconds = 1; 4027 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 4028 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 4029 wakeup(pf_purge_thread); 4030 pt->seconds = old; 4031 PF_RULES_WUNLOCK(); 4032 break; 4033 } 4034 4035 case DIOCGETTIMEOUT: { 4036 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4037 4038 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 4039 error = EINVAL; 4040 break; 4041 } 4042 PF_RULES_RLOCK(); 4043 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 4044 PF_RULES_RUNLOCK(); 4045 break; 4046 } 4047 4048 case DIOCGETLIMIT: { 4049 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4050 4051 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 4052 error = EINVAL; 4053 break; 4054 } 4055 PF_RULES_RLOCK(); 4056 pl->limit = V_pf_limits[pl->index].limit; 4057 PF_RULES_RUNLOCK(); 4058 break; 4059 } 4060 4061 case DIOCSETLIMIT: { 4062 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4063 int old_limit; 4064 4065 PF_RULES_WLOCK(); 4066 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 4067 V_pf_limits[pl->index].zone == NULL) { 4068 PF_RULES_WUNLOCK(); 4069 error = EINVAL; 4070 break; 4071 } 4072 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 4073 old_limit = V_pf_limits[pl->index].limit; 4074 V_pf_limits[pl->index].limit = pl->limit; 4075 pl->limit = old_limit; 4076 PF_RULES_WUNLOCK(); 4077 break; 4078 } 4079 4080 case DIOCSETDEBUG: { 4081 u_int32_t *level = (u_int32_t *)addr; 4082 4083 PF_RULES_WLOCK(); 4084 V_pf_status.debug = *level; 4085 PF_RULES_WUNLOCK(); 4086 break; 4087 } 4088 4089 case DIOCCLRRULECTRS: { 4090 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4091 struct pf_kruleset *ruleset = &pf_main_ruleset; 4092 struct pf_krule *rule; 4093 4094 PF_RULES_WLOCK(); 4095 TAILQ_FOREACH(rule, 4096 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4097 pf_counter_u64_zero(&rule->evaluations); 4098 for (int i = 0; i < 2; i++) { 4099 pf_counter_u64_zero(&rule->packets[i]); 4100 pf_counter_u64_zero(&rule->bytes[i]); 4101 } 4102 } 4103 PF_RULES_WUNLOCK(); 4104 break; 4105 } 4106 4107 case DIOCGIFSPEEDV0: 4108 case DIOCGIFSPEEDV1: { 4109 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 4110 struct pf_ifspeed_v1 ps; 4111 struct ifnet *ifp; 4112 4113 if (psp->ifname[0] == '\0') { 4114 error = EINVAL; 4115 break; 4116 } 4117 4118 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4119 if (error != 0) 4120 break; 4121 ifp = ifunit(ps.ifname); 4122 if (ifp != NULL) { 4123 psp->baudrate32 = 4124 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4125 if (cmd == DIOCGIFSPEEDV1) 4126 psp->baudrate = ifp->if_baudrate; 4127 } else { 4128 error = EINVAL; 4129 } 4130 break; 4131 } 4132 4133 #ifdef ALTQ 4134 case DIOCSTARTALTQ: { 4135 struct pf_altq *altq; 4136 4137 PF_RULES_WLOCK(); 4138 /* enable all altq interfaces on active list */ 4139 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4140 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4141 error = pf_enable_altq(altq); 4142 if (error != 0) 4143 break; 4144 } 4145 } 4146 if (error == 0) 4147 V_pf_altq_running = 1; 4148 PF_RULES_WUNLOCK(); 4149 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4150 break; 4151 } 4152 4153 case DIOCSTOPALTQ: { 4154 struct pf_altq *altq; 4155 4156 PF_RULES_WLOCK(); 4157 /* disable all altq interfaces on active list */ 4158 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4159 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4160 error = pf_disable_altq(altq); 4161 if (error != 0) 4162 break; 4163 } 4164 } 4165 if (error == 0) 4166 V_pf_altq_running = 0; 4167 PF_RULES_WUNLOCK(); 4168 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4169 break; 4170 } 4171 4172 case DIOCADDALTQV0: 4173 case DIOCADDALTQV1: { 4174 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4175 struct pf_altq *altq, *a; 4176 struct ifnet *ifp; 4177 4178 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4179 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4180 if (error) 4181 break; 4182 altq->local_flags = 0; 4183 4184 PF_RULES_WLOCK(); 4185 if (pa->ticket != V_ticket_altqs_inactive) { 4186 PF_RULES_WUNLOCK(); 4187 free(altq, M_PFALTQ); 4188 error = EBUSY; 4189 break; 4190 } 4191 4192 /* 4193 * if this is for a queue, find the discipline and 4194 * copy the necessary fields 4195 */ 4196 if (altq->qname[0] != 0) { 4197 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4198 PF_RULES_WUNLOCK(); 4199 error = EBUSY; 4200 free(altq, M_PFALTQ); 4201 break; 4202 } 4203 altq->altq_disc = NULL; 4204 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4205 if (strncmp(a->ifname, altq->ifname, 4206 IFNAMSIZ) == 0) { 4207 altq->altq_disc = a->altq_disc; 4208 break; 4209 } 4210 } 4211 } 4212 4213 if ((ifp = ifunit(altq->ifname)) == NULL) 4214 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4215 else 4216 error = altq_add(ifp, altq); 4217 4218 if (error) { 4219 PF_RULES_WUNLOCK(); 4220 free(altq, M_PFALTQ); 4221 break; 4222 } 4223 4224 if (altq->qname[0] != 0) 4225 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4226 else 4227 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4228 /* version error check done on import above */ 4229 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4230 PF_RULES_WUNLOCK(); 4231 break; 4232 } 4233 4234 case DIOCGETALTQSV0: 4235 case DIOCGETALTQSV1: { 4236 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4237 struct pf_altq *altq; 4238 4239 PF_RULES_RLOCK(); 4240 pa->nr = 0; 4241 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4242 pa->nr++; 4243 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4244 pa->nr++; 4245 pa->ticket = V_ticket_altqs_active; 4246 PF_RULES_RUNLOCK(); 4247 break; 4248 } 4249 4250 case DIOCGETALTQV0: 4251 
	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq *altq;
		int nbytes;
		u_int32_t version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;	/* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpooladdr *pa;
		struct pfi_kkif *kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
		if (error != 0)
			break;
		if (pa->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				pf_kkif_free(kif);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kkif_attach(kif, pa->ifname);
			kif = NULL;
			pfi_kkif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kkif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}
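	/*
	 * Usage sketch (hypothetical userland caller): a redirection
	 * pool is built by grabbing a fresh ticket with DIOCBEGINADDRS
	 * and then pushing entries with DIOCADDADDR under that ticket;
	 * the buffered pool is later bound to a rule when the rule is
	 * added.  The address below is only an example.
	 *
	 *	struct pfioc_pooladdr pp;
	 *	memset(&pp, 0, sizeof(pp));
	 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
	 *		err(1, "DIOCBEGINADDRS");
	 *	pp.af = AF_INET;
	 *	pp.addr.addr.type = PF_ADDR_ADDRMASK;
	 *	inet_pton(AF_INET, "192.0.2.1", &pp.addr.addr.v.a.addr.v4);
	 *	memset(&pp.addr.addr.v.a.mask.v4, 0xff,
	 *	    sizeof(pp.addr.addr.v.a.mask.v4));
	 *	if (ioctl(dev, DIOCADDADDR, &pp) == -1)
	 *		err(1, "DIOCADDADDR");
	 */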
	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}
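	/*
	 * Usage sketch (hypothetical userland caller): pool addresses
	 * are read back the same way as queues; DIOCGETADDRS yields
	 * the entry count for a rule identified by anchor/ticket/
	 * r_action/r_num (assumed filled in by the caller), then
	 * DIOCGETADDR is called once per index in pp.nr.
	 *
	 *	u_int32_t count;
	 *	if (ioctl(dev, DIOCGETADDRS, &pp) == -1)
	 *		err(1, "DIOCGETADDRS");
	 *	count = pp.nr;
	 *	for (u_int32_t nr = 0; nr < count; nr++) {
	 *		pp.nr = nr;
	 *		if (ioctl(dev, DIOCGETADDR, &pp) == -1)
	 *			err(1, "DIOCGETADDR");
	 *	}
	 */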
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_kruleset *ruleset;
		struct pfi_kkif *kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset *ruleset;
		struct pf_kanchor *anchor;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset *ruleset;
		struct pf_kanchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}
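	/*
	 * Usage sketch (hypothetical userland caller): the DIOCR* table
	 * ioctls all carry an array of fixed-size records; pfrio_esize
	 * must match the kernel's record size exactly (the ENODEV
	 * checks above) and pfrio_size gives the element count.
	 * Adding a single persistent table:
	 *
	 *	struct pfr_table tbl;
	 *	struct pfioc_table io;
	 *	memset(&tbl, 0, sizeof(tbl));
	 *	memset(&io, 0, sizeof(io));
	 *	strlcpy(tbl.pfrt_name, "badhosts", sizeof(tbl.pfrt_name));
	 *	tbl.pfrt_flags = PFR_TFLAG_PERSIST;
	 *	io.pfrio_buffer = &tbl;
	 *	io.pfrio_esize = sizeof(tbl);
	 *	io.pfrio_size = 1;
	 *	if (ioctl(dev, DIOCRADDTABLES, &io) == -1)
	 *		err(1, "DIOCRADDTABLES");
	 */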
	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/*
			 * We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so.
			 */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
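	/*
	 * Usage sketch (hypothetical userland caller): with
	 * PFR_FLAG_FEEDBACK set, the address array is copied back out
	 * after the operation (the conditional copyout calls above),
	 * so each pfr_addr's pfra_fback field reports per entry what
	 * happened.  'io' and 'addrs' are assumed to be the caller's
	 * request and its pfr_addr array.
	 *
	 *	io.pfrio_flags |= PFR_FLAG_FEEDBACK;
	 *	if (ioctl(dev, DIOCRADDADDRS, &io) == -1)
	 *		err(1, "DIOCRADDADDRS");
	 *	for (int i = 0; i < io.pfrio_size; i++)
	 *		if (addrs[i].pfra_fback != PFR_FB_ADDED)
	 *			warnx("entry %d was not added", i);
	 */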
	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioes, *ioe;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* Ensure there are no more Ethernet rules to clean up. */
		NET_EPOCH_DRAIN_CALLBACKS();
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}
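	/*
	 * Usage sketch (hypothetical userland caller): a ruleset load
	 * is a transaction carrying one pfioc_trans_e element per
	 * ruleset touched.  DIOCXBEGIN hands back a ticket per
	 * element; the same array is then passed to DIOCXCOMMIT (or
	 * DIOCXROLLBACK on error), which re-validates every ticket
	 * before swapping anything in.
	 *
	 *	struct pfioc_trans_e es[1];
	 *	struct pfioc_trans t;
	 *	memset(es, 0, sizeof(es));
	 *	memset(&t, 0, sizeof(t));
	 *	es[0].rs_num = PF_RULESET_FILTER;
	 *	t.size = 1;
	 *	t.esize = sizeof(es[0]);
	 *	t.array = es;
	 *	if (ioctl(dev, DIOCXBEGIN, &t) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	(load rules into the inactive set under es[0].ticket)
	 *	if (ioctl(dev, DIOCXCOMMIT, &t) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 */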
	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_rollback_eth(ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		struct pf_kruleset *rs;
		struct pf_keth_ruleset *ers;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				ers = pf_find_keth_ruleset(ioe->anchor);
				if (ers == NULL || ioe->ticket == 0 ||
				    ioe->ticket != ers->inactive.ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();

		/* Only hook into Ethernet traffic if we've got rules for it. */
		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		else
			dehook_pf_eth();

		free(ioes, M_TEMP);
		break;
	}
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash *sh;
		struct pf_ksrc_node *n;
		struct pf_src_node *p, *pstore;
		uint32_t i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

#ifdef COMPAT_FREEBSD13
	case DIOCKEEPCOUNTERS_FREEBSD13:
#endif
	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}
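	/*
	 * Usage sketch (hypothetical userland caller): DIOCGETSRCNODES
	 * above uses a two-pass sizing protocol.  A first call with
	 * psn_len == 0 only reports the required buffer size; the
	 * caller then allocates and calls again.
	 *
	 *	struct pfioc_src_nodes psn;
	 *	memset(&psn, 0, sizeof(psn));
	 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
	 *		err(1, "DIOCGETSRCNODES");
	 *	psn.psn_buf = malloc(psn.psn_len);
	 *	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)
	 *		err(1, "DIOCGETSRCNODES");
	 *	(psn.psn_len now holds the number of bytes copied out.)
	 */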
	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		if (io->pfiio_size < 0 ||
		    io->pfiio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
			error = EINVAL;
			break;
		}

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
		    M_TEMP, M_WAITOK | M_ZERO);

		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&pf_ioctl_lock))
		sx_xunlock(&pf_ioctl_lock);
	CURVNET_RESTORE();

#undef ERROUT_IOCTL

	return (error);
}
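/*
 * The export helpers below flatten a kernel pf_kstate into the
 * pfsync/userland representations.  Fields that cross the wire
 * (creation, expire, rule numbers) are converted to network byte
 * order, and expire is rebased from an absolute uptime to seconds
 * remaining.
 */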
void
pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}

void
pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));

	sp->version = PF_STATE_VERSION;

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
	    sizeof(sp->orig_ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	sp->packets[0] = st->packets[0];
	sp->packets[1] = st->packets[1];
	sp->bytes[0] = st->bytes[0];
	sp->bytes[1] = st->bytes[1];
}
static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t *nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names",
		    names[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}

static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvc = NULL;
	void *nvlpacked = NULL;
	int error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

#define	ERROUT(x)	ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
	nvlist_add_bool(nvl, "syncookies_active",
	    V_pf_status.syncookies_active);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/* fcounters */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}
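/*
 * Usage sketch (hypothetical userland caller): the nvlist-based ioctls
 * share the pfioc_nv sizing protocol seen in pf_getstatus() above: if
 * nv.size is 0 the kernel only reports the packed length in nv.len
 * (the ERROUT(0) path), and nv.size smaller than nv.len yields ENOSPC.
 * DIOCGETSTATUSNV is assumed to be the command that reaches
 * pf_getstatus().
 *
 *	struct pfioc_nv nv;
 *	memset(&nv, 0, sizeof(nv));
 *	if (ioctl(dev, DIOCGETSTATUSNV, &nv) == -1)
 *		err(1, "DIOCGETSTATUSNV");
 *	nv.data = malloc(nv.len);
 *	nv.size = nv.len;
 *	if (ioctl(dev, DIOCGETSTATUSNV, &nv) == -1)
 *		err(1, "DIOCGETSTATUSNV");
 *	(nv.data now holds a packed nvlist to hand to nvlist_unpack().)
 */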
/*
 * XXX - Check for version mismatch!!!
 */
static void
pf_clear_all_states(void)
{
	struct pf_kstate *s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_kstate *s;
	int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}
static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;

#define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (!nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp match_key;
	struct pf_kstate *s;
	struct pfi_kkif *kif;
	int idx;
	unsigned int killed = 0, dir;

	for (unsigned int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname,
			    kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}

static void
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_kstate *s;

	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_unlink_state(s);
			*killed = 1;
		}
		return;
	}

	for (unsigned int i = 0; i <= pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);

	return;
}
static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed = 0;

#define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	pf_killstates(&kill, &killed);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_kstate *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define	ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}
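/*
 * Usage sketch (hypothetical userland caller): pf_getstate() above is
 * driven by a packed nvlist holding the 64-bit state "id" and
 * "creatorid"; the reply wraps the state in a nested "state" nvlist.
 * DIOCGETSTATENV is assumed to be the command that reaches it.
 *
 *	nvlist_t *req = nvlist_create(0);
 *	struct pfioc_nv nv;
 *	nvlist_add_number(req, "id", id);
 *	nvlist_add_number(req, "creatorid", creatorid);
 *	nv.data = nvlist_pack(req, &nv.len);
 *	nv.size = nv.len;
 *	(then ioctl(dev, DIOCGETSTATENV, &nv) twice per the sizing
 *	 protocol, and nvlist_unpack() on the returned buffer;
 *	 error handling elided.)
 */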
/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
			break;
		}
		pf_commit_eth(t[0], &nn);

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloc'd memory, so no cleanup is needed */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}
#endif
static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In the case of loopback traffic, IPv6 uses the real interface
	 * in order to support scoped addresses.  To support stateful
	 * filtering we have to change this to lo0, as is the case with
	 * IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags,
	    (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

	pha.pa_type = PFIL_TYPE_ETHERNET;
	pha.pa_func = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}
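
/*
 * Register pf's IPv4/IPv6 checkers with pfil(9) and link them to the
 * per-vnet inet/inet6 heads.  Both hook_pf() and hook_pf_eth() above
 * follow the same two-step pattern: pfil_add_hook() creates a hook from
 * a pfil_hook_args description, then pfil_link() attaches it to a head
 * by pointer (PFIL_HEADPTR | PFIL_HOOKPTR).  The atomic "hooked" flag
 * keeps registration idempotent.
 */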
static void
hook_pf(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_func = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_func = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&pf_ioctl_lock, "pf ioctl");
	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0,
	    "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}
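
/*
 * Per-vnet teardown.  Order matters: unhook from pfil(9) first so no new
 * packets enter pf, flush rules/states/tables via shutdown_pf(), then
 * drain net_epoch callbacks before freeing anything those callbacks may
 * still reference.  Counters are freed last because shutdown updates
 * them.
 */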
static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	rm_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);
	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done
		 * after the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
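
/*
 * Module glue.  MOD_UNLOAD is deliberately a no-op in pf_modevent();
 * actual teardown is driven by SYSUNINIT(pf_unload) so that it runs only
 * after every vnet's vnet_pf_uninit() has completed.  From userland the
 * module is used as, e.g.:
 *
 *	# kldload pf
 *	# pfctl -e		(enable the packet filter)
 */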
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);