/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

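/*
 * Per-VNET default rule, consulted when no configured rule matches a packet.
 * Whether it passes or drops depends on the PF_DEFAULT_TO_DROP kernel option
 * (see pfattach_vnet() below).
 */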
VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int		 pf_krule_compare(struct pf_krule *,
				    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z	 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
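 * A return of EINVAL therefore means no NUL was found within the first "sz"
 * bytes, i.e. the copy would have been truncated.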
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different than in OpenBSD where reassembly is enabled by
	 * default.  In FreeBSD we expect people to still use scrub rules and
	 * switch to the new syntax later.  Only once they switch must they
	 * explicitly enable reassembly.  We could change the default once the
	 * scrub rule functionality is hopefully removed some day in the future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int		 i;
	unsigned int		 hashsize;
	struct pf_tagname	*t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
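	 * (tagname2tag() links every pf_tagname into both namehash and
	 * taghash, so walking namehash alone visits each entry exactly once.)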
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
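	/*
	 * pf_rollback_eth_cb() runs from an epoch callback, i.e. only after
	 * all net epoch readers that may still reference the old active list
	 * have drained; it then frees those rules under the rules write lock.
	 */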
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
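	/*
	 * The previously active lists become the inactive ones; they are
	 * detached and freed further down, after the new disciplines have
	 * been attached.
	 */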
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
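	 * A stale altq_disc pointer (different from the interface's current
	 * one) indicates exactly that case.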
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq		*a1, *a2, *a3;
	u_int32_t		 ticket;
	int			 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global	*tree;
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX		ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global	*old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
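	/*
	 * Old and new rules are matched by their MD5 checksum, the key used
	 * by pf_krule_compare() for the RB tree, so a rule that is unchanged
	 * across a reload keeps its evaluation, packet and byte counters.
	 */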
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
			    sizeof(struct pf_rule **),
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
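 * Version 0 consumers have narrower fields, so wider kernel values are
 * saturated via SATU16()/SATU32() below rather than silently truncated.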
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq		*altq;
	u_int32_t		 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (rule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
sizeof(rule->tagname)); 2040 if (ret != 0) 2041 return (ret); 2042 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 2043 sizeof(rule->match_tagname)); 2044 if (ret != 0) 2045 return (ret); 2046 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 2047 sizeof(rule->overload_tblname)); 2048 if (ret != 0) 2049 return (ret); 2050 2051 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 2052 2053 /* Don't allow userspace to set evaluations, packets or bytes. */ 2054 /* kif, anchor, overload_tbl are not copied over. */ 2055 2056 krule->os_fingerprint = rule->os_fingerprint; 2057 2058 krule->rtableid = rule->rtableid; 2059 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 2060 krule->max_states = rule->max_states; 2061 krule->max_src_nodes = rule->max_src_nodes; 2062 krule->max_src_states = rule->max_src_states; 2063 krule->max_src_conn = rule->max_src_conn; 2064 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 2065 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 2066 krule->qid = rule->qid; 2067 krule->pqid = rule->pqid; 2068 krule->nr = rule->nr; 2069 krule->prob = rule->prob; 2070 krule->cuid = rule->cuid; 2071 krule->cpid = rule->cpid; 2072 2073 krule->return_icmp = rule->return_icmp; 2074 krule->return_icmp6 = rule->return_icmp6; 2075 krule->max_mss = rule->max_mss; 2076 krule->tag = rule->tag; 2077 krule->match_tag = rule->match_tag; 2078 krule->scrub_flags = rule->scrub_flags; 2079 2080 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2081 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2082 2083 krule->rule_flag = rule->rule_flag; 2084 krule->action = rule->action; 2085 krule->direction = rule->direction; 2086 krule->log = rule->log; 2087 krule->logif = rule->logif; 2088 krule->quick = rule->quick; 2089 krule->ifnot = rule->ifnot; 2090 krule->match_tag_not = rule->match_tag_not; 2091 krule->natpass = rule->natpass; 2092 2093 krule->keep_state = rule->keep_state; 2094 krule->af = rule->af; 2095 krule->proto = rule->proto; 2096 krule->type = rule->type; 2097 krule->code = rule->code; 2098 krule->flags = rule->flags; 2099 krule->flagset = rule->flagset; 2100 krule->min_ttl = rule->min_ttl; 2101 krule->allow_opts = rule->allow_opts; 2102 krule->rt = rule->rt; 2103 krule->return_ttl = rule->return_ttl; 2104 krule->tos = rule->tos; 2105 krule->set_tos = rule->set_tos; 2106 2107 krule->flush = rule->flush; 2108 krule->prio = rule->prio; 2109 krule->set_prio[0] = rule->set_prio[0]; 2110 krule->set_prio[1] = rule->set_prio[1]; 2111 2112 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2113 2114 return (0); 2115 } 2116 2117 static int 2118 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk, 2119 struct pf_kstate_kill *kill) 2120 { 2121 int ret; 2122 2123 bzero(kill, sizeof(*kill)); 2124 2125 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp)); 2126 kill->psk_af = psk->psk_af; 2127 kill->psk_proto = psk->psk_proto; 2128 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src)); 2129 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst)); 2130 ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname, 2131 sizeof(kill->psk_ifname)); 2132 if (ret != 0) 2133 return (ret); 2134 ret = pf_user_strcpy(kill->psk_label, psk->psk_label, 2135 sizeof(kill->psk_label)); 2136 if (ret != 0) 2137 return (ret); 2138 2139 return (0); 2140 } 2141 2142 static int 2143 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2144 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 
2145 struct thread *td) 2146 { 2147 struct pf_kruleset *ruleset; 2148 struct pf_krule *tail; 2149 struct pf_kpooladdr *pa; 2150 struct pfi_kkif *kif = NULL; 2151 int rs_num; 2152 int error = 0; 2153 2154 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2155 error = EINVAL; 2156 goto errout_unlocked; 2157 } 2158 2159 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2160 2161 if (rule->ifname[0]) 2162 kif = pf_kkif_create(M_WAITOK); 2163 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2164 for (int i = 0; i < 2; i++) { 2165 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2166 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2167 } 2168 rule->states_cur = counter_u64_alloc(M_WAITOK); 2169 rule->states_tot = counter_u64_alloc(M_WAITOK); 2170 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2171 rule->cuid = td->td_ucred->cr_ruid; 2172 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2173 TAILQ_INIT(&rule->rpool.list); 2174 2175 PF_CONFIG_LOCK(); 2176 PF_RULES_WLOCK(); 2177 #ifdef PF_WANT_32_TO_64_COUNTER 2178 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2179 MPASS(!rule->allrulelinked); 2180 rule->allrulelinked = true; 2181 V_pf_allrulecount++; 2182 #endif 2183 ruleset = pf_find_kruleset(anchor); 2184 if (ruleset == NULL) 2185 ERROUT(EINVAL); 2186 rs_num = pf_get_ruleset_number(rule->action); 2187 if (rs_num >= PF_RULESET_MAX) 2188 ERROUT(EINVAL); 2189 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2190 DPFPRINTF(PF_DEBUG_MISC, 2191 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2192 ruleset->rules[rs_num].inactive.ticket)); 2193 ERROUT(EBUSY); 2194 } 2195 if (pool_ticket != V_ticket_pabuf) { 2196 DPFPRINTF(PF_DEBUG_MISC, 2197 ("pool_ticket: %d != %d\n", pool_ticket, 2198 V_ticket_pabuf)); 2199 ERROUT(EBUSY); 2200 } 2201 /* 2202 * XXXMJG hack: there is no mechanism to ensure they started the 2203 * transaction. Ticket checked above may happen to match by accident, 2204 * even if nobody called DIOCXBEGIN, let alone this process. 2205 * Partially work around it by checking if the RB tree got allocated, 2206 * see pf_begin_rules. 
2207 */ 2208 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2209 ERROUT(EINVAL); 2210 } 2211 2212 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2213 pf_krulequeue); 2214 if (tail) 2215 rule->nr = tail->nr + 1; 2216 else 2217 rule->nr = 0; 2218 if (rule->ifname[0]) { 2219 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2220 kif = NULL; 2221 pfi_kkif_ref(rule->kif); 2222 } else 2223 rule->kif = NULL; 2224 2225 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2226 error = EBUSY; 2227 2228 #ifdef ALTQ 2229 /* set queue IDs */ 2230 if (rule->qname[0] != 0) { 2231 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2232 error = EBUSY; 2233 else if (rule->pqname[0] != 0) { 2234 if ((rule->pqid = 2235 pf_qname2qid(rule->pqname)) == 0) 2236 error = EBUSY; 2237 } else 2238 rule->pqid = rule->qid; 2239 } 2240 #endif 2241 if (rule->tagname[0]) 2242 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2243 error = EBUSY; 2244 if (rule->match_tagname[0]) 2245 if ((rule->match_tag = 2246 pf_tagname2tag(rule->match_tagname)) == 0) 2247 error = EBUSY; 2248 if (rule->rt && !rule->direction) 2249 error = EINVAL; 2250 if (!rule->log) 2251 rule->logif = 0; 2252 if (rule->logif >= PFLOGIFS_MAX) 2253 error = EINVAL; 2254 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2255 error = ENOMEM; 2256 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2257 error = ENOMEM; 2258 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2259 error = EINVAL; 2260 if (rule->scrub_flags & PFSTATE_SETPRIO && 2261 (rule->set_prio[0] > PF_PRIO_MAX || 2262 rule->set_prio[1] > PF_PRIO_MAX)) 2263 error = EINVAL; 2264 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2265 if (pa->addr.type == PF_ADDR_TABLE) { 2266 pa->addr.p.tbl = pfr_attach_table(ruleset, 2267 pa->addr.v.tblname); 2268 if (pa->addr.p.tbl == NULL) 2269 error = ENOMEM; 2270 } 2271 2272 rule->overload_tbl = NULL; 2273 if (rule->overload_tblname[0]) { 2274 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2275 rule->overload_tblname)) == NULL) 2276 error = EINVAL; 2277 else 2278 rule->overload_tbl->pfrkt_flags |= 2279 PFR_TFLAG_ACTIVE; 2280 } 2281 2282 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2283 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2284 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2285 (rule->rt > PF_NOPFROUTE)) && 2286 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2287 error = EINVAL; 2288 2289 if (error) { 2290 pf_free_rule(rule); 2291 rule = NULL; 2292 ERROUT(error); 2293 } 2294 2295 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2296 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2297 rule, entries); 2298 ruleset->rules[rs_num].inactive.rcount++; 2299 2300 PF_RULES_WUNLOCK(); 2301 pf_hash_rule(rule); 2302 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2303 PF_RULES_WLOCK(); 2304 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2305 ruleset->rules[rs_num].inactive.rcount--; 2306 pf_free_rule(rule); 2307 rule = NULL; 2308 ERROUT(EEXIST); 2309 } 2310 PF_CONFIG_UNLOCK(); 2311 2312 return (0); 2313 2314 #undef ERROUT 2315 errout: 2316 PF_RULES_WUNLOCK(); 2317 PF_CONFIG_UNLOCK(); 2318 errout_unlocked: 2319 pf_kkif_free(kif); 2320 pf_krule_free(rule); 2321 return (error); 2322 } 2323 2324 static bool 2325 pf_label_match(const struct pf_krule *rule, const char *label) 2326 { 2327 int i = 0; 2328 2329 while (*rule->label[i]) { 2330 if (strcmp(rule->label[i], label) == 0) 2331 return (true); 2332 i++; 2333 } 2334 2335 return (false); 2336 } 2337 2338 
static unsigned int 2339 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2340 { 2341 struct pf_kstate *s; 2342 int more = 0; 2343 2344 s = pf_find_state_all(key, dir, &more); 2345 if (s == NULL) 2346 return (0); 2347 2348 if (more) { 2349 PF_STATE_UNLOCK(s); 2350 return (0); 2351 } 2352 2353 pf_unlink_state(s); 2354 return (1); 2355 } 2356 2357 static int 2358 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2359 { 2360 struct pf_kstate *s; 2361 struct pf_state_key *sk; 2362 struct pf_addr *srcaddr, *dstaddr; 2363 struct pf_state_key_cmp match_key; 2364 int idx, killed = 0; 2365 unsigned int dir; 2366 u_int16_t srcport, dstport; 2367 struct pfi_kkif *kif; 2368 2369 relock_DIOCKILLSTATES: 2370 PF_HASHROW_LOCK(ih); 2371 LIST_FOREACH(s, &ih->states, entry) { 2372 /* For floating states look at the original kif. */ 2373 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2374 2375 sk = s->key[PF_SK_WIRE]; 2376 if (s->direction == PF_OUT) { 2377 srcaddr = &sk->addr[1]; 2378 dstaddr = &sk->addr[0]; 2379 srcport = sk->port[1]; 2380 dstport = sk->port[0]; 2381 } else { 2382 srcaddr = &sk->addr[0]; 2383 dstaddr = &sk->addr[1]; 2384 srcport = sk->port[0]; 2385 dstport = sk->port[1]; 2386 } 2387 2388 if (psk->psk_af && sk->af != psk->psk_af) 2389 continue; 2390 2391 if (psk->psk_proto && psk->psk_proto != sk->proto) 2392 continue; 2393 2394 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2395 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2396 continue; 2397 2398 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2399 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2400 continue; 2401 2402 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2403 &psk->psk_rt_addr.addr.v.a.addr, 2404 &psk->psk_rt_addr.addr.v.a.mask, 2405 &s->rt_addr, sk->af)) 2406 continue; 2407 2408 if (psk->psk_src.port_op != 0 && 2409 ! pf_match_port(psk->psk_src.port_op, 2410 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2411 continue; 2412 2413 if (psk->psk_dst.port_op != 0 && 2414 ! pf_match_port(psk->psk_dst.port_op, 2415 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2416 continue; 2417 2418 if (psk->psk_label[0] && 2419 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2420 continue; 2421 2422 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2423 kif->pfik_name)) 2424 continue; 2425 2426 if (psk->psk_kill_match) { 2427 /* Create the key to find matching states, with lock 2428 * held. 
*/ 2429 2430 bzero(&match_key, sizeof(match_key)); 2431 2432 if (s->direction == PF_OUT) { 2433 dir = PF_IN; 2434 idx = PF_SK_STACK; 2435 } else { 2436 dir = PF_OUT; 2437 idx = PF_SK_WIRE; 2438 } 2439 2440 match_key.af = s->key[idx]->af; 2441 match_key.proto = s->key[idx]->proto; 2442 PF_ACPY(&match_key.addr[0], 2443 &s->key[idx]->addr[1], match_key.af); 2444 match_key.port[0] = s->key[idx]->port[1]; 2445 PF_ACPY(&match_key.addr[1], 2446 &s->key[idx]->addr[0], match_key.af); 2447 match_key.port[1] = s->key[idx]->port[0]; 2448 } 2449 2450 pf_unlink_state(s); 2451 killed++; 2452 2453 if (psk->psk_kill_match) 2454 killed += pf_kill_matching_state(&match_key, dir); 2455 2456 goto relock_DIOCKILLSTATES; 2457 } 2458 PF_HASHROW_UNLOCK(ih); 2459 2460 return (killed); 2461 } 2462 2463 static int 2464 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2465 { 2466 int error = 0; 2467 PF_RULES_RLOCK_TRACKER; 2468 2469 #define ERROUT_IOCTL(target, x) \ 2470 do { \ 2471 error = (x); \ 2472 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2473 goto target; \ 2474 } while (0) 2475 2476 2477 /* XXX keep in sync with switch() below */ 2478 if (securelevel_gt(td->td_ucred, 2)) 2479 switch (cmd) { 2480 case DIOCGETRULES: 2481 case DIOCGETRULE: 2482 case DIOCGETRULENV: 2483 case DIOCGETADDRS: 2484 case DIOCGETADDR: 2485 case DIOCGETSTATE: 2486 case DIOCGETSTATENV: 2487 case DIOCSETSTATUSIF: 2488 case DIOCGETSTATUS: 2489 case DIOCGETSTATUSNV: 2490 case DIOCCLRSTATUS: 2491 case DIOCNATLOOK: 2492 case DIOCSETDEBUG: 2493 case DIOCGETSTATES: 2494 case DIOCGETSTATESV2: 2495 case DIOCGETTIMEOUT: 2496 case DIOCCLRRULECTRS: 2497 case DIOCGETLIMIT: 2498 case DIOCGETALTQSV0: 2499 case DIOCGETALTQSV1: 2500 case DIOCGETALTQV0: 2501 case DIOCGETALTQV1: 2502 case DIOCGETQSTATSV0: 2503 case DIOCGETQSTATSV1: 2504 case DIOCGETRULESETS: 2505 case DIOCGETRULESET: 2506 case DIOCRGETTABLES: 2507 case DIOCRGETTSTATS: 2508 case DIOCRCLRTSTATS: 2509 case DIOCRCLRADDRS: 2510 case DIOCRADDADDRS: 2511 case DIOCRDELADDRS: 2512 case DIOCRSETADDRS: 2513 case DIOCRGETADDRS: 2514 case DIOCRGETASTATS: 2515 case DIOCRCLRASTATS: 2516 case DIOCRTSTADDRS: 2517 case DIOCOSFPGET: 2518 case DIOCGETSRCNODES: 2519 case DIOCCLRSRCNODES: 2520 case DIOCGETSYNCOOKIES: 2521 case DIOCIGETIFACES: 2522 case DIOCGIFSPEEDV0: 2523 case DIOCGIFSPEEDV1: 2524 case DIOCSETIFFLAG: 2525 case DIOCCLRIFFLAG: 2526 case DIOCGETETHRULES: 2527 case DIOCGETETHRULE: 2528 case DIOCGETETHRULESETS: 2529 case DIOCGETETHRULESET: 2530 break; 2531 case DIOCRCLRTABLES: 2532 case DIOCRADDTABLES: 2533 case DIOCRDELTABLES: 2534 case DIOCRSETTFLAGS: 2535 if (((struct pfioc_table *)addr)->pfrio_flags & 2536 PFR_FLAG_DUMMY) 2537 break; /* dummy operation ok */ 2538 return (EPERM); 2539 default: 2540 return (EPERM); 2541 } 2542 2543 if (!(flags & FWRITE)) 2544 switch (cmd) { 2545 case DIOCGETRULES: 2546 case DIOCGETADDRS: 2547 case DIOCGETADDR: 2548 case DIOCGETSTATE: 2549 case DIOCGETSTATENV: 2550 case DIOCGETSTATUS: 2551 case DIOCGETSTATUSNV: 2552 case DIOCGETSTATES: 2553 case DIOCGETSTATESV2: 2554 case DIOCGETTIMEOUT: 2555 case DIOCGETLIMIT: 2556 case DIOCGETALTQSV0: 2557 case DIOCGETALTQSV1: 2558 case DIOCGETALTQV0: 2559 case DIOCGETALTQV1: 2560 case DIOCGETQSTATSV0: 2561 case DIOCGETQSTATSV1: 2562 case DIOCGETRULESETS: 2563 case DIOCGETRULESET: 2564 case DIOCNATLOOK: 2565 case DIOCRGETTABLES: 2566 case DIOCRGETTSTATS: 2567 case DIOCRGETADDRS: 2568 case DIOCRGETASTATS: 2569 case DIOCRTSTADDRS: 2570 case DIOCOSFPGET: 2571 case DIOCGETSRCNODES: 
2572 case DIOCGETSYNCOOKIES: 2573 case DIOCIGETIFACES: 2574 case DIOCGIFSPEEDV1: 2575 case DIOCGIFSPEEDV0: 2576 case DIOCGETRULENV: 2577 case DIOCGETETHRULES: 2578 case DIOCGETETHRULE: 2579 case DIOCGETETHRULESETS: 2580 case DIOCGETETHRULESET: 2581 break; 2582 case DIOCRCLRTABLES: 2583 case DIOCRADDTABLES: 2584 case DIOCRDELTABLES: 2585 case DIOCRCLRTSTATS: 2586 case DIOCRCLRADDRS: 2587 case DIOCRADDADDRS: 2588 case DIOCRDELADDRS: 2589 case DIOCRSETADDRS: 2590 case DIOCRSETTFLAGS: 2591 if (((struct pfioc_table *)addr)->pfrio_flags & 2592 PFR_FLAG_DUMMY) { 2593 flags |= FWRITE; /* need write lock for dummy */ 2594 break; /* dummy operation ok */ 2595 } 2596 return (EACCES); 2597 case DIOCGETRULE: 2598 if (((struct pfioc_rule *)addr)->action == 2599 PF_GET_CLR_CNTR) 2600 return (EACCES); 2601 break; 2602 default: 2603 return (EACCES); 2604 } 2605 2606 CURVNET_SET(TD_TO_VNET(td)); 2607 2608 switch (cmd) { 2609 case DIOCSTART: 2610 sx_xlock(&V_pf_ioctl_lock); 2611 if (V_pf_status.running) 2612 error = EEXIST; 2613 else { 2614 hook_pf(); 2615 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2616 hook_pf_eth(); 2617 V_pf_status.running = 1; 2618 V_pf_status.since = time_second; 2619 new_unrhdr64(&V_pf_stateid, time_second); 2620 2621 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2622 } 2623 break; 2624 2625 case DIOCSTOP: 2626 sx_xlock(&V_pf_ioctl_lock); 2627 if (!V_pf_status.running) 2628 error = ENOENT; 2629 else { 2630 V_pf_status.running = 0; 2631 dehook_pf(); 2632 dehook_pf_eth(); 2633 V_pf_status.since = time_second; 2634 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2635 } 2636 break; 2637 2638 case DIOCGETETHRULES: { 2639 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2640 nvlist_t *nvl; 2641 void *packed; 2642 struct pf_keth_rule *tail; 2643 struct pf_keth_ruleset *rs; 2644 u_int32_t ticket, nr; 2645 const char *anchor = ""; 2646 2647 nvl = NULL; 2648 packed = NULL; 2649 2650 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2651 2652 if (nv->len > pf_ioctl_maxcount) 2653 ERROUT(ENOMEM); 2654 2655 /* Copy the request in */ 2656 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2657 if (packed == NULL) 2658 ERROUT(ENOMEM); 2659 2660 error = copyin(nv->data, packed, nv->len); 2661 if (error) 2662 ERROUT(error); 2663 2664 nvl = nvlist_unpack(packed, nv->len, 0); 2665 if (nvl == NULL) 2666 ERROUT(EBADMSG); 2667 2668 if (! 
nvlist_exists_string(nvl, "anchor")) 2669 ERROUT(EBADMSG); 2670 2671 anchor = nvlist_get_string(nvl, "anchor"); 2672 2673 rs = pf_find_keth_ruleset(anchor); 2674 2675 nvlist_destroy(nvl); 2676 nvl = NULL; 2677 free(packed, M_NVLIST); 2678 packed = NULL; 2679 2680 if (rs == NULL) 2681 ERROUT(ENOENT); 2682 2683 /* Reply */ 2684 nvl = nvlist_create(0); 2685 if (nvl == NULL) 2686 ERROUT(ENOMEM); 2687 2688 PF_RULES_RLOCK(); 2689 2690 ticket = rs->active.ticket; 2691 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2692 if (tail) 2693 nr = tail->nr + 1; 2694 else 2695 nr = 0; 2696 2697 PF_RULES_RUNLOCK(); 2698 2699 nvlist_add_number(nvl, "ticket", ticket); 2700 nvlist_add_number(nvl, "nr", nr); 2701 2702 packed = nvlist_pack(nvl, &nv->len); 2703 if (packed == NULL) 2704 ERROUT(ENOMEM); 2705 2706 if (nv->size == 0) 2707 ERROUT(0); 2708 else if (nv->size < nv->len) 2709 ERROUT(ENOSPC); 2710 2711 error = copyout(packed, nv->data, nv->len); 2712 2713 #undef ERROUT 2714 DIOCGETETHRULES_error: 2715 free(packed, M_NVLIST); 2716 nvlist_destroy(nvl); 2717 break; 2718 } 2719 2720 case DIOCGETETHRULE: { 2721 struct epoch_tracker et; 2722 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2723 nvlist_t *nvl = NULL; 2724 void *nvlpacked = NULL; 2725 struct pf_keth_rule *rule = NULL; 2726 struct pf_keth_ruleset *rs; 2727 u_int32_t ticket, nr; 2728 bool clear = false; 2729 const char *anchor; 2730 2731 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2732 2733 if (nv->len > pf_ioctl_maxcount) 2734 ERROUT(ENOMEM); 2735 2736 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2737 if (nvlpacked == NULL) 2738 ERROUT(ENOMEM); 2739 2740 error = copyin(nv->data, nvlpacked, nv->len); 2741 if (error) 2742 ERROUT(error); 2743 2744 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2745 if (nvl == NULL) 2746 ERROUT(EBADMSG); 2747 if (! nvlist_exists_number(nvl, "ticket")) 2748 ERROUT(EBADMSG); 2749 ticket = nvlist_get_number(nvl, "ticket"); 2750 if (! nvlist_exists_string(nvl, "anchor")) 2751 ERROUT(EBADMSG); 2752 anchor = nvlist_get_string(nvl, "anchor"); 2753 2754 if (nvlist_exists_bool(nvl, "clear")) 2755 clear = nvlist_get_bool(nvl, "clear"); 2756 2757 if (clear && !(flags & FWRITE)) 2758 ERROUT(EACCES); 2759 2760 if (! nvlist_exists_number(nvl, "nr")) 2761 ERROUT(EBADMSG); 2762 nr = nvlist_get_number(nvl, "nr"); 2763 2764 PF_RULES_RLOCK(); 2765 rs = pf_find_keth_ruleset(anchor); 2766 if (rs == NULL) { 2767 PF_RULES_RUNLOCK(); 2768 ERROUT(ENOENT); 2769 } 2770 if (ticket != rs->active.ticket) { 2771 PF_RULES_RUNLOCK(); 2772 ERROUT(EBUSY); 2773 } 2774 2775 nvlist_destroy(nvl); 2776 nvl = NULL; 2777 free(nvlpacked, M_NVLIST); 2778 nvlpacked = NULL; 2779 2780 rule = TAILQ_FIRST(rs->active.rules); 2781 while ((rule != NULL) && (rule->nr != nr)) 2782 rule = TAILQ_NEXT(rule, entries); 2783 if (rule == NULL) { 2784 PF_RULES_RUNLOCK(); 2785 ERROUT(ENOENT); 2786 } 2787 /* Make sure rule can't go away. 
*/ 2788 NET_EPOCH_ENTER(et); 2789 PF_RULES_RUNLOCK(); 2790 nvl = pf_keth_rule_to_nveth_rule(rule); 2791 if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) 2792 ERROUT(EBUSY); 2793 NET_EPOCH_EXIT(et); 2794 if (nvl == NULL) 2795 ERROUT(ENOMEM); 2796 2797 nvlpacked = nvlist_pack(nvl, &nv->len); 2798 if (nvlpacked == NULL) 2799 ERROUT(ENOMEM); 2800 2801 if (nv->size == 0) 2802 ERROUT(0); 2803 else if (nv->size < nv->len) 2804 ERROUT(ENOSPC); 2805 2806 error = copyout(nvlpacked, nv->data, nv->len); 2807 if (error == 0 && clear) { 2808 counter_u64_zero(rule->evaluations); 2809 for (int i = 0; i < 2; i++) { 2810 counter_u64_zero(rule->packets[i]); 2811 counter_u64_zero(rule->bytes[i]); 2812 } 2813 } 2814 2815 #undef ERROUT 2816 DIOCGETETHRULE_error: 2817 free(nvlpacked, M_NVLIST); 2818 nvlist_destroy(nvl); 2819 break; 2820 } 2821 2822 case DIOCADDETHRULE: { 2823 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2824 nvlist_t *nvl = NULL; 2825 void *nvlpacked = NULL; 2826 struct pf_keth_rule *rule = NULL, *tail = NULL; 2827 struct pf_keth_ruleset *ruleset = NULL; 2828 struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL; 2829 const char *anchor = "", *anchor_call = ""; 2830 2831 #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) 2832 2833 if (nv->len > pf_ioctl_maxcount) 2834 ERROUT(ENOMEM); 2835 2836 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2837 if (nvlpacked == NULL) 2838 ERROUT(ENOMEM); 2839 2840 error = copyin(nv->data, nvlpacked, nv->len); 2841 if (error) 2842 ERROUT(error); 2843 2844 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2845 if (nvl == NULL) 2846 ERROUT(EBADMSG); 2847 2848 if (! nvlist_exists_number(nvl, "ticket")) 2849 ERROUT(EBADMSG); 2850 2851 if (nvlist_exists_string(nvl, "anchor")) 2852 anchor = nvlist_get_string(nvl, "anchor"); 2853 if (nvlist_exists_string(nvl, "anchor_call")) 2854 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2855 2856 ruleset = pf_find_keth_ruleset(anchor); 2857 if (ruleset == NULL) 2858 ERROUT(EINVAL); 2859 2860 if (nvlist_get_number(nvl, "ticket") != 2861 ruleset->inactive.ticket) { 2862 DPFPRINTF(PF_DEBUG_MISC, 2863 ("ticket: %d != %d\n", 2864 (u_int32_t)nvlist_get_number(nvl, "ticket"), 2865 ruleset->inactive.ticket)); 2866 ERROUT(EBUSY); 2867 } 2868 2869 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); 2870 if (rule == NULL) 2871 ERROUT(ENOMEM); 2872 rule->timestamp = NULL; 2873 2874 error = pf_nveth_rule_to_keth_rule(nvl, rule); 2875 if (error != 0) 2876 ERROUT(error); 2877 2878 if (rule->ifname[0]) 2879 kif = pf_kkif_create(M_WAITOK); 2880 if (rule->bridge_to_name[0]) 2881 bridge_to_kif = pf_kkif_create(M_WAITOK); 2882 rule->evaluations = counter_u64_alloc(M_WAITOK); 2883 for (int i = 0; i < 2; i++) { 2884 rule->packets[i] = counter_u64_alloc(M_WAITOK); 2885 rule->bytes[i] = counter_u64_alloc(M_WAITOK); 2886 } 2887 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 2888 M_WAITOK | M_ZERO); 2889 2890 PF_RULES_WLOCK(); 2891 2892 if (rule->ifname[0]) { 2893 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2894 pfi_kkif_ref(rule->kif); 2895 } else 2896 rule->kif = NULL; 2897 if (rule->bridge_to_name[0]) { 2898 rule->bridge_to = pfi_kkif_attach(bridge_to_kif, 2899 rule->bridge_to_name); 2900 pfi_kkif_ref(rule->bridge_to); 2901 } else 2902 rule->bridge_to = NULL; 2903 2904 #ifdef ALTQ 2905 /* set queue IDs */ 2906 if (rule->qname[0] != 0) { 2907 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2908 error = EBUSY; 2909 else 2910 rule->qid = rule->qid; 2911 } 2912 #endif 2913 if (rule->tagname[0]) 2914 if ((rule->tag = 
pf_tagname2tag(rule->tagname)) == 0) 2915 error = EBUSY; 2916 if (rule->match_tagname[0]) 2917 if ((rule->match_tag = pf_tagname2tag( 2918 rule->match_tagname)) == 0) 2919 error = EBUSY; 2920 2921 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2922 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2923 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2924 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2925 2926 if (error) { 2927 pf_free_eth_rule(rule); 2928 PF_RULES_WUNLOCK(); 2929 ERROUT(error); 2930 } 2931 2932 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2933 pf_free_eth_rule(rule); 2934 PF_RULES_WUNLOCK(); 2935 ERROUT(EINVAL); 2936 } 2937 2938 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2939 if (tail) 2940 rule->nr = tail->nr + 1; 2941 else 2942 rule->nr = 0; 2943 2944 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2945 2946 PF_RULES_WUNLOCK(); 2947 2948 #undef ERROUT 2949 DIOCADDETHRULE_error: 2950 nvlist_destroy(nvl); 2951 free(nvlpacked, M_NVLIST); 2952 break; 2953 } 2954 2955 case DIOCGETETHRULESETS: { 2956 struct epoch_tracker et; 2957 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2958 nvlist_t *nvl = NULL; 2959 void *nvlpacked = NULL; 2960 struct pf_keth_ruleset *ruleset; 2961 struct pf_keth_anchor *anchor; 2962 int nr = 0; 2963 2964 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2965 2966 if (nv->len > pf_ioctl_maxcount) 2967 ERROUT(ENOMEM); 2968 2969 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2970 if (nvlpacked == NULL) 2971 ERROUT(ENOMEM); 2972 2973 error = copyin(nv->data, nvlpacked, nv->len); 2974 if (error) 2975 ERROUT(error); 2976 2977 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2978 if (nvl == NULL) 2979 ERROUT(EBADMSG); 2980 if (! nvlist_exists_string(nvl, "path")) 2981 ERROUT(EBADMSG); 2982 2983 NET_EPOCH_ENTER(et); 2984 2985 if ((ruleset = pf_find_keth_ruleset( 2986 nvlist_get_string(nvl, "path"))) == NULL) { 2987 NET_EPOCH_EXIT(et); 2988 ERROUT(ENOENT); 2989 } 2990 2991 if (ruleset->anchor == NULL) { 2992 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2993 if (anchor->parent == NULL) 2994 nr++; 2995 } else { 2996 RB_FOREACH(anchor, pf_keth_anchor_node, 2997 &ruleset->anchor->children) 2998 nr++; 2999 } 3000 3001 NET_EPOCH_EXIT(et); 3002 3003 nvlist_destroy(nvl); 3004 nvl = NULL; 3005 free(nvlpacked, M_NVLIST); 3006 nvlpacked = NULL; 3007 3008 nvl = nvlist_create(0); 3009 if (nvl == NULL) 3010 ERROUT(ENOMEM); 3011 3012 nvlist_add_number(nvl, "nr", nr); 3013 3014 nvlpacked = nvlist_pack(nvl, &nv->len); 3015 if (nvlpacked == NULL) 3016 ERROUT(ENOMEM); 3017 3018 if (nv->size == 0) 3019 ERROUT(0); 3020 else if (nv->size < nv->len) 3021 ERROUT(ENOSPC); 3022 3023 error = copyout(nvlpacked, nv->data, nv->len); 3024 3025 #undef ERROUT 3026 DIOCGETETHRULESETS_error: 3027 free(nvlpacked, M_NVLIST); 3028 nvlist_destroy(nvl); 3029 break; 3030 } 3031 3032 case DIOCGETETHRULESET: { 3033 struct epoch_tracker et; 3034 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3035 nvlist_t *nvl = NULL; 3036 void *nvlpacked = NULL; 3037 struct pf_keth_ruleset *ruleset; 3038 struct pf_keth_anchor *anchor; 3039 int nr = 0, req_nr = 0; 3040 bool found = false; 3041 3042 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3043 3044 if (nv->len > pf_ioctl_maxcount) 3045 ERROUT(ENOMEM); 3046 3047 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3048 if (nvlpacked == NULL) 3049 ERROUT(ENOMEM); 3050 3051 error = copyin(nv->data, nvlpacked, nv->len); 3052 if (error) 3053 ERROUT(error); 3054 3055 
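/*
 * The request is a packed nvlist naming the anchor "path" and the
 * index "nr" of the child anchor to report; both fields are checked
 * below before the ruleset is looked up.
 */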
nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3056 if (nvl == NULL) 3057 ERROUT(EBADMSG); 3058 if (! nvlist_exists_string(nvl, "path")) 3059 ERROUT(EBADMSG); 3060 if (! nvlist_exists_number(nvl, "nr")) 3061 ERROUT(EBADMSG); 3062 3063 req_nr = nvlist_get_number(nvl, "nr"); 3064 3065 NET_EPOCH_ENTER(et); 3066 3067 if ((ruleset = pf_find_keth_ruleset( 3068 nvlist_get_string(nvl, "path"))) == NULL) { 3069 NET_EPOCH_EXIT(et); 3070 ERROUT(ENOENT); 3071 } 3072 3073 nvlist_destroy(nvl); 3074 nvl = NULL; 3075 free(nvlpacked, M_NVLIST); 3076 nvlpacked = NULL; 3077 3078 nvl = nvlist_create(0); 3079 if (nvl == NULL) { 3080 NET_EPOCH_EXIT(et); 3081 ERROUT(ENOMEM); 3082 } 3083 3084 if (ruleset->anchor == NULL) { 3085 RB_FOREACH(anchor, pf_keth_anchor_global, 3086 &V_pf_keth_anchors) { 3087 if (anchor->parent == NULL && nr++ == req_nr) { 3088 found = true; 3089 break; 3090 } 3091 } 3092 } else { 3093 RB_FOREACH(anchor, pf_keth_anchor_node, 3094 &ruleset->anchor->children) { 3095 if (nr++ == req_nr) { 3096 found = true; 3097 break; 3098 } 3099 } 3100 } 3101 3102 NET_EPOCH_EXIT(et); 3103 if (found) { 3104 nvlist_add_number(nvl, "nr", nr); 3105 nvlist_add_string(nvl, "name", anchor->name); 3106 if (ruleset->anchor) 3107 nvlist_add_string(nvl, "path", 3108 ruleset->anchor->path); 3109 else 3110 nvlist_add_string(nvl, "path", ""); 3111 } else { 3112 ERROUT(EBUSY); 3113 } 3114 3115 nvlpacked = nvlist_pack(nvl, &nv->len); 3116 if (nvlpacked == NULL) 3117 ERROUT(ENOMEM); 3118 3119 if (nv->size == 0) 3120 ERROUT(0); 3121 else if (nv->size < nv->len) 3122 ERROUT(ENOSPC); 3123 3124 error = copyout(nvlpacked, nv->data, nv->len); 3125 3126 #undef ERROUT 3127 DIOCGETETHRULESET_error: 3128 free(nvlpacked, M_NVLIST); 3129 nvlist_destroy(nvl); 3130 break; 3131 } 3132 3133 case DIOCADDRULENV: { 3134 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3135 nvlist_t *nvl = NULL; 3136 void *nvlpacked = NULL; 3137 struct pf_krule *rule = NULL; 3138 const char *anchor = "", *anchor_call = ""; 3139 uint32_t ticket = 0, pool_ticket = 0; 3140 3141 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3142 3143 if (nv->len > pf_ioctl_maxcount) 3144 ERROUT(ENOMEM); 3145 3146 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3147 error = copyin(nv->data, nvlpacked, nv->len); 3148 if (error) 3149 ERROUT(error); 3150 3151 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3152 if (nvl == NULL) 3153 ERROUT(EBADMSG); 3154 3155 if (! nvlist_exists_number(nvl, "ticket")) 3156 ERROUT(EINVAL); 3157 ticket = nvlist_get_number(nvl, "ticket"); 3158 3159 if (! nvlist_exists_number(nvl, "pool_ticket")) 3160 ERROUT(EINVAL); 3161 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3162 3163 if (! 
nvlist_exists_nvlist(nvl, "rule")) 3164 ERROUT(EINVAL); 3165 3166 rule = pf_krule_alloc(); 3167 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3168 rule); 3169 if (error) 3170 ERROUT(error); 3171 3172 if (nvlist_exists_string(nvl, "anchor")) 3173 anchor = nvlist_get_string(nvl, "anchor"); 3174 if (nvlist_exists_string(nvl, "anchor_call")) 3175 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3176 3177 if ((error = nvlist_error(nvl))) 3178 ERROUT(error); 3179 3180 /* Frees rule on error */ 3181 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3182 anchor_call, td); 3183 3184 nvlist_destroy(nvl); 3185 free(nvlpacked, M_NVLIST); 3186 break; 3187 #undef ERROUT 3188 DIOCADDRULENV_error: 3189 pf_krule_free(rule); 3190 nvlist_destroy(nvl); 3191 free(nvlpacked, M_NVLIST); 3192 3193 break; 3194 } 3195 case DIOCADDRULE: { 3196 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3197 struct pf_krule *rule; 3198 3199 rule = pf_krule_alloc(); 3200 error = pf_rule_to_krule(&pr->rule, rule); 3201 if (error != 0) { 3202 pf_krule_free(rule); 3203 break; 3204 } 3205 3206 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3207 3208 /* Frees rule on error */ 3209 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3210 pr->anchor, pr->anchor_call, td); 3211 break; 3212 } 3213 3214 case DIOCGETRULES: { 3215 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3216 struct pf_kruleset *ruleset; 3217 struct pf_krule *tail; 3218 int rs_num; 3219 3220 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3221 3222 PF_RULES_WLOCK(); 3223 ruleset = pf_find_kruleset(pr->anchor); 3224 if (ruleset == NULL) { 3225 PF_RULES_WUNLOCK(); 3226 error = EINVAL; 3227 break; 3228 } 3229 rs_num = pf_get_ruleset_number(pr->rule.action); 3230 if (rs_num >= PF_RULESET_MAX) { 3231 PF_RULES_WUNLOCK(); 3232 error = EINVAL; 3233 break; 3234 } 3235 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3236 pf_krulequeue); 3237 if (tail) 3238 pr->nr = tail->nr + 1; 3239 else 3240 pr->nr = 0; 3241 pr->ticket = ruleset->rules[rs_num].active.ticket; 3242 PF_RULES_WUNLOCK(); 3243 break; 3244 } 3245 3246 case DIOCGETRULE: { 3247 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3248 struct pf_kruleset *ruleset; 3249 struct pf_krule *rule; 3250 int rs_num; 3251 3252 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3253 3254 PF_RULES_WLOCK(); 3255 ruleset = pf_find_kruleset(pr->anchor); 3256 if (ruleset == NULL) { 3257 PF_RULES_WUNLOCK(); 3258 error = EINVAL; 3259 break; 3260 } 3261 rs_num = pf_get_ruleset_number(pr->rule.action); 3262 if (rs_num >= PF_RULESET_MAX) { 3263 PF_RULES_WUNLOCK(); 3264 error = EINVAL; 3265 break; 3266 } 3267 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 3268 PF_RULES_WUNLOCK(); 3269 error = EBUSY; 3270 break; 3271 } 3272 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3273 while ((rule != NULL) && (rule->nr != pr->nr)) 3274 rule = TAILQ_NEXT(rule, entries); 3275 if (rule == NULL) { 3276 PF_RULES_WUNLOCK(); 3277 error = EBUSY; 3278 break; 3279 } 3280 3281 pf_krule_to_rule(rule, &pr->rule); 3282 3283 if (pf_kanchor_copyout(ruleset, rule, pr)) { 3284 PF_RULES_WUNLOCK(); 3285 error = EBUSY; 3286 break; 3287 } 3288 pf_addr_copyout(&pr->rule.src.addr); 3289 pf_addr_copyout(&pr->rule.dst.addr); 3290 3291 if (pr->action == PF_GET_CLR_CNTR) { 3292 pf_counter_u64_zero(&rule->evaluations); 3293 for (int i = 0; i < 2; i++) { 3294 pf_counter_u64_zero(&rule->packets[i]); 3295 pf_counter_u64_zero(&rule->bytes[i]); 3296 } 3297 counter_u64_zero(rule->states_tot); 3298 } 3299 PF_RULES_WUNLOCK(); 3300 break; 3301 } 3302 
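/*
 * DIOCGETRULENV is the nvlist-based counterpart of DIOCGETRULE: the
 * request names the anchor, ruleset, ticket and rule number, and may
 * ask for the rule's counters to be cleared; the reply carries the
 * rule packed as an nvlist.
 */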
3303 case DIOCGETRULENV: { 3304 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3305 nvlist_t *nvrule = NULL; 3306 nvlist_t *nvl = NULL; 3307 struct pf_kruleset *ruleset; 3308 struct pf_krule *rule; 3309 void *nvlpacked = NULL; 3310 int rs_num, nr; 3311 bool clear_counter = false; 3312 3313 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3314 3315 if (nv->len > pf_ioctl_maxcount) 3316 ERROUT(ENOMEM); 3317 3318 /* Copy the request in */ 3319 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3320 if (nvlpacked == NULL) 3321 ERROUT(ENOMEM); 3322 3323 error = copyin(nv->data, nvlpacked, nv->len); 3324 if (error) 3325 ERROUT(error); 3326 3327 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3328 if (nvl == NULL) 3329 ERROUT(EBADMSG); 3330 3331 if (! nvlist_exists_string(nvl, "anchor")) 3332 ERROUT(EBADMSG); 3333 if (! nvlist_exists_number(nvl, "ruleset")) 3334 ERROUT(EBADMSG); 3335 if (! nvlist_exists_number(nvl, "ticket")) 3336 ERROUT(EBADMSG); 3337 if (! nvlist_exists_number(nvl, "nr")) 3338 ERROUT(EBADMSG); 3339 3340 if (nvlist_exists_bool(nvl, "clear_counter")) 3341 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3342 3343 if (clear_counter && !(flags & FWRITE)) 3344 ERROUT(EACCES); 3345 3346 nr = nvlist_get_number(nvl, "nr"); 3347 3348 PF_RULES_WLOCK(); 3349 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3350 if (ruleset == NULL) { 3351 PF_RULES_WUNLOCK(); 3352 ERROUT(ENOENT); 3353 } 3354 3355 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3356 if (rs_num >= PF_RULESET_MAX) { 3357 PF_RULES_WUNLOCK(); 3358 ERROUT(EINVAL); 3359 } 3360 3361 if (nvlist_get_number(nvl, "ticket") != 3362 ruleset->rules[rs_num].active.ticket) { 3363 PF_RULES_WUNLOCK(); 3364 ERROUT(EBUSY); 3365 } 3366 3367 if ((error = nvlist_error(nvl))) { 3368 PF_RULES_WUNLOCK(); 3369 ERROUT(error); 3370 } 3371 3372 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3373 while ((rule != NULL) && (rule->nr != nr)) 3374 rule = TAILQ_NEXT(rule, entries); 3375 if (rule == NULL) { 3376 PF_RULES_WUNLOCK(); 3377 ERROUT(EBUSY); 3378 } 3379 3380 nvrule = pf_krule_to_nvrule(rule); 3381 3382 nvlist_destroy(nvl); 3383 nvl = nvlist_create(0); 3384 if (nvl == NULL) { 3385 PF_RULES_WUNLOCK(); 3386 ERROUT(ENOMEM); 3387 } 3388 nvlist_add_number(nvl, "nr", nr); 3389 nvlist_add_nvlist(nvl, "rule", nvrule); 3390 nvlist_destroy(nvrule); 3391 nvrule = NULL; 3392 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3393 PF_RULES_WUNLOCK(); 3394 ERROUT(EBUSY); 3395 } 3396 3397 free(nvlpacked, M_NVLIST); 3398 nvlpacked = nvlist_pack(nvl, &nv->len); 3399 if (nvlpacked == NULL) { 3400 PF_RULES_WUNLOCK(); 3401 ERROUT(ENOMEM); 3402 } 3403 3404 if (nv->size == 0) { 3405 PF_RULES_WUNLOCK(); 3406 ERROUT(0); 3407 } 3408 else if (nv->size < nv->len) { 3409 PF_RULES_WUNLOCK(); 3410 ERROUT(ENOSPC); 3411 } 3412 3413 if (clear_counter) { 3414 pf_counter_u64_zero(&rule->evaluations); 3415 for (int i = 0; i < 2; i++) { 3416 pf_counter_u64_zero(&rule->packets[i]); 3417 pf_counter_u64_zero(&rule->bytes[i]); 3418 } 3419 counter_u64_zero(rule->states_tot); 3420 } 3421 PF_RULES_WUNLOCK(); 3422 3423 error = copyout(nvlpacked, nv->data, nv->len); 3424 3425 #undef ERROUT 3426 DIOCGETRULENV_error: 3427 free(nvlpacked, M_NVLIST); 3428 nvlist_destroy(nvrule); 3429 nvlist_destroy(nvl); 3430 3431 break; 3432 } 3433 3434 case DIOCCHANGERULE: { 3435 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3436 struct pf_kruleset *ruleset; 3437 struct pf_krule *oldrule = NULL, *newrule = NULL; 3438 struct pfi_kkif *kif = NULL; 3439 struct 
pf_kpooladdr *pa; 3440 u_int32_t nr = 0; 3441 int rs_num; 3442 3443 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3444 3445 if (pcr->action < PF_CHANGE_ADD_HEAD || 3446 pcr->action > PF_CHANGE_GET_TICKET) { 3447 error = EINVAL; 3448 break; 3449 } 3450 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3451 error = EINVAL; 3452 break; 3453 } 3454 3455 if (pcr->action != PF_CHANGE_REMOVE) { 3456 newrule = pf_krule_alloc(); 3457 error = pf_rule_to_krule(&pcr->rule, newrule); 3458 if (error != 0) { 3459 pf_krule_free(newrule); 3460 break; 3461 } 3462 3463 if (newrule->ifname[0]) 3464 kif = pf_kkif_create(M_WAITOK); 3465 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3466 for (int i = 0; i < 2; i++) { 3467 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3468 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3469 } 3470 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3471 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3472 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3473 newrule->cuid = td->td_ucred->cr_ruid; 3474 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3475 TAILQ_INIT(&newrule->rpool.list); 3476 } 3477 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3478 3479 PF_CONFIG_LOCK(); 3480 PF_RULES_WLOCK(); 3481 #ifdef PF_WANT_32_TO_64_COUNTER 3482 if (newrule != NULL) { 3483 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3484 newrule->allrulelinked = true; 3485 V_pf_allrulecount++; 3486 } 3487 #endif 3488 3489 if (!(pcr->action == PF_CHANGE_REMOVE || 3490 pcr->action == PF_CHANGE_GET_TICKET) && 3491 pcr->pool_ticket != V_ticket_pabuf) 3492 ERROUT(EBUSY); 3493 3494 ruleset = pf_find_kruleset(pcr->anchor); 3495 if (ruleset == NULL) 3496 ERROUT(EINVAL); 3497 3498 rs_num = pf_get_ruleset_number(pcr->rule.action); 3499 if (rs_num >= PF_RULESET_MAX) 3500 ERROUT(EINVAL); 3501 3502 /* 3503 * XXXMJG: there is no guarantee that the ruleset was 3504 * created by the usual route of calling DIOCXBEGIN. 3505 * As a result it is possible the rule tree will not 3506 * be allocated yet. Hack around it by doing it here. 3507 * Note it is fine to let the tree persist in case of 3508 * error as it will be freed down the road on future 3509 * updates (if need be). 
3510 */ 3511 if (ruleset->rules[rs_num].active.tree == NULL) { 3512 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3513 if (ruleset->rules[rs_num].active.tree == NULL) { 3514 ERROUT(ENOMEM); 3515 } 3516 } 3517 3518 if (pcr->action == PF_CHANGE_GET_TICKET) { 3519 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3520 ERROUT(0); 3521 } else if (pcr->ticket != 3522 ruleset->rules[rs_num].active.ticket) 3523 ERROUT(EINVAL); 3524 3525 if (pcr->action != PF_CHANGE_REMOVE) { 3526 if (newrule->ifname[0]) { 3527 newrule->kif = pfi_kkif_attach(kif, 3528 newrule->ifname); 3529 kif = NULL; 3530 pfi_kkif_ref(newrule->kif); 3531 } else 3532 newrule->kif = NULL; 3533 3534 if (newrule->rtableid > 0 && 3535 newrule->rtableid >= rt_numfibs) 3536 error = EBUSY; 3537 3538 #ifdef ALTQ 3539 /* set queue IDs */ 3540 if (newrule->qname[0] != 0) { 3541 if ((newrule->qid = 3542 pf_qname2qid(newrule->qname)) == 0) 3543 error = EBUSY; 3544 else if (newrule->pqname[0] != 0) { 3545 if ((newrule->pqid = 3546 pf_qname2qid(newrule->pqname)) == 0) 3547 error = EBUSY; 3548 } else 3549 newrule->pqid = newrule->qid; 3550 } 3551 #endif /* ALTQ */ 3552 if (newrule->tagname[0]) 3553 if ((newrule->tag = 3554 pf_tagname2tag(newrule->tagname)) == 0) 3555 error = EBUSY; 3556 if (newrule->match_tagname[0]) 3557 if ((newrule->match_tag = pf_tagname2tag( 3558 newrule->match_tagname)) == 0) 3559 error = EBUSY; 3560 if (newrule->rt && !newrule->direction) 3561 error = EINVAL; 3562 if (!newrule->log) 3563 newrule->logif = 0; 3564 if (newrule->logif >= PFLOGIFS_MAX) 3565 error = EINVAL; 3566 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3567 error = ENOMEM; 3568 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3569 error = ENOMEM; 3570 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3571 error = EINVAL; 3572 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3573 if (pa->addr.type == PF_ADDR_TABLE) { 3574 pa->addr.p.tbl = 3575 pfr_attach_table(ruleset, 3576 pa->addr.v.tblname); 3577 if (pa->addr.p.tbl == NULL) 3578 error = ENOMEM; 3579 } 3580 3581 newrule->overload_tbl = NULL; 3582 if (newrule->overload_tblname[0]) { 3583 if ((newrule->overload_tbl = pfr_attach_table( 3584 ruleset, newrule->overload_tblname)) == 3585 NULL) 3586 error = EINVAL; 3587 else 3588 newrule->overload_tbl->pfrkt_flags |= 3589 PFR_TFLAG_ACTIVE; 3590 } 3591 3592 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3593 if (((((newrule->action == PF_NAT) || 3594 (newrule->action == PF_RDR) || 3595 (newrule->action == PF_BINAT) || 3596 (newrule->rt > PF_NOPFROUTE)) && 3597 !newrule->anchor)) && 3598 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3599 error = EINVAL; 3600 3601 if (error) { 3602 pf_free_rule(newrule); 3603 PF_RULES_WUNLOCK(); 3604 PF_CONFIG_UNLOCK(); 3605 break; 3606 } 3607 3608 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3609 } 3610 pf_empty_kpool(&V_pf_pabuf); 3611 3612 if (pcr->action == PF_CHANGE_ADD_HEAD) 3613 oldrule = TAILQ_FIRST( 3614 ruleset->rules[rs_num].active.ptr); 3615 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3616 oldrule = TAILQ_LAST( 3617 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3618 else { 3619 oldrule = TAILQ_FIRST( 3620 ruleset->rules[rs_num].active.ptr); 3621 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3622 oldrule = TAILQ_NEXT(oldrule, entries); 3623 if (oldrule == NULL) { 3624 if (newrule != NULL) 3625 pf_free_rule(newrule); 3626 PF_RULES_WUNLOCK(); 3627 PF_CONFIG_UNLOCK(); 3628 error = EINVAL; 3629 break; 3630 } 3631 } 3632 3633 if (pcr->action == 
PF_CHANGE_REMOVE) { 3634 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3635 oldrule); 3636 RB_REMOVE(pf_krule_global, 3637 ruleset->rules[rs_num].active.tree, oldrule); 3638 ruleset->rules[rs_num].active.rcount--; 3639 } else { 3640 pf_hash_rule(newrule); 3641 if (RB_INSERT(pf_krule_global, 3642 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3643 pf_free_rule(newrule); 3644 PF_RULES_WUNLOCK(); 3645 PF_CONFIG_UNLOCK(); 3646 error = EEXIST; 3647 break; 3648 } 3649 3650 if (oldrule == NULL) 3651 TAILQ_INSERT_TAIL( 3652 ruleset->rules[rs_num].active.ptr, 3653 newrule, entries); 3654 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3655 pcr->action == PF_CHANGE_ADD_BEFORE) 3656 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3657 else 3658 TAILQ_INSERT_AFTER( 3659 ruleset->rules[rs_num].active.ptr, 3660 oldrule, newrule, entries); 3661 ruleset->rules[rs_num].active.rcount++; 3662 } 3663 3664 nr = 0; 3665 TAILQ_FOREACH(oldrule, 3666 ruleset->rules[rs_num].active.ptr, entries) 3667 oldrule->nr = nr++; 3668 3669 ruleset->rules[rs_num].active.ticket++; 3670 3671 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3672 pf_remove_if_empty_kruleset(ruleset); 3673 3674 PF_RULES_WUNLOCK(); 3675 PF_CONFIG_UNLOCK(); 3676 break; 3677 3678 #undef ERROUT 3679 DIOCCHANGERULE_error: 3680 PF_RULES_WUNLOCK(); 3681 PF_CONFIG_UNLOCK(); 3682 pf_krule_free(newrule); 3683 pf_kkif_free(kif); 3684 break; 3685 } 3686 3687 case DIOCCLRSTATES: { 3688 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3689 struct pf_kstate_kill kill; 3690 3691 error = pf_state_kill_to_kstate_kill(psk, &kill); 3692 if (error) 3693 break; 3694 3695 psk->psk_killed = pf_clear_states(&kill); 3696 break; 3697 } 3698 3699 case DIOCCLRSTATESNV: { 3700 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3701 break; 3702 } 3703 3704 case DIOCKILLSTATES: { 3705 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3706 struct pf_kstate_kill kill; 3707 3708 error = pf_state_kill_to_kstate_kill(psk, &kill); 3709 if (error) 3710 break; 3711 3712 psk->psk_killed = 0; 3713 pf_killstates(&kill, &psk->psk_killed); 3714 break; 3715 } 3716 3717 case DIOCKILLSTATESNV: { 3718 error = pf_killstates_nv((struct pfioc_nv *)addr); 3719 break; 3720 } 3721 3722 case DIOCADDSTATE: { 3723 struct pfioc_state *ps = (struct pfioc_state *)addr; 3724 struct pfsync_state_1301 *sp = &ps->state; 3725 3726 if (sp->timeout >= PFTM_MAX) { 3727 error = EINVAL; 3728 break; 3729 } 3730 if (V_pfsync_state_import_ptr != NULL) { 3731 PF_RULES_RLOCK(); 3732 error = V_pfsync_state_import_ptr( 3733 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, 3734 PFSYNC_MSG_VERSION_1301); 3735 PF_RULES_RUNLOCK(); 3736 } else 3737 error = EOPNOTSUPP; 3738 break; 3739 } 3740 3741 case DIOCGETSTATE: { 3742 struct pfioc_state *ps = (struct pfioc_state *)addr; 3743 struct pf_kstate *s; 3744 3745 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3746 if (s == NULL) { 3747 error = ENOENT; 3748 break; 3749 } 3750 3751 pfsync_state_export((union pfsync_state_union*)&ps->state, 3752 s, PFSYNC_MSG_VERSION_1301); 3753 PF_STATE_UNLOCK(s); 3754 break; 3755 } 3756 3757 case DIOCGETSTATENV: { 3758 error = pf_getstate((struct pfioc_nv *)addr); 3759 break; 3760 } 3761 3762 case DIOCGETSTATES: { 3763 struct pfioc_states *ps = (struct pfioc_states *)addr; 3764 struct pf_kstate *s; 3765 struct pfsync_state_1301 *pstore, *p; 3766 int i, nr; 3767 size_t slice_count = 16, count; 3768 void *out; 3769 3770 if (ps->ps_len <= 0) { 3771 nr = uma_zone_get_cur(V_pf_state_z); 3772 
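/*
 * A non-positive ps_len is a size probe: report how much buffer
 * space the current number of states would occupy and return
 * without copying any state out.
 */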
ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3773 break; 3774 } 3775 3776 out = ps->ps_states; 3777 pstore = mallocarray(slice_count, 3778 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); 3779 nr = 0; 3780 3781 for (i = 0; i <= pf_hashmask; i++) { 3782 struct pf_idhash *ih = &V_pf_idhash[i]; 3783 3784 DIOCGETSTATES_retry: 3785 p = pstore; 3786 3787 if (LIST_EMPTY(&ih->states)) 3788 continue; 3789 3790 PF_HASHROW_LOCK(ih); 3791 count = 0; 3792 LIST_FOREACH(s, &ih->states, entry) { 3793 if (s->timeout == PFTM_UNLINKED) 3794 continue; 3795 count++; 3796 } 3797 3798 if (count > slice_count) { 3799 PF_HASHROW_UNLOCK(ih); 3800 free(pstore, M_TEMP); 3801 slice_count = count * 2; 3802 pstore = mallocarray(slice_count, 3803 sizeof(struct pfsync_state_1301), M_TEMP, 3804 M_WAITOK | M_ZERO); 3805 goto DIOCGETSTATES_retry; 3806 } 3807 3808 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3809 PF_HASHROW_UNLOCK(ih); 3810 goto DIOCGETSTATES_full; 3811 } 3812 3813 LIST_FOREACH(s, &ih->states, entry) { 3814 if (s->timeout == PFTM_UNLINKED) 3815 continue; 3816 3817 pfsync_state_export((union pfsync_state_union*)p, 3818 s, PFSYNC_MSG_VERSION_1301); 3819 p++; 3820 nr++; 3821 } 3822 PF_HASHROW_UNLOCK(ih); 3823 error = copyout(pstore, out, 3824 sizeof(struct pfsync_state_1301) * count); 3825 if (error) 3826 break; 3827 out = ps->ps_states + nr; 3828 } 3829 DIOCGETSTATES_full: 3830 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3831 free(pstore, M_TEMP); 3832 3833 break; 3834 } 3835 3836 case DIOCGETSTATESV2: { 3837 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3838 struct pf_kstate *s; 3839 struct pf_state_export *pstore, *p; 3840 int i, nr; 3841 size_t slice_count = 16, count; 3842 void *out; 3843 3844 if (ps->ps_req_version > PF_STATE_VERSION) { 3845 error = ENOTSUP; 3846 break; 3847 } 3848 3849 if (ps->ps_len <= 0) { 3850 nr = uma_zone_get_cur(V_pf_state_z); 3851 ps->ps_len = sizeof(struct pf_state_export) * nr; 3852 break; 3853 } 3854 3855 out = ps->ps_states; 3856 pstore = mallocarray(slice_count, 3857 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3858 nr = 0; 3859 3860 for (i = 0; i <= pf_hashmask; i++) { 3861 struct pf_idhash *ih = &V_pf_idhash[i]; 3862 3863 DIOCGETSTATESV2_retry: 3864 p = pstore; 3865 3866 if (LIST_EMPTY(&ih->states)) 3867 continue; 3868 3869 PF_HASHROW_LOCK(ih); 3870 count = 0; 3871 LIST_FOREACH(s, &ih->states, entry) { 3872 if (s->timeout == PFTM_UNLINKED) 3873 continue; 3874 count++; 3875 } 3876 3877 if (count > slice_count) { 3878 PF_HASHROW_UNLOCK(ih); 3879 free(pstore, M_TEMP); 3880 slice_count = count * 2; 3881 pstore = mallocarray(slice_count, 3882 sizeof(struct pf_state_export), M_TEMP, 3883 M_WAITOK | M_ZERO); 3884 goto DIOCGETSTATESV2_retry; 3885 } 3886 3887 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3888 PF_HASHROW_UNLOCK(ih); 3889 goto DIOCGETSTATESV2_full; 3890 } 3891 3892 LIST_FOREACH(s, &ih->states, entry) { 3893 if (s->timeout == PFTM_UNLINKED) 3894 continue; 3895 3896 pf_state_export(p, s); 3897 p++; 3898 nr++; 3899 } 3900 PF_HASHROW_UNLOCK(ih); 3901 error = copyout(pstore, out, 3902 sizeof(struct pf_state_export) * count); 3903 if (error) 3904 break; 3905 out = ps->ps_states + nr; 3906 } 3907 DIOCGETSTATESV2_full: 3908 ps->ps_len = nr * sizeof(struct pf_state_export); 3909 free(pstore, M_TEMP); 3910 3911 break; 3912 } 3913 3914 case DIOCGETSTATUS: { 3915 struct pf_status *s = (struct pf_status *)addr; 3916 3917 PF_RULES_RLOCK(); 3918 s->running = V_pf_status.running; 3919 s->since = V_pf_status.since; 3920 s->debug = 
V_pf_status.debug; 3921 s->hostid = V_pf_status.hostid; 3922 s->states = V_pf_status.states; 3923 s->src_nodes = V_pf_status.src_nodes; 3924 3925 for (int i = 0; i < PFRES_MAX; i++) 3926 s->counters[i] = 3927 counter_u64_fetch(V_pf_status.counters[i]); 3928 for (int i = 0; i < LCNT_MAX; i++) 3929 s->lcounters[i] = 3930 counter_u64_fetch(V_pf_status.lcounters[i]); 3931 for (int i = 0; i < FCNT_MAX; i++) 3932 s->fcounters[i] = 3933 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3934 for (int i = 0; i < SCNT_MAX; i++) 3935 s->scounters[i] = 3936 counter_u64_fetch(V_pf_status.scounters[i]); 3937 3938 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3939 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3940 PF_MD5_DIGEST_LENGTH); 3941 3942 pfi_update_status(s->ifname, s); 3943 PF_RULES_RUNLOCK(); 3944 break; 3945 } 3946 3947 case DIOCGETSTATUSNV: { 3948 error = pf_getstatus((struct pfioc_nv *)addr); 3949 break; 3950 } 3951 3952 case DIOCSETSTATUSIF: { 3953 struct pfioc_if *pi = (struct pfioc_if *)addr; 3954 3955 if (pi->ifname[0] == 0) { 3956 bzero(V_pf_status.ifname, IFNAMSIZ); 3957 break; 3958 } 3959 PF_RULES_WLOCK(); 3960 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3961 PF_RULES_WUNLOCK(); 3962 break; 3963 } 3964 3965 case DIOCCLRSTATUS: { 3966 PF_RULES_WLOCK(); 3967 for (int i = 0; i < PFRES_MAX; i++) 3968 counter_u64_zero(V_pf_status.counters[i]); 3969 for (int i = 0; i < FCNT_MAX; i++) 3970 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3971 for (int i = 0; i < SCNT_MAX; i++) 3972 counter_u64_zero(V_pf_status.scounters[i]); 3973 for (int i = 0; i < KLCNT_MAX; i++) 3974 counter_u64_zero(V_pf_status.lcounters[i]); 3975 V_pf_status.since = time_second; 3976 if (*V_pf_status.ifname) 3977 pfi_update_status(V_pf_status.ifname, NULL); 3978 PF_RULES_WUNLOCK(); 3979 break; 3980 } 3981 3982 case DIOCNATLOOK: { 3983 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3984 struct pf_state_key *sk; 3985 struct pf_kstate *state; 3986 struct pf_state_key_cmp key; 3987 int m = 0, direction = pnl->direction; 3988 int sidx, didx; 3989 3990 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3991 sidx = (direction == PF_IN) ? 1 : 0; 3992 didx = (direction == PF_IN) ? 
0 : 1; 3993 3994 if (!pnl->proto || 3995 PF_AZERO(&pnl->saddr, pnl->af) || 3996 PF_AZERO(&pnl->daddr, pnl->af) || 3997 ((pnl->proto == IPPROTO_TCP || 3998 pnl->proto == IPPROTO_UDP) && 3999 (!pnl->dport || !pnl->sport))) 4000 error = EINVAL; 4001 else { 4002 bzero(&key, sizeof(key)); 4003 key.af = pnl->af; 4004 key.proto = pnl->proto; 4005 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 4006 key.port[sidx] = pnl->sport; 4007 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 4008 key.port[didx] = pnl->dport; 4009 4010 state = pf_find_state_all(&key, direction, &m); 4011 if (state == NULL) { 4012 error = ENOENT; 4013 } else { 4014 if (m > 1) { 4015 PF_STATE_UNLOCK(state); 4016 error = E2BIG; /* more than one state */ 4017 } else { 4018 sk = state->key[sidx]; 4019 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 4020 pnl->rsport = sk->port[sidx]; 4021 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 4022 pnl->rdport = sk->port[didx]; 4023 PF_STATE_UNLOCK(state); 4024 } 4025 } 4026 } 4027 break; 4028 } 4029 4030 case DIOCSETTIMEOUT: { 4031 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4032 int old; 4033 4034 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 4035 pt->seconds < 0) { 4036 error = EINVAL; 4037 break; 4038 } 4039 PF_RULES_WLOCK(); 4040 old = V_pf_default_rule.timeout[pt->timeout]; 4041 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 4042 pt->seconds = 1; 4043 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 4044 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 4045 wakeup(pf_purge_thread); 4046 pt->seconds = old; 4047 PF_RULES_WUNLOCK(); 4048 break; 4049 } 4050 4051 case DIOCGETTIMEOUT: { 4052 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4053 4054 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 4055 error = EINVAL; 4056 break; 4057 } 4058 PF_RULES_RLOCK(); 4059 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 4060 PF_RULES_RUNLOCK(); 4061 break; 4062 } 4063 4064 case DIOCGETLIMIT: { 4065 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4066 4067 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 4068 error = EINVAL; 4069 break; 4070 } 4071 PF_RULES_RLOCK(); 4072 pl->limit = V_pf_limits[pl->index].limit; 4073 PF_RULES_RUNLOCK(); 4074 break; 4075 } 4076 4077 case DIOCSETLIMIT: { 4078 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4079 int old_limit; 4080 4081 PF_RULES_WLOCK(); 4082 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 4083 V_pf_limits[pl->index].zone == NULL) { 4084 PF_RULES_WUNLOCK(); 4085 error = EINVAL; 4086 break; 4087 } 4088 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 4089 old_limit = V_pf_limits[pl->index].limit; 4090 V_pf_limits[pl->index].limit = pl->limit; 4091 pl->limit = old_limit; 4092 PF_RULES_WUNLOCK(); 4093 break; 4094 } 4095 4096 case DIOCSETDEBUG: { 4097 u_int32_t *level = (u_int32_t *)addr; 4098 4099 PF_RULES_WLOCK(); 4100 V_pf_status.debug = *level; 4101 PF_RULES_WUNLOCK(); 4102 break; 4103 } 4104 4105 case DIOCCLRRULECTRS: { 4106 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4107 struct pf_kruleset *ruleset = &pf_main_ruleset; 4108 struct pf_krule *rule; 4109 4110 PF_RULES_WLOCK(); 4111 TAILQ_FOREACH(rule, 4112 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4113 pf_counter_u64_zero(&rule->evaluations); 4114 for (int i = 0; i < 2; i++) { 4115 pf_counter_u64_zero(&rule->packets[i]); 4116 pf_counter_u64_zero(&rule->bytes[i]); 4117 } 4118 } 4119 PF_RULES_WUNLOCK(); 4120 break; 4121 } 4122 4123 case DIOCGIFSPEEDV0: 4124 case DIOCGIFSPEEDV1: { 4125 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 4126 struct pf_ifspeed_v1 ps; 4127 struct ifnet *ifp; 4128 4129 if (psp->ifname[0] == '\0') { 4130 error = EINVAL; 4131 break; 4132 } 4133 4134 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4135 if (error != 0) 4136 break; 4137 ifp = ifunit(ps.ifname); 4138 if (ifp != NULL) { 4139 psp->baudrate32 = 4140 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4141 if (cmd == DIOCGIFSPEEDV1) 4142 psp->baudrate = ifp->if_baudrate; 4143 } else { 4144 error = EINVAL; 4145 } 4146 break; 4147 } 4148 4149 #ifdef ALTQ 4150 case DIOCSTARTALTQ: { 4151 struct pf_altq *altq; 4152 4153 PF_RULES_WLOCK(); 4154 /* enable all altq interfaces on active list */ 4155 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4156 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4157 error = pf_enable_altq(altq); 4158 if (error != 0) 4159 break; 4160 } 4161 } 4162 if (error == 0) 4163 V_pf_altq_running = 1; 4164 PF_RULES_WUNLOCK(); 4165 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4166 break; 4167 } 4168 4169 case DIOCSTOPALTQ: { 4170 struct pf_altq *altq; 4171 4172 PF_RULES_WLOCK(); 4173 /* disable all altq interfaces on active list */ 4174 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4175 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4176 error = pf_disable_altq(altq); 4177 if (error != 0) 4178 break; 4179 } 4180 } 4181 if (error == 0) 4182 V_pf_altq_running = 0; 4183 PF_RULES_WUNLOCK(); 4184 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4185 break; 4186 } 4187 4188 case DIOCADDALTQV0: 4189 case DIOCADDALTQV1: { 4190 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4191 struct pf_altq *altq, *a; 4192 struct ifnet *ifp; 4193 4194 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4195 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4196 if (error) 4197 break; 4198 altq->local_flags = 0; 4199 4200 PF_RULES_WLOCK(); 4201 if (pa->ticket != V_ticket_altqs_inactive) { 4202 PF_RULES_WUNLOCK(); 4203 free(altq, M_PFALTQ); 4204 error = EBUSY; 4205 break; 4206 } 4207 4208 /* 4209 * if this is for a queue, find the discipline and 4210 * copy the necessary fields 4211 */ 4212 if (altq->qname[0] != 0) { 4213 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4214 PF_RULES_WUNLOCK(); 4215 error = EBUSY; 4216 free(altq, M_PFALTQ); 4217 break; 4218 } 4219 altq->altq_disc = NULL; 4220 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4221 if (strncmp(a->ifname, altq->ifname, 4222 IFNAMSIZ) == 0) { 4223 altq->altq_disc = a->altq_disc; 4224 break; 4225 } 4226 } 4227 } 4228 4229 if ((ifp = ifunit(altq->ifname)) == NULL) 4230 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4231 else 4232 error = altq_add(ifp, altq); 4233 4234 if (error) { 4235 PF_RULES_WUNLOCK(); 4236 free(altq, M_PFALTQ); 4237 break; 4238 } 4239 4240 if (altq->qname[0] != 0) 4241 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4242 else 4243 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4244 /* version error check done on import above */ 4245 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4246 PF_RULES_WUNLOCK(); 4247 break; 4248 } 4249 4250 case DIOCGETALTQSV0: 4251 case DIOCGETALTQSV1: { 4252 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4253 struct pf_altq *altq; 4254 4255 PF_RULES_RLOCK(); 4256 pa->nr = 0; 4257 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4258 pa->nr++; 4259 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4260 pa->nr++; 4261 pa->ticket = V_ticket_altqs_active; 4262 PF_RULES_RUNLOCK(); 4263 break; 4264 } 4265 4266 case DIOCGETALTQV0: 4267 
case DIOCGETALTQV1: { 4268 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4269 struct pf_altq *altq; 4270 4271 PF_RULES_RLOCK(); 4272 if (pa->ticket != V_ticket_altqs_active) { 4273 PF_RULES_RUNLOCK(); 4274 error = EBUSY; 4275 break; 4276 } 4277 altq = pf_altq_get_nth_active(pa->nr); 4278 if (altq == NULL) { 4279 PF_RULES_RUNLOCK(); 4280 error = EBUSY; 4281 break; 4282 } 4283 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4284 PF_RULES_RUNLOCK(); 4285 break; 4286 } 4287 4288 case DIOCCHANGEALTQV0: 4289 case DIOCCHANGEALTQV1: 4290 /* CHANGEALTQ not supported yet! */ 4291 error = ENODEV; 4292 break; 4293 4294 case DIOCGETQSTATSV0: 4295 case DIOCGETQSTATSV1: { 4296 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4297 struct pf_altq *altq; 4298 int nbytes; 4299 u_int32_t version; 4300 4301 PF_RULES_RLOCK(); 4302 if (pq->ticket != V_ticket_altqs_active) { 4303 PF_RULES_RUNLOCK(); 4304 error = EBUSY; 4305 break; 4306 } 4307 nbytes = pq->nbytes; 4308 altq = pf_altq_get_nth_active(pq->nr); 4309 if (altq == NULL) { 4310 PF_RULES_RUNLOCK(); 4311 error = EBUSY; 4312 break; 4313 } 4314 4315 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4316 PF_RULES_RUNLOCK(); 4317 error = ENXIO; 4318 break; 4319 } 4320 PF_RULES_RUNLOCK(); 4321 if (cmd == DIOCGETQSTATSV0) 4322 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4323 else 4324 version = pq->version; 4325 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4326 if (error == 0) { 4327 pq->scheduler = altq->scheduler; 4328 pq->nbytes = nbytes; 4329 } 4330 break; 4331 } 4332 #endif /* ALTQ */ 4333 4334 case DIOCBEGINADDRS: { 4335 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4336 4337 PF_RULES_WLOCK(); 4338 pf_empty_kpool(&V_pf_pabuf); 4339 pp->ticket = ++V_ticket_pabuf; 4340 PF_RULES_WUNLOCK(); 4341 break; 4342 } 4343 4344 case DIOCADDADDR: { 4345 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4346 struct pf_kpooladdr *pa; 4347 struct pfi_kkif *kif = NULL; 4348 4349 #ifndef INET 4350 if (pp->af == AF_INET) { 4351 error = EAFNOSUPPORT; 4352 break; 4353 } 4354 #endif /* INET */ 4355 #ifndef INET6 4356 if (pp->af == AF_INET6) { 4357 error = EAFNOSUPPORT; 4358 break; 4359 } 4360 #endif /* INET6 */ 4361 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4362 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4363 pp->addr.addr.type != PF_ADDR_TABLE) { 4364 error = EINVAL; 4365 break; 4366 } 4367 if (pp->addr.addr.p.dyn != NULL) { 4368 error = EINVAL; 4369 break; 4370 } 4371 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4372 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4373 if (error != 0) 4374 break; 4375 if (pa->ifname[0]) 4376 kif = pf_kkif_create(M_WAITOK); 4377 PF_RULES_WLOCK(); 4378 if (pp->ticket != V_ticket_pabuf) { 4379 PF_RULES_WUNLOCK(); 4380 if (pa->ifname[0]) 4381 pf_kkif_free(kif); 4382 free(pa, M_PFRULE); 4383 error = EBUSY; 4384 break; 4385 } 4386 if (pa->ifname[0]) { 4387 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4388 kif = NULL; 4389 pfi_kkif_ref(pa->kif); 4390 } else 4391 pa->kif = NULL; 4392 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4393 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4394 if (pa->ifname[0]) 4395 pfi_kkif_unref(pa->kif); 4396 PF_RULES_WUNLOCK(); 4397 free(pa, M_PFRULE); 4398 break; 4399 } 4400 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4401 PF_RULES_WUNLOCK(); 4402 break; 4403 } 4404 4405 case DIOCGETADDRS: { 4406 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4407 struct pf_kpool *pool; 4408 struct pf_kpooladdr *pa; 4409 4410 
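/* Look up the pool selected by anchor, ticket, rule action and rule number, and report the number of addresses in it via pp->nr so the caller can fetch them one at a time with DIOCGETADDR. */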
pp->anchor[sizeof(pp->anchor) - 1] = 0; 4411 pp->nr = 0; 4412 4413 PF_RULES_RLOCK(); 4414 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4415 pp->r_num, 0, 1, 0); 4416 if (pool == NULL) { 4417 PF_RULES_RUNLOCK(); 4418 error = EBUSY; 4419 break; 4420 } 4421 TAILQ_FOREACH(pa, &pool->list, entries) 4422 pp->nr++; 4423 PF_RULES_RUNLOCK(); 4424 break; 4425 } 4426 4427 case DIOCGETADDR: { 4428 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4429 struct pf_kpool *pool; 4430 struct pf_kpooladdr *pa; 4431 u_int32_t nr = 0; 4432 4433 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4434 4435 PF_RULES_RLOCK(); 4436 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4437 pp->r_num, 0, 1, 1); 4438 if (pool == NULL) { 4439 PF_RULES_RUNLOCK(); 4440 error = EBUSY; 4441 break; 4442 } 4443 pa = TAILQ_FIRST(&pool->list); 4444 while ((pa != NULL) && (nr < pp->nr)) { 4445 pa = TAILQ_NEXT(pa, entries); 4446 nr++; 4447 } 4448 if (pa == NULL) { 4449 PF_RULES_RUNLOCK(); 4450 error = EBUSY; 4451 break; 4452 } 4453 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4454 pf_addr_copyout(&pp->addr.addr); 4455 PF_RULES_RUNLOCK(); 4456 break; 4457 } 4458 4459 case DIOCCHANGEADDR: { 4460 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4461 struct pf_kpool *pool; 4462 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4463 struct pf_kruleset *ruleset; 4464 struct pfi_kkif *kif = NULL; 4465 4466 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4467 4468 if (pca->action < PF_CHANGE_ADD_HEAD || 4469 pca->action > PF_CHANGE_REMOVE) { 4470 error = EINVAL; 4471 break; 4472 } 4473 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4474 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4475 pca->addr.addr.type != PF_ADDR_TABLE) { 4476 error = EINVAL; 4477 break; 4478 } 4479 if (pca->addr.addr.p.dyn != NULL) { 4480 error = EINVAL; 4481 break; 4482 } 4483 4484 if (pca->action != PF_CHANGE_REMOVE) { 4485 #ifndef INET 4486 if (pca->af == AF_INET) { 4487 error = EAFNOSUPPORT; 4488 break; 4489 } 4490 #endif /* INET */ 4491 #ifndef INET6 4492 if (pca->af == AF_INET6) { 4493 error = EAFNOSUPPORT; 4494 break; 4495 } 4496 #endif /* INET6 */ 4497 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4498 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4499 if (newpa->ifname[0]) 4500 kif = pf_kkif_create(M_WAITOK); 4501 newpa->kif = NULL; 4502 } 4503 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4504 PF_RULES_WLOCK(); 4505 ruleset = pf_find_kruleset(pca->anchor); 4506 if (ruleset == NULL) 4507 ERROUT(EBUSY); 4508 4509 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4510 pca->r_num, pca->r_last, 1, 1); 4511 if (pool == NULL) 4512 ERROUT(EBUSY); 4513 4514 if (pca->action != PF_CHANGE_REMOVE) { 4515 if (newpa->ifname[0]) { 4516 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4517 pfi_kkif_ref(newpa->kif); 4518 kif = NULL; 4519 } 4520 4521 switch (newpa->addr.type) { 4522 case PF_ADDR_DYNIFTL: 4523 error = pfi_dynaddr_setup(&newpa->addr, 4524 pca->af); 4525 break; 4526 case PF_ADDR_TABLE: 4527 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4528 newpa->addr.v.tblname); 4529 if (newpa->addr.p.tbl == NULL) 4530 error = ENOMEM; 4531 break; 4532 } 4533 if (error) 4534 goto DIOCCHANGEADDR_error; 4535 } 4536 4537 switch (pca->action) { 4538 case PF_CHANGE_ADD_HEAD: 4539 oldpa = TAILQ_FIRST(&pool->list); 4540 break; 4541 case PF_CHANGE_ADD_TAIL: 4542 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4543 break; 4544 default: 4545 oldpa = TAILQ_FIRST(&pool->list); 4546 for (int i = 0; oldpa && i < pca->nr; i++) 4547 oldpa = 
TAILQ_NEXT(oldpa, entries); 4548 4549 if (oldpa == NULL) 4550 ERROUT(EINVAL); 4551 } 4552 4553 if (pca->action == PF_CHANGE_REMOVE) { 4554 TAILQ_REMOVE(&pool->list, oldpa, entries); 4555 switch (oldpa->addr.type) { 4556 case PF_ADDR_DYNIFTL: 4557 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4558 break; 4559 case PF_ADDR_TABLE: 4560 pfr_detach_table(oldpa->addr.p.tbl); 4561 break; 4562 } 4563 if (oldpa->kif) 4564 pfi_kkif_unref(oldpa->kif); 4565 free(oldpa, M_PFRULE); 4566 } else { 4567 if (oldpa == NULL) 4568 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4569 else if (pca->action == PF_CHANGE_ADD_HEAD || 4570 pca->action == PF_CHANGE_ADD_BEFORE) 4571 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4572 else 4573 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4574 newpa, entries); 4575 } 4576 4577 pool->cur = TAILQ_FIRST(&pool->list); 4578 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4579 PF_RULES_WUNLOCK(); 4580 break; 4581 4582 #undef ERROUT 4583 DIOCCHANGEADDR_error: 4584 if (newpa != NULL) { 4585 if (newpa->kif) 4586 pfi_kkif_unref(newpa->kif); 4587 free(newpa, M_PFRULE); 4588 } 4589 PF_RULES_WUNLOCK(); 4590 pf_kkif_free(kif); 4591 break; 4592 } 4593 4594 case DIOCGETRULESETS: { 4595 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4596 struct pf_kruleset *ruleset; 4597 struct pf_kanchor *anchor; 4598 4599 pr->path[sizeof(pr->path) - 1] = 0; 4600 4601 PF_RULES_RLOCK(); 4602 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4603 PF_RULES_RUNLOCK(); 4604 error = ENOENT; 4605 break; 4606 } 4607 pr->nr = 0; 4608 if (ruleset->anchor == NULL) { 4609 /* XXX kludge for pf_main_ruleset */ 4610 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4611 if (anchor->parent == NULL) 4612 pr->nr++; 4613 } else { 4614 RB_FOREACH(anchor, pf_kanchor_node, 4615 &ruleset->anchor->children) 4616 pr->nr++; 4617 } 4618 PF_RULES_RUNLOCK(); 4619 break; 4620 } 4621 4622 case DIOCGETRULESET: { 4623 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4624 struct pf_kruleset *ruleset; 4625 struct pf_kanchor *anchor; 4626 u_int32_t nr = 0; 4627 4628 pr->path[sizeof(pr->path) - 1] = 0; 4629 4630 PF_RULES_RLOCK(); 4631 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4632 PF_RULES_RUNLOCK(); 4633 error = ENOENT; 4634 break; 4635 } 4636 pr->name[0] = 0; 4637 if (ruleset->anchor == NULL) { 4638 /* XXX kludge for pf_main_ruleset */ 4639 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4640 if (anchor->parent == NULL && nr++ == pr->nr) { 4641 strlcpy(pr->name, anchor->name, 4642 sizeof(pr->name)); 4643 break; 4644 } 4645 } else { 4646 RB_FOREACH(anchor, pf_kanchor_node, 4647 &ruleset->anchor->children) 4648 if (nr++ == pr->nr) { 4649 strlcpy(pr->name, anchor->name, 4650 sizeof(pr->name)); 4651 break; 4652 } 4653 } 4654 if (!pr->name[0]) 4655 error = EBUSY; 4656 PF_RULES_RUNLOCK(); 4657 break; 4658 } 4659 4660 case DIOCRCLRTABLES: { 4661 struct pfioc_table *io = (struct pfioc_table *)addr; 4662 4663 if (io->pfrio_esize != 0) { 4664 error = ENODEV; 4665 break; 4666 } 4667 PF_RULES_WLOCK(); 4668 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4669 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4670 PF_RULES_WUNLOCK(); 4671 break; 4672 } 4673 4674 case DIOCRADDTABLES: { 4675 struct pfioc_table *io = (struct pfioc_table *)addr; 4676 struct pfr_table *pfrts; 4677 size_t totlen; 4678 4679 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4680 error = ENODEV; 4681 break; 4682 } 4683 4684 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4685 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct 
pfr_table))) { 4686 error = ENOMEM; 4687 break; 4688 } 4689 4690 totlen = io->pfrio_size * sizeof(struct pfr_table); 4691 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4692 M_TEMP, M_WAITOK); 4693 error = copyin(io->pfrio_buffer, pfrts, totlen); 4694 if (error) { 4695 free(pfrts, M_TEMP); 4696 break; 4697 } 4698 PF_RULES_WLOCK(); 4699 error = pfr_add_tables(pfrts, io->pfrio_size, 4700 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4701 PF_RULES_WUNLOCK(); 4702 free(pfrts, M_TEMP); 4703 break; 4704 } 4705 4706 case DIOCRDELTABLES: { 4707 struct pfioc_table *io = (struct pfioc_table *)addr; 4708 struct pfr_table *pfrts; 4709 size_t totlen; 4710 4711 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4712 error = ENODEV; 4713 break; 4714 } 4715 4716 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4717 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4718 error = ENOMEM; 4719 break; 4720 } 4721 4722 totlen = io->pfrio_size * sizeof(struct pfr_table); 4723 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4724 M_TEMP, M_WAITOK); 4725 error = copyin(io->pfrio_buffer, pfrts, totlen); 4726 if (error) { 4727 free(pfrts, M_TEMP); 4728 break; 4729 } 4730 PF_RULES_WLOCK(); 4731 error = pfr_del_tables(pfrts, io->pfrio_size, 4732 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4733 PF_RULES_WUNLOCK(); 4734 free(pfrts, M_TEMP); 4735 break; 4736 } 4737 4738 case DIOCRGETTABLES: { 4739 struct pfioc_table *io = (struct pfioc_table *)addr; 4740 struct pfr_table *pfrts; 4741 size_t totlen; 4742 int n; 4743 4744 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4745 error = ENODEV; 4746 break; 4747 } 4748 PF_RULES_RLOCK(); 4749 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4750 if (n < 0) { 4751 PF_RULES_RUNLOCK(); 4752 error = EINVAL; 4753 break; 4754 } 4755 io->pfrio_size = min(io->pfrio_size, n); 4756 4757 totlen = io->pfrio_size * sizeof(struct pfr_table); 4758 4759 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4760 M_TEMP, M_NOWAIT | M_ZERO); 4761 if (pfrts == NULL) { 4762 error = ENOMEM; 4763 PF_RULES_RUNLOCK(); 4764 break; 4765 } 4766 error = pfr_get_tables(&io->pfrio_table, pfrts, 4767 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4768 PF_RULES_RUNLOCK(); 4769 if (error == 0) 4770 error = copyout(pfrts, io->pfrio_buffer, totlen); 4771 free(pfrts, M_TEMP); 4772 break; 4773 } 4774 4775 case DIOCRGETTSTATS: { 4776 struct pfioc_table *io = (struct pfioc_table *)addr; 4777 struct pfr_tstats *pfrtstats; 4778 size_t totlen; 4779 int n; 4780 4781 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4782 error = ENODEV; 4783 break; 4784 } 4785 PF_TABLE_STATS_LOCK(); 4786 PF_RULES_RLOCK(); 4787 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4788 if (n < 0) { 4789 PF_RULES_RUNLOCK(); 4790 PF_TABLE_STATS_UNLOCK(); 4791 error = EINVAL; 4792 break; 4793 } 4794 io->pfrio_size = min(io->pfrio_size, n); 4795 4796 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4797 pfrtstats = mallocarray(io->pfrio_size, 4798 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4799 if (pfrtstats == NULL) { 4800 error = ENOMEM; 4801 PF_RULES_RUNLOCK(); 4802 PF_TABLE_STATS_UNLOCK(); 4803 break; 4804 } 4805 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4806 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4807 PF_RULES_RUNLOCK(); 4808 PF_TABLE_STATS_UNLOCK(); 4809 if (error == 0) 4810 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4811 free(pfrtstats, M_TEMP); 4812 break; 4813 } 4814 4815 case 
DIOCRCLRTSTATS: { 4816 struct pfioc_table *io = (struct pfioc_table *)addr; 4817 struct pfr_table *pfrts; 4818 size_t totlen; 4819 4820 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4821 error = ENODEV; 4822 break; 4823 } 4824 4825 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4826 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4827 /* We used to count tables and use the minimum required 4828 * size, so we didn't fail on overly large requests. 4829 * Keep doing so. */ 4830 io->pfrio_size = pf_ioctl_maxcount; 4831 break; 4832 } 4833 4834 totlen = io->pfrio_size * sizeof(struct pfr_table); 4835 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4836 M_TEMP, M_WAITOK); 4837 error = copyin(io->pfrio_buffer, pfrts, totlen); 4838 if (error) { 4839 free(pfrts, M_TEMP); 4840 break; 4841 } 4842 4843 PF_TABLE_STATS_LOCK(); 4844 PF_RULES_RLOCK(); 4845 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4846 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4847 PF_RULES_RUNLOCK(); 4848 PF_TABLE_STATS_UNLOCK(); 4849 free(pfrts, M_TEMP); 4850 break; 4851 } 4852 4853 case DIOCRSETTFLAGS: { 4854 struct pfioc_table *io = (struct pfioc_table *)addr; 4855 struct pfr_table *pfrts; 4856 size_t totlen; 4857 int n; 4858 4859 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4860 error = ENODEV; 4861 break; 4862 } 4863 4864 PF_RULES_RLOCK(); 4865 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4866 if (n < 0) { 4867 PF_RULES_RUNLOCK(); 4868 error = EINVAL; 4869 break; 4870 } 4871 4872 io->pfrio_size = min(io->pfrio_size, n); 4873 PF_RULES_RUNLOCK(); 4874 4875 totlen = io->pfrio_size * sizeof(struct pfr_table); 4876 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4877 M_TEMP, M_WAITOK); 4878 error = copyin(io->pfrio_buffer, pfrts, totlen); 4879 if (error) { 4880 free(pfrts, M_TEMP); 4881 break; 4882 } 4883 PF_RULES_WLOCK(); 4884 error = pfr_set_tflags(pfrts, io->pfrio_size, 4885 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4886 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4887 PF_RULES_WUNLOCK(); 4888 free(pfrts, M_TEMP); 4889 break; 4890 } 4891 4892 case DIOCRCLRADDRS: { 4893 struct pfioc_table *io = (struct pfioc_table *)addr; 4894 4895 if (io->pfrio_esize != 0) { 4896 error = ENODEV; 4897 break; 4898 } 4899 PF_RULES_WLOCK(); 4900 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4901 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4902 PF_RULES_WUNLOCK(); 4903 break; 4904 } 4905 4906 case DIOCRADDADDRS: { 4907 struct pfioc_table *io = (struct pfioc_table *)addr; 4908 struct pfr_addr *pfras; 4909 size_t totlen; 4910 4911 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4912 error = ENODEV; 4913 break; 4914 } 4915 if (io->pfrio_size < 0 || 4916 io->pfrio_size > pf_ioctl_maxcount || 4917 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4918 error = EINVAL; 4919 break; 4920 } 4921 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4922 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4923 M_TEMP, M_WAITOK); 4924 error = copyin(io->pfrio_buffer, pfras, totlen); 4925 if (error) { 4926 free(pfras, M_TEMP); 4927 break; 4928 } 4929 PF_RULES_WLOCK(); 4930 error = pfr_add_addrs(&io->pfrio_table, pfras, 4931 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4932 PFR_FLAG_USERIOCTL); 4933 PF_RULES_WUNLOCK(); 4934 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4935 error = copyout(pfras, io->pfrio_buffer, totlen); 4936 free(pfras, M_TEMP); 4937 break; 4938 } 4939 4940 case DIOCRDELADDRS: { 4941 struct 
pfioc_table *io = (struct pfioc_table *)addr; 4942 struct pfr_addr *pfras; 4943 size_t totlen; 4944 4945 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4946 error = ENODEV; 4947 break; 4948 } 4949 if (io->pfrio_size < 0 || 4950 io->pfrio_size > pf_ioctl_maxcount || 4951 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4952 error = EINVAL; 4953 break; 4954 } 4955 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4956 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4957 M_TEMP, M_WAITOK); 4958 error = copyin(io->pfrio_buffer, pfras, totlen); 4959 if (error) { 4960 free(pfras, M_TEMP); 4961 break; 4962 } 4963 PF_RULES_WLOCK(); 4964 error = pfr_del_addrs(&io->pfrio_table, pfras, 4965 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4966 PFR_FLAG_USERIOCTL); 4967 PF_RULES_WUNLOCK(); 4968 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4969 error = copyout(pfras, io->pfrio_buffer, totlen); 4970 free(pfras, M_TEMP); 4971 break; 4972 } 4973 4974 case DIOCRSETADDRS: { 4975 struct pfioc_table *io = (struct pfioc_table *)addr; 4976 struct pfr_addr *pfras; 4977 size_t totlen, count; 4978 4979 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4980 error = ENODEV; 4981 break; 4982 } 4983 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4984 error = EINVAL; 4985 break; 4986 } 4987 count = max(io->pfrio_size, io->pfrio_size2); 4988 if (count > pf_ioctl_maxcount || 4989 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4990 error = EINVAL; 4991 break; 4992 } 4993 totlen = count * sizeof(struct pfr_addr); 4994 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4995 M_WAITOK); 4996 error = copyin(io->pfrio_buffer, pfras, totlen); 4997 if (error) { 4998 free(pfras, M_TEMP); 4999 break; 5000 } 5001 PF_RULES_WLOCK(); 5002 error = pfr_set_addrs(&io->pfrio_table, pfras, 5003 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 5004 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 5005 PFR_FLAG_USERIOCTL, 0); 5006 PF_RULES_WUNLOCK(); 5007 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 5008 error = copyout(pfras, io->pfrio_buffer, totlen); 5009 free(pfras, M_TEMP); 5010 break; 5011 } 5012 5013 case DIOCRGETADDRS: { 5014 struct pfioc_table *io = (struct pfioc_table *)addr; 5015 struct pfr_addr *pfras; 5016 size_t totlen; 5017 5018 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5019 error = ENODEV; 5020 break; 5021 } 5022 if (io->pfrio_size < 0 || 5023 io->pfrio_size > pf_ioctl_maxcount || 5024 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5025 error = EINVAL; 5026 break; 5027 } 5028 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5029 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5030 M_TEMP, M_WAITOK | M_ZERO); 5031 PF_RULES_RLOCK(); 5032 error = pfr_get_addrs(&io->pfrio_table, pfras, 5033 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5034 PF_RULES_RUNLOCK(); 5035 if (error == 0) 5036 error = copyout(pfras, io->pfrio_buffer, totlen); 5037 free(pfras, M_TEMP); 5038 break; 5039 } 5040 5041 case DIOCRGETASTATS: { 5042 struct pfioc_table *io = (struct pfioc_table *)addr; 5043 struct pfr_astats *pfrastats; 5044 size_t totlen; 5045 5046 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 5047 error = ENODEV; 5048 break; 5049 } 5050 if (io->pfrio_size < 0 || 5051 io->pfrio_size > pf_ioctl_maxcount || 5052 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 5053 error = EINVAL; 5054 break; 5055 } 5056 totlen = io->pfrio_size * sizeof(struct pfr_astats); 5057 pfrastats = mallocarray(io->pfrio_size, 5058 
sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 5059 PF_RULES_RLOCK(); 5060 error = pfr_get_astats(&io->pfrio_table, pfrastats, 5061 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5062 PF_RULES_RUNLOCK(); 5063 if (error == 0) 5064 error = copyout(pfrastats, io->pfrio_buffer, totlen); 5065 free(pfrastats, M_TEMP); 5066 break; 5067 } 5068 5069 case DIOCRCLRASTATS: { 5070 struct pfioc_table *io = (struct pfioc_table *)addr; 5071 struct pfr_addr *pfras; 5072 size_t totlen; 5073 5074 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5075 error = ENODEV; 5076 break; 5077 } 5078 if (io->pfrio_size < 0 || 5079 io->pfrio_size > pf_ioctl_maxcount || 5080 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5081 error = EINVAL; 5082 break; 5083 } 5084 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5085 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5086 M_TEMP, M_WAITOK); 5087 error = copyin(io->pfrio_buffer, pfras, totlen); 5088 if (error) { 5089 free(pfras, M_TEMP); 5090 break; 5091 } 5092 PF_RULES_WLOCK(); 5093 error = pfr_clr_astats(&io->pfrio_table, pfras, 5094 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 5095 PFR_FLAG_USERIOCTL); 5096 PF_RULES_WUNLOCK(); 5097 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 5098 error = copyout(pfras, io->pfrio_buffer, totlen); 5099 free(pfras, M_TEMP); 5100 break; 5101 } 5102 5103 case DIOCRTSTADDRS: { 5104 struct pfioc_table *io = (struct pfioc_table *)addr; 5105 struct pfr_addr *pfras; 5106 size_t totlen; 5107 5108 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5109 error = ENODEV; 5110 break; 5111 } 5112 if (io->pfrio_size < 0 || 5113 io->pfrio_size > pf_ioctl_maxcount || 5114 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5115 error = EINVAL; 5116 break; 5117 } 5118 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5119 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5120 M_TEMP, M_WAITOK); 5121 error = copyin(io->pfrio_buffer, pfras, totlen); 5122 if (error) { 5123 free(pfras, M_TEMP); 5124 break; 5125 } 5126 PF_RULES_RLOCK(); 5127 error = pfr_tst_addrs(&io->pfrio_table, pfras, 5128 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 5129 PFR_FLAG_USERIOCTL); 5130 PF_RULES_RUNLOCK(); 5131 if (error == 0) 5132 error = copyout(pfras, io->pfrio_buffer, totlen); 5133 free(pfras, M_TEMP); 5134 break; 5135 } 5136 5137 case DIOCRINADEFINE: { 5138 struct pfioc_table *io = (struct pfioc_table *)addr; 5139 struct pfr_addr *pfras; 5140 size_t totlen; 5141 5142 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5143 error = ENODEV; 5144 break; 5145 } 5146 if (io->pfrio_size < 0 || 5147 io->pfrio_size > pf_ioctl_maxcount || 5148 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5149 error = EINVAL; 5150 break; 5151 } 5152 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5153 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5154 M_TEMP, M_WAITOK); 5155 error = copyin(io->pfrio_buffer, pfras, totlen); 5156 if (error) { 5157 free(pfras, M_TEMP); 5158 break; 5159 } 5160 PF_RULES_WLOCK(); 5161 error = pfr_ina_define(&io->pfrio_table, pfras, 5162 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 5163 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5164 PF_RULES_WUNLOCK(); 5165 free(pfras, M_TEMP); 5166 break; 5167 } 5168 5169 case DIOCOSFPADD: { 5170 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5171 PF_RULES_WLOCK(); 5172 error = pf_osfp_add(io); 5173 PF_RULES_WUNLOCK(); 5174 break; 5175 } 5176 5177 case DIOCOSFPGET: { 5178 struct pf_osfp_ioctl *io 
= (struct pf_osfp_ioctl *)addr; 5179 PF_RULES_RLOCK(); 5180 error = pf_osfp_get(io); 5181 PF_RULES_RUNLOCK(); 5182 break; 5183 } 5184 5185 case DIOCXBEGIN: { 5186 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5187 struct pfioc_trans_e *ioes, *ioe; 5188 size_t totlen; 5189 int i; 5190 5191 if (io->esize != sizeof(*ioe)) { 5192 error = ENODEV; 5193 break; 5194 } 5195 if (io->size < 0 || 5196 io->size > pf_ioctl_maxcount || 5197 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5198 error = EINVAL; 5199 break; 5200 } 5201 totlen = sizeof(struct pfioc_trans_e) * io->size; 5202 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5203 M_TEMP, M_WAITOK); 5204 error = copyin(io->array, ioes, totlen); 5205 if (error) { 5206 free(ioes, M_TEMP); 5207 break; 5208 } 5209 /* Ensure there's no more ethernet rules to clean up. */ 5210 NET_EPOCH_DRAIN_CALLBACKS(); 5211 PF_RULES_WLOCK(); 5212 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5213 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5214 switch (ioe->rs_num) { 5215 case PF_RULESET_ETH: 5216 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { 5217 PF_RULES_WUNLOCK(); 5218 free(ioes, M_TEMP); 5219 goto fail; 5220 } 5221 break; 5222 #ifdef ALTQ 5223 case PF_RULESET_ALTQ: 5224 if (ioe->anchor[0]) { 5225 PF_RULES_WUNLOCK(); 5226 free(ioes, M_TEMP); 5227 error = EINVAL; 5228 goto fail; 5229 } 5230 if ((error = pf_begin_altq(&ioe->ticket))) { 5231 PF_RULES_WUNLOCK(); 5232 free(ioes, M_TEMP); 5233 goto fail; 5234 } 5235 break; 5236 #endif /* ALTQ */ 5237 case PF_RULESET_TABLE: 5238 { 5239 struct pfr_table table; 5240 5241 bzero(&table, sizeof(table)); 5242 strlcpy(table.pfrt_anchor, ioe->anchor, 5243 sizeof(table.pfrt_anchor)); 5244 if ((error = pfr_ina_begin(&table, 5245 &ioe->ticket, NULL, 0))) { 5246 PF_RULES_WUNLOCK(); 5247 free(ioes, M_TEMP); 5248 goto fail; 5249 } 5250 break; 5251 } 5252 default: 5253 if ((error = pf_begin_rules(&ioe->ticket, 5254 ioe->rs_num, ioe->anchor))) { 5255 PF_RULES_WUNLOCK(); 5256 free(ioes, M_TEMP); 5257 goto fail; 5258 } 5259 break; 5260 } 5261 } 5262 PF_RULES_WUNLOCK(); 5263 error = copyout(ioes, io->array, totlen); 5264 free(ioes, M_TEMP); 5265 break; 5266 } 5267 5268 case DIOCXROLLBACK: { 5269 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5270 struct pfioc_trans_e *ioe, *ioes; 5271 size_t totlen; 5272 int i; 5273 5274 if (io->esize != sizeof(*ioe)) { 5275 error = ENODEV; 5276 break; 5277 } 5278 if (io->size < 0 || 5279 io->size > pf_ioctl_maxcount || 5280 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5281 error = EINVAL; 5282 break; 5283 } 5284 totlen = sizeof(struct pfioc_trans_e) * io->size; 5285 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5286 M_TEMP, M_WAITOK); 5287 error = copyin(io->array, ioes, totlen); 5288 if (error) { 5289 free(ioes, M_TEMP); 5290 break; 5291 } 5292 PF_RULES_WLOCK(); 5293 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5294 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5295 switch (ioe->rs_num) { 5296 case PF_RULESET_ETH: 5297 if ((error = pf_rollback_eth(ioe->ticket, 5298 ioe->anchor))) { 5299 PF_RULES_WUNLOCK(); 5300 free(ioes, M_TEMP); 5301 goto fail; /* really bad */ 5302 } 5303 break; 5304 #ifdef ALTQ 5305 case PF_RULESET_ALTQ: 5306 if (ioe->anchor[0]) { 5307 PF_RULES_WUNLOCK(); 5308 free(ioes, M_TEMP); 5309 error = EINVAL; 5310 goto fail; 5311 } 5312 if ((error = pf_rollback_altq(ioe->ticket))) { 5313 PF_RULES_WUNLOCK(); 5314 free(ioes, M_TEMP); 5315 goto fail; /* really bad */ 5316 } 5317 break; 5318 #endif /* ALTQ */ 5319 
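/* Table and ordinary rule transactions are rolled back below using their respective tickets; as with the cases above, a failure at this point is treated as unrecoverable. */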
case PF_RULESET_TABLE: 5320 { 5321 struct pfr_table table; 5322 5323 bzero(&table, sizeof(table)); 5324 strlcpy(table.pfrt_anchor, ioe->anchor, 5325 sizeof(table.pfrt_anchor)); 5326 if ((error = pfr_ina_rollback(&table, 5327 ioe->ticket, NULL, 0))) { 5328 PF_RULES_WUNLOCK(); 5329 free(ioes, M_TEMP); 5330 goto fail; /* really bad */ 5331 } 5332 break; 5333 } 5334 default: 5335 if ((error = pf_rollback_rules(ioe->ticket, 5336 ioe->rs_num, ioe->anchor))) { 5337 PF_RULES_WUNLOCK(); 5338 free(ioes, M_TEMP); 5339 goto fail; /* really bad */ 5340 } 5341 break; 5342 } 5343 } 5344 PF_RULES_WUNLOCK(); 5345 free(ioes, M_TEMP); 5346 break; 5347 } 5348 5349 case DIOCXCOMMIT: { 5350 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5351 struct pfioc_trans_e *ioe, *ioes; 5352 struct pf_kruleset *rs; 5353 struct pf_keth_ruleset *ers; 5354 size_t totlen; 5355 int i; 5356 5357 if (io->esize != sizeof(*ioe)) { 5358 error = ENODEV; 5359 break; 5360 } 5361 5362 if (io->size < 0 || 5363 io->size > pf_ioctl_maxcount || 5364 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5365 error = EINVAL; 5366 break; 5367 } 5368 5369 totlen = sizeof(struct pfioc_trans_e) * io->size; 5370 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5371 M_TEMP, M_WAITOK); 5372 error = copyin(io->array, ioes, totlen); 5373 if (error) { 5374 free(ioes, M_TEMP); 5375 break; 5376 } 5377 PF_RULES_WLOCK(); 5378 /* First makes sure everything will succeed. */ 5379 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5380 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; 5381 switch (ioe->rs_num) { 5382 case PF_RULESET_ETH: 5383 ers = pf_find_keth_ruleset(ioe->anchor); 5384 if (ers == NULL || ioe->ticket == 0 || 5385 ioe->ticket != ers->inactive.ticket) { 5386 PF_RULES_WUNLOCK(); 5387 free(ioes, M_TEMP); 5388 error = EINVAL; 5389 goto fail; 5390 } 5391 break; 5392 #ifdef ALTQ 5393 case PF_RULESET_ALTQ: 5394 if (ioe->anchor[0]) { 5395 PF_RULES_WUNLOCK(); 5396 free(ioes, M_TEMP); 5397 error = EINVAL; 5398 goto fail; 5399 } 5400 if (!V_altqs_inactive_open || ioe->ticket != 5401 V_ticket_altqs_inactive) { 5402 PF_RULES_WUNLOCK(); 5403 free(ioes, M_TEMP); 5404 error = EBUSY; 5405 goto fail; 5406 } 5407 break; 5408 #endif /* ALTQ */ 5409 case PF_RULESET_TABLE: 5410 rs = pf_find_kruleset(ioe->anchor); 5411 if (rs == NULL || !rs->topen || ioe->ticket != 5412 rs->tticket) { 5413 PF_RULES_WUNLOCK(); 5414 free(ioes, M_TEMP); 5415 error = EBUSY; 5416 goto fail; 5417 } 5418 break; 5419 default: 5420 if (ioe->rs_num < 0 || ioe->rs_num >= 5421 PF_RULESET_MAX) { 5422 PF_RULES_WUNLOCK(); 5423 free(ioes, M_TEMP); 5424 error = EINVAL; 5425 goto fail; 5426 } 5427 rs = pf_find_kruleset(ioe->anchor); 5428 if (rs == NULL || 5429 !rs->rules[ioe->rs_num].inactive.open || 5430 rs->rules[ioe->rs_num].inactive.ticket != 5431 ioe->ticket) { 5432 PF_RULES_WUNLOCK(); 5433 free(ioes, M_TEMP); 5434 error = EBUSY; 5435 goto fail; 5436 } 5437 break; 5438 } 5439 } 5440 /* Now do the commit - no errors should happen here. 
*/ 5441 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5442 switch (ioe->rs_num) { 5443 case PF_RULESET_ETH: 5444 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { 5445 PF_RULES_WUNLOCK(); 5446 free(ioes, M_TEMP); 5447 goto fail; /* really bad */ 5448 } 5449 break; 5450 #ifdef ALTQ 5451 case PF_RULESET_ALTQ: 5452 if ((error = pf_commit_altq(ioe->ticket))) { 5453 PF_RULES_WUNLOCK(); 5454 free(ioes, M_TEMP); 5455 goto fail; /* really bad */ 5456 } 5457 break; 5458 #endif /* ALTQ */ 5459 case PF_RULESET_TABLE: 5460 { 5461 struct pfr_table table; 5462 5463 bzero(&table, sizeof(table)); 5464 (void)strlcpy(table.pfrt_anchor, ioe->anchor, 5465 sizeof(table.pfrt_anchor)); 5466 if ((error = pfr_ina_commit(&table, 5467 ioe->ticket, NULL, NULL, 0))) { 5468 PF_RULES_WUNLOCK(); 5469 free(ioes, M_TEMP); 5470 goto fail; /* really bad */ 5471 } 5472 break; 5473 } 5474 default: 5475 if ((error = pf_commit_rules(ioe->ticket, 5476 ioe->rs_num, ioe->anchor))) { 5477 PF_RULES_WUNLOCK(); 5478 free(ioes, M_TEMP); 5479 goto fail; /* really bad */ 5480 } 5481 break; 5482 } 5483 } 5484 PF_RULES_WUNLOCK(); 5485 5486 /* Only hook into Ethernet traffic if we've got rules for it. */ 5487 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 5488 hook_pf_eth(); 5489 else 5490 dehook_pf_eth(); 5491 5492 free(ioes, M_TEMP); 5493 break; 5494 } 5495 5496 case DIOCGETSRCNODES: { 5497 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 5498 struct pf_srchash *sh; 5499 struct pf_ksrc_node *n; 5500 struct pf_src_node *p, *pstore; 5501 uint32_t i, nr = 0; 5502 5503 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5504 i++, sh++) { 5505 PF_HASHROW_LOCK(sh); 5506 LIST_FOREACH(n, &sh->nodes, entry) 5507 nr++; 5508 PF_HASHROW_UNLOCK(sh); 5509 } 5510 5511 psn->psn_len = min(psn->psn_len, 5512 sizeof(struct pf_src_node) * nr); 5513 5514 if (psn->psn_len == 0) { 5515 psn->psn_len = sizeof(struct pf_src_node) * nr; 5516 break; 5517 } 5518 5519 nr = 0; 5520 5521 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); 5522 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5523 i++, sh++) { 5524 PF_HASHROW_LOCK(sh); 5525 LIST_FOREACH(n, &sh->nodes, entry) { 5526 5527 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 5528 break; 5529 5530 pf_src_node_copy(n, p); 5531 5532 p++; 5533 nr++; 5534 } 5535 PF_HASHROW_UNLOCK(sh); 5536 } 5537 error = copyout(pstore, psn->psn_src_nodes, 5538 sizeof(struct pf_src_node) * nr); 5539 if (error) { 5540 free(pstore, M_TEMP); 5541 break; 5542 } 5543 psn->psn_len = sizeof(struct pf_src_node) * nr; 5544 free(pstore, M_TEMP); 5545 break; 5546 } 5547 5548 case DIOCCLRSRCNODES: { 5549 pf_clear_srcnodes(NULL); 5550 pf_purge_expired_src_nodes(); 5551 break; 5552 } 5553 5554 case DIOCKILLSRCNODES: 5555 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 5556 break; 5557 5558 #ifdef COMPAT_FREEBSD13 5559 case DIOCKEEPCOUNTERS_FREEBSD13: 5560 #endif 5561 case DIOCKEEPCOUNTERS: 5562 error = pf_keepcounters((struct pfioc_nv *)addr); 5563 break; 5564 5565 case DIOCGETSYNCOOKIES: 5566 error = pf_get_syncookies((struct pfioc_nv *)addr); 5567 break; 5568 5569 case DIOCSETSYNCOOKIES: 5570 error = pf_set_syncookies((struct pfioc_nv *)addr); 5571 break; 5572 5573 case DIOCSETHOSTID: { 5574 u_int32_t *hostid = (u_int32_t *)addr; 5575 5576 PF_RULES_WLOCK(); 5577 if (*hostid == 0) 5578 V_pf_status.hostid = arc4random(); 5579 else 5580 V_pf_status.hostid = *hostid; 5581 PF_RULES_WUNLOCK(); 5582 break; 5583 } 5584 5585 case DIOCOSFPFLUSH: 5586 PF_RULES_WLOCK(); 5587 pf_osfp_flush(); 5588
PF_RULES_WUNLOCK(); 5589 break; 5590 5591 case DIOCIGETIFACES: { 5592 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5593 struct pfi_kif *ifstore; 5594 size_t bufsiz; 5595 5596 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5597 error = ENODEV; 5598 break; 5599 } 5600 5601 if (io->pfiio_size < 0 || 5602 io->pfiio_size > pf_ioctl_maxcount || 5603 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5604 error = EINVAL; 5605 break; 5606 } 5607 5608 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5609 5610 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5611 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5612 M_TEMP, M_WAITOK | M_ZERO); 5613 5614 PF_RULES_RLOCK(); 5615 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5616 PF_RULES_RUNLOCK(); 5617 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5618 free(ifstore, M_TEMP); 5619 break; 5620 } 5621 5622 case DIOCSETIFFLAG: { 5623 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5624 5625 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5626 5627 PF_RULES_WLOCK(); 5628 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5629 PF_RULES_WUNLOCK(); 5630 break; 5631 } 5632 5633 case DIOCCLRIFFLAG: { 5634 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5635 5636 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5637 5638 PF_RULES_WLOCK(); 5639 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5640 PF_RULES_WUNLOCK(); 5641 break; 5642 } 5643 5644 case DIOCSETREASS: { 5645 u_int32_t *reass = (u_int32_t *)addr; 5646 5647 V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF); 5648 /* Removal of DF flag without reassembly enabled is not a 5649 * valid combination. Disable reassembly in such case. */ 5650 if (!(V_pf_status.reass & PF_REASS_ENABLED)) 5651 V_pf_status.reass = 0; 5652 break; 5653 } 5654 5655 default: 5656 error = ENODEV; 5657 break; 5658 } 5659 fail: 5660 if (sx_xlocked(&V_pf_ioctl_lock)) 5661 sx_xunlock(&V_pf_ioctl_lock); 5662 CURVNET_RESTORE(); 5663 5664 #undef ERROUT_IOCTL 5665 5666 return (error); 5667 } 5668 5669 void 5670 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version) 5671 { 5672 bzero(sp, sizeof(union pfsync_state_union)); 5673 5674 /* copy from state key */ 5675 sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5676 sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5677 sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5678 sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5679 sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5680 sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5681 sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5682 sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5683 sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto; 5684 sp->pfs_1301.af = st->key[PF_SK_WIRE]->af; 5685 5686 /* copy from state */ 5687 strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname)); 5688 bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr)); 5689 sp->pfs_1301.creation = htonl(time_uptime - st->creation); 5690 sp->pfs_1301.expire = pf_state_expires(st); 5691 if (sp->pfs_1301.expire <= time_uptime) 5692 sp->pfs_1301.expire = htonl(0); 5693 else 5694 sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime); 5695 5696 sp->pfs_1301.direction = st->direction; 5697 sp->pfs_1301.log = st->log; 5698 sp->pfs_1301.timeout = 
st->timeout; 5699 5700 switch (msg_version) { 5701 case PFSYNC_MSG_VERSION_1301: 5702 sp->pfs_1301.state_flags = st->state_flags; 5703 break; 5704 case PFSYNC_MSG_VERSION_1400: 5705 sp->pfs_1400.state_flags = htons(st->state_flags); 5706 sp->pfs_1400.qid = htons(st->qid); 5707 sp->pfs_1400.pqid = htons(st->pqid); 5708 sp->pfs_1400.dnpipe = htons(st->dnpipe); 5709 sp->pfs_1400.dnrpipe = htons(st->dnrpipe); 5710 sp->pfs_1400.rtableid = htonl(st->rtableid); 5711 sp->pfs_1400.min_ttl = st->min_ttl; 5712 sp->pfs_1400.set_tos = st->set_tos; 5713 sp->pfs_1400.max_mss = htons(st->max_mss); 5714 sp->pfs_1400.set_prio[0] = st->set_prio[0]; 5715 sp->pfs_1400.set_prio[1] = st->set_prio[1]; 5716 sp->pfs_1400.rt = st->rt; 5717 if (st->rt_kif) 5718 strlcpy(sp->pfs_1400.rt_ifname, 5719 st->rt_kif->pfik_name, 5720 sizeof(sp->pfs_1400.rt_ifname)); 5721 break; 5722 default: 5723 panic("%s: Unsupported pfsync_msg_version %d", 5724 __func__, msg_version); 5725 } 5726 5727 if (st->src_node) 5728 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE; 5729 if (st->nat_src_node) 5730 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5731 5732 sp->pfs_1301.id = st->id; 5733 sp->pfs_1301.creatorid = st->creatorid; 5734 pf_state_peer_hton(&st->src, &sp->pfs_1301.src); 5735 pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst); 5736 5737 if (st->rule.ptr == NULL) 5738 sp->pfs_1301.rule = htonl(-1); 5739 else 5740 sp->pfs_1301.rule = htonl(st->rule.ptr->nr); 5741 if (st->anchor.ptr == NULL) 5742 sp->pfs_1301.anchor = htonl(-1); 5743 else 5744 sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr); 5745 if (st->nat_rule.ptr == NULL) 5746 sp->pfs_1301.nat_rule = htonl(-1); 5747 else 5748 sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr); 5749 5750 pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]); 5751 pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]); 5752 pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]); 5753 pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]); 5754 } 5755 5756 void 5757 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5758 { 5759 bzero(sp, sizeof(*sp)); 5760 5761 sp->version = PF_STATE_VERSION; 5762 5763 /* copy from state key */ 5764 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5765 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5766 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5767 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5768 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5769 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5770 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5771 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5772 sp->proto = st->key[PF_SK_WIRE]->proto; 5773 sp->af = st->key[PF_SK_WIRE]->af; 5774 5775 /* copy from state */ 5776 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5777 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5778 sizeof(sp->orig_ifname)); 5779 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5780 sp->creation = htonl(time_uptime - st->creation); 5781 sp->expire = pf_state_expires(st); 5782 if (sp->expire <= time_uptime) 5783 sp->expire = htonl(0); 5784 else 5785 sp->expire = htonl(sp->expire - time_uptime); 5786 5787 sp->direction = st->direction; 5788 sp->log = st->log; 5789 sp->timeout = st->timeout; 5790 /* 8 bits for the old libpfctl, 16 bits for the new libpfctl */ 5791 sp->state_flags_compat = st->state_flags; 5792 sp->state_flags = htons(st->state_flags); 5793 if 
(st->src_node) 5794 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5795 if (st->nat_src_node) 5796 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5797 5798 sp->id = st->id; 5799 sp->creatorid = st->creatorid; 5800 pf_state_peer_hton(&st->src, &sp->src); 5801 pf_state_peer_hton(&st->dst, &sp->dst); 5802 5803 if (st->rule.ptr == NULL) 5804 sp->rule = htonl(-1); 5805 else 5806 sp->rule = htonl(st->rule.ptr->nr); 5807 if (st->anchor.ptr == NULL) 5808 sp->anchor = htonl(-1); 5809 else 5810 sp->anchor = htonl(st->anchor.ptr->nr); 5811 if (st->nat_rule.ptr == NULL) 5812 sp->nat_rule = htonl(-1); 5813 else 5814 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5815 5816 sp->packets[0] = st->packets[0]; 5817 sp->packets[1] = st->packets[1]; 5818 sp->bytes[0] = st->bytes[0]; 5819 sp->bytes[1] = st->bytes[1]; 5820 5821 sp->qid = htons(st->qid); 5822 sp->pqid = htons(st->pqid); 5823 sp->dnpipe = htons(st->dnpipe); 5824 sp->dnrpipe = htons(st->dnrpipe); 5825 sp->rtableid = htonl(st->rtableid); 5826 sp->min_ttl = st->min_ttl; 5827 sp->set_tos = st->set_tos; 5828 sp->max_mss = htons(st->max_mss); 5829 sp->rt = st->rt; 5830 if (st->rt_kif) 5831 strlcpy(sp->rt_ifname, st->rt_kif->pfik_name, 5832 sizeof(sp->rt_ifname)); 5833 sp->set_prio[0] = st->set_prio[0]; 5834 sp->set_prio[1] = st->set_prio[1]; 5835 5836 } 5837 5838 static void 5839 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5840 { 5841 struct pfr_ktable *kt; 5842 5843 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5844 5845 kt = aw->p.tbl; 5846 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5847 kt = kt->pfrkt_root; 5848 aw->p.tbl = NULL; 5849 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 5850 kt->pfrkt_cnt : -1; 5851 } 5852 5853 static int 5854 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5855 size_t number, char **names) 5856 { 5857 nvlist_t *nvc; 5858 5859 nvc = nvlist_create(0); 5860 if (nvc == NULL) 5861 return (ENOMEM); 5862 5863 for (int i = 0; i < number; i++) { 5864 nvlist_append_number_array(nvc, "counters", 5865 counter_u64_fetch(counters[i])); 5866 nvlist_append_string_array(nvc, "names", 5867 names[i]); 5868 nvlist_append_number_array(nvc, "ids", 5869 i); 5870 } 5871 nvlist_add_nvlist(nvl, name, nvc); 5872 nvlist_destroy(nvc); 5873 5874 return (0); 5875 } 5876 5877 static int 5878 pf_getstatus(struct pfioc_nv *nv) 5879 { 5880 nvlist_t *nvl = NULL, *nvc = NULL; 5881 void *nvlpacked = NULL; 5882 int error; 5883 struct pf_status s; 5884 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5885 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5886 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5887 PF_RULES_RLOCK_TRACKER; 5888 5889 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5890 5891 PF_RULES_RLOCK(); 5892 5893 nvl = nvlist_create(0); 5894 if (nvl == NULL) 5895 ERROUT(ENOMEM); 5896 5897 nvlist_add_bool(nvl, "running", V_pf_status.running); 5898 nvlist_add_number(nvl, "since", V_pf_status.since); 5899 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5900 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5901 nvlist_add_number(nvl, "states", V_pf_status.states); 5902 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5903 nvlist_add_number(nvl, "reass", V_pf_status.reass); 5904 nvlist_add_bool(nvl, "syncookies_active", 5905 V_pf_status.syncookies_active); 5906 5907 /* counters */ 5908 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 5909 PFRES_MAX, pf_reasons); 5910 if (error != 0) 5911 ERROUT(error); 5912 5913 /* lcounters */ 5914 error = 
pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5915 KLCNT_MAX, pf_lcounter); 5916 if (error != 0) 5917 ERROUT(error); 5918 5919 /* fcounters */ 5920 nvc = nvlist_create(0); 5921 if (nvc == NULL) 5922 ERROUT(ENOMEM); 5923 5924 for (int i = 0; i < FCNT_MAX; i++) { 5925 nvlist_append_number_array(nvc, "counters", 5926 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5927 nvlist_append_string_array(nvc, "names", 5928 pf_fcounter[i]); 5929 nvlist_append_number_array(nvc, "ids", 5930 i); 5931 } 5932 nvlist_add_nvlist(nvl, "fcounters", nvc); 5933 nvlist_destroy(nvc); 5934 nvc = NULL; 5935 5936 /* scounters */ 5937 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5938 SCNT_MAX, pf_fcounter); 5939 if (error != 0) 5940 ERROUT(error); 5941 5942 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5943 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5944 PF_MD5_DIGEST_LENGTH); 5945 5946 pfi_update_status(V_pf_status.ifname, &s); 5947 5948 /* pcounters / bcounters */ 5949 for (int i = 0; i < 2; i++) { 5950 for (int j = 0; j < 2; j++) { 5951 for (int k = 0; k < 2; k++) { 5952 nvlist_append_number_array(nvl, "pcounters", 5953 s.pcounters[i][j][k]); 5954 } 5955 nvlist_append_number_array(nvl, "bcounters", 5956 s.bcounters[i][j]); 5957 } 5958 } 5959 5960 nvlpacked = nvlist_pack(nvl, &nv->len); 5961 if (nvlpacked == NULL) 5962 ERROUT(ENOMEM); 5963 5964 if (nv->size == 0) 5965 ERROUT(0); 5966 else if (nv->size < nv->len) 5967 ERROUT(ENOSPC); 5968 5969 PF_RULES_RUNLOCK(); 5970 error = copyout(nvlpacked, nv->data, nv->len); 5971 goto done; 5972 5973 #undef ERROUT 5974 errout: 5975 PF_RULES_RUNLOCK(); 5976 done: 5977 free(nvlpacked, M_NVLIST); 5978 nvlist_destroy(nvc); 5979 nvlist_destroy(nvl); 5980 5981 return (error); 5982 } 5983 5984 /* 5985 * XXX - Check for version mismatch!!! 5986 */ 5987 static void 5988 pf_clear_all_states(void) 5989 { 5990 struct pf_kstate *s; 5991 u_int i; 5992 5993 for (i = 0; i <= pf_hashmask; i++) { 5994 struct pf_idhash *ih = &V_pf_idhash[i]; 5995 relock: 5996 PF_HASHROW_LOCK(ih); 5997 LIST_FOREACH(s, &ih->states, entry) { 5998 s->timeout = PFTM_PURGE; 5999 /* Don't send out individual delete messages. */ 6000 s->state_flags |= PFSTATE_NOSYNC; 6001 pf_unlink_state(s); 6002 goto relock; 6003 } 6004 PF_HASHROW_UNLOCK(ih); 6005 } 6006 } 6007 6008 static int 6009 pf_clear_tables(void) 6010 { 6011 struct pfioc_table io; 6012 int error; 6013 6014 bzero(&io, sizeof(io)); 6015 6016 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 6017 io.pfrio_flags); 6018 6019 return (error); 6020 } 6021 6022 static void 6023 pf_clear_srcnodes(struct pf_ksrc_node *n) 6024 { 6025 struct pf_kstate *s; 6026 int i; 6027 6028 for (i = 0; i <= pf_hashmask; i++) { 6029 struct pf_idhash *ih = &V_pf_idhash[i]; 6030 6031 PF_HASHROW_LOCK(ih); 6032 LIST_FOREACH(s, &ih->states, entry) { 6033 if (n == NULL || n == s->src_node) 6034 s->src_node = NULL; 6035 if (n == NULL || n == s->nat_src_node) 6036 s->nat_src_node = NULL; 6037 } 6038 PF_HASHROW_UNLOCK(ih); 6039 } 6040 6041 if (n == NULL) { 6042 struct pf_srchash *sh; 6043 6044 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 6045 i++, sh++) { 6046 PF_HASHROW_LOCK(sh); 6047 LIST_FOREACH(n, &sh->nodes, entry) { 6048 n->expire = 1; 6049 n->states = 0; 6050 } 6051 PF_HASHROW_UNLOCK(sh); 6052 } 6053 } else { 6054 /* XXX: hash slot should already be locked here. 
*/ 6055 n->expire = 1; 6056 n->states = 0; 6057 } 6058 } 6059 6060 static void 6061 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 6062 { 6063 struct pf_ksrc_node_list kill; 6064 6065 LIST_INIT(&kill); 6066 for (int i = 0; i <= pf_srchashmask; i++) { 6067 struct pf_srchash *sh = &V_pf_srchash[i]; 6068 struct pf_ksrc_node *sn, *tmp; 6069 6070 PF_HASHROW_LOCK(sh); 6071 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 6072 if (PF_MATCHA(psnk->psnk_src.neg, 6073 &psnk->psnk_src.addr.v.a.addr, 6074 &psnk->psnk_src.addr.v.a.mask, 6075 &sn->addr, sn->af) && 6076 PF_MATCHA(psnk->psnk_dst.neg, 6077 &psnk->psnk_dst.addr.v.a.addr, 6078 &psnk->psnk_dst.addr.v.a.mask, 6079 &sn->raddr, sn->af)) { 6080 pf_unlink_src_node(sn); 6081 LIST_INSERT_HEAD(&kill, sn, entry); 6082 sn->expire = 1; 6083 } 6084 PF_HASHROW_UNLOCK(sh); 6085 } 6086 6087 for (int i = 0; i <= pf_hashmask; i++) { 6088 struct pf_idhash *ih = &V_pf_idhash[i]; 6089 struct pf_kstate *s; 6090 6091 PF_HASHROW_LOCK(ih); 6092 LIST_FOREACH(s, &ih->states, entry) { 6093 if (s->src_node && s->src_node->expire == 1) 6094 s->src_node = NULL; 6095 if (s->nat_src_node && s->nat_src_node->expire == 1) 6096 s->nat_src_node = NULL; 6097 } 6098 PF_HASHROW_UNLOCK(ih); 6099 } 6100 6101 psnk->psnk_killed = pf_free_src_nodes(&kill); 6102 } 6103 6104 static int 6105 pf_keepcounters(struct pfioc_nv *nv) 6106 { 6107 nvlist_t *nvl = NULL; 6108 void *nvlpacked = NULL; 6109 int error = 0; 6110 6111 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6112 6113 if (nv->len > pf_ioctl_maxcount) 6114 ERROUT(ENOMEM); 6115 6116 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6117 if (nvlpacked == NULL) 6118 ERROUT(ENOMEM); 6119 6120 error = copyin(nv->data, nvlpacked, nv->len); 6121 if (error) 6122 ERROUT(error); 6123 6124 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6125 if (nvl == NULL) 6126 ERROUT(EBADMSG); 6127 6128 if (! nvlist_exists_bool(nvl, "keep_counters")) 6129 ERROUT(EBADMSG); 6130 6131 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 6132 6133 on_error: 6134 nvlist_destroy(nvl); 6135 free(nvlpacked, M_NVLIST); 6136 return (error); 6137 } 6138 6139 static unsigned int 6140 pf_clear_states(const struct pf_kstate_kill *kill) 6141 { 6142 struct pf_state_key_cmp match_key; 6143 struct pf_kstate *s; 6144 struct pfi_kkif *kif; 6145 int idx; 6146 unsigned int killed = 0, dir; 6147 6148 for (unsigned int i = 0; i <= pf_hashmask; i++) { 6149 struct pf_idhash *ih = &V_pf_idhash[i]; 6150 6151 relock_DIOCCLRSTATES: 6152 PF_HASHROW_LOCK(ih); 6153 LIST_FOREACH(s, &ih->states, entry) { 6154 /* For floating states look at the original kif. */ 6155 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 6156 6157 if (kill->psk_ifname[0] && 6158 strcmp(kill->psk_ifname, 6159 kif->pfik_name)) 6160 continue; 6161 6162 if (kill->psk_kill_match) { 6163 bzero(&match_key, sizeof(match_key)); 6164 6165 if (s->direction == PF_OUT) { 6166 dir = PF_IN; 6167 idx = PF_SK_STACK; 6168 } else { 6169 dir = PF_OUT; 6170 idx = PF_SK_WIRE; 6171 } 6172 6173 match_key.af = s->key[idx]->af; 6174 match_key.proto = s->key[idx]->proto; 6175 PF_ACPY(&match_key.addr[0], 6176 &s->key[idx]->addr[1], match_key.af); 6177 match_key.port[0] = s->key[idx]->port[1]; 6178 PF_ACPY(&match_key.addr[1], 6179 &s->key[idx]->addr[0], match_key.af); 6180 match_key.port[1] = s->key[idx]->port[0]; 6181 } 6182 6183 /* 6184 * Don't send out individual 6185 * delete messages. 
6186 */ 6187 s->state_flags |= PFSTATE_NOSYNC; 6188 pf_unlink_state(s); 6189 killed++; 6190 6191 if (kill->psk_kill_match) 6192 killed += pf_kill_matching_state(&match_key, 6193 dir); 6194 6195 goto relock_DIOCCLRSTATES; 6196 } 6197 PF_HASHROW_UNLOCK(ih); 6198 } 6199 6200 if (V_pfsync_clear_states_ptr != NULL) 6201 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6202 6203 return (killed); 6204 } 6205 6206 static void 6207 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6208 { 6209 struct pf_kstate *s; 6210 6211 if (kill->psk_pfcmp.id) { 6212 if (kill->psk_pfcmp.creatorid == 0) 6213 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6214 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6215 kill->psk_pfcmp.creatorid))) { 6216 pf_unlink_state(s); 6217 *killed = 1; 6218 } 6219 return; 6220 } 6221 6222 for (unsigned int i = 0; i <= pf_hashmask; i++) 6223 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6224 6225 return; 6226 } 6227 6228 static int 6229 pf_killstates_nv(struct pfioc_nv *nv) 6230 { 6231 struct pf_kstate_kill kill; 6232 nvlist_t *nvl = NULL; 6233 void *nvlpacked = NULL; 6234 int error = 0; 6235 unsigned int killed = 0; 6236 6237 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6238 6239 if (nv->len > pf_ioctl_maxcount) 6240 ERROUT(ENOMEM); 6241 6242 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6243 if (nvlpacked == NULL) 6244 ERROUT(ENOMEM); 6245 6246 error = copyin(nv->data, nvlpacked, nv->len); 6247 if (error) 6248 ERROUT(error); 6249 6250 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6251 if (nvl == NULL) 6252 ERROUT(EBADMSG); 6253 6254 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6255 if (error) 6256 ERROUT(error); 6257 6258 pf_killstates(&kill, &killed); 6259 6260 free(nvlpacked, M_NVLIST); 6261 nvlpacked = NULL; 6262 nvlist_destroy(nvl); 6263 nvl = nvlist_create(0); 6264 if (nvl == NULL) 6265 ERROUT(ENOMEM); 6266 6267 nvlist_add_number(nvl, "killed", killed); 6268 6269 nvlpacked = nvlist_pack(nvl, &nv->len); 6270 if (nvlpacked == NULL) 6271 ERROUT(ENOMEM); 6272 6273 if (nv->size == 0) 6274 ERROUT(0); 6275 else if (nv->size < nv->len) 6276 ERROUT(ENOSPC); 6277 6278 error = copyout(nvlpacked, nv->data, nv->len); 6279 6280 on_error: 6281 nvlist_destroy(nvl); 6282 free(nvlpacked, M_NVLIST); 6283 return (error); 6284 } 6285 6286 static int 6287 pf_clearstates_nv(struct pfioc_nv *nv) 6288 { 6289 struct pf_kstate_kill kill; 6290 nvlist_t *nvl = NULL; 6291 void *nvlpacked = NULL; 6292 int error = 0; 6293 unsigned int killed; 6294 6295 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6296 6297 if (nv->len > pf_ioctl_maxcount) 6298 ERROUT(ENOMEM); 6299 6300 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6301 if (nvlpacked == NULL) 6302 ERROUT(ENOMEM); 6303 6304 error = copyin(nv->data, nvlpacked, nv->len); 6305 if (error) 6306 ERROUT(error); 6307 6308 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6309 if (nvl == NULL) 6310 ERROUT(EBADMSG); 6311 6312 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6313 if (error) 6314 ERROUT(error); 6315 6316 killed = pf_clear_states(&kill); 6317 6318 free(nvlpacked, M_NVLIST); 6319 nvlpacked = NULL; 6320 nvlist_destroy(nvl); 6321 nvl = nvlist_create(0); 6322 if (nvl == NULL) 6323 ERROUT(ENOMEM); 6324 6325 nvlist_add_number(nvl, "killed", killed); 6326 6327 nvlpacked = nvlist_pack(nvl, &nv->len); 6328 if (nvlpacked == NULL) 6329 ERROUT(ENOMEM); 6330 6331 if (nv->size == 0) 6332 ERROUT(0); 6333 else if (nv->size < nv->len) 6334 ERROUT(ENOSPC); 6335 6336 error = copyout(nvlpacked, nv->data, nv->len); 6337 6338 
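/* On success, fall through to the shared on_error cleanup below. */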
#undef ERROUT 6339 on_error: 6340 nvlist_destroy(nvl); 6341 free(nvlpacked, M_NVLIST); 6342 return (error); 6343 } 6344 6345 static int 6346 pf_getstate(struct pfioc_nv *nv) 6347 { 6348 nvlist_t *nvl = NULL, *nvls; 6349 void *nvlpacked = NULL; 6350 struct pf_kstate *s = NULL; 6351 int error = 0; 6352 uint64_t id, creatorid; 6353 6354 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 6355 6356 if (nv->len > pf_ioctl_maxcount) 6357 ERROUT(ENOMEM); 6358 6359 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6360 if (nvlpacked == NULL) 6361 ERROUT(ENOMEM); 6362 6363 error = copyin(nv->data, nvlpacked, nv->len); 6364 if (error) 6365 ERROUT(error); 6366 6367 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6368 if (nvl == NULL) 6369 ERROUT(EBADMSG); 6370 6371 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 6372 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 6373 6374 s = pf_find_state_byid(id, creatorid); 6375 if (s == NULL) 6376 ERROUT(ENOENT); 6377 6378 free(nvlpacked, M_NVLIST); 6379 nvlpacked = NULL; 6380 nvlist_destroy(nvl); 6381 nvl = nvlist_create(0); 6382 if (nvl == NULL) 6383 ERROUT(ENOMEM); 6384 6385 nvls = pf_state_to_nvstate(s); 6386 if (nvls == NULL) 6387 ERROUT(ENOMEM); 6388 6389 nvlist_add_nvlist(nvl, "state", nvls); 6390 nvlist_destroy(nvls); 6391 6392 nvlpacked = nvlist_pack(nvl, &nv->len); 6393 if (nvlpacked == NULL) 6394 ERROUT(ENOMEM); 6395 6396 if (nv->size == 0) 6397 ERROUT(0); 6398 else if (nv->size < nv->len) 6399 ERROUT(ENOSPC); 6400 6401 error = copyout(nvlpacked, nv->data, nv->len); 6402 6403 #undef ERROUT 6404 errout: 6405 if (s != NULL) 6406 PF_STATE_UNLOCK(s); 6407 free(nvlpacked, M_NVLIST); 6408 nvlist_destroy(nvl); 6409 return (error); 6410 } 6411 6412 /* 6413 * XXX - Check for version mismatch!!! 6414 */ 6415 6416 /* 6417 * Duplicate pfctl -Fa operation to get rid of as much as we can. 6418 */ 6419 static int 6420 shutdown_pf(void) 6421 { 6422 int error = 0; 6423 u_int32_t t[5]; 6424 char nn = '\0'; 6425 6426 do { 6427 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6428 != 0) { 6429 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6430 break; 6431 } 6432 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6433 != 0) { 6434 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6435 break; /* XXX: rollback? */ 6436 } 6437 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6438 != 0) { 6439 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6440 break; /* XXX: rollback? */ 6441 } 6442 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6443 != 0) { 6444 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6445 break; /* XXX: rollback? */ 6446 } 6447 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6448 != 0) { 6449 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6450 break; /* XXX: rollback? 
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
			break;
		}
		pf_commit_eth(t[0], &nn);

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloc'ed memory, so there is no need to clean it up */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we have to change this to lo0, as is the case in IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}

static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
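	/*
	 * Ethernet rules are freed from net epoch callbacks (see
	 * pf_rollback_eth_cb()), so wait for any outstanding callbacks to
	 * run before tearing down the rest of the per-VNET state.
	 */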
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

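/*
 * Module glue: pf_load() runs from the MOD_LOAD event at
 * SI_SUB_PROTO_FIREWALL/SI_ORDER_SECOND, i.e. before the per-VNET
 * vnet_pf_init() initializers registered above at SI_ORDER_THIRD.
 */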
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);