/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
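/*
 * The SDT probes above are fired from the error paths of the ioctl
 * handlers via SDT_PROBE3()/SDT_PROBE2() and are visible to DTrace
 * under the pf provider.  Illustrative sketch only (the argument
 * meanings shown are an assumption, not a documented ABI):
 *
 *	dtrace -n 'pf:ioctl:ioctl:error
 *	    { printf("arg0 %x arg1 %d arg2 %d", arg0, arg1, arg2); }'
 */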
static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule, pf_default_rule);

static __inline int	 pf_krule_compare(struct pf_krule *,
			    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t, pf_tag_z);
#define	V_pf_tag_z	VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock	VNET(pf_ioctl_lock)
struct sx pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
305 */ 306 static int 307 pf_user_strcpy(char *dst, const char *src, size_t sz) 308 { 309 if (strnlen(src, sz) == sz) 310 return (EINVAL); 311 (void)strlcpy(dst, src, sz); 312 return (0); 313 } 314 315 static void 316 pfattach_vnet(void) 317 { 318 u_int32_t *my_timeout = V_pf_default_rule.timeout; 319 320 bzero(&V_pf_status, sizeof(V_pf_status)); 321 322 pf_initialize(); 323 pfr_initialize(); 324 pfi_initialize_vnet(); 325 pf_normalize_init(); 326 pf_syncookies_init(); 327 328 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 329 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 330 331 RB_INIT(&V_pf_anchors); 332 pf_init_kruleset(&pf_main_ruleset); 333 334 pf_init_keth(V_pf_keth); 335 336 /* default rule should never be garbage collected */ 337 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; 338 #ifdef PF_DEFAULT_TO_DROP 339 V_pf_default_rule.action = PF_DROP; 340 #else 341 V_pf_default_rule.action = PF_PASS; 342 #endif 343 V_pf_default_rule.nr = -1; 344 V_pf_default_rule.rtableid = -1; 345 346 pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK); 347 for (int i = 0; i < 2; i++) { 348 pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK); 349 pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK); 350 } 351 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK); 352 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK); 353 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK); 354 355 V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 356 M_WAITOK | M_ZERO); 357 358 #ifdef PF_WANT_32_TO_64_COUNTER 359 V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO); 360 V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO); 361 PF_RULES_WLOCK(); 362 LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); 363 LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist); 364 V_pf_allrulecount++; 365 LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); 366 PF_RULES_WUNLOCK(); 367 #endif 368 369 /* initialize default timeouts */ 370 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 371 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 372 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 373 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 374 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 375 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 376 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 377 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 378 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 379 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 380 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 381 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 382 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 383 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 384 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 385 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 386 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 387 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 388 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 389 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 390 391 V_pf_status.debug = PF_DEBUG_URGENT; 392 /* 393 * XXX This is different than in OpenBSD where reassembly is enabled by 394 * defult. In FreeBSD we expect people to still use scrub rules and 395 * switch to the new syntax later. 
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled
	 * by default.  In FreeBSD we expect people to still use scrub rules
	 * and switch to the new syntax later.  Only when they switch must
	 * they explicitly enable reassembly.  We could change the default
	 * once the scrub rule functionality is hopefully removed some day
	 * in the future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}
static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset *ruleset;
	struct pf_krule *rule;
	int rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname *tag;
	u_int32_t index;
	u_int16_t new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname *t;
	uint16_t index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}
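/*
 * Illustrative (not compiled) sketch of the tag lifecycle implemented
 * above.  Tags are reference-counted, so resolving the same name twice
 * returns the same 1-based id, and each user must eventually drop its
 * reference; both calls require the rules write lock to be held:
 *
 *	uint16_t tag;
 *
 *	tag = pf_tagname2tag("my-tag");	// allocates or takes a reference
 *	if (tag == 0)
 *		return (EBUSY);		// id space exhausted or no memory
 *	...
 *	tag_unref(&V_pf_tags, tag);	// entry freed once refcount hits 0
 */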
static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
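/*
 * The skip-step computation above lets the ethernet rule evaluator jump
 * over whole runs of consecutive rules that share the same value for a
 * field.  Hedged example: given the ruleset
 *
 *	r0: ... on em0 ...
 *	r1: ... on em0 ...
 *	r2: ... on em1 ...
 *
 * r0->skip[PFE_SKIP_IFP].ptr is set to r2, so a packet that fails the
 * interface test on r0 continues directly at r2, never evaluating r1.
 */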
static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq, *tmp;
	int error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq *altq, *tmp;
	int error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs, *old_altq_ifs;
	struct pf_altq *altq, *tmp;
	int err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
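/*
 * Hedged note on the token bucket regulator programmed above: in ALTQ
 * the tb_profile rate is a shaping rate in bits per second and depth a
 * burst allowance in bytes, so e.g. ifbandwidth = 100000000 with
 * tbrsize = 12000 would pace if_snd at roughly 100 Mb/s with bursts of
 * up to 12 kB.  The unit interpretation follows altq(4) and should be
 * treated as an assumption rather than something this file defines.
 */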
1060 */ 1061 if (altq->altq_disc != ifp->if_snd.altq_disc) 1062 return (0); 1063 1064 error = altq_disable(&ifp->if_snd); 1065 1066 if (error == 0) { 1067 /* clear tokenbucket regulator */ 1068 tb.rate = 0; 1069 error = tbr_set(&ifp->if_snd, &tb); 1070 } 1071 1072 return (error); 1073 } 1074 1075 static int 1076 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket, 1077 struct pf_altq *altq) 1078 { 1079 struct ifnet *ifp1; 1080 int error = 0; 1081 1082 /* Deactivate the interface in question */ 1083 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; 1084 if ((ifp1 = ifunit(altq->ifname)) == NULL || 1085 (remove && ifp1 == ifp)) { 1086 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 1087 } else { 1088 error = altq_add(ifp1, altq); 1089 1090 if (ticket != V_ticket_altqs_inactive) 1091 error = EBUSY; 1092 1093 if (error) 1094 free(altq, M_PFALTQ); 1095 } 1096 1097 return (error); 1098 } 1099 1100 void 1101 pf_altq_ifnet_event(struct ifnet *ifp, int remove) 1102 { 1103 struct pf_altq *a1, *a2, *a3; 1104 u_int32_t ticket; 1105 int error = 0; 1106 1107 /* 1108 * No need to re-evaluate the configuration for events on interfaces 1109 * that do not support ALTQ, as it's not possible for such 1110 * interfaces to be part of the configuration. 1111 */ 1112 if (!ALTQ_IS_READY(&ifp->if_snd)) 1113 return; 1114 1115 /* Interrupt userland queue modifications */ 1116 if (V_altqs_inactive_open) 1117 pf_rollback_altq(V_ticket_altqs_inactive); 1118 1119 /* Start new altq ruleset */ 1120 if (pf_begin_altq(&ticket)) 1121 return; 1122 1123 /* Copy the current active set */ 1124 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { 1125 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1126 if (a2 == NULL) { 1127 error = ENOMEM; 1128 break; 1129 } 1130 bcopy(a1, a2, sizeof(struct pf_altq)); 1131 1132 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1133 if (error) 1134 break; 1135 1136 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); 1137 } 1138 if (error) 1139 goto out; 1140 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { 1141 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1142 if (a2 == NULL) { 1143 error = ENOMEM; 1144 break; 1145 } 1146 bcopy(a1, a2, sizeof(struct pf_altq)); 1147 1148 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 1149 error = EBUSY; 1150 free(a2, M_PFALTQ); 1151 break; 1152 } 1153 a2->altq_disc = NULL; 1154 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { 1155 if (strncmp(a3->ifname, a2->ifname, 1156 IFNAMSIZ) == 0) { 1157 a2->altq_disc = a3->altq_disc; 1158 break; 1159 } 1160 } 1161 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1162 if (error) 1163 break; 1164 1165 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); 1166 } 1167 1168 out: 1169 if (error != 0) 1170 pf_rollback_altq(ticket); 1171 else 1172 pf_commit_altq(ticket); 1173 } 1174 #endif /* ALTQ */ 1175 1176 static struct pf_krule_global * 1177 pf_rule_tree_alloc(int flags) 1178 { 1179 struct pf_krule_global *tree; 1180 1181 tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags); 1182 if (tree == NULL) 1183 return (NULL); 1184 RB_INIT(tree); 1185 return (tree); 1186 } 1187 1188 static void 1189 pf_rule_tree_free(struct pf_krule_global *tree) 1190 { 1191 1192 free(tree, M_TEMP); 1193 } 1194 1195 static int 1196 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1197 { 1198 struct pf_krule_global *tree; 1199 struct pf_kruleset *rs; 1200 struct pf_krule *rule; 1201 1202 PF_RULES_WASSERT(); 1203 1204 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1205 return (EINVAL); 1206 tree = 
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}
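/*
 * pf_hash_rule() and pf_krule_compare() together key the pf_krule_global
 * RB tree on a rule's MD5 digest: two rules with identical match criteria
 * hash identically.  Illustrative (not compiled) sketch of how a rule in
 * the new ruleset is matched against the outgoing one, as
 * pf_commit_rules() does below when preserving counters:
 *
 *	pf_hash_rule(rule);		// fills rule->md5sum
 *	old = RB_FIND(pf_krule_global, old_tree, rule);
 *	if (old != NULL)
 *		;			// same rule text, carry counters over
 */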
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule, **old_array, *old_rule;
	struct pf_krulequeue *old_rules;
	struct pf_krule_global *old_tree;
	int error;
	u_int32_t old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}
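/*
 * pf_begin_rules()/pf_commit_rules() implement the ticket-based
 * transaction driven by DIOCXBEGIN/DIOCXCOMMIT from userspace: begin
 * hands out a ticket for the inactive ruleset, rules are staged against
 * that ticket, and commit atomically swaps inactive and active.  Hedged
 * in-kernel sketch, with locking and error handling omitted:
 *
 *	u_int32_t ticket;
 *
 *	pf_begin_rules(&ticket, PF_RULESET_FILTER, "");
 *	// ... stage rules into the inactive queue under this ticket ...
 *	pf_commit_rules(ticket, PF_RULESET_FILTER, "");
 */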
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX ctx;
	struct pf_krule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
			    sizeof(struct pf_rule **),
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}
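/*
 * Worked example for the rate adjustment above: with a measurement
 * window of seconds = 10, count = 30 and diff = 4 seconds since the
 * last update, the count is reduced by 30 * 4 / 10 = 12, i.e. the
 * estimate decays linearly across the window and is zeroed outright
 * once a full window has elapsed.
 */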
1575 */ 1576 static int 1577 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) 1578 { 1579 u_int32_t version; 1580 1581 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1582 version = 0; 1583 else 1584 version = pa->version; 1585 1586 if (version > PFIOC_ALTQ_VERSION) 1587 return (EINVAL); 1588 1589 #define ASSIGN(x) exported_q->x = q->x 1590 #define COPY(x) \ 1591 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) 1592 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) 1593 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) 1594 1595 switch (version) { 1596 case 0: { 1597 struct pf_altq_v0 *exported_q = 1598 &((struct pfioc_altq_v0 *)pa)->altq; 1599 1600 COPY(ifname); 1601 1602 ASSIGN(scheduler); 1603 ASSIGN(tbrsize); 1604 exported_q->tbrsize = SATU16(q->tbrsize); 1605 exported_q->ifbandwidth = SATU32(q->ifbandwidth); 1606 1607 COPY(qname); 1608 COPY(parent); 1609 ASSIGN(parent_qid); 1610 exported_q->bandwidth = SATU32(q->bandwidth); 1611 ASSIGN(priority); 1612 ASSIGN(local_flags); 1613 1614 ASSIGN(qlimit); 1615 ASSIGN(flags); 1616 1617 if (q->scheduler == ALTQT_HFSC) { 1618 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x 1619 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ 1620 SATU32(q->pq_u.hfsc_opts.x) 1621 1622 ASSIGN_OPT_SATU32(rtsc_m1); 1623 ASSIGN_OPT(rtsc_d); 1624 ASSIGN_OPT_SATU32(rtsc_m2); 1625 1626 ASSIGN_OPT_SATU32(lssc_m1); 1627 ASSIGN_OPT(lssc_d); 1628 ASSIGN_OPT_SATU32(lssc_m2); 1629 1630 ASSIGN_OPT_SATU32(ulsc_m1); 1631 ASSIGN_OPT(ulsc_d); 1632 ASSIGN_OPT_SATU32(ulsc_m2); 1633 1634 ASSIGN_OPT(flags); 1635 1636 #undef ASSIGN_OPT 1637 #undef ASSIGN_OPT_SATU32 1638 } else 1639 COPY(pq_u); 1640 1641 ASSIGN(qid); 1642 break; 1643 } 1644 case 1: { 1645 struct pf_altq_v1 *exported_q = 1646 &((struct pfioc_altq_v1 *)pa)->altq; 1647 1648 COPY(ifname); 1649 1650 ASSIGN(scheduler); 1651 ASSIGN(tbrsize); 1652 ASSIGN(ifbandwidth); 1653 1654 COPY(qname); 1655 COPY(parent); 1656 ASSIGN(parent_qid); 1657 ASSIGN(bandwidth); 1658 ASSIGN(priority); 1659 ASSIGN(local_flags); 1660 1661 ASSIGN(qlimit); 1662 ASSIGN(flags); 1663 COPY(pq_u); 1664 1665 ASSIGN(qid); 1666 break; 1667 } 1668 default: 1669 panic("%s: unhandled struct pfioc_altq version", __func__); 1670 break; 1671 } 1672 1673 #undef ASSIGN 1674 #undef COPY 1675 #undef SATU16 1676 #undef SATU32 1677 1678 return (0); 1679 } 1680 1681 /* 1682 * Handle import to struct pf_kaltq of struct pf_altq from user binaries 1683 * that may be using any version of it. 
1684 */ 1685 static int 1686 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1687 { 1688 u_int32_t version; 1689 1690 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1691 version = 0; 1692 else 1693 version = pa->version; 1694 1695 if (version > PFIOC_ALTQ_VERSION) 1696 return (EINVAL); 1697 1698 #define ASSIGN(x) q->x = imported_q->x 1699 #define COPY(x) \ 1700 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1701 1702 switch (version) { 1703 case 0: { 1704 struct pf_altq_v0 *imported_q = 1705 &((struct pfioc_altq_v0 *)pa)->altq; 1706 1707 COPY(ifname); 1708 1709 ASSIGN(scheduler); 1710 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1711 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1712 1713 COPY(qname); 1714 COPY(parent); 1715 ASSIGN(parent_qid); 1716 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1717 ASSIGN(priority); 1718 ASSIGN(local_flags); 1719 1720 ASSIGN(qlimit); 1721 ASSIGN(flags); 1722 1723 if (imported_q->scheduler == ALTQT_HFSC) { 1724 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1725 1726 /* 1727 * The m1 and m2 parameters are being copied from 1728 * 32-bit to 64-bit. 1729 */ 1730 ASSIGN_OPT(rtsc_m1); 1731 ASSIGN_OPT(rtsc_d); 1732 ASSIGN_OPT(rtsc_m2); 1733 1734 ASSIGN_OPT(lssc_m1); 1735 ASSIGN_OPT(lssc_d); 1736 ASSIGN_OPT(lssc_m2); 1737 1738 ASSIGN_OPT(ulsc_m1); 1739 ASSIGN_OPT(ulsc_d); 1740 ASSIGN_OPT(ulsc_m2); 1741 1742 ASSIGN_OPT(flags); 1743 1744 #undef ASSIGN_OPT 1745 } else 1746 COPY(pq_u); 1747 1748 ASSIGN(qid); 1749 break; 1750 } 1751 case 1: { 1752 struct pf_altq_v1 *imported_q = 1753 &((struct pfioc_altq_v1 *)pa)->altq; 1754 1755 COPY(ifname); 1756 1757 ASSIGN(scheduler); 1758 ASSIGN(tbrsize); 1759 ASSIGN(ifbandwidth); 1760 1761 COPY(qname); 1762 COPY(parent); 1763 ASSIGN(parent_qid); 1764 ASSIGN(bandwidth); 1765 ASSIGN(priority); 1766 ASSIGN(local_flags); 1767 1768 ASSIGN(qlimit); 1769 ASSIGN(flags); 1770 COPY(pq_u); 1771 1772 ASSIGN(qid); 1773 break; 1774 } 1775 default: 1776 panic("%s: unhandled struct pfioc_altq version", __func__); 1777 break; 1778 } 1779 1780 #undef ASSIGN 1781 #undef COPY 1782 1783 return (0); 1784 } 1785 1786 static struct pf_altq * 1787 pf_altq_get_nth_active(u_int32_t n) 1788 { 1789 struct pf_altq *altq; 1790 u_int32_t nr; 1791 1792 nr = 0; 1793 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1794 if (nr == n) 1795 return (altq); 1796 nr++; 1797 } 1798 1799 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1800 if (nr == n) 1801 return (altq); 1802 nr++; 1803 } 1804 1805 return (NULL); 1806 } 1807 #endif /* ALTQ */ 1808 1809 struct pf_krule * 1810 pf_krule_alloc(void) 1811 { 1812 struct pf_krule *rule; 1813 1814 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1815 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1816 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 1817 M_WAITOK | M_ZERO); 1818 return (rule); 1819 } 1820 1821 void 1822 pf_krule_free(struct pf_krule *rule) 1823 { 1824 #ifdef PF_WANT_32_TO_64_COUNTER 1825 bool wowned; 1826 #endif 1827 1828 if (rule == NULL) 1829 return; 1830 1831 #ifdef PF_WANT_32_TO_64_COUNTER 1832 if (rule->allrulelinked) { 1833 wowned = PF_RULES_WOWNED(); 1834 if (!wowned) 1835 PF_RULES_WLOCK(); 1836 LIST_REMOVE(rule, allrulelist); 1837 V_pf_allrulecount--; 1838 if (!wowned) 1839 PF_RULES_WUNLOCK(); 1840 } 1841 #endif 1842 1843 pf_counter_u64_deinit(&rule->evaluations); 1844 for (int i = 0; i < 2; i++) { 1845 pf_counter_u64_deinit(&rule->packets[i]); 1846 pf_counter_u64_deinit(&rule->bytes[i]); 
1847 } 1848 counter_u64_free(rule->states_cur); 1849 counter_u64_free(rule->states_tot); 1850 counter_u64_free(rule->src_nodes); 1851 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); 1852 1853 mtx_destroy(&rule->rpool.mtx); 1854 free(rule, M_PFRULE); 1855 } 1856 1857 static void 1858 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool, 1859 struct pf_pooladdr *pool) 1860 { 1861 1862 bzero(pool, sizeof(*pool)); 1863 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); 1864 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); 1865 } 1866 1867 static int 1868 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool, 1869 struct pf_kpooladdr *kpool) 1870 { 1871 int ret; 1872 1873 bzero(kpool, sizeof(*kpool)); 1874 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); 1875 ret = pf_user_strcpy(kpool->ifname, pool->ifname, 1876 sizeof(kpool->ifname)); 1877 return (ret); 1878 } 1879 1880 static void 1881 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool) 1882 { 1883 bzero(pool, sizeof(*pool)); 1884 1885 bcopy(&kpool->key, &pool->key, sizeof(pool->key)); 1886 bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter)); 1887 1888 pool->tblidx = kpool->tblidx; 1889 pool->proxy_port[0] = kpool->proxy_port[0]; 1890 pool->proxy_port[1] = kpool->proxy_port[1]; 1891 pool->opts = kpool->opts; 1892 } 1893 1894 static void 1895 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) 1896 { 1897 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); 1898 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); 1899 1900 bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); 1901 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); 1902 1903 kpool->tblidx = pool->tblidx; 1904 kpool->proxy_port[0] = pool->proxy_port[0]; 1905 kpool->proxy_port[1] = pool->proxy_port[1]; 1906 kpool->opts = pool->opts; 1907 } 1908 1909 static void 1910 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule) 1911 { 1912 1913 bzero(rule, sizeof(*rule)); 1914 1915 bcopy(&krule->src, &rule->src, sizeof(rule->src)); 1916 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst)); 1917 1918 for (int i = 0; i < PF_SKIP_COUNT; ++i) { 1919 if (rule->skip[i].ptr == NULL) 1920 rule->skip[i].nr = -1; 1921 else 1922 rule->skip[i].nr = krule->skip[i].ptr->nr; 1923 } 1924 1925 strlcpy(rule->label, krule->label[0], sizeof(rule->label)); 1926 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname)); 1927 strlcpy(rule->qname, krule->qname, sizeof(rule->qname)); 1928 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname)); 1929 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname)); 1930 strlcpy(rule->match_tagname, krule->match_tagname, 1931 sizeof(rule->match_tagname)); 1932 strlcpy(rule->overload_tblname, krule->overload_tblname, 1933 sizeof(rule->overload_tblname)); 1934 1935 pf_kpool_to_pool(&krule->rpool, &rule->rpool); 1936 1937 rule->evaluations = pf_counter_u64_fetch(&krule->evaluations); 1938 for (int i = 0; i < 2; i++) { 1939 rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]); 1940 rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]); 1941 } 1942 1943 /* kif, anchor, overload_tbl are not copied over. 
static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
sizeof(rule->tagname)); 2050 if (ret != 0) 2051 return (ret); 2052 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 2053 sizeof(rule->match_tagname)); 2054 if (ret != 0) 2055 return (ret); 2056 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 2057 sizeof(rule->overload_tblname)); 2058 if (ret != 0) 2059 return (ret); 2060 2061 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 2062 2063 /* Don't allow userspace to set evaluations, packets or bytes. */ 2064 /* kif, anchor, overload_tbl are not copied over. */ 2065 2066 krule->os_fingerprint = rule->os_fingerprint; 2067 2068 krule->rtableid = rule->rtableid; 2069 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 2070 krule->max_states = rule->max_states; 2071 krule->max_src_nodes = rule->max_src_nodes; 2072 krule->max_src_states = rule->max_src_states; 2073 krule->max_src_conn = rule->max_src_conn; 2074 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 2075 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 2076 krule->qid = rule->qid; 2077 krule->pqid = rule->pqid; 2078 krule->nr = rule->nr; 2079 krule->prob = rule->prob; 2080 krule->cuid = rule->cuid; 2081 krule->cpid = rule->cpid; 2082 2083 krule->return_icmp = rule->return_icmp; 2084 krule->return_icmp6 = rule->return_icmp6; 2085 krule->max_mss = rule->max_mss; 2086 krule->tag = rule->tag; 2087 krule->match_tag = rule->match_tag; 2088 krule->scrub_flags = rule->scrub_flags; 2089 2090 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2091 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2092 2093 krule->rule_flag = rule->rule_flag; 2094 krule->action = rule->action; 2095 krule->direction = rule->direction; 2096 krule->log = rule->log; 2097 krule->logif = rule->logif; 2098 krule->quick = rule->quick; 2099 krule->ifnot = rule->ifnot; 2100 krule->match_tag_not = rule->match_tag_not; 2101 krule->natpass = rule->natpass; 2102 2103 krule->keep_state = rule->keep_state; 2104 krule->af = rule->af; 2105 krule->proto = rule->proto; 2106 krule->type = rule->type; 2107 krule->code = rule->code; 2108 krule->flags = rule->flags; 2109 krule->flagset = rule->flagset; 2110 krule->min_ttl = rule->min_ttl; 2111 krule->allow_opts = rule->allow_opts; 2112 krule->rt = rule->rt; 2113 krule->return_ttl = rule->return_ttl; 2114 krule->tos = rule->tos; 2115 krule->set_tos = rule->set_tos; 2116 2117 krule->flush = rule->flush; 2118 krule->prio = rule->prio; 2119 krule->set_prio[0] = rule->set_prio[0]; 2120 krule->set_prio[1] = rule->set_prio[1]; 2121 2122 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2123 2124 return (0); 2125 } 2126 2127 static int 2128 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2129 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 2130 struct thread *td) 2131 { 2132 struct pf_kruleset *ruleset; 2133 struct pf_krule *tail; 2134 struct pf_kpooladdr *pa; 2135 struct pfi_kkif *kif = NULL; 2136 int rs_num; 2137 int error = 0; 2138 2139 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2140 error = EINVAL; 2141 goto errout_unlocked; 2142 } 2143 2144 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2145 2146 if (rule->ifname[0]) 2147 kif = pf_kkif_create(M_WAITOK); 2148 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2149 for (int i = 0; i < 2; i++) { 2150 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2151 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2152 } 2153 rule->states_cur = counter_u64_alloc(M_WAITOK); 2154 rule->states_tot = counter_u64_alloc(M_WAITOK); 2155 
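	/*
	 * All of the allocations in this block use M_WAITOK, which in the
	 * kernel cannot fail (the caller sleeps until memory is available),
	 * so no NULL checks are needed before the locks are taken below.
	 */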
rule->src_nodes = counter_u64_alloc(M_WAITOK); 2156 rule->cuid = td->td_ucred->cr_ruid; 2157 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2158 TAILQ_INIT(&rule->rpool.list); 2159 2160 PF_CONFIG_LOCK(); 2161 PF_RULES_WLOCK(); 2162 #ifdef PF_WANT_32_TO_64_COUNTER 2163 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2164 MPASS(!rule->allrulelinked); 2165 rule->allrulelinked = true; 2166 V_pf_allrulecount++; 2167 #endif 2168 ruleset = pf_find_kruleset(anchor); 2169 if (ruleset == NULL) 2170 ERROUT(EINVAL); 2171 rs_num = pf_get_ruleset_number(rule->action); 2172 if (rs_num >= PF_RULESET_MAX) 2173 ERROUT(EINVAL); 2174 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2175 DPFPRINTF(PF_DEBUG_MISC, 2176 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2177 ruleset->rules[rs_num].inactive.ticket)); 2178 ERROUT(EBUSY); 2179 } 2180 if (pool_ticket != V_ticket_pabuf) { 2181 DPFPRINTF(PF_DEBUG_MISC, 2182 ("pool_ticket: %d != %d\n", pool_ticket, 2183 V_ticket_pabuf)); 2184 ERROUT(EBUSY); 2185 } 2186 /* 2187 * XXXMJG hack: there is no mechanism to ensure they started the 2188 * transaction. Ticket checked above may happen to match by accident, 2189 * even if nobody called DIOCXBEGIN, let alone this process. 2190 * Partially work around it by checking if the RB tree got allocated, 2191 * see pf_begin_rules. 2192 */ 2193 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2194 ERROUT(EINVAL); 2195 } 2196 2197 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2198 pf_krulequeue); 2199 if (tail) 2200 rule->nr = tail->nr + 1; 2201 else 2202 rule->nr = 0; 2203 if (rule->ifname[0]) { 2204 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2205 kif = NULL; 2206 pfi_kkif_ref(rule->kif); 2207 } else 2208 rule->kif = NULL; 2209 2210 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2211 error = EBUSY; 2212 2213 #ifdef ALTQ 2214 /* set queue IDs */ 2215 if (rule->qname[0] != 0) { 2216 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2217 error = EBUSY; 2218 else if (rule->pqname[0] != 0) { 2219 if ((rule->pqid = 2220 pf_qname2qid(rule->pqname)) == 0) 2221 error = EBUSY; 2222 } else 2223 rule->pqid = rule->qid; 2224 } 2225 #endif 2226 if (rule->tagname[0]) 2227 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2228 error = EBUSY; 2229 if (rule->match_tagname[0]) 2230 if ((rule->match_tag = 2231 pf_tagname2tag(rule->match_tagname)) == 0) 2232 error = EBUSY; 2233 if (rule->rt && !rule->direction) 2234 error = EINVAL; 2235 if (!rule->log) 2236 rule->logif = 0; 2237 if (rule->logif >= PFLOGIFS_MAX) 2238 error = EINVAL; 2239 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2240 error = ENOMEM; 2241 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2242 error = ENOMEM; 2243 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2244 error = EINVAL; 2245 if (rule->scrub_flags & PFSTATE_SETPRIO && 2246 (rule->set_prio[0] > PF_PRIO_MAX || 2247 rule->set_prio[1] > PF_PRIO_MAX)) 2248 error = EINVAL; 2249 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2250 if (pa->addr.type == PF_ADDR_TABLE) { 2251 pa->addr.p.tbl = pfr_attach_table(ruleset, 2252 pa->addr.v.tblname); 2253 if (pa->addr.p.tbl == NULL) 2254 error = ENOMEM; 2255 } 2256 2257 rule->overload_tbl = NULL; 2258 if (rule->overload_tblname[0]) { 2259 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2260 rule->overload_tblname)) == NULL) 2261 error = EINVAL; 2262 else 2263 rule->overload_tbl->pfrkt_flags |= 2264 PFR_TFLAG_ACTIVE; 2265 } 2266 2267 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2268 if (((((rule->action == PF_NAT) || (rule->action == 
PF_RDR) || 2269 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2270 (rule->rt > PF_NOPFROUTE)) && 2271 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2272 error = EINVAL; 2273 2274 if (error) { 2275 pf_free_rule(rule); 2276 rule = NULL; 2277 ERROUT(error); 2278 } 2279 2280 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2281 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2282 rule, entries); 2283 ruleset->rules[rs_num].inactive.rcount++; 2284 2285 PF_RULES_WUNLOCK(); 2286 pf_hash_rule(rule); 2287 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2288 PF_RULES_WLOCK(); 2289 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2290 ruleset->rules[rs_num].inactive.rcount--; 2291 pf_free_rule(rule); 2292 rule = NULL; 2293 ERROUT(EEXIST); 2294 } 2295 PF_CONFIG_UNLOCK(); 2296 2297 return (0); 2298 2299 #undef ERROUT 2300 errout: 2301 PF_RULES_WUNLOCK(); 2302 PF_CONFIG_UNLOCK(); 2303 errout_unlocked: 2304 pf_kkif_free(kif); 2305 pf_krule_free(rule); 2306 return (error); 2307 } 2308 2309 static bool 2310 pf_label_match(const struct pf_krule *rule, const char *label) 2311 { 2312 int i = 0; 2313 2314 while (*rule->label[i]) { 2315 if (strcmp(rule->label[i], label) == 0) 2316 return (true); 2317 i++; 2318 } 2319 2320 return (false); 2321 } 2322 2323 static unsigned int 2324 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2325 { 2326 struct pf_kstate *s; 2327 int more = 0; 2328 2329 s = pf_find_state_all(key, dir, &more); 2330 if (s == NULL) 2331 return (0); 2332 2333 if (more) { 2334 PF_STATE_UNLOCK(s); 2335 return (0); 2336 } 2337 2338 pf_unlink_state(s); 2339 return (1); 2340 } 2341 2342 static int 2343 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2344 { 2345 struct pf_kstate *s; 2346 struct pf_state_key *sk; 2347 struct pf_addr *srcaddr, *dstaddr; 2348 struct pf_state_key_cmp match_key; 2349 int idx, killed = 0; 2350 unsigned int dir; 2351 u_int16_t srcport, dstport; 2352 struct pfi_kkif *kif; 2353 2354 relock_DIOCKILLSTATES: 2355 PF_HASHROW_LOCK(ih); 2356 LIST_FOREACH(s, &ih->states, entry) { 2357 /* For floating states look at the original kif. */ 2358 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2359 2360 sk = s->key[PF_SK_WIRE]; 2361 if (s->direction == PF_OUT) { 2362 srcaddr = &sk->addr[1]; 2363 dstaddr = &sk->addr[0]; 2364 srcport = sk->port[1]; 2365 dstport = sk->port[0]; 2366 } else { 2367 srcaddr = &sk->addr[0]; 2368 dstaddr = &sk->addr[1]; 2369 srcport = sk->port[0]; 2370 dstport = sk->port[1]; 2371 } 2372 2373 if (psk->psk_af && sk->af != psk->psk_af) 2374 continue; 2375 2376 if (psk->psk_proto && psk->psk_proto != sk->proto) 2377 continue; 2378 2379 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2380 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2381 continue; 2382 2383 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2384 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2385 continue; 2386 2387 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2388 &psk->psk_rt_addr.addr.v.a.addr, 2389 &psk->psk_rt_addr.addr.v.a.mask, 2390 &s->rt_addr, sk->af)) 2391 continue; 2392 2393 if (psk->psk_src.port_op != 0 && 2394 ! pf_match_port(psk->psk_src.port_op, 2395 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2396 continue; 2397 2398 if (psk->psk_dst.port_op != 0 && 2399 ! pf_match_port(psk->psk_dst.port_op, 2400 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2401 continue; 2402 2403 if (psk->psk_label[0] && 2404 ! 
pf_label_match(s->rule.ptr, psk->psk_label)) 2405 continue; 2406 2407 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2408 kif->pfik_name)) 2409 continue; 2410 2411 if (psk->psk_kill_match) { 2412 /* Create the key to find matching states, with lock 2413 * held. */ 2414 2415 bzero(&match_key, sizeof(match_key)); 2416 2417 if (s->direction == PF_OUT) { 2418 dir = PF_IN; 2419 idx = PF_SK_STACK; 2420 } else { 2421 dir = PF_OUT; 2422 idx = PF_SK_WIRE; 2423 } 2424 2425 match_key.af = s->key[idx]->af; 2426 match_key.proto = s->key[idx]->proto; 2427 PF_ACPY(&match_key.addr[0], 2428 &s->key[idx]->addr[1], match_key.af); 2429 match_key.port[0] = s->key[idx]->port[1]; 2430 PF_ACPY(&match_key.addr[1], 2431 &s->key[idx]->addr[0], match_key.af); 2432 match_key.port[1] = s->key[idx]->port[0]; 2433 } 2434 2435 pf_unlink_state(s); 2436 killed++; 2437 2438 if (psk->psk_kill_match) 2439 killed += pf_kill_matching_state(&match_key, dir); 2440 2441 goto relock_DIOCKILLSTATES; 2442 } 2443 PF_HASHROW_UNLOCK(ih); 2444 2445 return (killed); 2446 } 2447 2448 static int 2449 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2450 { 2451 int error = 0; 2452 PF_RULES_RLOCK_TRACKER; 2453 2454 #define ERROUT_IOCTL(target, x) \ 2455 do { \ 2456 error = (x); \ 2457 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2458 goto target; \ 2459 } while (0) 2460 2461 2462 /* XXX keep in sync with switch() below */ 2463 if (securelevel_gt(td->td_ucred, 2)) 2464 switch (cmd) { 2465 case DIOCGETRULES: 2466 case DIOCGETRULENV: 2467 case DIOCGETADDRS: 2468 case DIOCGETADDR: 2469 case DIOCGETSTATE: 2470 case DIOCGETSTATENV: 2471 case DIOCSETSTATUSIF: 2472 case DIOCGETSTATUSNV: 2473 case DIOCCLRSTATUS: 2474 case DIOCNATLOOK: 2475 case DIOCSETDEBUG: 2476 case DIOCGETSTATES: 2477 case DIOCGETSTATESV2: 2478 case DIOCGETTIMEOUT: 2479 case DIOCCLRRULECTRS: 2480 case DIOCGETLIMIT: 2481 case DIOCGETALTQSV0: 2482 case DIOCGETALTQSV1: 2483 case DIOCGETALTQV0: 2484 case DIOCGETALTQV1: 2485 case DIOCGETQSTATSV0: 2486 case DIOCGETQSTATSV1: 2487 case DIOCGETRULESETS: 2488 case DIOCGETRULESET: 2489 case DIOCRGETTABLES: 2490 case DIOCRGETTSTATS: 2491 case DIOCRCLRTSTATS: 2492 case DIOCRCLRADDRS: 2493 case DIOCRADDADDRS: 2494 case DIOCRDELADDRS: 2495 case DIOCRSETADDRS: 2496 case DIOCRGETADDRS: 2497 case DIOCRGETASTATS: 2498 case DIOCRCLRASTATS: 2499 case DIOCRTSTADDRS: 2500 case DIOCOSFPGET: 2501 case DIOCGETSRCNODES: 2502 case DIOCCLRSRCNODES: 2503 case DIOCGETSYNCOOKIES: 2504 case DIOCIGETIFACES: 2505 case DIOCGIFSPEEDV0: 2506 case DIOCGIFSPEEDV1: 2507 case DIOCSETIFFLAG: 2508 case DIOCCLRIFFLAG: 2509 case DIOCGETETHRULES: 2510 case DIOCGETETHRULE: 2511 case DIOCGETETHRULESETS: 2512 case DIOCGETETHRULESET: 2513 break; 2514 case DIOCRCLRTABLES: 2515 case DIOCRADDTABLES: 2516 case DIOCRDELTABLES: 2517 case DIOCRSETTFLAGS: 2518 if (((struct pfioc_table *)addr)->pfrio_flags & 2519 PFR_FLAG_DUMMY) 2520 break; /* dummy operation ok */ 2521 return (EPERM); 2522 default: 2523 return (EPERM); 2524 } 2525 2526 if (!(flags & FWRITE)) 2527 switch (cmd) { 2528 case DIOCGETRULES: 2529 case DIOCGETADDRS: 2530 case DIOCGETADDR: 2531 case DIOCGETSTATE: 2532 case DIOCGETSTATENV: 2533 case DIOCGETSTATUSNV: 2534 case DIOCGETSTATES: 2535 case DIOCGETSTATESV2: 2536 case DIOCGETTIMEOUT: 2537 case DIOCGETLIMIT: 2538 case DIOCGETALTQSV0: 2539 case DIOCGETALTQSV1: 2540 case DIOCGETALTQV0: 2541 case DIOCGETALTQV1: 2542 case DIOCGETQSTATSV0: 2543 case DIOCGETQSTATSV1: 2544 case DIOCGETRULESETS: 2545 case DIOCGETRULESET: 2546 case 
DIOCNATLOOK: 2547 case DIOCRGETTABLES: 2548 case DIOCRGETTSTATS: 2549 case DIOCRGETADDRS: 2550 case DIOCRGETASTATS: 2551 case DIOCRTSTADDRS: 2552 case DIOCOSFPGET: 2553 case DIOCGETSRCNODES: 2554 case DIOCGETSYNCOOKIES: 2555 case DIOCIGETIFACES: 2556 case DIOCGIFSPEEDV1: 2557 case DIOCGIFSPEEDV0: 2558 case DIOCGETRULENV: 2559 case DIOCGETETHRULES: 2560 case DIOCGETETHRULE: 2561 case DIOCGETETHRULESETS: 2562 case DIOCGETETHRULESET: 2563 break; 2564 case DIOCRCLRTABLES: 2565 case DIOCRADDTABLES: 2566 case DIOCRDELTABLES: 2567 case DIOCRCLRTSTATS: 2568 case DIOCRCLRADDRS: 2569 case DIOCRADDADDRS: 2570 case DIOCRDELADDRS: 2571 case DIOCRSETADDRS: 2572 case DIOCRSETTFLAGS: 2573 if (((struct pfioc_table *)addr)->pfrio_flags & 2574 PFR_FLAG_DUMMY) { 2575 flags |= FWRITE; /* need write lock for dummy */ 2576 break; /* dummy operation ok */ 2577 } 2578 return (EACCES); 2579 default: 2580 return (EACCES); 2581 } 2582 2583 CURVNET_SET(TD_TO_VNET(td)); 2584 2585 switch (cmd) { 2586 case DIOCSTART: 2587 sx_xlock(&V_pf_ioctl_lock); 2588 if (V_pf_status.running) 2589 error = EEXIST; 2590 else { 2591 hook_pf(); 2592 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2593 hook_pf_eth(); 2594 V_pf_status.running = 1; 2595 V_pf_status.since = time_second; 2596 new_unrhdr64(&V_pf_stateid, time_second); 2597 2598 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2599 } 2600 break; 2601 2602 case DIOCSTOP: 2603 sx_xlock(&V_pf_ioctl_lock); 2604 if (!V_pf_status.running) 2605 error = ENOENT; 2606 else { 2607 V_pf_status.running = 0; 2608 dehook_pf(); 2609 dehook_pf_eth(); 2610 V_pf_status.since = time_second; 2611 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2612 } 2613 break; 2614 2615 case DIOCGETETHRULES: { 2616 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2617 nvlist_t *nvl; 2618 void *packed; 2619 struct pf_keth_rule *tail; 2620 struct pf_keth_ruleset *rs; 2621 u_int32_t ticket, nr; 2622 const char *anchor = ""; 2623 2624 nvl = NULL; 2625 packed = NULL; 2626 2627 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2628 2629 if (nv->len > pf_ioctl_maxcount) 2630 ERROUT(ENOMEM); 2631 2632 /* Copy the request in */ 2633 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2634 if (packed == NULL) 2635 ERROUT(ENOMEM); 2636 2637 error = copyin(nv->data, packed, nv->len); 2638 if (error) 2639 ERROUT(error); 2640 2641 nvl = nvlist_unpack(packed, nv->len, 0); 2642 if (nvl == NULL) 2643 ERROUT(EBADMSG); 2644 2645 if (! 
nvlist_exists_string(nvl, "anchor")) 2646 ERROUT(EBADMSG); 2647 2648 anchor = nvlist_get_string(nvl, "anchor"); 2649 2650 rs = pf_find_keth_ruleset(anchor); 2651 2652 nvlist_destroy(nvl); 2653 nvl = NULL; 2654 free(packed, M_NVLIST); 2655 packed = NULL; 2656 2657 if (rs == NULL) 2658 ERROUT(ENOENT); 2659 2660 /* Reply */ 2661 nvl = nvlist_create(0); 2662 if (nvl == NULL) 2663 ERROUT(ENOMEM); 2664 2665 PF_RULES_RLOCK(); 2666 2667 ticket = rs->active.ticket; 2668 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2669 if (tail) 2670 nr = tail->nr + 1; 2671 else 2672 nr = 0; 2673 2674 PF_RULES_RUNLOCK(); 2675 2676 nvlist_add_number(nvl, "ticket", ticket); 2677 nvlist_add_number(nvl, "nr", nr); 2678 2679 packed = nvlist_pack(nvl, &nv->len); 2680 if (packed == NULL) 2681 ERROUT(ENOMEM); 2682 2683 if (nv->size == 0) 2684 ERROUT(0); 2685 else if (nv->size < nv->len) 2686 ERROUT(ENOSPC); 2687 2688 error = copyout(packed, nv->data, nv->len); 2689 2690 #undef ERROUT 2691 DIOCGETETHRULES_error: 2692 free(packed, M_NVLIST); 2693 nvlist_destroy(nvl); 2694 break; 2695 } 2696 2697 case DIOCGETETHRULE: { 2698 struct epoch_tracker et; 2699 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2700 nvlist_t *nvl = NULL; 2701 void *nvlpacked = NULL; 2702 struct pf_keth_rule *rule = NULL; 2703 struct pf_keth_ruleset *rs; 2704 u_int32_t ticket, nr; 2705 bool clear = false; 2706 const char *anchor; 2707 2708 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2709 2710 if (nv->len > pf_ioctl_maxcount) 2711 ERROUT(ENOMEM); 2712 2713 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2714 if (nvlpacked == NULL) 2715 ERROUT(ENOMEM); 2716 2717 error = copyin(nv->data, nvlpacked, nv->len); 2718 if (error) 2719 ERROUT(error); 2720 2721 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2722 if (nvl == NULL) 2723 ERROUT(EBADMSG); 2724 if (! nvlist_exists_number(nvl, "ticket")) 2725 ERROUT(EBADMSG); 2726 ticket = nvlist_get_number(nvl, "ticket"); 2727 if (! nvlist_exists_string(nvl, "anchor")) 2728 ERROUT(EBADMSG); 2729 anchor = nvlist_get_string(nvl, "anchor"); 2730 2731 if (nvlist_exists_bool(nvl, "clear")) 2732 clear = nvlist_get_bool(nvl, "clear"); 2733 2734 if (clear && !(flags & FWRITE)) 2735 ERROUT(EACCES); 2736 2737 if (! nvlist_exists_number(nvl, "nr")) 2738 ERROUT(EBADMSG); 2739 nr = nvlist_get_number(nvl, "nr"); 2740 2741 PF_RULES_RLOCK(); 2742 rs = pf_find_keth_ruleset(anchor); 2743 if (rs == NULL) { 2744 PF_RULES_RUNLOCK(); 2745 ERROUT(ENOENT); 2746 } 2747 if (ticket != rs->active.ticket) { 2748 PF_RULES_RUNLOCK(); 2749 ERROUT(EBUSY); 2750 } 2751 2752 nvlist_destroy(nvl); 2753 nvl = NULL; 2754 free(nvlpacked, M_NVLIST); 2755 nvlpacked = NULL; 2756 2757 rule = TAILQ_FIRST(rs->active.rules); 2758 while ((rule != NULL) && (rule->nr != nr)) 2759 rule = TAILQ_NEXT(rule, entries); 2760 if (rule == NULL) { 2761 PF_RULES_RUNLOCK(); 2762 ERROUT(ENOENT); 2763 } 2764 /* Make sure rule can't go away. 
 */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			/*
			 * Check nvl before handing it to
			 * pf_keth_anchor_nvcopyout(), and exit the epoch
			 * before taking the error path: ERROUT() jumps
			 * past the NET_EPOCH_EXIT() below.
			 */
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != %d\n",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket));
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		if (rule == NULL)
			ERROUT(ENOMEM);
		rule->timestamp = NULL;

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0) {
			/*
			 * None of the rule's members have been allocated
			 * yet, so a plain free() suffices; the common
			 * error path below does not release the rule and
			 * would otherwise leak it.
			 */
			free(rule, M_PFRULE);
			ERROUT(error);
		}

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		if (rule->bridge_to_name[0])
			bridge_to_kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}
		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
		    M_WAITOK | M_ZERO);

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;
		if (rule->bridge_to_name[0]) {
			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
			    rule->bridge_to_name);
			pfi_kkif_ref(rule->bridge_to);
		} else
			rule->bridge_to = NULL;

#ifdef ALTQ
		/*
		 * Set the queue ID.  The former "else" branch here was the
		 * no-op self-assignment "rule->qid = rule->qid;"; only the
		 * failure case needs handling.
		 */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag =
pf_tagname2tag(rule->tagname)) == 0) 2892 error = EBUSY; 2893 if (rule->match_tagname[0]) 2894 if ((rule->match_tag = pf_tagname2tag( 2895 rule->match_tagname)) == 0) 2896 error = EBUSY; 2897 2898 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2899 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2900 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2901 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2902 2903 if (error) { 2904 pf_free_eth_rule(rule); 2905 PF_RULES_WUNLOCK(); 2906 ERROUT(error); 2907 } 2908 2909 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2910 pf_free_eth_rule(rule); 2911 PF_RULES_WUNLOCK(); 2912 ERROUT(EINVAL); 2913 } 2914 2915 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2916 if (tail) 2917 rule->nr = tail->nr + 1; 2918 else 2919 rule->nr = 0; 2920 2921 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2922 2923 PF_RULES_WUNLOCK(); 2924 2925 #undef ERROUT 2926 DIOCADDETHRULE_error: 2927 nvlist_destroy(nvl); 2928 free(nvlpacked, M_NVLIST); 2929 break; 2930 } 2931 2932 case DIOCGETETHRULESETS: { 2933 struct epoch_tracker et; 2934 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2935 nvlist_t *nvl = NULL; 2936 void *nvlpacked = NULL; 2937 struct pf_keth_ruleset *ruleset; 2938 struct pf_keth_anchor *anchor; 2939 int nr = 0; 2940 2941 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2942 2943 if (nv->len > pf_ioctl_maxcount) 2944 ERROUT(ENOMEM); 2945 2946 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2947 if (nvlpacked == NULL) 2948 ERROUT(ENOMEM); 2949 2950 error = copyin(nv->data, nvlpacked, nv->len); 2951 if (error) 2952 ERROUT(error); 2953 2954 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2955 if (nvl == NULL) 2956 ERROUT(EBADMSG); 2957 if (! nvlist_exists_string(nvl, "path")) 2958 ERROUT(EBADMSG); 2959 2960 NET_EPOCH_ENTER(et); 2961 2962 if ((ruleset = pf_find_keth_ruleset( 2963 nvlist_get_string(nvl, "path"))) == NULL) { 2964 NET_EPOCH_EXIT(et); 2965 ERROUT(ENOENT); 2966 } 2967 2968 if (ruleset->anchor == NULL) { 2969 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2970 if (anchor->parent == NULL) 2971 nr++; 2972 } else { 2973 RB_FOREACH(anchor, pf_keth_anchor_node, 2974 &ruleset->anchor->children) 2975 nr++; 2976 } 2977 2978 NET_EPOCH_EXIT(et); 2979 2980 nvlist_destroy(nvl); 2981 nvl = NULL; 2982 free(nvlpacked, M_NVLIST); 2983 nvlpacked = NULL; 2984 2985 nvl = nvlist_create(0); 2986 if (nvl == NULL) 2987 ERROUT(ENOMEM); 2988 2989 nvlist_add_number(nvl, "nr", nr); 2990 2991 nvlpacked = nvlist_pack(nvl, &nv->len); 2992 if (nvlpacked == NULL) 2993 ERROUT(ENOMEM); 2994 2995 if (nv->size == 0) 2996 ERROUT(0); 2997 else if (nv->size < nv->len) 2998 ERROUT(ENOSPC); 2999 3000 error = copyout(nvlpacked, nv->data, nv->len); 3001 3002 #undef ERROUT 3003 DIOCGETETHRULESETS_error: 3004 free(nvlpacked, M_NVLIST); 3005 nvlist_destroy(nvl); 3006 break; 3007 } 3008 3009 case DIOCGETETHRULESET: { 3010 struct epoch_tracker et; 3011 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3012 nvlist_t *nvl = NULL; 3013 void *nvlpacked = NULL; 3014 struct pf_keth_ruleset *ruleset; 3015 struct pf_keth_anchor *anchor; 3016 int nr = 0, req_nr = 0; 3017 bool found = false; 3018 3019 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3020 3021 if (nv->len > pf_ioctl_maxcount) 3022 ERROUT(ENOMEM); 3023 3024 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3025 if (nvlpacked == NULL) 3026 ERROUT(ENOMEM); 3027 3028 error = copyin(nv->data, nvlpacked, nv->len); 3029 if (error) 3030 ERROUT(error); 3031 3032 
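		/*
		 * Same request/reply convention as the other nvlist-based
		 * ioctls: unpack the copied-in request, build a reply
		 * nvlist, and treat nv->size == 0 as a probe for the
		 * required buffer length, which is returned in nv->len
		 * without performing the copyout.
		 */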
nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3033 if (nvl == NULL) 3034 ERROUT(EBADMSG); 3035 if (! nvlist_exists_string(nvl, "path")) 3036 ERROUT(EBADMSG); 3037 if (! nvlist_exists_number(nvl, "nr")) 3038 ERROUT(EBADMSG); 3039 3040 req_nr = nvlist_get_number(nvl, "nr"); 3041 3042 NET_EPOCH_ENTER(et); 3043 3044 if ((ruleset = pf_find_keth_ruleset( 3045 nvlist_get_string(nvl, "path"))) == NULL) { 3046 NET_EPOCH_EXIT(et); 3047 ERROUT(ENOENT); 3048 } 3049 3050 nvlist_destroy(nvl); 3051 nvl = NULL; 3052 free(nvlpacked, M_NVLIST); 3053 nvlpacked = NULL; 3054 3055 nvl = nvlist_create(0); 3056 if (nvl == NULL) { 3057 NET_EPOCH_EXIT(et); 3058 ERROUT(ENOMEM); 3059 } 3060 3061 if (ruleset->anchor == NULL) { 3062 RB_FOREACH(anchor, pf_keth_anchor_global, 3063 &V_pf_keth_anchors) { 3064 if (anchor->parent == NULL && nr++ == req_nr) { 3065 found = true; 3066 break; 3067 } 3068 } 3069 } else { 3070 RB_FOREACH(anchor, pf_keth_anchor_node, 3071 &ruleset->anchor->children) { 3072 if (nr++ == req_nr) { 3073 found = true; 3074 break; 3075 } 3076 } 3077 } 3078 3079 NET_EPOCH_EXIT(et); 3080 if (found) { 3081 nvlist_add_number(nvl, "nr", nr); 3082 nvlist_add_string(nvl, "name", anchor->name); 3083 if (ruleset->anchor) 3084 nvlist_add_string(nvl, "path", 3085 ruleset->anchor->path); 3086 else 3087 nvlist_add_string(nvl, "path", ""); 3088 } else { 3089 ERROUT(EBUSY); 3090 } 3091 3092 nvlpacked = nvlist_pack(nvl, &nv->len); 3093 if (nvlpacked == NULL) 3094 ERROUT(ENOMEM); 3095 3096 if (nv->size == 0) 3097 ERROUT(0); 3098 else if (nv->size < nv->len) 3099 ERROUT(ENOSPC); 3100 3101 error = copyout(nvlpacked, nv->data, nv->len); 3102 3103 #undef ERROUT 3104 DIOCGETETHRULESET_error: 3105 free(nvlpacked, M_NVLIST); 3106 nvlist_destroy(nvl); 3107 break; 3108 } 3109 3110 case DIOCADDRULENV: { 3111 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3112 nvlist_t *nvl = NULL; 3113 void *nvlpacked = NULL; 3114 struct pf_krule *rule = NULL; 3115 const char *anchor = "", *anchor_call = ""; 3116 uint32_t ticket = 0, pool_ticket = 0; 3117 3118 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3119 3120 if (nv->len > pf_ioctl_maxcount) 3121 ERROUT(ENOMEM); 3122 3123 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3124 error = copyin(nv->data, nvlpacked, nv->len); 3125 if (error) 3126 ERROUT(error); 3127 3128 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3129 if (nvl == NULL) 3130 ERROUT(EBADMSG); 3131 3132 if (! nvlist_exists_number(nvl, "ticket")) 3133 ERROUT(EINVAL); 3134 ticket = nvlist_get_number(nvl, "ticket"); 3135 3136 if (! nvlist_exists_number(nvl, "pool_ticket")) 3137 ERROUT(EINVAL); 3138 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3139 3140 if (! 
nvlist_exists_nvlist(nvl, "rule")) 3141 ERROUT(EINVAL); 3142 3143 rule = pf_krule_alloc(); 3144 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3145 rule); 3146 if (error) 3147 ERROUT(error); 3148 3149 if (nvlist_exists_string(nvl, "anchor")) 3150 anchor = nvlist_get_string(nvl, "anchor"); 3151 if (nvlist_exists_string(nvl, "anchor_call")) 3152 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3153 3154 if ((error = nvlist_error(nvl))) 3155 ERROUT(error); 3156 3157 /* Frees rule on error */ 3158 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3159 anchor_call, td); 3160 3161 nvlist_destroy(nvl); 3162 free(nvlpacked, M_NVLIST); 3163 break; 3164 #undef ERROUT 3165 DIOCADDRULENV_error: 3166 pf_krule_free(rule); 3167 nvlist_destroy(nvl); 3168 free(nvlpacked, M_NVLIST); 3169 3170 break; 3171 } 3172 case DIOCADDRULE: { 3173 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3174 struct pf_krule *rule; 3175 3176 rule = pf_krule_alloc(); 3177 error = pf_rule_to_krule(&pr->rule, rule); 3178 if (error != 0) { 3179 pf_krule_free(rule); 3180 break; 3181 } 3182 3183 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3184 3185 /* Frees rule on error */ 3186 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3187 pr->anchor, pr->anchor_call, td); 3188 break; 3189 } 3190 3191 case DIOCGETRULES: { 3192 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3193 struct pf_kruleset *ruleset; 3194 struct pf_krule *tail; 3195 int rs_num; 3196 3197 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3198 3199 PF_RULES_WLOCK(); 3200 ruleset = pf_find_kruleset(pr->anchor); 3201 if (ruleset == NULL) { 3202 PF_RULES_WUNLOCK(); 3203 error = EINVAL; 3204 break; 3205 } 3206 rs_num = pf_get_ruleset_number(pr->rule.action); 3207 if (rs_num >= PF_RULESET_MAX) { 3208 PF_RULES_WUNLOCK(); 3209 error = EINVAL; 3210 break; 3211 } 3212 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3213 pf_krulequeue); 3214 if (tail) 3215 pr->nr = tail->nr + 1; 3216 else 3217 pr->nr = 0; 3218 pr->ticket = ruleset->rules[rs_num].active.ticket; 3219 PF_RULES_WUNLOCK(); 3220 break; 3221 } 3222 3223 case DIOCGETRULENV: { 3224 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3225 nvlist_t *nvrule = NULL; 3226 nvlist_t *nvl = NULL; 3227 struct pf_kruleset *ruleset; 3228 struct pf_krule *rule; 3229 void *nvlpacked = NULL; 3230 int rs_num, nr; 3231 bool clear_counter = false; 3232 3233 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3234 3235 if (nv->len > pf_ioctl_maxcount) 3236 ERROUT(ENOMEM); 3237 3238 /* Copy the request in */ 3239 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3240 if (nvlpacked == NULL) 3241 ERROUT(ENOMEM); 3242 3243 error = copyin(nv->data, nvlpacked, nv->len); 3244 if (error) 3245 ERROUT(error); 3246 3247 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3248 if (nvl == NULL) 3249 ERROUT(EBADMSG); 3250 3251 if (! nvlist_exists_string(nvl, "anchor")) 3252 ERROUT(EBADMSG); 3253 if (! nvlist_exists_number(nvl, "ruleset")) 3254 ERROUT(EBADMSG); 3255 if (! nvlist_exists_number(nvl, "ticket")) 3256 ERROUT(EBADMSG); 3257 if (! 
nvlist_exists_number(nvl, "nr")) 3258 ERROUT(EBADMSG); 3259 3260 if (nvlist_exists_bool(nvl, "clear_counter")) 3261 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3262 3263 if (clear_counter && !(flags & FWRITE)) 3264 ERROUT(EACCES); 3265 3266 nr = nvlist_get_number(nvl, "nr"); 3267 3268 PF_RULES_WLOCK(); 3269 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3270 if (ruleset == NULL) { 3271 PF_RULES_WUNLOCK(); 3272 ERROUT(ENOENT); 3273 } 3274 3275 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3276 if (rs_num >= PF_RULESET_MAX) { 3277 PF_RULES_WUNLOCK(); 3278 ERROUT(EINVAL); 3279 } 3280 3281 if (nvlist_get_number(nvl, "ticket") != 3282 ruleset->rules[rs_num].active.ticket) { 3283 PF_RULES_WUNLOCK(); 3284 ERROUT(EBUSY); 3285 } 3286 3287 if ((error = nvlist_error(nvl))) { 3288 PF_RULES_WUNLOCK(); 3289 ERROUT(error); 3290 } 3291 3292 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3293 while ((rule != NULL) && (rule->nr != nr)) 3294 rule = TAILQ_NEXT(rule, entries); 3295 if (rule == NULL) { 3296 PF_RULES_WUNLOCK(); 3297 ERROUT(EBUSY); 3298 } 3299 3300 nvrule = pf_krule_to_nvrule(rule); 3301 3302 nvlist_destroy(nvl); 3303 nvl = nvlist_create(0); 3304 if (nvl == NULL) { 3305 PF_RULES_WUNLOCK(); 3306 ERROUT(ENOMEM); 3307 } 3308 nvlist_add_number(nvl, "nr", nr); 3309 nvlist_add_nvlist(nvl, "rule", nvrule); 3310 nvlist_destroy(nvrule); 3311 nvrule = NULL; 3312 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3313 PF_RULES_WUNLOCK(); 3314 ERROUT(EBUSY); 3315 } 3316 3317 free(nvlpacked, M_NVLIST); 3318 nvlpacked = nvlist_pack(nvl, &nv->len); 3319 if (nvlpacked == NULL) { 3320 PF_RULES_WUNLOCK(); 3321 ERROUT(ENOMEM); 3322 } 3323 3324 if (nv->size == 0) { 3325 PF_RULES_WUNLOCK(); 3326 ERROUT(0); 3327 } 3328 else if (nv->size < nv->len) { 3329 PF_RULES_WUNLOCK(); 3330 ERROUT(ENOSPC); 3331 } 3332 3333 if (clear_counter) { 3334 pf_counter_u64_zero(&rule->evaluations); 3335 for (int i = 0; i < 2; i++) { 3336 pf_counter_u64_zero(&rule->packets[i]); 3337 pf_counter_u64_zero(&rule->bytes[i]); 3338 } 3339 counter_u64_zero(rule->states_tot); 3340 } 3341 PF_RULES_WUNLOCK(); 3342 3343 error = copyout(nvlpacked, nv->data, nv->len); 3344 3345 #undef ERROUT 3346 DIOCGETRULENV_error: 3347 free(nvlpacked, M_NVLIST); 3348 nvlist_destroy(nvrule); 3349 nvlist_destroy(nvl); 3350 3351 break; 3352 } 3353 3354 case DIOCCHANGERULE: { 3355 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3356 struct pf_kruleset *ruleset; 3357 struct pf_krule *oldrule = NULL, *newrule = NULL; 3358 struct pfi_kkif *kif = NULL; 3359 struct pf_kpooladdr *pa; 3360 u_int32_t nr = 0; 3361 int rs_num; 3362 3363 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3364 3365 if (pcr->action < PF_CHANGE_ADD_HEAD || 3366 pcr->action > PF_CHANGE_GET_TICKET) { 3367 error = EINVAL; 3368 break; 3369 } 3370 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3371 error = EINVAL; 3372 break; 3373 } 3374 3375 if (pcr->action != PF_CHANGE_REMOVE) { 3376 newrule = pf_krule_alloc(); 3377 error = pf_rule_to_krule(&pcr->rule, newrule); 3378 if (error != 0) { 3379 pf_krule_free(newrule); 3380 break; 3381 } 3382 3383 if (newrule->ifname[0]) 3384 kif = pf_kkif_create(M_WAITOK); 3385 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3386 for (int i = 0; i < 2; i++) { 3387 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3388 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3389 } 3390 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3391 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3392 newrule->src_nodes = 
counter_u64_alloc(M_WAITOK); 3393 newrule->cuid = td->td_ucred->cr_ruid; 3394 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3395 TAILQ_INIT(&newrule->rpool.list); 3396 } 3397 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3398 3399 PF_CONFIG_LOCK(); 3400 PF_RULES_WLOCK(); 3401 #ifdef PF_WANT_32_TO_64_COUNTER 3402 if (newrule != NULL) { 3403 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3404 newrule->allrulelinked = true; 3405 V_pf_allrulecount++; 3406 } 3407 #endif 3408 3409 if (!(pcr->action == PF_CHANGE_REMOVE || 3410 pcr->action == PF_CHANGE_GET_TICKET) && 3411 pcr->pool_ticket != V_ticket_pabuf) 3412 ERROUT(EBUSY); 3413 3414 ruleset = pf_find_kruleset(pcr->anchor); 3415 if (ruleset == NULL) 3416 ERROUT(EINVAL); 3417 3418 rs_num = pf_get_ruleset_number(pcr->rule.action); 3419 if (rs_num >= PF_RULESET_MAX) 3420 ERROUT(EINVAL); 3421 3422 /* 3423 * XXXMJG: there is no guarantee that the ruleset was 3424 * created by the usual route of calling DIOCXBEGIN. 3425 * As a result it is possible the rule tree will not 3426 * be allocated yet. Hack around it by doing it here. 3427 * Note it is fine to let the tree persist in case of 3428 * error as it will be freed down the road on future 3429 * updates (if need be). 3430 */ 3431 if (ruleset->rules[rs_num].active.tree == NULL) { 3432 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3433 if (ruleset->rules[rs_num].active.tree == NULL) { 3434 ERROUT(ENOMEM); 3435 } 3436 } 3437 3438 if (pcr->action == PF_CHANGE_GET_TICKET) { 3439 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3440 ERROUT(0); 3441 } else if (pcr->ticket != 3442 ruleset->rules[rs_num].active.ticket) 3443 ERROUT(EINVAL); 3444 3445 if (pcr->action != PF_CHANGE_REMOVE) { 3446 if (newrule->ifname[0]) { 3447 newrule->kif = pfi_kkif_attach(kif, 3448 newrule->ifname); 3449 kif = NULL; 3450 pfi_kkif_ref(newrule->kif); 3451 } else 3452 newrule->kif = NULL; 3453 3454 if (newrule->rtableid > 0 && 3455 newrule->rtableid >= rt_numfibs) 3456 error = EBUSY; 3457 3458 #ifdef ALTQ 3459 /* set queue IDs */ 3460 if (newrule->qname[0] != 0) { 3461 if ((newrule->qid = 3462 pf_qname2qid(newrule->qname)) == 0) 3463 error = EBUSY; 3464 else if (newrule->pqname[0] != 0) { 3465 if ((newrule->pqid = 3466 pf_qname2qid(newrule->pqname)) == 0) 3467 error = EBUSY; 3468 } else 3469 newrule->pqid = newrule->qid; 3470 } 3471 #endif /* ALTQ */ 3472 if (newrule->tagname[0]) 3473 if ((newrule->tag = 3474 pf_tagname2tag(newrule->tagname)) == 0) 3475 error = EBUSY; 3476 if (newrule->match_tagname[0]) 3477 if ((newrule->match_tag = pf_tagname2tag( 3478 newrule->match_tagname)) == 0) 3479 error = EBUSY; 3480 if (newrule->rt && !newrule->direction) 3481 error = EINVAL; 3482 if (!newrule->log) 3483 newrule->logif = 0; 3484 if (newrule->logif >= PFLOGIFS_MAX) 3485 error = EINVAL; 3486 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3487 error = ENOMEM; 3488 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3489 error = ENOMEM; 3490 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3491 error = EINVAL; 3492 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3493 if (pa->addr.type == PF_ADDR_TABLE) { 3494 pa->addr.p.tbl = 3495 pfr_attach_table(ruleset, 3496 pa->addr.v.tblname); 3497 if (pa->addr.p.tbl == NULL) 3498 error = ENOMEM; 3499 } 3500 3501 newrule->overload_tbl = NULL; 3502 if (newrule->overload_tblname[0]) { 3503 if ((newrule->overload_tbl = pfr_attach_table( 3504 ruleset, newrule->overload_tblname)) == 3505 NULL) 3506 error = EINVAL; 3507 else 
3508 newrule->overload_tbl->pfrkt_flags |= 3509 PFR_TFLAG_ACTIVE; 3510 } 3511 3512 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3513 if (((((newrule->action == PF_NAT) || 3514 (newrule->action == PF_RDR) || 3515 (newrule->action == PF_BINAT) || 3516 (newrule->rt > PF_NOPFROUTE)) && 3517 !newrule->anchor)) && 3518 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3519 error = EINVAL; 3520 3521 if (error) { 3522 pf_free_rule(newrule); 3523 PF_RULES_WUNLOCK(); 3524 PF_CONFIG_UNLOCK(); 3525 break; 3526 } 3527 3528 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3529 } 3530 pf_empty_kpool(&V_pf_pabuf); 3531 3532 if (pcr->action == PF_CHANGE_ADD_HEAD) 3533 oldrule = TAILQ_FIRST( 3534 ruleset->rules[rs_num].active.ptr); 3535 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3536 oldrule = TAILQ_LAST( 3537 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3538 else { 3539 oldrule = TAILQ_FIRST( 3540 ruleset->rules[rs_num].active.ptr); 3541 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3542 oldrule = TAILQ_NEXT(oldrule, entries); 3543 if (oldrule == NULL) { 3544 if (newrule != NULL) 3545 pf_free_rule(newrule); 3546 PF_RULES_WUNLOCK(); 3547 PF_CONFIG_UNLOCK(); 3548 error = EINVAL; 3549 break; 3550 } 3551 } 3552 3553 if (pcr->action == PF_CHANGE_REMOVE) { 3554 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3555 oldrule); 3556 RB_REMOVE(pf_krule_global, 3557 ruleset->rules[rs_num].active.tree, oldrule); 3558 ruleset->rules[rs_num].active.rcount--; 3559 } else { 3560 pf_hash_rule(newrule); 3561 if (RB_INSERT(pf_krule_global, 3562 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3563 pf_free_rule(newrule); 3564 PF_RULES_WUNLOCK(); 3565 PF_CONFIG_UNLOCK(); 3566 error = EEXIST; 3567 break; 3568 } 3569 3570 if (oldrule == NULL) 3571 TAILQ_INSERT_TAIL( 3572 ruleset->rules[rs_num].active.ptr, 3573 newrule, entries); 3574 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3575 pcr->action == PF_CHANGE_ADD_BEFORE) 3576 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3577 else 3578 TAILQ_INSERT_AFTER( 3579 ruleset->rules[rs_num].active.ptr, 3580 oldrule, newrule, entries); 3581 ruleset->rules[rs_num].active.rcount++; 3582 } 3583 3584 nr = 0; 3585 TAILQ_FOREACH(oldrule, 3586 ruleset->rules[rs_num].active.ptr, entries) 3587 oldrule->nr = nr++; 3588 3589 ruleset->rules[rs_num].active.ticket++; 3590 3591 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3592 pf_remove_if_empty_kruleset(ruleset); 3593 3594 PF_RULES_WUNLOCK(); 3595 PF_CONFIG_UNLOCK(); 3596 break; 3597 3598 #undef ERROUT 3599 DIOCCHANGERULE_error: 3600 PF_RULES_WUNLOCK(); 3601 PF_CONFIG_UNLOCK(); 3602 pf_krule_free(newrule); 3603 pf_kkif_free(kif); 3604 break; 3605 } 3606 3607 case DIOCCLRSTATESNV: { 3608 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3609 break; 3610 } 3611 3612 case DIOCKILLSTATESNV: { 3613 error = pf_killstates_nv((struct pfioc_nv *)addr); 3614 break; 3615 } 3616 3617 case DIOCADDSTATE: { 3618 struct pfioc_state *ps = (struct pfioc_state *)addr; 3619 struct pfsync_state_1301 *sp = &ps->state; 3620 3621 if (sp->timeout >= PFTM_MAX) { 3622 error = EINVAL; 3623 break; 3624 } 3625 if (V_pfsync_state_import_ptr != NULL) { 3626 PF_RULES_RLOCK(); 3627 error = V_pfsync_state_import_ptr( 3628 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, 3629 PFSYNC_MSG_VERSION_1301); 3630 PF_RULES_RUNLOCK(); 3631 } else 3632 error = EOPNOTSUPP; 3633 break; 3634 } 3635 3636 case DIOCGETSTATE: { 3637 struct pfioc_state *ps = (struct pfioc_state *)addr; 3638 struct pf_kstate *s; 3639 3640 s = pf_find_state_byid(ps->state.id, 
ps->state.creatorid); 3641 if (s == NULL) { 3642 error = ENOENT; 3643 break; 3644 } 3645 3646 pfsync_state_export((union pfsync_state_union*)&ps->state, 3647 s, PFSYNC_MSG_VERSION_1301); 3648 PF_STATE_UNLOCK(s); 3649 break; 3650 } 3651 3652 case DIOCGETSTATENV: { 3653 error = pf_getstate((struct pfioc_nv *)addr); 3654 break; 3655 } 3656 3657 case DIOCGETSTATES: { 3658 struct pfioc_states *ps = (struct pfioc_states *)addr; 3659 struct pf_kstate *s; 3660 struct pfsync_state_1301 *pstore, *p; 3661 int i, nr; 3662 size_t slice_count = 16, count; 3663 void *out; 3664 3665 if (ps->ps_len <= 0) { 3666 nr = uma_zone_get_cur(V_pf_state_z); 3667 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3668 break; 3669 } 3670 3671 out = ps->ps_states; 3672 pstore = mallocarray(slice_count, 3673 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); 3674 nr = 0; 3675 3676 for (i = 0; i <= pf_hashmask; i++) { 3677 struct pf_idhash *ih = &V_pf_idhash[i]; 3678 3679 DIOCGETSTATES_retry: 3680 p = pstore; 3681 3682 if (LIST_EMPTY(&ih->states)) 3683 continue; 3684 3685 PF_HASHROW_LOCK(ih); 3686 count = 0; 3687 LIST_FOREACH(s, &ih->states, entry) { 3688 if (s->timeout == PFTM_UNLINKED) 3689 continue; 3690 count++; 3691 } 3692 3693 if (count > slice_count) { 3694 PF_HASHROW_UNLOCK(ih); 3695 free(pstore, M_TEMP); 3696 slice_count = count * 2; 3697 pstore = mallocarray(slice_count, 3698 sizeof(struct pfsync_state_1301), M_TEMP, 3699 M_WAITOK | M_ZERO); 3700 goto DIOCGETSTATES_retry; 3701 } 3702 3703 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3704 PF_HASHROW_UNLOCK(ih); 3705 goto DIOCGETSTATES_full; 3706 } 3707 3708 LIST_FOREACH(s, &ih->states, entry) { 3709 if (s->timeout == PFTM_UNLINKED) 3710 continue; 3711 3712 pfsync_state_export((union pfsync_state_union*)p, 3713 s, PFSYNC_MSG_VERSION_1301); 3714 p++; 3715 nr++; 3716 } 3717 PF_HASHROW_UNLOCK(ih); 3718 error = copyout(pstore, out, 3719 sizeof(struct pfsync_state_1301) * count); 3720 if (error) 3721 break; 3722 out = ps->ps_states + nr; 3723 } 3724 DIOCGETSTATES_full: 3725 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3726 free(pstore, M_TEMP); 3727 3728 break; 3729 } 3730 3731 case DIOCGETSTATESV2: { 3732 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3733 struct pf_kstate *s; 3734 struct pf_state_export *pstore, *p; 3735 int i, nr; 3736 size_t slice_count = 16, count; 3737 void *out; 3738 3739 if (ps->ps_req_version > PF_STATE_VERSION) { 3740 error = ENOTSUP; 3741 break; 3742 } 3743 3744 if (ps->ps_len <= 0) { 3745 nr = uma_zone_get_cur(V_pf_state_z); 3746 ps->ps_len = sizeof(struct pf_state_export) * nr; 3747 break; 3748 } 3749 3750 out = ps->ps_states; 3751 pstore = mallocarray(slice_count, 3752 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3753 nr = 0; 3754 3755 for (i = 0; i <= pf_hashmask; i++) { 3756 struct pf_idhash *ih = &V_pf_idhash[i]; 3757 3758 DIOCGETSTATESV2_retry: 3759 p = pstore; 3760 3761 if (LIST_EMPTY(&ih->states)) 3762 continue; 3763 3764 PF_HASHROW_LOCK(ih); 3765 count = 0; 3766 LIST_FOREACH(s, &ih->states, entry) { 3767 if (s->timeout == PFTM_UNLINKED) 3768 continue; 3769 count++; 3770 } 3771 3772 if (count > slice_count) { 3773 PF_HASHROW_UNLOCK(ih); 3774 free(pstore, M_TEMP); 3775 slice_count = count * 2; 3776 pstore = mallocarray(slice_count, 3777 sizeof(struct pf_state_export), M_TEMP, 3778 M_WAITOK | M_ZERO); 3779 goto DIOCGETSTATESV2_retry; 3780 } 3781 3782 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3783 PF_HASHROW_UNLOCK(ih); 3784 goto DIOCGETSTATESV2_full; 3785 } 3786 3787 LIST_FOREACH(s, 
&ih->states, entry) { 3788 if (s->timeout == PFTM_UNLINKED) 3789 continue; 3790 3791 pf_state_export(p, s); 3792 p++; 3793 nr++; 3794 } 3795 PF_HASHROW_UNLOCK(ih); 3796 error = copyout(pstore, out, 3797 sizeof(struct pf_state_export) * count); 3798 if (error) 3799 break; 3800 out = ps->ps_states + nr; 3801 } 3802 DIOCGETSTATESV2_full: 3803 ps->ps_len = nr * sizeof(struct pf_state_export); 3804 free(pstore, M_TEMP); 3805 3806 break; 3807 } 3808 3809 case DIOCGETSTATUSNV: { 3810 error = pf_getstatus((struct pfioc_nv *)addr); 3811 break; 3812 } 3813 3814 case DIOCSETSTATUSIF: { 3815 struct pfioc_if *pi = (struct pfioc_if *)addr; 3816 3817 if (pi->ifname[0] == 0) { 3818 bzero(V_pf_status.ifname, IFNAMSIZ); 3819 break; 3820 } 3821 PF_RULES_WLOCK(); 3822 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3823 PF_RULES_WUNLOCK(); 3824 break; 3825 } 3826 3827 case DIOCCLRSTATUS: { 3828 PF_RULES_WLOCK(); 3829 for (int i = 0; i < PFRES_MAX; i++) 3830 counter_u64_zero(V_pf_status.counters[i]); 3831 for (int i = 0; i < FCNT_MAX; i++) 3832 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3833 for (int i = 0; i < SCNT_MAX; i++) 3834 counter_u64_zero(V_pf_status.scounters[i]); 3835 for (int i = 0; i < KLCNT_MAX; i++) 3836 counter_u64_zero(V_pf_status.lcounters[i]); 3837 V_pf_status.since = time_second; 3838 if (*V_pf_status.ifname) 3839 pfi_update_status(V_pf_status.ifname, NULL); 3840 PF_RULES_WUNLOCK(); 3841 break; 3842 } 3843 3844 case DIOCNATLOOK: { 3845 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3846 struct pf_state_key *sk; 3847 struct pf_kstate *state; 3848 struct pf_state_key_cmp key; 3849 int m = 0, direction = pnl->direction; 3850 int sidx, didx; 3851 3852 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3853 sidx = (direction == PF_IN) ? 1 : 0; 3854 didx = (direction == PF_IN) ? 
0 : 1; 3855 3856 if (!pnl->proto || 3857 PF_AZERO(&pnl->saddr, pnl->af) || 3858 PF_AZERO(&pnl->daddr, pnl->af) || 3859 ((pnl->proto == IPPROTO_TCP || 3860 pnl->proto == IPPROTO_UDP) && 3861 (!pnl->dport || !pnl->sport))) 3862 error = EINVAL; 3863 else { 3864 bzero(&key, sizeof(key)); 3865 key.af = pnl->af; 3866 key.proto = pnl->proto; 3867 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3868 key.port[sidx] = pnl->sport; 3869 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3870 key.port[didx] = pnl->dport; 3871 3872 state = pf_find_state_all(&key, direction, &m); 3873 if (state == NULL) { 3874 error = ENOENT; 3875 } else { 3876 if (m > 1) { 3877 PF_STATE_UNLOCK(state); 3878 error = E2BIG; /* more than one state */ 3879 } else { 3880 sk = state->key[sidx]; 3881 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3882 pnl->rsport = sk->port[sidx]; 3883 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3884 pnl->rdport = sk->port[didx]; 3885 PF_STATE_UNLOCK(state); 3886 } 3887 } 3888 } 3889 break; 3890 } 3891 3892 case DIOCSETTIMEOUT: { 3893 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3894 int old; 3895 3896 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 3897 pt->seconds < 0) { 3898 error = EINVAL; 3899 break; 3900 } 3901 PF_RULES_WLOCK(); 3902 old = V_pf_default_rule.timeout[pt->timeout]; 3903 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 3904 pt->seconds = 1; 3905 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 3906 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 3907 wakeup(pf_purge_thread); 3908 pt->seconds = old; 3909 PF_RULES_WUNLOCK(); 3910 break; 3911 } 3912 3913 case DIOCGETTIMEOUT: { 3914 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3915 3916 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 3917 error = EINVAL; 3918 break; 3919 } 3920 PF_RULES_RLOCK(); 3921 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 3922 PF_RULES_RUNLOCK(); 3923 break; 3924 } 3925 3926 case DIOCGETLIMIT: { 3927 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3928 3929 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 3930 error = EINVAL; 3931 break; 3932 } 3933 PF_RULES_RLOCK(); 3934 pl->limit = V_pf_limits[pl->index].limit; 3935 PF_RULES_RUNLOCK(); 3936 break; 3937 } 3938 3939 case DIOCSETLIMIT: { 3940 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3941 int old_limit; 3942 3943 PF_RULES_WLOCK(); 3944 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 3945 V_pf_limits[pl->index].zone == NULL) { 3946 PF_RULES_WUNLOCK(); 3947 error = EINVAL; 3948 break; 3949 } 3950 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 3951 old_limit = V_pf_limits[pl->index].limit; 3952 V_pf_limits[pl->index].limit = pl->limit; 3953 pl->limit = old_limit; 3954 PF_RULES_WUNLOCK(); 3955 break; 3956 } 3957 3958 case DIOCSETDEBUG: { 3959 u_int32_t *level = (u_int32_t *)addr; 3960 3961 PF_RULES_WLOCK(); 3962 V_pf_status.debug = *level; 3963 PF_RULES_WUNLOCK(); 3964 break; 3965 } 3966 3967 case DIOCCLRRULECTRS: { 3968 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 3969 struct pf_kruleset *ruleset = &pf_main_ruleset; 3970 struct pf_krule *rule; 3971 3972 PF_RULES_WLOCK(); 3973 TAILQ_FOREACH(rule, 3974 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 3975 pf_counter_u64_zero(&rule->evaluations); 3976 for (int i = 0; i < 2; i++) { 3977 pf_counter_u64_zero(&rule->packets[i]); 3978 pf_counter_u64_zero(&rule->bytes[i]); 3979 } 3980 } 3981 PF_RULES_WUNLOCK(); 3982 break; 3983 } 3984 3985 case DIOCGIFSPEEDV0: 3986 case DIOCGIFSPEEDV1: { 3987 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 3988 struct pf_ifspeed_v1 ps; 3989 struct ifnet *ifp; 3990 3991 if (psp->ifname[0] == '\0') { 3992 error = EINVAL; 3993 break; 3994 } 3995 3996 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 3997 if (error != 0) 3998 break; 3999 ifp = ifunit(ps.ifname); 4000 if (ifp != NULL) { 4001 psp->baudrate32 = 4002 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4003 if (cmd == DIOCGIFSPEEDV1) 4004 psp->baudrate = ifp->if_baudrate; 4005 } else { 4006 error = EINVAL; 4007 } 4008 break; 4009 } 4010 4011 #ifdef ALTQ 4012 case DIOCSTARTALTQ: { 4013 struct pf_altq *altq; 4014 4015 PF_RULES_WLOCK(); 4016 /* enable all altq interfaces on active list */ 4017 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4018 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4019 error = pf_enable_altq(altq); 4020 if (error != 0) 4021 break; 4022 } 4023 } 4024 if (error == 0) 4025 V_pf_altq_running = 1; 4026 PF_RULES_WUNLOCK(); 4027 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4028 break; 4029 } 4030 4031 case DIOCSTOPALTQ: { 4032 struct pf_altq *altq; 4033 4034 PF_RULES_WLOCK(); 4035 /* disable all altq interfaces on active list */ 4036 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4037 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4038 error = pf_disable_altq(altq); 4039 if (error != 0) 4040 break; 4041 } 4042 } 4043 if (error == 0) 4044 V_pf_altq_running = 0; 4045 PF_RULES_WUNLOCK(); 4046 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4047 break; 4048 } 4049 4050 case DIOCADDALTQV0: 4051 case DIOCADDALTQV1: { 4052 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4053 struct pf_altq *altq, *a; 4054 struct ifnet *ifp; 4055 4056 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4057 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4058 if (error) 4059 break; 4060 altq->local_flags = 0; 4061 4062 PF_RULES_WLOCK(); 4063 if (pa->ticket != V_ticket_altqs_inactive) { 4064 PF_RULES_WUNLOCK(); 4065 free(altq, M_PFALTQ); 4066 error = EBUSY; 4067 break; 4068 } 4069 4070 /* 4071 * if this is for a queue, find the discipline and 4072 * copy the necessary fields 4073 */ 4074 if (altq->qname[0] != 0) { 4075 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4076 PF_RULES_WUNLOCK(); 4077 error = EBUSY; 4078 free(altq, M_PFALTQ); 4079 break; 4080 } 4081 altq->altq_disc = NULL; 4082 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4083 if (strncmp(a->ifname, altq->ifname, 4084 IFNAMSIZ) == 0) { 4085 altq->altq_disc = a->altq_disc; 4086 break; 4087 } 4088 } 4089 } 4090 4091 if ((ifp = ifunit(altq->ifname)) == NULL) 4092 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4093 else 4094 error = altq_add(ifp, altq); 4095 4096 if (error) { 4097 PF_RULES_WUNLOCK(); 4098 free(altq, M_PFALTQ); 4099 break; 4100 } 4101 4102 if (altq->qname[0] != 0) 4103 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4104 else 4105 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4106 /* version error check done on import above */ 4107 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4108 PF_RULES_WUNLOCK(); 4109 break; 4110 } 4111 4112 case DIOCGETALTQSV0: 4113 case DIOCGETALTQSV1: { 4114 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4115 struct pf_altq *altq; 4116 4117 PF_RULES_RLOCK(); 4118 pa->nr = 0; 4119 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4120 pa->nr++; 4121 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4122 pa->nr++; 4123 pa->ticket = V_ticket_altqs_active; 4124 PF_RULES_RUNLOCK(); 4125 break; 4126 } 4127 4128 case DIOCGETALTQV0: 4129 
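	/*
	 * V0 and V1 share this handler; pf_export_kaltq() sizes the
	 * result by IOCPARM_LEN(cmd), so the older, smaller struct is
	 * still filled in correctly.
	 */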
case DIOCGETALTQV1: { 4130 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4131 struct pf_altq *altq; 4132 4133 PF_RULES_RLOCK(); 4134 if (pa->ticket != V_ticket_altqs_active) { 4135 PF_RULES_RUNLOCK(); 4136 error = EBUSY; 4137 break; 4138 } 4139 altq = pf_altq_get_nth_active(pa->nr); 4140 if (altq == NULL) { 4141 PF_RULES_RUNLOCK(); 4142 error = EBUSY; 4143 break; 4144 } 4145 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4146 PF_RULES_RUNLOCK(); 4147 break; 4148 } 4149 4150 case DIOCCHANGEALTQV0: 4151 case DIOCCHANGEALTQV1: 4152 /* CHANGEALTQ not supported yet! */ 4153 error = ENODEV; 4154 break; 4155 4156 case DIOCGETQSTATSV0: 4157 case DIOCGETQSTATSV1: { 4158 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4159 struct pf_altq *altq; 4160 int nbytes; 4161 u_int32_t version; 4162 4163 PF_RULES_RLOCK(); 4164 if (pq->ticket != V_ticket_altqs_active) { 4165 PF_RULES_RUNLOCK(); 4166 error = EBUSY; 4167 break; 4168 } 4169 nbytes = pq->nbytes; 4170 altq = pf_altq_get_nth_active(pq->nr); 4171 if (altq == NULL) { 4172 PF_RULES_RUNLOCK(); 4173 error = EBUSY; 4174 break; 4175 } 4176 4177 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4178 PF_RULES_RUNLOCK(); 4179 error = ENXIO; 4180 break; 4181 } 4182 PF_RULES_RUNLOCK(); 4183 if (cmd == DIOCGETQSTATSV0) 4184 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4185 else 4186 version = pq->version; 4187 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4188 if (error == 0) { 4189 pq->scheduler = altq->scheduler; 4190 pq->nbytes = nbytes; 4191 } 4192 break; 4193 } 4194 #endif /* ALTQ */ 4195 4196 case DIOCBEGINADDRS: { 4197 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4198 4199 PF_RULES_WLOCK(); 4200 pf_empty_kpool(&V_pf_pabuf); 4201 pp->ticket = ++V_ticket_pabuf; 4202 PF_RULES_WUNLOCK(); 4203 break; 4204 } 4205 4206 case DIOCADDADDR: { 4207 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4208 struct pf_kpooladdr *pa; 4209 struct pfi_kkif *kif = NULL; 4210 4211 #ifndef INET 4212 if (pp->af == AF_INET) { 4213 error = EAFNOSUPPORT; 4214 break; 4215 } 4216 #endif /* INET */ 4217 #ifndef INET6 4218 if (pp->af == AF_INET6) { 4219 error = EAFNOSUPPORT; 4220 break; 4221 } 4222 #endif /* INET6 */ 4223 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4224 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4225 pp->addr.addr.type != PF_ADDR_TABLE) { 4226 error = EINVAL; 4227 break; 4228 } 4229 if (pp->addr.addr.p.dyn != NULL) { 4230 error = EINVAL; 4231 break; 4232 } 4233 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4234 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4235 if (error != 0) 4236 break; 4237 if (pa->ifname[0]) 4238 kif = pf_kkif_create(M_WAITOK); 4239 PF_RULES_WLOCK(); 4240 if (pp->ticket != V_ticket_pabuf) { 4241 PF_RULES_WUNLOCK(); 4242 if (pa->ifname[0]) 4243 pf_kkif_free(kif); 4244 free(pa, M_PFRULE); 4245 error = EBUSY; 4246 break; 4247 } 4248 if (pa->ifname[0]) { 4249 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4250 kif = NULL; 4251 pfi_kkif_ref(pa->kif); 4252 } else 4253 pa->kif = NULL; 4254 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4255 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4256 if (pa->ifname[0]) 4257 pfi_kkif_unref(pa->kif); 4258 PF_RULES_WUNLOCK(); 4259 free(pa, M_PFRULE); 4260 break; 4261 } 4262 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4263 PF_RULES_WUNLOCK(); 4264 break; 4265 } 4266 4267 case DIOCGETADDRS: { 4268 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4269 struct pf_kpool *pool; 4270 struct pf_kpooladdr *pa; 4271 4272 
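		/*
		 * DIOCGETADDRS only reports the number of entries in the
		 * pool selected by (anchor, ticket, r_action, r_num);
		 * userspace then iterates with DIOCGETADDR to fetch each
		 * address by index.
		 */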
	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;
		u_int32_t		 nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_kruleset	*ruleset;
		struct pfi_kkif		*kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;
		u_int32_t		 nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
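	/*
	 * The table ioctls below share one input-validation pattern: check
	 * that the element size userland was built with (pfrio_esize)
	 * matches the kernel's idea of the struct, bound the element count
	 * by pf_ioctl_maxcount and by multiplication overflow, and only then
	 * mallocarray() + copyin() the array.  The shape, using the names
	 * from this file:
	 *
	 *	if (io->pfrio_esize != sizeof(struct pfr_table))
	 *		return (ENODEV);	// struct layout mismatch
	 *	if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
	 *	    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table)))
	 *		return (ENOMEM);	// absurd or overflowing count
	 */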
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}
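	/*
	 * The DIOCRGET* handlers above follow the usual two-pass sizing
	 * contract: the kernel clamps pfrio_size to the number of objects
	 * that actually exist and writes the real count back, so a caller
	 * can probe with an empty buffer first and retry with enough room.
	 * Sketch of a hypothetical userland wrapper:
	 *
	 *	io.pfrio_size = 0;
	 *	ioctl(dev, DIOCRGETTABLES, &io);	// io.pfrio_size = count
	 *	io.pfrio_buffer = calloc(io.pfrio_size,
	 *	    sizeof(struct pfr_table));
	 *	ioctl(dev, DIOCRGETTABLES, &io);	// fills the buffer
	 */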
	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}
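	/*
	 * DIOCRADDADDRS above (and the DEL/SET/CLR variants around it)
	 * honor PFR_FLAG_FEEDBACK: when set, the table code records a
	 * per-entry verdict in each pfr_addr's pfra_fback field and the
	 * handler copies the array back out, so userland can tell which
	 * addresses were added, were duplicates, or were rejected.  Sketch:
	 *
	 *	io.pfrio_flags |= PFR_FLAG_FEEDBACK;
	 *	ioctl(dev, DIOCRADDADDRS, &io);
	 *	for (int i = 0; i < io.pfrio_size; i++)
	 *		if (addrs[i].pfra_fback != PFR_FB_ADDED)
	 *			warnx("entry %d not added", i);
	 */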
	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* Ensure there are no more Ethernet rules to clean up. */
		NET_EPOCH_DRAIN_CALLBACKS();
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}
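	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement pf's all-or-nothing
	 * ruleset transaction: BEGIN opens an inactive copy of every ruleset
	 * named in the array and returns one ticket per element; COMMIT
	 * re-validates every ticket under the rules write lock before
	 * swapping all inactive rulesets in; ROLLBACK discards them.  A
	 * pfctl-style caller does roughly (sketch, error handling omitted):
	 *
	 *	struct pfioc_trans_e es[2] = {
	 *		{ .rs_num = PF_RULESET_FILTER },
	 *		{ .rs_num = PF_RULESET_NAT },
	 *	};
	 *	struct pfioc_trans t = {
	 *		.size = 2, .esize = sizeof(es[0]), .array = es,
	 *	};
	 *	ioctl(dev, DIOCXBEGIN, &t);	// es[i].ticket now valid
	 *	// ... load rules against those tickets ...
	 *	ioctl(dev, DIOCXCOMMIT, &t);	// or DIOCXROLLBACK on failure
	 */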
	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_rollback_eth(ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		struct pf_kruleset	*rs;
		struct pf_keth_ruleset	*ers;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				ers = pf_find_keth_ruleset(ioe->anchor);
				if (ers == NULL || ioe->ticket == 0 ||
				    ioe->ticket != ers->inactive.ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();

		/* Only hook into Ethernet traffic if we've got rules for it. */
		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		else
			dehook_pf_eth();

		free(ioes, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash	*sh;
		struct pf_ksrc_node	*n;
		struct pf_src_node	*p, *pstore;
		uint32_t		 i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

#ifdef COMPAT_FREEBSD13
	case DIOCKEEPCOUNTERS_FREEBSD13:
#endif
	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}
	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		if (io->pfiio_size < 0 ||
		    io->pfiio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
			error = EINVAL;
			break;
		}

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
		    M_TEMP, M_WAITOK | M_ZERO);

		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETREASS: {
		u_int32_t *reass = (u_int32_t *)addr;

		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
		/* Removal of the DF flag without reassembly enabled is not a
		 * valid combination. Disable reassembly in that case. */
		if (!(V_pf_status.reass & PF_REASS_ENABLED))
			V_pf_status.reass = 0;
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&V_pf_ioctl_lock))
		sx_xunlock(&V_pf_ioctl_lock);
	CURVNET_RESTORE();

#undef ERROUT_IOCTL

	return (error);
}
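/*
 * pfsync_state_export() below serializes a state for the pfsync wire
 * protocol.  The union is laid out so that the fields common to all message
 * versions are reached through the 13.1 view (pfs_1301); fields that only
 * exist in the 14.0 format (the widened 16-bit state_flags and the
 * queueing/route parameters) are filled in solely for
 * PFSYNC_MSG_VERSION_1400.  Multi-byte fields are converted to network byte
 * order here, so callers must not reuse the exported values as host-endian
 * numbers afterwards.
 */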
void
pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
{
	bzero(sp, sizeof(union pfsync_state_union));

	/* copy from state key */
	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
	sp->pfs_1301.creation = htonl(time_uptime - st->creation);
	sp->pfs_1301.expire = pf_state_expires(st);
	if (sp->pfs_1301.expire <= time_uptime)
		sp->pfs_1301.expire = htonl(0);
	else
		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);

	sp->pfs_1301.direction = st->direction;
	sp->pfs_1301.log = st->act.log;
	sp->pfs_1301.timeout = st->timeout;

	switch (msg_version) {
	case PFSYNC_MSG_VERSION_1301:
		sp->pfs_1301.state_flags = st->state_flags;
		break;
	case PFSYNC_MSG_VERSION_1400:
		sp->pfs_1400.state_flags = htons(st->state_flags);
		sp->pfs_1400.qid = htons(st->act.qid);
		sp->pfs_1400.pqid = htons(st->act.pqid);
		sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
		sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
		sp->pfs_1400.rtableid = htonl(st->act.rtableid);
		sp->pfs_1400.min_ttl = st->act.min_ttl;
		sp->pfs_1400.set_tos = st->act.set_tos;
		sp->pfs_1400.max_mss = htons(st->act.max_mss);
		sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
		sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
		sp->pfs_1400.rt = st->rt;
		if (st->rt_kif)
			strlcpy(sp->pfs_1400.rt_ifname,
			    st->rt_kif->pfik_name,
			    sizeof(sp->pfs_1400.rt_ifname));
		break;
	default:
		panic("%s: Unsupported pfsync_msg_version %d",
		    __func__, msg_version);
	}

	if (st->src_node)
		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->pfs_1301.id = st->id;
	sp->pfs_1301.creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);

	if (st->rule.ptr == NULL)
		sp->pfs_1301.rule = htonl(-1);
	else
		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->pfs_1301.anchor = htonl(-1);
	else
		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->pfs_1301.nat_rule = htonl(-1);
	else
		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
}
void
pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));

	sp->version = PF_STATE_VERSION;

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
	    sizeof(sp->orig_ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->act.log;
	sp->timeout = st->timeout;
	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
	sp->state_flags_compat = st->state_flags;
	sp->state_flags = htons(st->state_flags);
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	sp->packets[0] = st->packets[0];
	sp->packets[1] = st->packets[1];
	sp->bytes[0] = st->bytes[0];
	sp->bytes[1] = st->bytes[1];

	sp->qid = htons(st->act.qid);
	sp->pqid = htons(st->act.pqid);
	sp->dnpipe = htons(st->act.dnpipe);
	sp->dnrpipe = htons(st->act.dnrpipe);
	sp->rtableid = htonl(st->act.rtableid);
	sp->min_ttl = st->act.min_ttl;
	sp->set_tos = st->act.set_tos;
	sp->max_mss = htons(st->act.max_mss);
	sp->rt = st->rt;
	if (st->rt_kif)
		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
		    sizeof(sp->rt_ifname));
	sp->set_prio[0] = st->act.set_prio[0];
	sp->set_prio[1] = st->act.set_prio[1];
}

static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t *nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names", names[i]);
		nvlist_append_number_array(nvc, "ids", i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}
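/*
 * pf_add_status_counters() above encodes each counter set as three parallel
 * nvlist arrays ("counters", "names", "ids") nested under the given name.
 * A consumer such as libpfctl is expected to walk them in lockstep; sketch,
 * assuming libnv and an unpacked reply in nvl:
 *
 *	const nvlist_t *nvc = nvlist_get_nvlist(nvl, "counters");
 *	size_t n;
 *	const uint64_t *vals = nvlist_get_number_array(nvc, "counters", &n);
 *	const char * const *names = nvlist_get_string_array(nvc, "names", &n);
 *	for (size_t i = 0; i < n; i++)
 *		printf("%s = %ju\n", names[i], (uintmax_t)vals[i]);
 */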
static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvc = NULL;
	void *nvlpacked = NULL;
	int error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
	nvlist_add_number(nvl, "reass", V_pf_status.reass);
	nvlist_add_bool(nvl, "syncookies_active",
	    V_pf_status.syncookies_active);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/* fcounters */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names", pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids", i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */
static void
pf_clear_all_states(void)
{
	struct pf_kstate	*s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}
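/*
 * Note the relock dance in pf_clear_all_states() above: pf_unlink_state()
 * drops the hash row lock, so the LIST_FOREACH() cannot safely continue
 * from the current element; the loop instead restarts from the row head
 * after every unlink.  That is quadratic in a row's length in the worst
 * case, but rows are expected to stay short.
 */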
static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_kstate *s;
	int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (! nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp	 match_key;
	struct pf_kstate	*s;
	struct pfi_kkif	*kif;
	int		 idx;
	unsigned int	 killed = 0, dir;

	for (unsigned int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname, kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}

static void
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_kstate *s;

	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_unlink_state(s);
			*killed = 1;
		}
		return;
	}

	for (unsigned int i = 0; i <= pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);

	return;
}

static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	pf_killstates(&kill, &killed);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}
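/*
 * The nvlist-based handlers here (pf_killstates_nv() above,
 * pf_clearstates_nv() and pf_getstate() below) all share one shape: bound
 * nv->len by pf_ioctl_maxcount, copyin() and nvlist_unpack() the request,
 * do the work, then pack a reply back into the same pfioc_nv.  Generic
 * caller-side sketch (assuming libnv; real consumers go through libpfctl,
 * and typically pass a buffer larger than the request so the reply fits):
 *
 *	struct pfioc_nv nv;
 *	nv.data = nvlist_pack(req, &nv.len);	// packed request nvlist
 *	nv.size = nv.len;	// buffer size; reply must fit or ENOSPC
 *	if (ioctl(dev, cmd, &nv) == 0)
 *		reply = nvlist_unpack(nv.data, nv.len, 0);
 */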
static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_kstate *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
			break;
		}
		pf_commit_eth(t[0], &nn);

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif
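/*
 * pf_check_return() above translates pf's verdict into pfil(9) terms:
 * PF_PASS with the mbuf still present maps to PFIL_PASS, PF_PASS where pf
 * took ownership of (or already freed) the mbuf maps to PFIL_CONSUMED, and
 * any other verdict frees the packet and reports PFIL_DROPPED.  The hooks
 * around it rely on this so the pfil framework never touches an mbuf that
 * pf has already disposed of.
 */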
/*
 * Map a pf verdict to a pfil(9) verdict.  On PF_PASS the mbuf may have
 * been consumed by pf (*m == NULL); any other verdict drops and frees
 * the packet.
 */
static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we have to change this to lo0, as is the case in IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */
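/*
 * pfil(9) registration is a two-step process, repeated for every hook
 * below: pfil_add_hook() creates the hook object from pfil_hook_args,
 * and pfil_link() attaches it to a per-protocol packet head.  In
 * outline, for one direction:
 *
 *	pha.pa_flags = PFIL_IN;
 *	hook = pfil_add_hook(&pha);
 *	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
 *	pla.pa_head = V_inet_pfil_head;
 *	pla.pa_hook = hook;
 *	pfil_link(&pla);
 */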
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}

static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet_local_pfil_head;
		pla.pa_hook = V_pf_ip4_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet6_local_pfil_head;
		pla.pa_hook = V_pf_ip6_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}
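/*
 * Initialization is deliberately split: pf_load() runs once per kernel
 * at MOD_LOAD time (device node, purge thread, global interface table),
 * while pf_load_vnet() below runs for every vnet via VNET_SYSINIT and
 * sets up the per-vnet zones, locks and tag sets before calling
 * pfattach_vnet().  Teardown mirrors this split.
 */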
static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0,
	    "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
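/*
 * Module glue: pf_modevent above only acts on MOD_LOAD.  Teardown is
 * driven by the SYSUNINITs instead, so the per-vnet vnet_pf_uninit()
 * calls (SI_ORDER_THIRD) run before the global pf_unload()
 * (SI_ORDER_SECOND) on the way down.
 */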
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);
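/*
 * With the module registered above, pf can be loaded and enabled at
 * runtime (typical usage):
 *
 *	# kldload pf
 *	# pfctl -e
 */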