/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

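/*
 * The per-VNET default rule. It is applied when no other rule matches
 * and is never unlinked or garbage collected (see pfattach_vnet()).
 */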
VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int	pf_krule_compare(struct pf_krule *,
			    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

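/*
 * Per-VNET attach: initialize the status block, limits, rulesets and
 * the default rule, allocate its counters and set the default timeouts.
 */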
static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled by
	 * default. In FreeBSD we expect people to still use scrub rules and
	 * switch to the new syntax later. Only when they switch must they
	 * explicitly enable reassembly. We could change the default once the
	 * scrub rule functionality is hopefully removed some day in the
	 * future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset *ruleset;
	struct pf_krule *rule;
	int rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

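/*
 * Drop all references held by an Ethernet rule (tags, queue, kifs and
 * tables), free its counters and then the rule itself.
 */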
static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname *tag;
	u_int32_t index;
	u_int16_t new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname *t;
	uint16_t index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

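/*
 * Open a transaction on the inactive Ethernet ruleset: purge leftover
 * inactive rules and hand out a fresh ticket. Callers are expected to
 * pair this with pf_commit_eth() or pf_rollback_eth(), roughly:
 *
 *	pf_begin_eth(&ticket, anchor);
 *	... load new rules into the inactive set ...
 *	pf_commit_eth(ticket, anchor);
 */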
static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

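/*
 * ALTQ support: queue names share the tag name allocator, so a queue
 * name maps to a 16-bit queue ID the same way a tag name maps to a tag
 * (PF_QNAME_SIZE must equal PF_TAG_NAME_SIZE, checked above).
 */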
#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq, *tmp;
	int error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq *altq, *tmp;
	int error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

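/*
 * Commit the inactive ALTQ lists: swap them with the active ones,
 * attach and enable the new disciplines, then detach, destroy and free
 * the previously active entries.
 */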
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs, *old_altq_ifs;
	struct pf_altq *altq, *tmp;
	int err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* Swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * When the discipline is no longer referenced, it was overridden
	 * by a new one.  If so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet *ifp1;
	int error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq *a1, *a2, *a3;
	u_int32_t ticket;
	int error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}

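/*
 * Open a transaction on an inactive ruleset: install a fresh rule tree,
 * flush any leftover inactive rules and hand out a new ticket.
 */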
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof(u_int32_t));		\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof(u_int16_t));		\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

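/*
 * Feed every match-relevant field of a rule into the MD5 context. The
 * resulting digest is used both for the pfsync ruleset checksum and to
 * match up rules when preserving counters across a commit.
 */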
static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

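/*
 * Commit a ruleset transaction: swap the inactive and active rulesets,
 * optionally carry counters over from matching old rules (when
 * keep_counters is set), recalculate skip steps and purge the old rules.
 */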
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule, **old_array, *old_rule;
	struct pf_krulequeue *old_rules;
	struct pf_krule_global *old_tree;
	int error;
	u_int32_t old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

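/*
 * Build the per-ruleset rule pointer arrays and compute the MD5
 * checksum over the main ruleset that pfsync peers use to verify that
 * both ends run the same rules.
 */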
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX ctx;
	struct pf_krule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
			    sizeof(struct pf_rule **),
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define	ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define	ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);	/* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth);	/* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);	/* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define	ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq *altq;
	u_int32_t nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

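/*
 * Allocate a zeroed kernel rule with its pool mutex and per-CPU
 * timestamp storage already initialized.
 */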
struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

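/*
 * Flatten a kernel rule into the ABI-stable struct pf_rule for copyout:
 * counters are fetched into plain integers and kernel pointers are
 * reduced to rule numbers.
 */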
static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (rule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

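/*
 * Convert a user-supplied struct pf_rule into a kernel rule, validating
 * the rule addresses and copying all strings with pf_user_strcpy() so
 * that unterminated user input is rejected rather than truncated.
 */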
sizeof(rule->tagname)); 2052 if (ret != 0) 2053 return (ret); 2054 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 2055 sizeof(rule->match_tagname)); 2056 if (ret != 0) 2057 return (ret); 2058 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 2059 sizeof(rule->overload_tblname)); 2060 if (ret != 0) 2061 return (ret); 2062 2063 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 2064 2065 /* Don't allow userspace to set evaluations, packets or bytes. */ 2066 /* kif, anchor, overload_tbl are not copied over. */ 2067 2068 krule->os_fingerprint = rule->os_fingerprint; 2069 2070 krule->rtableid = rule->rtableid; 2071 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 2072 krule->max_states = rule->max_states; 2073 krule->max_src_nodes = rule->max_src_nodes; 2074 krule->max_src_states = rule->max_src_states; 2075 krule->max_src_conn = rule->max_src_conn; 2076 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 2077 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 2078 krule->qid = rule->qid; 2079 krule->pqid = rule->pqid; 2080 krule->nr = rule->nr; 2081 krule->prob = rule->prob; 2082 krule->cuid = rule->cuid; 2083 krule->cpid = rule->cpid; 2084 2085 krule->return_icmp = rule->return_icmp; 2086 krule->return_icmp6 = rule->return_icmp6; 2087 krule->max_mss = rule->max_mss; 2088 krule->tag = rule->tag; 2089 krule->match_tag = rule->match_tag; 2090 krule->scrub_flags = rule->scrub_flags; 2091 2092 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2093 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2094 2095 krule->rule_flag = rule->rule_flag; 2096 krule->action = rule->action; 2097 krule->direction = rule->direction; 2098 krule->log = rule->log; 2099 krule->logif = rule->logif; 2100 krule->quick = rule->quick; 2101 krule->ifnot = rule->ifnot; 2102 krule->match_tag_not = rule->match_tag_not; 2103 krule->natpass = rule->natpass; 2104 2105 krule->keep_state = rule->keep_state; 2106 krule->af = rule->af; 2107 krule->proto = rule->proto; 2108 krule->type = rule->type; 2109 krule->code = rule->code; 2110 krule->flags = rule->flags; 2111 krule->flagset = rule->flagset; 2112 krule->min_ttl = rule->min_ttl; 2113 krule->allow_opts = rule->allow_opts; 2114 krule->rt = rule->rt; 2115 krule->return_ttl = rule->return_ttl; 2116 krule->tos = rule->tos; 2117 krule->set_tos = rule->set_tos; 2118 2119 krule->flush = rule->flush; 2120 krule->prio = rule->prio; 2121 krule->set_prio[0] = rule->set_prio[0]; 2122 krule->set_prio[1] = rule->set_prio[1]; 2123 2124 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2125 2126 return (0); 2127 } 2128 2129 static int 2130 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk, 2131 struct pf_kstate_kill *kill) 2132 { 2133 int ret; 2134 2135 bzero(kill, sizeof(*kill)); 2136 2137 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp)); 2138 kill->psk_af = psk->psk_af; 2139 kill->psk_proto = psk->psk_proto; 2140 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src)); 2141 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst)); 2142 ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname, 2143 sizeof(kill->psk_ifname)); 2144 if (ret != 0) 2145 return (ret); 2146 ret = pf_user_strcpy(kill->psk_label, psk->psk_label, 2147 sizeof(kill->psk_label)); 2148 if (ret != 0) 2149 return (ret); 2150 2151 return (0); 2152 } 2153 2154 static int 2155 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2156 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 
2157 struct thread *td) 2158 { 2159 struct pf_kruleset *ruleset; 2160 struct pf_krule *tail; 2161 struct pf_kpooladdr *pa; 2162 struct pfi_kkif *kif = NULL; 2163 int rs_num; 2164 int error = 0; 2165 2166 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2167 error = EINVAL; 2168 goto errout_unlocked; 2169 } 2170 2171 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2172 2173 if (rule->ifname[0]) 2174 kif = pf_kkif_create(M_WAITOK); 2175 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2176 for (int i = 0; i < 2; i++) { 2177 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2178 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2179 } 2180 rule->states_cur = counter_u64_alloc(M_WAITOK); 2181 rule->states_tot = counter_u64_alloc(M_WAITOK); 2182 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2183 rule->cuid = td->td_ucred->cr_ruid; 2184 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2185 TAILQ_INIT(&rule->rpool.list); 2186 2187 PF_CONFIG_LOCK(); 2188 PF_RULES_WLOCK(); 2189 #ifdef PF_WANT_32_TO_64_COUNTER 2190 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2191 MPASS(!rule->allrulelinked); 2192 rule->allrulelinked = true; 2193 V_pf_allrulecount++; 2194 #endif 2195 ruleset = pf_find_kruleset(anchor); 2196 if (ruleset == NULL) 2197 ERROUT(EINVAL); 2198 rs_num = pf_get_ruleset_number(rule->action); 2199 if (rs_num >= PF_RULESET_MAX) 2200 ERROUT(EINVAL); 2201 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2202 DPFPRINTF(PF_DEBUG_MISC, 2203 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2204 ruleset->rules[rs_num].inactive.ticket)); 2205 ERROUT(EBUSY); 2206 } 2207 if (pool_ticket != V_ticket_pabuf) { 2208 DPFPRINTF(PF_DEBUG_MISC, 2209 ("pool_ticket: %d != %d\n", pool_ticket, 2210 V_ticket_pabuf)); 2211 ERROUT(EBUSY); 2212 } 2213 /* 2214 * XXXMJG hack: there is no mechanism to ensure they started the 2215 * transaction. Ticket checked above may happen to match by accident, 2216 * even if nobody called DIOCXBEGIN, let alone this process. 2217 * Partially work around it by checking if the RB tree got allocated, 2218 * see pf_begin_rules. 
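	 * If the tree is still NULL, no transaction can have been opened
	 * and the request is rejected below with EINVAL; a caller that did
	 * go through DIOCXBEGIN always has an inactive tree here.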
2219 */ 2220 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2221 ERROUT(EINVAL); 2222 } 2223 2224 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2225 pf_krulequeue); 2226 if (tail) 2227 rule->nr = tail->nr + 1; 2228 else 2229 rule->nr = 0; 2230 if (rule->ifname[0]) { 2231 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2232 kif = NULL; 2233 pfi_kkif_ref(rule->kif); 2234 } else 2235 rule->kif = NULL; 2236 2237 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2238 error = EBUSY; 2239 2240 #ifdef ALTQ 2241 /* set queue IDs */ 2242 if (rule->qname[0] != 0) { 2243 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2244 error = EBUSY; 2245 else if (rule->pqname[0] != 0) { 2246 if ((rule->pqid = 2247 pf_qname2qid(rule->pqname)) == 0) 2248 error = EBUSY; 2249 } else 2250 rule->pqid = rule->qid; 2251 } 2252 #endif 2253 if (rule->tagname[0]) 2254 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2255 error = EBUSY; 2256 if (rule->match_tagname[0]) 2257 if ((rule->match_tag = 2258 pf_tagname2tag(rule->match_tagname)) == 0) 2259 error = EBUSY; 2260 if (rule->rt && !rule->direction) 2261 error = EINVAL; 2262 if (!rule->log) 2263 rule->logif = 0; 2264 if (rule->logif >= PFLOGIFS_MAX) 2265 error = EINVAL; 2266 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2267 error = ENOMEM; 2268 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2269 error = ENOMEM; 2270 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2271 error = EINVAL; 2272 if (rule->scrub_flags & PFSTATE_SETPRIO && 2273 (rule->set_prio[0] > PF_PRIO_MAX || 2274 rule->set_prio[1] > PF_PRIO_MAX)) 2275 error = EINVAL; 2276 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2277 if (pa->addr.type == PF_ADDR_TABLE) { 2278 pa->addr.p.tbl = pfr_attach_table(ruleset, 2279 pa->addr.v.tblname); 2280 if (pa->addr.p.tbl == NULL) 2281 error = ENOMEM; 2282 } 2283 2284 rule->overload_tbl = NULL; 2285 if (rule->overload_tblname[0]) { 2286 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2287 rule->overload_tblname)) == NULL) 2288 error = EINVAL; 2289 else 2290 rule->overload_tbl->pfrkt_flags |= 2291 PFR_TFLAG_ACTIVE; 2292 } 2293 2294 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2295 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2296 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2297 (rule->rt > PF_NOPFROUTE)) && 2298 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2299 error = EINVAL; 2300 2301 if (error) { 2302 pf_free_rule(rule); 2303 rule = NULL; 2304 ERROUT(error); 2305 } 2306 2307 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2308 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2309 rule, entries); 2310 ruleset->rules[rs_num].inactive.rcount++; 2311 2312 PF_RULES_WUNLOCK(); 2313 pf_hash_rule(rule); 2314 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2315 PF_RULES_WLOCK(); 2316 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2317 ruleset->rules[rs_num].inactive.rcount--; 2318 pf_free_rule(rule); 2319 rule = NULL; 2320 ERROUT(EEXIST); 2321 } 2322 PF_CONFIG_UNLOCK(); 2323 2324 return (0); 2325 2326 #undef ERROUT 2327 errout: 2328 PF_RULES_WUNLOCK(); 2329 PF_CONFIG_UNLOCK(); 2330 errout_unlocked: 2331 pf_kkif_free(kif); 2332 pf_krule_free(rule); 2333 return (error); 2334 } 2335 2336 static bool 2337 pf_label_match(const struct pf_krule *rule, const char *label) 2338 { 2339 int i = 0; 2340 2341 while (*rule->label[i]) { 2342 if (strcmp(rule->label[i], label) == 0) 2343 return (true); 2344 i++; 2345 } 2346 2347 return (false); 2348 } 2349 2350 
static unsigned int 2351 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2352 { 2353 struct pf_kstate *s; 2354 int more = 0; 2355 2356 s = pf_find_state_all(key, dir, &more); 2357 if (s == NULL) 2358 return (0); 2359 2360 if (more) { 2361 PF_STATE_UNLOCK(s); 2362 return (0); 2363 } 2364 2365 pf_unlink_state(s); 2366 return (1); 2367 } 2368 2369 static int 2370 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2371 { 2372 struct pf_kstate *s; 2373 struct pf_state_key *sk; 2374 struct pf_addr *srcaddr, *dstaddr; 2375 struct pf_state_key_cmp match_key; 2376 int idx, killed = 0; 2377 unsigned int dir; 2378 u_int16_t srcport, dstport; 2379 struct pfi_kkif *kif; 2380 2381 relock_DIOCKILLSTATES: 2382 PF_HASHROW_LOCK(ih); 2383 LIST_FOREACH(s, &ih->states, entry) { 2384 /* For floating states look at the original kif. */ 2385 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2386 2387 sk = s->key[PF_SK_WIRE]; 2388 if (s->direction == PF_OUT) { 2389 srcaddr = &sk->addr[1]; 2390 dstaddr = &sk->addr[0]; 2391 srcport = sk->port[1]; 2392 dstport = sk->port[0]; 2393 } else { 2394 srcaddr = &sk->addr[0]; 2395 dstaddr = &sk->addr[1]; 2396 srcport = sk->port[0]; 2397 dstport = sk->port[1]; 2398 } 2399 2400 if (psk->psk_af && sk->af != psk->psk_af) 2401 continue; 2402 2403 if (psk->psk_proto && psk->psk_proto != sk->proto) 2404 continue; 2405 2406 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2407 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2408 continue; 2409 2410 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2411 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2412 continue; 2413 2414 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2415 &psk->psk_rt_addr.addr.v.a.addr, 2416 &psk->psk_rt_addr.addr.v.a.mask, 2417 &s->rt_addr, sk->af)) 2418 continue; 2419 2420 if (psk->psk_src.port_op != 0 && 2421 ! pf_match_port(psk->psk_src.port_op, 2422 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2423 continue; 2424 2425 if (psk->psk_dst.port_op != 0 && 2426 ! pf_match_port(psk->psk_dst.port_op, 2427 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2428 continue; 2429 2430 if (psk->psk_label[0] && 2431 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2432 continue; 2433 2434 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2435 kif->pfik_name)) 2436 continue; 2437 2438 if (psk->psk_kill_match) { 2439 /* Create the key to find matching states, with lock 2440 * held. 
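			 * The companion state tracks the reverse flow, so
			 * the key is built from the other key index with
			 * the address and port pairs swapped.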
*/ 2441 2442 bzero(&match_key, sizeof(match_key)); 2443 2444 if (s->direction == PF_OUT) { 2445 dir = PF_IN; 2446 idx = PF_SK_STACK; 2447 } else { 2448 dir = PF_OUT; 2449 idx = PF_SK_WIRE; 2450 } 2451 2452 match_key.af = s->key[idx]->af; 2453 match_key.proto = s->key[idx]->proto; 2454 PF_ACPY(&match_key.addr[0], 2455 &s->key[idx]->addr[1], match_key.af); 2456 match_key.port[0] = s->key[idx]->port[1]; 2457 PF_ACPY(&match_key.addr[1], 2458 &s->key[idx]->addr[0], match_key.af); 2459 match_key.port[1] = s->key[idx]->port[0]; 2460 } 2461 2462 pf_unlink_state(s); 2463 killed++; 2464 2465 if (psk->psk_kill_match) 2466 killed += pf_kill_matching_state(&match_key, dir); 2467 2468 goto relock_DIOCKILLSTATES; 2469 } 2470 PF_HASHROW_UNLOCK(ih); 2471 2472 return (killed); 2473 } 2474 2475 static int 2476 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2477 { 2478 int error = 0; 2479 PF_RULES_RLOCK_TRACKER; 2480 2481 #define ERROUT_IOCTL(target, x) \ 2482 do { \ 2483 error = (x); \ 2484 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2485 goto target; \ 2486 } while (0) 2487 2488 2489 /* XXX keep in sync with switch() below */ 2490 if (securelevel_gt(td->td_ucred, 2)) 2491 switch (cmd) { 2492 case DIOCGETRULES: 2493 case DIOCGETRULE: 2494 case DIOCGETRULENV: 2495 case DIOCGETADDRS: 2496 case DIOCGETADDR: 2497 case DIOCGETSTATE: 2498 case DIOCGETSTATENV: 2499 case DIOCSETSTATUSIF: 2500 case DIOCGETSTATUS: 2501 case DIOCGETSTATUSNV: 2502 case DIOCCLRSTATUS: 2503 case DIOCNATLOOK: 2504 case DIOCSETDEBUG: 2505 case DIOCGETSTATES: 2506 case DIOCGETSTATESV2: 2507 case DIOCGETTIMEOUT: 2508 case DIOCCLRRULECTRS: 2509 case DIOCGETLIMIT: 2510 case DIOCGETALTQSV0: 2511 case DIOCGETALTQSV1: 2512 case DIOCGETALTQV0: 2513 case DIOCGETALTQV1: 2514 case DIOCGETQSTATSV0: 2515 case DIOCGETQSTATSV1: 2516 case DIOCGETRULESETS: 2517 case DIOCGETRULESET: 2518 case DIOCRGETTABLES: 2519 case DIOCRGETTSTATS: 2520 case DIOCRCLRTSTATS: 2521 case DIOCRCLRADDRS: 2522 case DIOCRADDADDRS: 2523 case DIOCRDELADDRS: 2524 case DIOCRSETADDRS: 2525 case DIOCRGETADDRS: 2526 case DIOCRGETASTATS: 2527 case DIOCRCLRASTATS: 2528 case DIOCRTSTADDRS: 2529 case DIOCOSFPGET: 2530 case DIOCGETSRCNODES: 2531 case DIOCCLRSRCNODES: 2532 case DIOCGETSYNCOOKIES: 2533 case DIOCIGETIFACES: 2534 case DIOCGIFSPEEDV0: 2535 case DIOCGIFSPEEDV1: 2536 case DIOCSETIFFLAG: 2537 case DIOCCLRIFFLAG: 2538 case DIOCGETETHRULES: 2539 case DIOCGETETHRULE: 2540 case DIOCGETETHRULESETS: 2541 case DIOCGETETHRULESET: 2542 break; 2543 case DIOCRCLRTABLES: 2544 case DIOCRADDTABLES: 2545 case DIOCRDELTABLES: 2546 case DIOCRSETTFLAGS: 2547 if (((struct pfioc_table *)addr)->pfrio_flags & 2548 PFR_FLAG_DUMMY) 2549 break; /* dummy operation ok */ 2550 return (EPERM); 2551 default: 2552 return (EPERM); 2553 } 2554 2555 if (!(flags & FWRITE)) 2556 switch (cmd) { 2557 case DIOCGETRULES: 2558 case DIOCGETADDRS: 2559 case DIOCGETADDR: 2560 case DIOCGETSTATE: 2561 case DIOCGETSTATENV: 2562 case DIOCGETSTATUS: 2563 case DIOCGETSTATUSNV: 2564 case DIOCGETSTATES: 2565 case DIOCGETSTATESV2: 2566 case DIOCGETTIMEOUT: 2567 case DIOCGETLIMIT: 2568 case DIOCGETALTQSV0: 2569 case DIOCGETALTQSV1: 2570 case DIOCGETALTQV0: 2571 case DIOCGETALTQV1: 2572 case DIOCGETQSTATSV0: 2573 case DIOCGETQSTATSV1: 2574 case DIOCGETRULESETS: 2575 case DIOCGETRULESET: 2576 case DIOCNATLOOK: 2577 case DIOCRGETTABLES: 2578 case DIOCRGETTSTATS: 2579 case DIOCRGETADDRS: 2580 case DIOCRGETASTATS: 2581 case DIOCRTSTADDRS: 2582 case DIOCOSFPGET: 2583 case DIOCGETSRCNODES: 
2584 case DIOCGETSYNCOOKIES: 2585 case DIOCIGETIFACES: 2586 case DIOCGIFSPEEDV1: 2587 case DIOCGIFSPEEDV0: 2588 case DIOCGETRULENV: 2589 case DIOCGETETHRULES: 2590 case DIOCGETETHRULE: 2591 case DIOCGETETHRULESETS: 2592 case DIOCGETETHRULESET: 2593 break; 2594 case DIOCRCLRTABLES: 2595 case DIOCRADDTABLES: 2596 case DIOCRDELTABLES: 2597 case DIOCRCLRTSTATS: 2598 case DIOCRCLRADDRS: 2599 case DIOCRADDADDRS: 2600 case DIOCRDELADDRS: 2601 case DIOCRSETADDRS: 2602 case DIOCRSETTFLAGS: 2603 if (((struct pfioc_table *)addr)->pfrio_flags & 2604 PFR_FLAG_DUMMY) { 2605 flags |= FWRITE; /* need write lock for dummy */ 2606 break; /* dummy operation ok */ 2607 } 2608 return (EACCES); 2609 case DIOCGETRULE: 2610 if (((struct pfioc_rule *)addr)->action == 2611 PF_GET_CLR_CNTR) 2612 return (EACCES); 2613 break; 2614 default: 2615 return (EACCES); 2616 } 2617 2618 CURVNET_SET(TD_TO_VNET(td)); 2619 2620 switch (cmd) { 2621 case DIOCSTART: 2622 sx_xlock(&V_pf_ioctl_lock); 2623 if (V_pf_status.running) 2624 error = EEXIST; 2625 else { 2626 hook_pf(); 2627 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2628 hook_pf_eth(); 2629 V_pf_status.running = 1; 2630 V_pf_status.since = time_second; 2631 new_unrhdr64(&V_pf_stateid, time_second); 2632 2633 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2634 } 2635 break; 2636 2637 case DIOCSTOP: 2638 sx_xlock(&V_pf_ioctl_lock); 2639 if (!V_pf_status.running) 2640 error = ENOENT; 2641 else { 2642 V_pf_status.running = 0; 2643 dehook_pf(); 2644 dehook_pf_eth(); 2645 V_pf_status.since = time_second; 2646 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2647 } 2648 break; 2649 2650 case DIOCGETETHRULES: { 2651 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2652 nvlist_t *nvl; 2653 void *packed; 2654 struct pf_keth_rule *tail; 2655 struct pf_keth_ruleset *rs; 2656 u_int32_t ticket, nr; 2657 const char *anchor = ""; 2658 2659 nvl = NULL; 2660 packed = NULL; 2661 2662 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2663 2664 if (nv->len > pf_ioctl_maxcount) 2665 ERROUT(ENOMEM); 2666 2667 /* Copy the request in */ 2668 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2669 if (packed == NULL) 2670 ERROUT(ENOMEM); 2671 2672 error = copyin(nv->data, packed, nv->len); 2673 if (error) 2674 ERROUT(error); 2675 2676 nvl = nvlist_unpack(packed, nv->len, 0); 2677 if (nvl == NULL) 2678 ERROUT(EBADMSG); 2679 2680 if (! 
nvlist_exists_string(nvl, "anchor")) 2681 ERROUT(EBADMSG); 2682 2683 anchor = nvlist_get_string(nvl, "anchor"); 2684 2685 rs = pf_find_keth_ruleset(anchor); 2686 2687 nvlist_destroy(nvl); 2688 nvl = NULL; 2689 free(packed, M_NVLIST); 2690 packed = NULL; 2691 2692 if (rs == NULL) 2693 ERROUT(ENOENT); 2694 2695 /* Reply */ 2696 nvl = nvlist_create(0); 2697 if (nvl == NULL) 2698 ERROUT(ENOMEM); 2699 2700 PF_RULES_RLOCK(); 2701 2702 ticket = rs->active.ticket; 2703 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2704 if (tail) 2705 nr = tail->nr + 1; 2706 else 2707 nr = 0; 2708 2709 PF_RULES_RUNLOCK(); 2710 2711 nvlist_add_number(nvl, "ticket", ticket); 2712 nvlist_add_number(nvl, "nr", nr); 2713 2714 packed = nvlist_pack(nvl, &nv->len); 2715 if (packed == NULL) 2716 ERROUT(ENOMEM); 2717 2718 if (nv->size == 0) 2719 ERROUT(0); 2720 else if (nv->size < nv->len) 2721 ERROUT(ENOSPC); 2722 2723 error = copyout(packed, nv->data, nv->len); 2724 2725 #undef ERROUT 2726 DIOCGETETHRULES_error: 2727 free(packed, M_NVLIST); 2728 nvlist_destroy(nvl); 2729 break; 2730 } 2731 2732 case DIOCGETETHRULE: { 2733 struct epoch_tracker et; 2734 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2735 nvlist_t *nvl = NULL; 2736 void *nvlpacked = NULL; 2737 struct pf_keth_rule *rule = NULL; 2738 struct pf_keth_ruleset *rs; 2739 u_int32_t ticket, nr; 2740 bool clear = false; 2741 const char *anchor; 2742 2743 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2744 2745 if (nv->len > pf_ioctl_maxcount) 2746 ERROUT(ENOMEM); 2747 2748 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2749 if (nvlpacked == NULL) 2750 ERROUT(ENOMEM); 2751 2752 error = copyin(nv->data, nvlpacked, nv->len); 2753 if (error) 2754 ERROUT(error); 2755 2756 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2757 if (nvl == NULL) 2758 ERROUT(EBADMSG); 2759 if (! nvlist_exists_number(nvl, "ticket")) 2760 ERROUT(EBADMSG); 2761 ticket = nvlist_get_number(nvl, "ticket"); 2762 if (! nvlist_exists_string(nvl, "anchor")) 2763 ERROUT(EBADMSG); 2764 anchor = nvlist_get_string(nvl, "anchor"); 2765 2766 if (nvlist_exists_bool(nvl, "clear")) 2767 clear = nvlist_get_bool(nvl, "clear"); 2768 2769 if (clear && !(flags & FWRITE)) 2770 ERROUT(EACCES); 2771 2772 if (! nvlist_exists_number(nvl, "nr")) 2773 ERROUT(EBADMSG); 2774 nr = nvlist_get_number(nvl, "nr"); 2775 2776 PF_RULES_RLOCK(); 2777 rs = pf_find_keth_ruleset(anchor); 2778 if (rs == NULL) { 2779 PF_RULES_RUNLOCK(); 2780 ERROUT(ENOENT); 2781 } 2782 if (ticket != rs->active.ticket) { 2783 PF_RULES_RUNLOCK(); 2784 ERROUT(EBUSY); 2785 } 2786 2787 nvlist_destroy(nvl); 2788 nvl = NULL; 2789 free(nvlpacked, M_NVLIST); 2790 nvlpacked = NULL; 2791 2792 rule = TAILQ_FIRST(rs->active.rules); 2793 while ((rule != NULL) && (rule->nr != nr)) 2794 rule = TAILQ_NEXT(rule, entries); 2795 if (rule == NULL) { 2796 PF_RULES_RUNLOCK(); 2797 ERROUT(ENOENT); 2798 } 2799 /* Make sure rule can't go away. 
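		 * The rules read lock is dropped below while the rule is
		 * still being exported, so enter the net epoch first;
		 * ethernet rules are freed via epoch callbacks, which keeps
		 * this one valid until the epoch section ends.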
		 */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != %d\n",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket));
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		if (rule == NULL)
			ERROUT(ENOMEM);
		rule->timestamp = NULL;

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0)
			ERROUT(error);

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		if (rule->bridge_to_name[0])
			bridge_to_kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}
		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
		    M_WAITOK | M_ZERO);

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;
		if (rule->bridge_to_name[0]) {
			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
			    rule->bridge_to_name);
			pfi_kkif_ref(rule->bridge_to);
		} else
			rule->bridge_to = NULL;

#ifdef ALTQ
		/* set the queue ID */
		if (rule->qname[0] != 0)
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
#endif
		if (rule->tagname[0])
			if ((rule->tag =
pf_tagname2tag(rule->tagname)) == 0) 2927 error = EBUSY; 2928 if (rule->match_tagname[0]) 2929 if ((rule->match_tag = pf_tagname2tag( 2930 rule->match_tagname)) == 0) 2931 error = EBUSY; 2932 2933 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2934 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2935 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2936 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2937 2938 if (error) { 2939 pf_free_eth_rule(rule); 2940 PF_RULES_WUNLOCK(); 2941 ERROUT(error); 2942 } 2943 2944 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2945 pf_free_eth_rule(rule); 2946 PF_RULES_WUNLOCK(); 2947 ERROUT(EINVAL); 2948 } 2949 2950 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2951 if (tail) 2952 rule->nr = tail->nr + 1; 2953 else 2954 rule->nr = 0; 2955 2956 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2957 2958 PF_RULES_WUNLOCK(); 2959 2960 #undef ERROUT 2961 DIOCADDETHRULE_error: 2962 nvlist_destroy(nvl); 2963 free(nvlpacked, M_NVLIST); 2964 break; 2965 } 2966 2967 case DIOCGETETHRULESETS: { 2968 struct epoch_tracker et; 2969 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2970 nvlist_t *nvl = NULL; 2971 void *nvlpacked = NULL; 2972 struct pf_keth_ruleset *ruleset; 2973 struct pf_keth_anchor *anchor; 2974 int nr = 0; 2975 2976 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2977 2978 if (nv->len > pf_ioctl_maxcount) 2979 ERROUT(ENOMEM); 2980 2981 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2982 if (nvlpacked == NULL) 2983 ERROUT(ENOMEM); 2984 2985 error = copyin(nv->data, nvlpacked, nv->len); 2986 if (error) 2987 ERROUT(error); 2988 2989 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2990 if (nvl == NULL) 2991 ERROUT(EBADMSG); 2992 if (! nvlist_exists_string(nvl, "path")) 2993 ERROUT(EBADMSG); 2994 2995 NET_EPOCH_ENTER(et); 2996 2997 if ((ruleset = pf_find_keth_ruleset( 2998 nvlist_get_string(nvl, "path"))) == NULL) { 2999 NET_EPOCH_EXIT(et); 3000 ERROUT(ENOENT); 3001 } 3002 3003 if (ruleset->anchor == NULL) { 3004 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 3005 if (anchor->parent == NULL) 3006 nr++; 3007 } else { 3008 RB_FOREACH(anchor, pf_keth_anchor_node, 3009 &ruleset->anchor->children) 3010 nr++; 3011 } 3012 3013 NET_EPOCH_EXIT(et); 3014 3015 nvlist_destroy(nvl); 3016 nvl = NULL; 3017 free(nvlpacked, M_NVLIST); 3018 nvlpacked = NULL; 3019 3020 nvl = nvlist_create(0); 3021 if (nvl == NULL) 3022 ERROUT(ENOMEM); 3023 3024 nvlist_add_number(nvl, "nr", nr); 3025 3026 nvlpacked = nvlist_pack(nvl, &nv->len); 3027 if (nvlpacked == NULL) 3028 ERROUT(ENOMEM); 3029 3030 if (nv->size == 0) 3031 ERROUT(0); 3032 else if (nv->size < nv->len) 3033 ERROUT(ENOSPC); 3034 3035 error = copyout(nvlpacked, nv->data, nv->len); 3036 3037 #undef ERROUT 3038 DIOCGETETHRULESETS_error: 3039 free(nvlpacked, M_NVLIST); 3040 nvlist_destroy(nvl); 3041 break; 3042 } 3043 3044 case DIOCGETETHRULESET: { 3045 struct epoch_tracker et; 3046 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3047 nvlist_t *nvl = NULL; 3048 void *nvlpacked = NULL; 3049 struct pf_keth_ruleset *ruleset; 3050 struct pf_keth_anchor *anchor; 3051 int nr = 0, req_nr = 0; 3052 bool found = false; 3053 3054 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3055 3056 if (nv->len > pf_ioctl_maxcount) 3057 ERROUT(ENOMEM); 3058 3059 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3060 if (nvlpacked == NULL) 3061 ERROUT(ENOMEM); 3062 3063 error = copyin(nv->data, nvlpacked, nv->len); 3064 if (error) 3065 ERROUT(error); 3066 3067 
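		/* Decode the request; both "path" and "nr" are required. */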
nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3068 if (nvl == NULL) 3069 ERROUT(EBADMSG); 3070 if (! nvlist_exists_string(nvl, "path")) 3071 ERROUT(EBADMSG); 3072 if (! nvlist_exists_number(nvl, "nr")) 3073 ERROUT(EBADMSG); 3074 3075 req_nr = nvlist_get_number(nvl, "nr"); 3076 3077 NET_EPOCH_ENTER(et); 3078 3079 if ((ruleset = pf_find_keth_ruleset( 3080 nvlist_get_string(nvl, "path"))) == NULL) { 3081 NET_EPOCH_EXIT(et); 3082 ERROUT(ENOENT); 3083 } 3084 3085 nvlist_destroy(nvl); 3086 nvl = NULL; 3087 free(nvlpacked, M_NVLIST); 3088 nvlpacked = NULL; 3089 3090 nvl = nvlist_create(0); 3091 if (nvl == NULL) { 3092 NET_EPOCH_EXIT(et); 3093 ERROUT(ENOMEM); 3094 } 3095 3096 if (ruleset->anchor == NULL) { 3097 RB_FOREACH(anchor, pf_keth_anchor_global, 3098 &V_pf_keth_anchors) { 3099 if (anchor->parent == NULL && nr++ == req_nr) { 3100 found = true; 3101 break; 3102 } 3103 } 3104 } else { 3105 RB_FOREACH(anchor, pf_keth_anchor_node, 3106 &ruleset->anchor->children) { 3107 if (nr++ == req_nr) { 3108 found = true; 3109 break; 3110 } 3111 } 3112 } 3113 3114 NET_EPOCH_EXIT(et); 3115 if (found) { 3116 nvlist_add_number(nvl, "nr", nr); 3117 nvlist_add_string(nvl, "name", anchor->name); 3118 if (ruleset->anchor) 3119 nvlist_add_string(nvl, "path", 3120 ruleset->anchor->path); 3121 else 3122 nvlist_add_string(nvl, "path", ""); 3123 } else { 3124 ERROUT(EBUSY); 3125 } 3126 3127 nvlpacked = nvlist_pack(nvl, &nv->len); 3128 if (nvlpacked == NULL) 3129 ERROUT(ENOMEM); 3130 3131 if (nv->size == 0) 3132 ERROUT(0); 3133 else if (nv->size < nv->len) 3134 ERROUT(ENOSPC); 3135 3136 error = copyout(nvlpacked, nv->data, nv->len); 3137 3138 #undef ERROUT 3139 DIOCGETETHRULESET_error: 3140 free(nvlpacked, M_NVLIST); 3141 nvlist_destroy(nvl); 3142 break; 3143 } 3144 3145 case DIOCADDRULENV: { 3146 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3147 nvlist_t *nvl = NULL; 3148 void *nvlpacked = NULL; 3149 struct pf_krule *rule = NULL; 3150 const char *anchor = "", *anchor_call = ""; 3151 uint32_t ticket = 0, pool_ticket = 0; 3152 3153 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3154 3155 if (nv->len > pf_ioctl_maxcount) 3156 ERROUT(ENOMEM); 3157 3158 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3159 error = copyin(nv->data, nvlpacked, nv->len); 3160 if (error) 3161 ERROUT(error); 3162 3163 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3164 if (nvl == NULL) 3165 ERROUT(EBADMSG); 3166 3167 if (! nvlist_exists_number(nvl, "ticket")) 3168 ERROUT(EINVAL); 3169 ticket = nvlist_get_number(nvl, "ticket"); 3170 3171 if (! nvlist_exists_number(nvl, "pool_ticket")) 3172 ERROUT(EINVAL); 3173 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3174 3175 if (! 
nvlist_exists_nvlist(nvl, "rule")) 3176 ERROUT(EINVAL); 3177 3178 rule = pf_krule_alloc(); 3179 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3180 rule); 3181 if (error) 3182 ERROUT(error); 3183 3184 if (nvlist_exists_string(nvl, "anchor")) 3185 anchor = nvlist_get_string(nvl, "anchor"); 3186 if (nvlist_exists_string(nvl, "anchor_call")) 3187 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3188 3189 if ((error = nvlist_error(nvl))) 3190 ERROUT(error); 3191 3192 /* Frees rule on error */ 3193 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3194 anchor_call, td); 3195 3196 nvlist_destroy(nvl); 3197 free(nvlpacked, M_NVLIST); 3198 break; 3199 #undef ERROUT 3200 DIOCADDRULENV_error: 3201 pf_krule_free(rule); 3202 nvlist_destroy(nvl); 3203 free(nvlpacked, M_NVLIST); 3204 3205 break; 3206 } 3207 case DIOCADDRULE: { 3208 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3209 struct pf_krule *rule; 3210 3211 rule = pf_krule_alloc(); 3212 error = pf_rule_to_krule(&pr->rule, rule); 3213 if (error != 0) { 3214 pf_krule_free(rule); 3215 break; 3216 } 3217 3218 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3219 3220 /* Frees rule on error */ 3221 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3222 pr->anchor, pr->anchor_call, td); 3223 break; 3224 } 3225 3226 case DIOCGETRULES: { 3227 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3228 struct pf_kruleset *ruleset; 3229 struct pf_krule *tail; 3230 int rs_num; 3231 3232 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3233 3234 PF_RULES_WLOCK(); 3235 ruleset = pf_find_kruleset(pr->anchor); 3236 if (ruleset == NULL) { 3237 PF_RULES_WUNLOCK(); 3238 error = EINVAL; 3239 break; 3240 } 3241 rs_num = pf_get_ruleset_number(pr->rule.action); 3242 if (rs_num >= PF_RULESET_MAX) { 3243 PF_RULES_WUNLOCK(); 3244 error = EINVAL; 3245 break; 3246 } 3247 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3248 pf_krulequeue); 3249 if (tail) 3250 pr->nr = tail->nr + 1; 3251 else 3252 pr->nr = 0; 3253 pr->ticket = ruleset->rules[rs_num].active.ticket; 3254 PF_RULES_WUNLOCK(); 3255 break; 3256 } 3257 3258 case DIOCGETRULE: { 3259 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3260 struct pf_kruleset *ruleset; 3261 struct pf_krule *rule; 3262 int rs_num; 3263 3264 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3265 3266 PF_RULES_WLOCK(); 3267 ruleset = pf_find_kruleset(pr->anchor); 3268 if (ruleset == NULL) { 3269 PF_RULES_WUNLOCK(); 3270 error = EINVAL; 3271 break; 3272 } 3273 rs_num = pf_get_ruleset_number(pr->rule.action); 3274 if (rs_num >= PF_RULESET_MAX) { 3275 PF_RULES_WUNLOCK(); 3276 error = EINVAL; 3277 break; 3278 } 3279 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 3280 PF_RULES_WUNLOCK(); 3281 error = EBUSY; 3282 break; 3283 } 3284 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3285 while ((rule != NULL) && (rule->nr != pr->nr)) 3286 rule = TAILQ_NEXT(rule, entries); 3287 if (rule == NULL) { 3288 PF_RULES_WUNLOCK(); 3289 error = EBUSY; 3290 break; 3291 } 3292 3293 pf_krule_to_rule(rule, &pr->rule); 3294 3295 if (pf_kanchor_copyout(ruleset, rule, pr)) { 3296 PF_RULES_WUNLOCK(); 3297 error = EBUSY; 3298 break; 3299 } 3300 pf_addr_copyout(&pr->rule.src.addr); 3301 pf_addr_copyout(&pr->rule.dst.addr); 3302 3303 if (pr->action == PF_GET_CLR_CNTR) { 3304 pf_counter_u64_zero(&rule->evaluations); 3305 for (int i = 0; i < 2; i++) { 3306 pf_counter_u64_zero(&rule->packets[i]); 3307 pf_counter_u64_zero(&rule->bytes[i]); 3308 } 3309 counter_u64_zero(rule->states_tot); 3310 } 3311 PF_RULES_WUNLOCK(); 3312 break; 3313 } 3314 
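	/*
	 * DIOCGETRULENV is the nvlist flavour of DIOCGETRULE above: the
	 * request carries "anchor", "ruleset", "ticket" and "nr", and the
	 * reply packs the matching rule as a nested "rule" nvlist.  A
	 * minimal userspace sketch, not part of this file: "dev" is an
	 * open /dev/pf descriptor, "ticket" comes from DIOCGETRULES,
	 * error handling is omitted, and a real caller points nv.data at
	 * a buffer large enough for the reply, with nv.size set to its
	 * capacity:
	 *
	 *	nvlist_t *nvl = nvlist_create(0);
	 *	struct pfioc_nv nv;
	 *
	 *	nvlist_add_string(nvl, "anchor", "");
	 *	nvlist_add_number(nvl, "ruleset", PF_PASS);  (filter rules)
	 *	nvlist_add_number(nvl, "ticket", ticket);
	 *	nvlist_add_number(nvl, "nr", 0);             (first rule)
	 *	nv.data = nvlist_pack(nvl, &nv.len);
	 *	nv.size = nv.len;
	 *	ioctl(dev, DIOCGETRULENV, &nv);
	 */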
3315 case DIOCGETRULENV: { 3316 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3317 nvlist_t *nvrule = NULL; 3318 nvlist_t *nvl = NULL; 3319 struct pf_kruleset *ruleset; 3320 struct pf_krule *rule; 3321 void *nvlpacked = NULL; 3322 int rs_num, nr; 3323 bool clear_counter = false; 3324 3325 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3326 3327 if (nv->len > pf_ioctl_maxcount) 3328 ERROUT(ENOMEM); 3329 3330 /* Copy the request in */ 3331 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3332 if (nvlpacked == NULL) 3333 ERROUT(ENOMEM); 3334 3335 error = copyin(nv->data, nvlpacked, nv->len); 3336 if (error) 3337 ERROUT(error); 3338 3339 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3340 if (nvl == NULL) 3341 ERROUT(EBADMSG); 3342 3343 if (! nvlist_exists_string(nvl, "anchor")) 3344 ERROUT(EBADMSG); 3345 if (! nvlist_exists_number(nvl, "ruleset")) 3346 ERROUT(EBADMSG); 3347 if (! nvlist_exists_number(nvl, "ticket")) 3348 ERROUT(EBADMSG); 3349 if (! nvlist_exists_number(nvl, "nr")) 3350 ERROUT(EBADMSG); 3351 3352 if (nvlist_exists_bool(nvl, "clear_counter")) 3353 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3354 3355 if (clear_counter && !(flags & FWRITE)) 3356 ERROUT(EACCES); 3357 3358 nr = nvlist_get_number(nvl, "nr"); 3359 3360 PF_RULES_WLOCK(); 3361 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3362 if (ruleset == NULL) { 3363 PF_RULES_WUNLOCK(); 3364 ERROUT(ENOENT); 3365 } 3366 3367 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3368 if (rs_num >= PF_RULESET_MAX) { 3369 PF_RULES_WUNLOCK(); 3370 ERROUT(EINVAL); 3371 } 3372 3373 if (nvlist_get_number(nvl, "ticket") != 3374 ruleset->rules[rs_num].active.ticket) { 3375 PF_RULES_WUNLOCK(); 3376 ERROUT(EBUSY); 3377 } 3378 3379 if ((error = nvlist_error(nvl))) { 3380 PF_RULES_WUNLOCK(); 3381 ERROUT(error); 3382 } 3383 3384 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3385 while ((rule != NULL) && (rule->nr != nr)) 3386 rule = TAILQ_NEXT(rule, entries); 3387 if (rule == NULL) { 3388 PF_RULES_WUNLOCK(); 3389 ERROUT(EBUSY); 3390 } 3391 3392 nvrule = pf_krule_to_nvrule(rule); 3393 3394 nvlist_destroy(nvl); 3395 nvl = nvlist_create(0); 3396 if (nvl == NULL) { 3397 PF_RULES_WUNLOCK(); 3398 ERROUT(ENOMEM); 3399 } 3400 nvlist_add_number(nvl, "nr", nr); 3401 nvlist_add_nvlist(nvl, "rule", nvrule); 3402 nvlist_destroy(nvrule); 3403 nvrule = NULL; 3404 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3405 PF_RULES_WUNLOCK(); 3406 ERROUT(EBUSY); 3407 } 3408 3409 free(nvlpacked, M_NVLIST); 3410 nvlpacked = nvlist_pack(nvl, &nv->len); 3411 if (nvlpacked == NULL) { 3412 PF_RULES_WUNLOCK(); 3413 ERROUT(ENOMEM); 3414 } 3415 3416 if (nv->size == 0) { 3417 PF_RULES_WUNLOCK(); 3418 ERROUT(0); 3419 } 3420 else if (nv->size < nv->len) { 3421 PF_RULES_WUNLOCK(); 3422 ERROUT(ENOSPC); 3423 } 3424 3425 if (clear_counter) { 3426 pf_counter_u64_zero(&rule->evaluations); 3427 for (int i = 0; i < 2; i++) { 3428 pf_counter_u64_zero(&rule->packets[i]); 3429 pf_counter_u64_zero(&rule->bytes[i]); 3430 } 3431 counter_u64_zero(rule->states_tot); 3432 } 3433 PF_RULES_WUNLOCK(); 3434 3435 error = copyout(nvlpacked, nv->data, nv->len); 3436 3437 #undef ERROUT 3438 DIOCGETRULENV_error: 3439 free(nvlpacked, M_NVLIST); 3440 nvlist_destroy(nvrule); 3441 nvlist_destroy(nvl); 3442 3443 break; 3444 } 3445 3446 case DIOCCHANGERULE: { 3447 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3448 struct pf_kruleset *ruleset; 3449 struct pf_krule *oldrule = NULL, *newrule = NULL; 3450 struct pfi_kkif *kif = NULL; 3451 struct 
pf_kpooladdr *pa; 3452 u_int32_t nr = 0; 3453 int rs_num; 3454 3455 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3456 3457 if (pcr->action < PF_CHANGE_ADD_HEAD || 3458 pcr->action > PF_CHANGE_GET_TICKET) { 3459 error = EINVAL; 3460 break; 3461 } 3462 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3463 error = EINVAL; 3464 break; 3465 } 3466 3467 if (pcr->action != PF_CHANGE_REMOVE) { 3468 newrule = pf_krule_alloc(); 3469 error = pf_rule_to_krule(&pcr->rule, newrule); 3470 if (error != 0) { 3471 pf_krule_free(newrule); 3472 break; 3473 } 3474 3475 if (newrule->ifname[0]) 3476 kif = pf_kkif_create(M_WAITOK); 3477 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3478 for (int i = 0; i < 2; i++) { 3479 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3480 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3481 } 3482 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3483 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3484 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3485 newrule->cuid = td->td_ucred->cr_ruid; 3486 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3487 TAILQ_INIT(&newrule->rpool.list); 3488 } 3489 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3490 3491 PF_CONFIG_LOCK(); 3492 PF_RULES_WLOCK(); 3493 #ifdef PF_WANT_32_TO_64_COUNTER 3494 if (newrule != NULL) { 3495 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3496 newrule->allrulelinked = true; 3497 V_pf_allrulecount++; 3498 } 3499 #endif 3500 3501 if (!(pcr->action == PF_CHANGE_REMOVE || 3502 pcr->action == PF_CHANGE_GET_TICKET) && 3503 pcr->pool_ticket != V_ticket_pabuf) 3504 ERROUT(EBUSY); 3505 3506 ruleset = pf_find_kruleset(pcr->anchor); 3507 if (ruleset == NULL) 3508 ERROUT(EINVAL); 3509 3510 rs_num = pf_get_ruleset_number(pcr->rule.action); 3511 if (rs_num >= PF_RULESET_MAX) 3512 ERROUT(EINVAL); 3513 3514 /* 3515 * XXXMJG: there is no guarantee that the ruleset was 3516 * created by the usual route of calling DIOCXBEGIN. 3517 * As a result it is possible the rule tree will not 3518 * be allocated yet. Hack around it by doing it here. 3519 * Note it is fine to let the tree persist in case of 3520 * error as it will be freed down the road on future 3521 * updates (if need be). 
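		 * pf_rule_tree_alloc() is called with M_NOWAIT because the
		 * rules write lock is already held here and we must not
		 * sleep under it.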
3522 */ 3523 if (ruleset->rules[rs_num].active.tree == NULL) { 3524 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3525 if (ruleset->rules[rs_num].active.tree == NULL) { 3526 ERROUT(ENOMEM); 3527 } 3528 } 3529 3530 if (pcr->action == PF_CHANGE_GET_TICKET) { 3531 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3532 ERROUT(0); 3533 } else if (pcr->ticket != 3534 ruleset->rules[rs_num].active.ticket) 3535 ERROUT(EINVAL); 3536 3537 if (pcr->action != PF_CHANGE_REMOVE) { 3538 if (newrule->ifname[0]) { 3539 newrule->kif = pfi_kkif_attach(kif, 3540 newrule->ifname); 3541 kif = NULL; 3542 pfi_kkif_ref(newrule->kif); 3543 } else 3544 newrule->kif = NULL; 3545 3546 if (newrule->rtableid > 0 && 3547 newrule->rtableid >= rt_numfibs) 3548 error = EBUSY; 3549 3550 #ifdef ALTQ 3551 /* set queue IDs */ 3552 if (newrule->qname[0] != 0) { 3553 if ((newrule->qid = 3554 pf_qname2qid(newrule->qname)) == 0) 3555 error = EBUSY; 3556 else if (newrule->pqname[0] != 0) { 3557 if ((newrule->pqid = 3558 pf_qname2qid(newrule->pqname)) == 0) 3559 error = EBUSY; 3560 } else 3561 newrule->pqid = newrule->qid; 3562 } 3563 #endif /* ALTQ */ 3564 if (newrule->tagname[0]) 3565 if ((newrule->tag = 3566 pf_tagname2tag(newrule->tagname)) == 0) 3567 error = EBUSY; 3568 if (newrule->match_tagname[0]) 3569 if ((newrule->match_tag = pf_tagname2tag( 3570 newrule->match_tagname)) == 0) 3571 error = EBUSY; 3572 if (newrule->rt && !newrule->direction) 3573 error = EINVAL; 3574 if (!newrule->log) 3575 newrule->logif = 0; 3576 if (newrule->logif >= PFLOGIFS_MAX) 3577 error = EINVAL; 3578 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3579 error = ENOMEM; 3580 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3581 error = ENOMEM; 3582 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3583 error = EINVAL; 3584 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3585 if (pa->addr.type == PF_ADDR_TABLE) { 3586 pa->addr.p.tbl = 3587 pfr_attach_table(ruleset, 3588 pa->addr.v.tblname); 3589 if (pa->addr.p.tbl == NULL) 3590 error = ENOMEM; 3591 } 3592 3593 newrule->overload_tbl = NULL; 3594 if (newrule->overload_tblname[0]) { 3595 if ((newrule->overload_tbl = pfr_attach_table( 3596 ruleset, newrule->overload_tblname)) == 3597 NULL) 3598 error = EINVAL; 3599 else 3600 newrule->overload_tbl->pfrkt_flags |= 3601 PFR_TFLAG_ACTIVE; 3602 } 3603 3604 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3605 if (((((newrule->action == PF_NAT) || 3606 (newrule->action == PF_RDR) || 3607 (newrule->action == PF_BINAT) || 3608 (newrule->rt > PF_NOPFROUTE)) && 3609 !newrule->anchor)) && 3610 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3611 error = EINVAL; 3612 3613 if (error) { 3614 pf_free_rule(newrule); 3615 PF_RULES_WUNLOCK(); 3616 PF_CONFIG_UNLOCK(); 3617 break; 3618 } 3619 3620 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3621 } 3622 pf_empty_kpool(&V_pf_pabuf); 3623 3624 if (pcr->action == PF_CHANGE_ADD_HEAD) 3625 oldrule = TAILQ_FIRST( 3626 ruleset->rules[rs_num].active.ptr); 3627 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3628 oldrule = TAILQ_LAST( 3629 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3630 else { 3631 oldrule = TAILQ_FIRST( 3632 ruleset->rules[rs_num].active.ptr); 3633 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3634 oldrule = TAILQ_NEXT(oldrule, entries); 3635 if (oldrule == NULL) { 3636 if (newrule != NULL) 3637 pf_free_rule(newrule); 3638 PF_RULES_WUNLOCK(); 3639 PF_CONFIG_UNLOCK(); 3640 error = EINVAL; 3641 break; 3642 } 3643 } 3644 3645 if (pcr->action == 
PF_CHANGE_REMOVE) { 3646 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3647 oldrule); 3648 RB_REMOVE(pf_krule_global, 3649 ruleset->rules[rs_num].active.tree, oldrule); 3650 ruleset->rules[rs_num].active.rcount--; 3651 } else { 3652 pf_hash_rule(newrule); 3653 if (RB_INSERT(pf_krule_global, 3654 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3655 pf_free_rule(newrule); 3656 PF_RULES_WUNLOCK(); 3657 PF_CONFIG_UNLOCK(); 3658 error = EEXIST; 3659 break; 3660 } 3661 3662 if (oldrule == NULL) 3663 TAILQ_INSERT_TAIL( 3664 ruleset->rules[rs_num].active.ptr, 3665 newrule, entries); 3666 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3667 pcr->action == PF_CHANGE_ADD_BEFORE) 3668 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3669 else 3670 TAILQ_INSERT_AFTER( 3671 ruleset->rules[rs_num].active.ptr, 3672 oldrule, newrule, entries); 3673 ruleset->rules[rs_num].active.rcount++; 3674 } 3675 3676 nr = 0; 3677 TAILQ_FOREACH(oldrule, 3678 ruleset->rules[rs_num].active.ptr, entries) 3679 oldrule->nr = nr++; 3680 3681 ruleset->rules[rs_num].active.ticket++; 3682 3683 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3684 pf_remove_if_empty_kruleset(ruleset); 3685 3686 PF_RULES_WUNLOCK(); 3687 PF_CONFIG_UNLOCK(); 3688 break; 3689 3690 #undef ERROUT 3691 DIOCCHANGERULE_error: 3692 PF_RULES_WUNLOCK(); 3693 PF_CONFIG_UNLOCK(); 3694 pf_krule_free(newrule); 3695 pf_kkif_free(kif); 3696 break; 3697 } 3698 3699 case DIOCCLRSTATES: { 3700 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3701 struct pf_kstate_kill kill; 3702 3703 error = pf_state_kill_to_kstate_kill(psk, &kill); 3704 if (error) 3705 break; 3706 3707 psk->psk_killed = pf_clear_states(&kill); 3708 break; 3709 } 3710 3711 case DIOCCLRSTATESNV: { 3712 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3713 break; 3714 } 3715 3716 case DIOCKILLSTATES: { 3717 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3718 struct pf_kstate_kill kill; 3719 3720 error = pf_state_kill_to_kstate_kill(psk, &kill); 3721 if (error) 3722 break; 3723 3724 psk->psk_killed = 0; 3725 pf_killstates(&kill, &psk->psk_killed); 3726 break; 3727 } 3728 3729 case DIOCKILLSTATESNV: { 3730 error = pf_killstates_nv((struct pfioc_nv *)addr); 3731 break; 3732 } 3733 3734 case DIOCADDSTATE: { 3735 struct pfioc_state *ps = (struct pfioc_state *)addr; 3736 struct pfsync_state_1301 *sp = &ps->state; 3737 3738 if (sp->timeout >= PFTM_MAX) { 3739 error = EINVAL; 3740 break; 3741 } 3742 if (V_pfsync_state_import_ptr != NULL) { 3743 PF_RULES_RLOCK(); 3744 error = V_pfsync_state_import_ptr( 3745 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, 3746 PFSYNC_MSG_VERSION_1301); 3747 PF_RULES_RUNLOCK(); 3748 } else 3749 error = EOPNOTSUPP; 3750 break; 3751 } 3752 3753 case DIOCGETSTATE: { 3754 struct pfioc_state *ps = (struct pfioc_state *)addr; 3755 struct pf_kstate *s; 3756 3757 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3758 if (s == NULL) { 3759 error = ENOENT; 3760 break; 3761 } 3762 3763 pfsync_state_export((union pfsync_state_union*)&ps->state, 3764 s, PFSYNC_MSG_VERSION_1301); 3765 PF_STATE_UNLOCK(s); 3766 break; 3767 } 3768 3769 case DIOCGETSTATENV: { 3770 error = pf_getstate((struct pfioc_nv *)addr); 3771 break; 3772 } 3773 3774 case DIOCGETSTATES: { 3775 struct pfioc_states *ps = (struct pfioc_states *)addr; 3776 struct pf_kstate *s; 3777 struct pfsync_state_1301 *pstore, *p; 3778 int i, nr; 3779 size_t slice_count = 16, count; 3780 void *out; 3781 3782 if (ps->ps_len <= 0) { 3783 nr = uma_zone_get_cur(V_pf_state_z); 3784 
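			/*
			 * No buffer was supplied: report how much space all
			 * current states need and let the caller retry with
			 * a buffer of at least that size.
			 */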
ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3785 break; 3786 } 3787 3788 out = ps->ps_states; 3789 pstore = mallocarray(slice_count, 3790 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); 3791 nr = 0; 3792 3793 for (i = 0; i <= pf_hashmask; i++) { 3794 struct pf_idhash *ih = &V_pf_idhash[i]; 3795 3796 DIOCGETSTATES_retry: 3797 p = pstore; 3798 3799 if (LIST_EMPTY(&ih->states)) 3800 continue; 3801 3802 PF_HASHROW_LOCK(ih); 3803 count = 0; 3804 LIST_FOREACH(s, &ih->states, entry) { 3805 if (s->timeout == PFTM_UNLINKED) 3806 continue; 3807 count++; 3808 } 3809 3810 if (count > slice_count) { 3811 PF_HASHROW_UNLOCK(ih); 3812 free(pstore, M_TEMP); 3813 slice_count = count * 2; 3814 pstore = mallocarray(slice_count, 3815 sizeof(struct pfsync_state_1301), M_TEMP, 3816 M_WAITOK | M_ZERO); 3817 goto DIOCGETSTATES_retry; 3818 } 3819 3820 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3821 PF_HASHROW_UNLOCK(ih); 3822 goto DIOCGETSTATES_full; 3823 } 3824 3825 LIST_FOREACH(s, &ih->states, entry) { 3826 if (s->timeout == PFTM_UNLINKED) 3827 continue; 3828 3829 pfsync_state_export((union pfsync_state_union*)p, 3830 s, PFSYNC_MSG_VERSION_1301); 3831 p++; 3832 nr++; 3833 } 3834 PF_HASHROW_UNLOCK(ih); 3835 error = copyout(pstore, out, 3836 sizeof(struct pfsync_state_1301) * count); 3837 if (error) 3838 break; 3839 out = ps->ps_states + nr; 3840 } 3841 DIOCGETSTATES_full: 3842 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3843 free(pstore, M_TEMP); 3844 3845 break; 3846 } 3847 3848 case DIOCGETSTATESV2: { 3849 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3850 struct pf_kstate *s; 3851 struct pf_state_export *pstore, *p; 3852 int i, nr; 3853 size_t slice_count = 16, count; 3854 void *out; 3855 3856 if (ps->ps_req_version > PF_STATE_VERSION) { 3857 error = ENOTSUP; 3858 break; 3859 } 3860 3861 if (ps->ps_len <= 0) { 3862 nr = uma_zone_get_cur(V_pf_state_z); 3863 ps->ps_len = sizeof(struct pf_state_export) * nr; 3864 break; 3865 } 3866 3867 out = ps->ps_states; 3868 pstore = mallocarray(slice_count, 3869 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3870 nr = 0; 3871 3872 for (i = 0; i <= pf_hashmask; i++) { 3873 struct pf_idhash *ih = &V_pf_idhash[i]; 3874 3875 DIOCGETSTATESV2_retry: 3876 p = pstore; 3877 3878 if (LIST_EMPTY(&ih->states)) 3879 continue; 3880 3881 PF_HASHROW_LOCK(ih); 3882 count = 0; 3883 LIST_FOREACH(s, &ih->states, entry) { 3884 if (s->timeout == PFTM_UNLINKED) 3885 continue; 3886 count++; 3887 } 3888 3889 if (count > slice_count) { 3890 PF_HASHROW_UNLOCK(ih); 3891 free(pstore, M_TEMP); 3892 slice_count = count * 2; 3893 pstore = mallocarray(slice_count, 3894 sizeof(struct pf_state_export), M_TEMP, 3895 M_WAITOK | M_ZERO); 3896 goto DIOCGETSTATESV2_retry; 3897 } 3898 3899 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3900 PF_HASHROW_UNLOCK(ih); 3901 goto DIOCGETSTATESV2_full; 3902 } 3903 3904 LIST_FOREACH(s, &ih->states, entry) { 3905 if (s->timeout == PFTM_UNLINKED) 3906 continue; 3907 3908 pf_state_export(p, s); 3909 p++; 3910 nr++; 3911 } 3912 PF_HASHROW_UNLOCK(ih); 3913 error = copyout(pstore, out, 3914 sizeof(struct pf_state_export) * count); 3915 if (error) 3916 break; 3917 out = ps->ps_states + nr; 3918 } 3919 DIOCGETSTATESV2_full: 3920 ps->ps_len = nr * sizeof(struct pf_state_export); 3921 free(pstore, M_TEMP); 3922 3923 break; 3924 } 3925 3926 case DIOCGETSTATUS: { 3927 struct pf_status *s = (struct pf_status *)addr; 3928 3929 PF_RULES_RLOCK(); 3930 s->running = V_pf_status.running; 3931 s->since = V_pf_status.since; 3932 s->debug = 
V_pf_status.debug; 3933 s->hostid = V_pf_status.hostid; 3934 s->states = V_pf_status.states; 3935 s->src_nodes = V_pf_status.src_nodes; 3936 3937 for (int i = 0; i < PFRES_MAX; i++) 3938 s->counters[i] = 3939 counter_u64_fetch(V_pf_status.counters[i]); 3940 for (int i = 0; i < LCNT_MAX; i++) 3941 s->lcounters[i] = 3942 counter_u64_fetch(V_pf_status.lcounters[i]); 3943 for (int i = 0; i < FCNT_MAX; i++) 3944 s->fcounters[i] = 3945 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3946 for (int i = 0; i < SCNT_MAX; i++) 3947 s->scounters[i] = 3948 counter_u64_fetch(V_pf_status.scounters[i]); 3949 3950 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3951 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3952 PF_MD5_DIGEST_LENGTH); 3953 3954 pfi_update_status(s->ifname, s); 3955 PF_RULES_RUNLOCK(); 3956 break; 3957 } 3958 3959 case DIOCGETSTATUSNV: { 3960 error = pf_getstatus((struct pfioc_nv *)addr); 3961 break; 3962 } 3963 3964 case DIOCSETSTATUSIF: { 3965 struct pfioc_if *pi = (struct pfioc_if *)addr; 3966 3967 if (pi->ifname[0] == 0) { 3968 bzero(V_pf_status.ifname, IFNAMSIZ); 3969 break; 3970 } 3971 PF_RULES_WLOCK(); 3972 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3973 PF_RULES_WUNLOCK(); 3974 break; 3975 } 3976 3977 case DIOCCLRSTATUS: { 3978 PF_RULES_WLOCK(); 3979 for (int i = 0; i < PFRES_MAX; i++) 3980 counter_u64_zero(V_pf_status.counters[i]); 3981 for (int i = 0; i < FCNT_MAX; i++) 3982 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3983 for (int i = 0; i < SCNT_MAX; i++) 3984 counter_u64_zero(V_pf_status.scounters[i]); 3985 for (int i = 0; i < KLCNT_MAX; i++) 3986 counter_u64_zero(V_pf_status.lcounters[i]); 3987 V_pf_status.since = time_second; 3988 if (*V_pf_status.ifname) 3989 pfi_update_status(V_pf_status.ifname, NULL); 3990 PF_RULES_WUNLOCK(); 3991 break; 3992 } 3993 3994 case DIOCNATLOOK: { 3995 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3996 struct pf_state_key *sk; 3997 struct pf_kstate *state; 3998 struct pf_state_key_cmp key; 3999 int m = 0, direction = pnl->direction; 4000 int sidx, didx; 4001 4002 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 4003 sidx = (direction == PF_IN) ? 1 : 0; 4004 didx = (direction == PF_IN) ? 
0 : 1; 4005 4006 if (!pnl->proto || 4007 PF_AZERO(&pnl->saddr, pnl->af) || 4008 PF_AZERO(&pnl->daddr, pnl->af) || 4009 ((pnl->proto == IPPROTO_TCP || 4010 pnl->proto == IPPROTO_UDP) && 4011 (!pnl->dport || !pnl->sport))) 4012 error = EINVAL; 4013 else { 4014 bzero(&key, sizeof(key)); 4015 key.af = pnl->af; 4016 key.proto = pnl->proto; 4017 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 4018 key.port[sidx] = pnl->sport; 4019 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 4020 key.port[didx] = pnl->dport; 4021 4022 state = pf_find_state_all(&key, direction, &m); 4023 if (state == NULL) { 4024 error = ENOENT; 4025 } else { 4026 if (m > 1) { 4027 PF_STATE_UNLOCK(state); 4028 error = E2BIG; /* more than one state */ 4029 } else { 4030 sk = state->key[sidx]; 4031 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 4032 pnl->rsport = sk->port[sidx]; 4033 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 4034 pnl->rdport = sk->port[didx]; 4035 PF_STATE_UNLOCK(state); 4036 } 4037 } 4038 } 4039 break; 4040 } 4041 4042 case DIOCSETTIMEOUT: { 4043 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4044 int old; 4045 4046 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 4047 pt->seconds < 0) { 4048 error = EINVAL; 4049 break; 4050 } 4051 PF_RULES_WLOCK(); 4052 old = V_pf_default_rule.timeout[pt->timeout]; 4053 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 4054 pt->seconds = 1; 4055 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 4056 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 4057 wakeup(pf_purge_thread); 4058 pt->seconds = old; 4059 PF_RULES_WUNLOCK(); 4060 break; 4061 } 4062 4063 case DIOCGETTIMEOUT: { 4064 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4065 4066 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 4067 error = EINVAL; 4068 break; 4069 } 4070 PF_RULES_RLOCK(); 4071 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 4072 PF_RULES_RUNLOCK(); 4073 break; 4074 } 4075 4076 case DIOCGETLIMIT: { 4077 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4078 4079 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 4080 error = EINVAL; 4081 break; 4082 } 4083 PF_RULES_RLOCK(); 4084 pl->limit = V_pf_limits[pl->index].limit; 4085 PF_RULES_RUNLOCK(); 4086 break; 4087 } 4088 4089 case DIOCSETLIMIT: { 4090 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4091 int old_limit; 4092 4093 PF_RULES_WLOCK(); 4094 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 4095 V_pf_limits[pl->index].zone == NULL) { 4096 PF_RULES_WUNLOCK(); 4097 error = EINVAL; 4098 break; 4099 } 4100 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 4101 old_limit = V_pf_limits[pl->index].limit; 4102 V_pf_limits[pl->index].limit = pl->limit; 4103 pl->limit = old_limit; 4104 PF_RULES_WUNLOCK(); 4105 break; 4106 } 4107 4108 case DIOCSETDEBUG: { 4109 u_int32_t *level = (u_int32_t *)addr; 4110 4111 PF_RULES_WLOCK(); 4112 V_pf_status.debug = *level; 4113 PF_RULES_WUNLOCK(); 4114 break; 4115 } 4116 4117 case DIOCCLRRULECTRS: { 4118 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4119 struct pf_kruleset *ruleset = &pf_main_ruleset; 4120 struct pf_krule *rule; 4121 4122 PF_RULES_WLOCK(); 4123 TAILQ_FOREACH(rule, 4124 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4125 pf_counter_u64_zero(&rule->evaluations); 4126 for (int i = 0; i < 2; i++) { 4127 pf_counter_u64_zero(&rule->packets[i]); 4128 pf_counter_u64_zero(&rule->bytes[i]); 4129 } 4130 } 4131 PF_RULES_WUNLOCK(); 4132 break; 4133 } 4134 4135 case DIOCGIFSPEEDV0: 4136 case DIOCGIFSPEEDV1: { 4137 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 4138 struct pf_ifspeed_v1 ps; 4139 struct ifnet *ifp; 4140 4141 if (psp->ifname[0] == '\0') { 4142 error = EINVAL; 4143 break; 4144 } 4145 4146 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4147 if (error != 0) 4148 break; 4149 ifp = ifunit(ps.ifname); 4150 if (ifp != NULL) { 4151 psp->baudrate32 = 4152 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4153 if (cmd == DIOCGIFSPEEDV1) 4154 psp->baudrate = ifp->if_baudrate; 4155 } else { 4156 error = EINVAL; 4157 } 4158 break; 4159 } 4160 4161 #ifdef ALTQ 4162 case DIOCSTARTALTQ: { 4163 struct pf_altq *altq; 4164 4165 PF_RULES_WLOCK(); 4166 /* enable all altq interfaces on active list */ 4167 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4168 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4169 error = pf_enable_altq(altq); 4170 if (error != 0) 4171 break; 4172 } 4173 } 4174 if (error == 0) 4175 V_pf_altq_running = 1; 4176 PF_RULES_WUNLOCK(); 4177 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4178 break; 4179 } 4180 4181 case DIOCSTOPALTQ: { 4182 struct pf_altq *altq; 4183 4184 PF_RULES_WLOCK(); 4185 /* disable all altq interfaces on active list */ 4186 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4187 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4188 error = pf_disable_altq(altq); 4189 if (error != 0) 4190 break; 4191 } 4192 } 4193 if (error == 0) 4194 V_pf_altq_running = 0; 4195 PF_RULES_WUNLOCK(); 4196 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4197 break; 4198 } 4199 4200 case DIOCADDALTQV0: 4201 case DIOCADDALTQV1: { 4202 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4203 struct pf_altq *altq, *a; 4204 struct ifnet *ifp; 4205 4206 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4207 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4208 if (error) 4209 break; 4210 altq->local_flags = 0; 4211 4212 PF_RULES_WLOCK(); 4213 if (pa->ticket != V_ticket_altqs_inactive) { 4214 PF_RULES_WUNLOCK(); 4215 free(altq, M_PFALTQ); 4216 error = EBUSY; 4217 break; 4218 } 4219 4220 /* 4221 * if this is for a queue, find the discipline and 4222 * copy the necessary fields 4223 */ 4224 if (altq->qname[0] != 0) { 4225 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4226 PF_RULES_WUNLOCK(); 4227 error = EBUSY; 4228 free(altq, M_PFALTQ); 4229 break; 4230 } 4231 altq->altq_disc = NULL; 4232 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4233 if (strncmp(a->ifname, altq->ifname, 4234 IFNAMSIZ) == 0) { 4235 altq->altq_disc = a->altq_disc; 4236 break; 4237 } 4238 } 4239 } 4240 4241 if ((ifp = ifunit(altq->ifname)) == NULL) 4242 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4243 else 4244 error = altq_add(ifp, altq); 4245 4246 if (error) { 4247 PF_RULES_WUNLOCK(); 4248 free(altq, M_PFALTQ); 4249 break; 4250 } 4251 4252 if (altq->qname[0] != 0) 4253 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4254 else 4255 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4256 /* version error check done on import above */ 4257 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4258 PF_RULES_WUNLOCK(); 4259 break; 4260 } 4261 4262 case DIOCGETALTQSV0: 4263 case DIOCGETALTQSV1: { 4264 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4265 struct pf_altq *altq; 4266 4267 PF_RULES_RLOCK(); 4268 pa->nr = 0; 4269 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4270 pa->nr++; 4271 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4272 pa->nr++; 4273 pa->ticket = V_ticket_altqs_active; 4274 PF_RULES_RUNLOCK(); 4275 break; 4276 } 4277 4278 case DIOCGETALTQV0: 4279 

	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq		*altq;
		int			 nbytes;
		u_int32_t		 version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;	/* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpooladdr	*pa;
		struct pfi_kkif		*kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
		if (error != 0)
			break;
		if (pa->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				pf_kkif_free(kif);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kkif_attach(kif, pa->ifname);
			kif = NULL;
			pfi_kkif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kkif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}
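
	/*
	 * Pool addresses use the same count-then-index enumeration:
	 * DIOCGETADDRS reports how many entries a rule's pool has and
	 * DIOCGETADDR retrieves the nr'th one under the same ticket.
	 */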

	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;
		pp->nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;
		u_int32_t		 nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}
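
	/*
	 * DIOCCHANGEADDR edits a pool in place: depending on
	 * pca->action the new entry is inserted at the head or tail,
	 * before or after the nr'th entry, or the nr'th entry is
	 * removed.
	 */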

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_kruleset	*ruleset;
		struct pfi_kkif		*kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = 0;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;
		u_int32_t		 nr = 0;

		pr->path[sizeof(pr->path) - 1] = 0;

		PF_RULES_RLOCK();
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}
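
	/*
	 * The DIOCR* table ioctls below share one shape: validate
	 * pfrio_esize against the expected element size, bound the
	 * element count, copy the array in from userland, call into
	 * pf_table.c under the rules lock and, where the operation
	 * reports per-element results, copy the array back out.
	 */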

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
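
	/*
	 * From here on the DIOCR* ioctls carry arrays of struct
	 * pfr_addr rather than struct pfr_table; with PFR_FLAG_FEEDBACK
	 * set, per-address result codes are written back into the
	 * caller's array.
	 */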

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}
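
	/*
	 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement atomic
	 * multi-ruleset transactions.  A rough userland sketch (fd
	 * being an open /dev/pf descriptor, error handling omitted):
	 *
	 *	struct pfioc_trans_e ioes[] = {
	 *		{ .rs_num = PF_RULESET_FILTER },
	 *		{ .rs_num = PF_RULESET_TABLE },
	 *	};
	 *	struct pfioc_trans io = {
	 *		.size = nitems(ioes),
	 *		.esize = sizeof(ioes[0]),
	 *		.array = ioes,
	 *	};
	 *	ioctl(fd, DIOCXBEGIN, &io);
	 *	... load rules/tables using ioes[i].ticket ...
	 *	ioctl(fd, DIOCXCOMMIT, &io);	(or DIOCXROLLBACK)
	 */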

	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* Ensure there are no more Ethernet rules to clean up. */
		NET_EPOCH_DRAIN_CALLBACKS();
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_rollback_eth(ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}
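
	/*
	 * Commit runs in two passes: the first pass only validates
	 * every ticket, so a stale transaction fails before anything
	 * is swapped in; the second pass performs the actual commits
	 * and is not expected to fail.
	 */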

	case DIOCXCOMMIT: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		struct pf_kruleset	*rs;
		struct pf_keth_ruleset	*ers;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				ers = pf_find_keth_ruleset(ioe->anchor);
				if (ers == NULL || ioe->ticket == 0 ||
				    ioe->ticket != ers->inactive.ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();

		/* Only hook into Ethernet traffic if we've got rules for it. */
		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		else
			dehook_pf_eth();

		free(ioes, M_TEMP);
		break;
	}
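
	/*
	 * DIOCGETSRCNODES uses the usual two-call sizing protocol:
	 * called with psn_len == 0 it only reports the space required
	 * for all current source nodes; a second call with a buffer of
	 * that size copies them out.  The node count may change in
	 * between, so the copy loop re-checks the bound.
	 */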

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash	*sh;
		struct pf_ksrc_node	*n;
		struct pf_src_node	*p, *pstore;
		uint32_t		 i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

#ifdef COMPAT_FREEBSD13
	case DIOCKEEPCOUNTERS_FREEBSD13:
#endif
	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t	*hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		if (io->pfiio_size < 0 ||
		    io->pfiio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
			error = EINVAL;
			break;
		}

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
		    M_TEMP, M_WAITOK | M_ZERO);

		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETREASS: {
		u_int32_t	*reass = (u_int32_t *)addr;

		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
		/* Clearing the DF flag without reassembly enabled is not a
		 * valid combination.  Disable reassembly in that case. */
		if (!(V_pf_status.reass & PF_REASS_ENABLED))
			V_pf_status.reass = 0;
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&V_pf_ioctl_lock))
		sx_xunlock(&V_pf_ioctl_lock);
	CURVNET_RESTORE();

#undef ERROUT_IOCTL

	return (error);
}
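
/*
 * Export a state in the wire format exchanged with pfsync peers.  The
 * fields common to all message versions are written through the
 * pfs_1301 view of the union; the switch on msg_version fills in the
 * version-specific remainder (the 14.00 format carries additional
 * metadata such as queue ids and route-to details).
 */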

void
pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
{
	bzero(sp, sizeof(union pfsync_state_union));

	/* copy from state key */
	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
	sp->pfs_1301.creation = htonl(time_uptime - st->creation);
	sp->pfs_1301.expire = pf_state_expires(st);
	if (sp->pfs_1301.expire <= time_uptime)
		sp->pfs_1301.expire = htonl(0);
	else
		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);

	sp->pfs_1301.direction = st->direction;
	sp->pfs_1301.log = st->log;
	sp->pfs_1301.timeout = st->timeout;

	switch (msg_version) {
	case PFSYNC_MSG_VERSION_1301:
		sp->pfs_1301.state_flags = st->state_flags;
		break;
	case PFSYNC_MSG_VERSION_1400:
		sp->pfs_1400.state_flags = htons(st->state_flags);
		sp->pfs_1400.qid = htons(st->qid);
		sp->pfs_1400.pqid = htons(st->pqid);
		sp->pfs_1400.dnpipe = htons(st->dnpipe);
		sp->pfs_1400.dnrpipe = htons(st->dnrpipe);
		sp->pfs_1400.rtableid = htonl(st->rtableid);
		sp->pfs_1400.min_ttl = st->min_ttl;
		sp->pfs_1400.set_tos = st->set_tos;
		sp->pfs_1400.max_mss = htons(st->max_mss);
		sp->pfs_1400.set_prio[0] = st->set_prio[0];
		sp->pfs_1400.set_prio[1] = st->set_prio[1];
		sp->pfs_1400.rt = st->rt;
		if (st->rt_kif)
			strlcpy(sp->pfs_1400.rt_ifname,
			    st->rt_kif->pfik_name,
			    sizeof(sp->pfs_1400.rt_ifname));
		break;
	default:
		panic("%s: Unsupported pfsync_msg_version %d",
		    __func__, msg_version);
	}

	if (st->src_node)
		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->pfs_1301.id = st->id;
	sp->pfs_1301.creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);

	if (st->rule.ptr == NULL)
		sp->pfs_1301.rule = htonl(-1);
	else
		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->pfs_1301.anchor = htonl(-1);
	else
		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->pfs_1301.nat_rule = htonl(-1);
	else
		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
}
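
/*
 * Export a state for the state-listing ioctls.  Note that state_flags
 * goes out twice: truncated to 8 bits for old libpfctl consumers
 * (state_flags_compat) and in full as the 16-bit state_flags field.
 */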

void
pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));

	sp->version = PF_STATE_VERSION;

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
	    sizeof(sp->orig_ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
	sp->state_flags_compat = st->state_flags;
	sp->state_flags = htons(st->state_flags);
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	sp->packets[0] = st->packets[0];
	sp->packets[1] = st->packets[1];
	sp->bytes[0] = st->bytes[0];
	sp->bytes[1] = st->bytes[1];

	sp->qid = htons(st->qid);
	sp->pqid = htons(st->pqid);
	sp->dnpipe = htons(st->dnpipe);
	sp->dnrpipe = htons(st->dnrpipe);
	sp->rtableid = htonl(st->rtableid);
	sp->min_ttl = st->min_ttl;
	sp->set_tos = st->set_tos;
	sp->max_mss = htons(st->max_mss);
	sp->rt = st->rt;
	if (st->rt_kif)
		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
		    sizeof(sp->rt_ifname));
	sp->set_prio[0] = st->set_prio[0];
	sp->set_prio[1] = st->set_prio[1];
}

static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t	*nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names",
		    names[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}
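
/*
 * Build the packed nvlist reply for the status ioctl.  Like the other
 * nvlist-based ioctls, a caller that passes nv->size == 0 receives
 * only the required length in nv->len and is expected to retry with a
 * large enough buffer.
 */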

static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t	*nvl = NULL, *nvc = NULL;
	void		*nvlpacked = NULL;
	int		 error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
	nvlist_add_number(nvl, "reass", V_pf_status.reass);
	nvlist_add_bool(nvl, "syncookies_active",
	    V_pf_status.syncookies_active);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/* fcounters */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */
static void
pf_clear_all_states(void)
{
	struct pf_kstate	*s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}
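
/*
 * Detach states from a source node, or from all source nodes when
 * n == NULL, and mark the node(s) expired so that the purge thread
 * frees them.
 */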

static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_kstate *s;
	int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list	 kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t	*nvl = NULL;
	void		*nvlpacked = NULL;
	int		 error = 0;

#define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (!nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}
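
/*
 * Unlink every state matching the kill criteria.  The scan restarts
 * from the top of the hash row after each unlink rather than
 * continuing from the removed element.
 */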

static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp	 match_key;
	struct pf_kstate	*s;
	struct pfi_kkif	*kif;
	int		 idx;
	unsigned int	 killed = 0, dir;

	for (unsigned int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname,
			    kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}

static void
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_kstate	*s;

	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_unlink_state(s);
			*killed = 1;
		}
		return;
	}

	for (unsigned int i = 0; i <= pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);

	return;
}

static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill	 kill;
	nvlist_t		*nvl = NULL;
	void			*nvlpacked = NULL;
	int			 error = 0;
	unsigned int		 killed = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	pf_killstates(&kill, &killed);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill	 kill;
	nvlist_t		*nvl = NULL;
	void			*nvlpacked = NULL;
	int			 error = 0;
	unsigned int		 killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t		*nvl = NULL, *nvls;
	void			*nvlpacked = NULL;
	struct pf_kstate	*s = NULL;
	int			 error = 0;
	uint64_t		 id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}
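
/*
 * pf_check_return() maps pf's verdict onto the pfil(9) contract used by
 * all of the checker functions below: PF_PASS with the mbuf still
 * present means "keep processing" (PFIL_PASS); PF_PASS with *m cleared
 * means pf took ownership of the packet, e.g. when pfsync defers
 * transmission (PFIL_CONSUMED); any other verdict frees the mbuf and
 * reports PFIL_DROPPED.
 */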

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we have to change this to lo0, as is the case with
	 * IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	/* Ethernet hook: incoming packets. */
	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	/* Ethernet hook: outgoing packets. */
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}

static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet_local_pfil_head;
		pla.pa_hook = V_pf_ip4_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet6_local_pfil_head;
		pla.pa_hook = V_pf_ip6_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}
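
/*
 * Hooking pf up is a two-step pfil(9) operation: pfil_add_hook()
 * creates the hook and pfil_link() wires it to a packet head.  The
 * PFIL_HEADPTR and PFIL_HOOKPTR flags tell pfil_link() that pa_head and
 * pa_hook carry the pointers obtained above; linking by name instead
 * would look roughly like the sketch below (hook names combine
 * pa_modname and pa_rulname, so this assumes the registration done in
 * hook_pf()):
 *
 *	struct pfil_link_args pla = {
 *		.pa_version = PFIL_VERSION,
 *		.pa_flags = PFIL_IN,
 *		.pa_headname = "inet",
 *		.pa_hookname = "pf:default-in",
 *	};
 *	error = pfil_link(&pla);
 *
 * Linking one hook to several heads is also how the V_pf_filter_local
 * case above attaches the outbound hooks to the local-delivery heads.
 */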

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}
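
/*
 * Initialization is deliberately split: pf_load() runs once per kernel
 * at MOD_LOAD time and creates the global resources (the /dev/pf
 * device node and the purge kthread), while pf_load_vnet() runs for
 * every vnet via the VNET_SYSINIT below and builds the per-vnet state:
 * locks, tag sets, the keth anchor, and the attach itself.  The
 * teardown path mirrors this split in pf_unload_vnet() and pf_unload().
 */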

static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last, as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
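
/*
 * MOD_UNLOAD is deliberately a no-op in pf_modevent(): SYSUNINITs run
 * in reverse priority order, so the SI_ORDER_THIRD vnet_pf_uninit()
 * instances tear down every vnet before the SI_ORDER_SECOND pf_unload()
 * releases the globals.  Unloading from the modevent hook instead could
 * free shared state while vnets still reference it.
 */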

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);
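
/*
 * DECLARE_MODULE() registers pf at SI_SUB_PROTO_FIREWALL/SI_ORDER_SECOND,
 * so pf_modevent() runs before the SI_ORDER_THIRD VNET_SYSINIT above,
 * and MODULE_VERSION() exports PF_MODVER to consumers.  Companion
 * modules such as pflog(4) and pfsync(4) pin themselves to this version
 * with a dependency declaration along these lines (sketch):
 *
 *	MODULE_DEPEND(pflog, pf, PF_MODVER, PF_MODVER, PF_MODVER);
 */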