/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nl.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int	 pf_krule_compare(struct pf_krule *,
			    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t, pf_tag_z);
#define	V_pf_tag_z	VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define	V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

#ifdef PF_DEFAULT_TO_DROP
VNET_DEFINE_STATIC(bool, default_to_drop) = true;
#else
VNET_DEFINE_STATIC(bool, default_to_drop);
#endif
#define	V_default_to_drop	VNET(default_to_drop)
SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
    &VNET_NAME(default_to_drop), false,
    "Make the default rule drop all packets.");

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define	V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define	V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g. eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define	V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t		*pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
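 * For example, with sz == 4, "abc" is copied successfully while "abcd" is
 * rejected with EINVAL, because it cannot fit together with its terminating
 * NUL within sz bytes.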
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled by
	 * default.  In FreeBSD we expect people to still use scrub rules and
	 * switch to the new syntax later.  Only when they switch must they
	 * explicitly enable reassembly.  We could change the default once the
	 * scrub rule functionality is hopefully removed some day in the
	 * future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

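/*
 * Unlink a rule from the given queue and park it on the VNET's list of
 * unlinked rules, taking the unlinked-rules lock on behalf of the caller.
 */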
static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	if (rule->rcv_kif)
		pfi_kkif_unref(rule->rcv_kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int		 i;
	unsigned int		 hashsize;
	struct pf_tagname	*t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
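	 * Every pf_tagname is linked into both namehash and taghash, so
	 * walking namehash alone visits (and frees) each entry exactly once.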
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used.
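	 * NET_EPOCH_CALL() defers pf_rollback_eth_cb() until the current
	 * network epoch readers have drained, so in-flight lookups cannot
	 * still reference the swapped-out rules.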
	 */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old.
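	 * (now inactive) lists around so that their disciplines can be
	 * detached and the queue entries freed further below.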
	 */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been overridden
	 * by a new one.  If so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global	*tree;
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define	PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global	*old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information.
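	 * New rules whose MD5 digest matches an old rule (looked up in the
	 * old RB tree) inherit that rule's evaluation, packet and byte
	 * counters.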
	 */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_rollup_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_rollup_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_rollup_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_rollup_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_rollup_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
			    sizeof(struct pf_rule **),
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
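 * An ioctl argument of sizeof(struct pfioc_altq_v0) implies version 0;
 * otherwise the version requested by the caller is used, up to
 * PFIOC_ALTQ_VERSION.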
1587 */ 1588 static int 1589 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) 1590 { 1591 u_int32_t version; 1592 1593 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1594 version = 0; 1595 else 1596 version = pa->version; 1597 1598 if (version > PFIOC_ALTQ_VERSION) 1599 return (EINVAL); 1600 1601 #define ASSIGN(x) exported_q->x = q->x 1602 #define COPY(x) \ 1603 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) 1604 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) 1605 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) 1606 1607 switch (version) { 1608 case 0: { 1609 struct pf_altq_v0 *exported_q = 1610 &((struct pfioc_altq_v0 *)pa)->altq; 1611 1612 COPY(ifname); 1613 1614 ASSIGN(scheduler); 1615 ASSIGN(tbrsize); 1616 exported_q->tbrsize = SATU16(q->tbrsize); 1617 exported_q->ifbandwidth = SATU32(q->ifbandwidth); 1618 1619 COPY(qname); 1620 COPY(parent); 1621 ASSIGN(parent_qid); 1622 exported_q->bandwidth = SATU32(q->bandwidth); 1623 ASSIGN(priority); 1624 ASSIGN(local_flags); 1625 1626 ASSIGN(qlimit); 1627 ASSIGN(flags); 1628 1629 if (q->scheduler == ALTQT_HFSC) { 1630 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x 1631 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ 1632 SATU32(q->pq_u.hfsc_opts.x) 1633 1634 ASSIGN_OPT_SATU32(rtsc_m1); 1635 ASSIGN_OPT(rtsc_d); 1636 ASSIGN_OPT_SATU32(rtsc_m2); 1637 1638 ASSIGN_OPT_SATU32(lssc_m1); 1639 ASSIGN_OPT(lssc_d); 1640 ASSIGN_OPT_SATU32(lssc_m2); 1641 1642 ASSIGN_OPT_SATU32(ulsc_m1); 1643 ASSIGN_OPT(ulsc_d); 1644 ASSIGN_OPT_SATU32(ulsc_m2); 1645 1646 ASSIGN_OPT(flags); 1647 1648 #undef ASSIGN_OPT 1649 #undef ASSIGN_OPT_SATU32 1650 } else 1651 COPY(pq_u); 1652 1653 ASSIGN(qid); 1654 break; 1655 } 1656 case 1: { 1657 struct pf_altq_v1 *exported_q = 1658 &((struct pfioc_altq_v1 *)pa)->altq; 1659 1660 COPY(ifname); 1661 1662 ASSIGN(scheduler); 1663 ASSIGN(tbrsize); 1664 ASSIGN(ifbandwidth); 1665 1666 COPY(qname); 1667 COPY(parent); 1668 ASSIGN(parent_qid); 1669 ASSIGN(bandwidth); 1670 ASSIGN(priority); 1671 ASSIGN(local_flags); 1672 1673 ASSIGN(qlimit); 1674 ASSIGN(flags); 1675 COPY(pq_u); 1676 1677 ASSIGN(qid); 1678 break; 1679 } 1680 default: 1681 panic("%s: unhandled struct pfioc_altq version", __func__); 1682 break; 1683 } 1684 1685 #undef ASSIGN 1686 #undef COPY 1687 #undef SATU16 1688 #undef SATU32 1689 1690 return (0); 1691 } 1692 1693 /* 1694 * Handle import to struct pf_kaltq of struct pf_altq from user binaries 1695 * that may be using any version of it. 
1696 */ 1697 static int 1698 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1699 { 1700 u_int32_t version; 1701 1702 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1703 version = 0; 1704 else 1705 version = pa->version; 1706 1707 if (version > PFIOC_ALTQ_VERSION) 1708 return (EINVAL); 1709 1710 #define ASSIGN(x) q->x = imported_q->x 1711 #define COPY(x) \ 1712 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1713 1714 switch (version) { 1715 case 0: { 1716 struct pf_altq_v0 *imported_q = 1717 &((struct pfioc_altq_v0 *)pa)->altq; 1718 1719 COPY(ifname); 1720 1721 ASSIGN(scheduler); 1722 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1723 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1724 1725 COPY(qname); 1726 COPY(parent); 1727 ASSIGN(parent_qid); 1728 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1729 ASSIGN(priority); 1730 ASSIGN(local_flags); 1731 1732 ASSIGN(qlimit); 1733 ASSIGN(flags); 1734 1735 if (imported_q->scheduler == ALTQT_HFSC) { 1736 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1737 1738 /* 1739 * The m1 and m2 parameters are being copied from 1740 * 32-bit to 64-bit. 1741 */ 1742 ASSIGN_OPT(rtsc_m1); 1743 ASSIGN_OPT(rtsc_d); 1744 ASSIGN_OPT(rtsc_m2); 1745 1746 ASSIGN_OPT(lssc_m1); 1747 ASSIGN_OPT(lssc_d); 1748 ASSIGN_OPT(lssc_m2); 1749 1750 ASSIGN_OPT(ulsc_m1); 1751 ASSIGN_OPT(ulsc_d); 1752 ASSIGN_OPT(ulsc_m2); 1753 1754 ASSIGN_OPT(flags); 1755 1756 #undef ASSIGN_OPT 1757 } else 1758 COPY(pq_u); 1759 1760 ASSIGN(qid); 1761 break; 1762 } 1763 case 1: { 1764 struct pf_altq_v1 *imported_q = 1765 &((struct pfioc_altq_v1 *)pa)->altq; 1766 1767 COPY(ifname); 1768 1769 ASSIGN(scheduler); 1770 ASSIGN(tbrsize); 1771 ASSIGN(ifbandwidth); 1772 1773 COPY(qname); 1774 COPY(parent); 1775 ASSIGN(parent_qid); 1776 ASSIGN(bandwidth); 1777 ASSIGN(priority); 1778 ASSIGN(local_flags); 1779 1780 ASSIGN(qlimit); 1781 ASSIGN(flags); 1782 COPY(pq_u); 1783 1784 ASSIGN(qid); 1785 break; 1786 } 1787 default: 1788 panic("%s: unhandled struct pfioc_altq version", __func__); 1789 break; 1790 } 1791 1792 #undef ASSIGN 1793 #undef COPY 1794 1795 return (0); 1796 } 1797 1798 static struct pf_altq * 1799 pf_altq_get_nth_active(u_int32_t n) 1800 { 1801 struct pf_altq *altq; 1802 u_int32_t nr; 1803 1804 nr = 0; 1805 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1806 if (nr == n) 1807 return (altq); 1808 nr++; 1809 } 1810 1811 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1812 if (nr == n) 1813 return (altq); 1814 nr++; 1815 } 1816 1817 return (NULL); 1818 } 1819 #endif /* ALTQ */ 1820 1821 struct pf_krule * 1822 pf_krule_alloc(void) 1823 { 1824 struct pf_krule *rule; 1825 1826 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1827 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1828 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 1829 M_WAITOK | M_ZERO); 1830 return (rule); 1831 } 1832 1833 void 1834 pf_krule_free(struct pf_krule *rule) 1835 { 1836 #ifdef PF_WANT_32_TO_64_COUNTER 1837 bool wowned; 1838 #endif 1839 1840 if (rule == NULL) 1841 return; 1842 1843 #ifdef PF_WANT_32_TO_64_COUNTER 1844 if (rule->allrulelinked) { 1845 wowned = PF_RULES_WOWNED(); 1846 if (!wowned) 1847 PF_RULES_WLOCK(); 1848 LIST_REMOVE(rule, allrulelist); 1849 V_pf_allrulecount--; 1850 if (!wowned) 1851 PF_RULES_WUNLOCK(); 1852 } 1853 #endif 1854 1855 pf_counter_u64_deinit(&rule->evaluations); 1856 for (int i = 0; i < 2; i++) { 1857 pf_counter_u64_deinit(&rule->packets[i]); 1858 pf_counter_u64_deinit(&rule->bytes[i]); 
1859 } 1860 counter_u64_free(rule->states_cur); 1861 counter_u64_free(rule->states_tot); 1862 counter_u64_free(rule->src_nodes); 1863 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); 1864 1865 mtx_destroy(&rule->rpool.mtx); 1866 free(rule, M_PFRULE); 1867 } 1868 1869 void 1870 pf_krule_clear_counters(struct pf_krule *rule) 1871 { 1872 pf_counter_u64_zero(&rule->evaluations); 1873 for (int i = 0; i < 2; i++) { 1874 pf_counter_u64_zero(&rule->packets[i]); 1875 pf_counter_u64_zero(&rule->bytes[i]); 1876 } 1877 counter_u64_zero(rule->states_tot); 1878 } 1879 1880 static void 1881 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool, 1882 struct pf_pooladdr *pool) 1883 { 1884 1885 bzero(pool, sizeof(*pool)); 1886 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); 1887 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); 1888 } 1889 1890 static int 1891 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool, 1892 struct pf_kpooladdr *kpool) 1893 { 1894 int ret; 1895 1896 bzero(kpool, sizeof(*kpool)); 1897 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); 1898 ret = pf_user_strcpy(kpool->ifname, pool->ifname, 1899 sizeof(kpool->ifname)); 1900 return (ret); 1901 } 1902 1903 static void 1904 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) 1905 { 1906 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); 1907 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); 1908 1909 bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); 1910 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); 1911 1912 kpool->tblidx = pool->tblidx; 1913 kpool->proxy_port[0] = pool->proxy_port[0]; 1914 kpool->proxy_port[1] = pool->proxy_port[1]; 1915 kpool->opts = pool->opts; 1916 } 1917 1918 static int 1919 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule) 1920 { 1921 int ret; 1922 1923 #ifndef INET 1924 if (rule->af == AF_INET) { 1925 return (EAFNOSUPPORT); 1926 } 1927 #endif /* INET */ 1928 #ifndef INET6 1929 if (rule->af == AF_INET6) { 1930 return (EAFNOSUPPORT); 1931 } 1932 #endif /* INET6 */ 1933 1934 ret = pf_check_rule_addr(&rule->src); 1935 if (ret != 0) 1936 return (ret); 1937 ret = pf_check_rule_addr(&rule->dst); 1938 if (ret != 0) 1939 return (ret); 1940 1941 bcopy(&rule->src, &krule->src, sizeof(rule->src)); 1942 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); 1943 1944 ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label)); 1945 if (ret != 0) 1946 return (ret); 1947 ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); 1948 if (ret != 0) 1949 return (ret); 1950 ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); 1951 if (ret != 0) 1952 return (ret); 1953 ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); 1954 if (ret != 0) 1955 return (ret); 1956 ret = pf_user_strcpy(krule->tagname, rule->tagname, 1957 sizeof(rule->tagname)); 1958 if (ret != 0) 1959 return (ret); 1960 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 1961 sizeof(rule->match_tagname)); 1962 if (ret != 0) 1963 return (ret); 1964 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 1965 sizeof(rule->overload_tblname)); 1966 if (ret != 0) 1967 return (ret); 1968 1969 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 1970 1971 /* Don't allow userspace to set evaluations, packets or bytes. */ 1972 /* kif, anchor, overload_tbl are not copied over. 
*/ 1973 1974 krule->os_fingerprint = rule->os_fingerprint; 1975 1976 krule->rtableid = rule->rtableid; 1977 /* pf_rule->timeout is smaller than pf_krule->timeout */ 1978 bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout)); 1979 krule->max_states = rule->max_states; 1980 krule->max_src_nodes = rule->max_src_nodes; 1981 krule->max_src_states = rule->max_src_states; 1982 krule->max_src_conn = rule->max_src_conn; 1983 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 1984 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 1985 krule->qid = rule->qid; 1986 krule->pqid = rule->pqid; 1987 krule->nr = rule->nr; 1988 krule->prob = rule->prob; 1989 krule->cuid = rule->cuid; 1990 krule->cpid = rule->cpid; 1991 1992 krule->return_icmp = rule->return_icmp; 1993 krule->return_icmp6 = rule->return_icmp6; 1994 krule->max_mss = rule->max_mss; 1995 krule->tag = rule->tag; 1996 krule->match_tag = rule->match_tag; 1997 krule->scrub_flags = rule->scrub_flags; 1998 1999 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2000 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2001 2002 krule->rule_flag = rule->rule_flag; 2003 krule->action = rule->action; 2004 krule->direction = rule->direction; 2005 krule->log = rule->log; 2006 krule->logif = rule->logif; 2007 krule->quick = rule->quick; 2008 krule->ifnot = rule->ifnot; 2009 krule->match_tag_not = rule->match_tag_not; 2010 krule->natpass = rule->natpass; 2011 2012 krule->keep_state = rule->keep_state; 2013 krule->af = rule->af; 2014 krule->proto = rule->proto; 2015 krule->type = rule->type; 2016 krule->code = rule->code; 2017 krule->flags = rule->flags; 2018 krule->flagset = rule->flagset; 2019 krule->min_ttl = rule->min_ttl; 2020 krule->allow_opts = rule->allow_opts; 2021 krule->rt = rule->rt; 2022 krule->return_ttl = rule->return_ttl; 2023 krule->tos = rule->tos; 2024 krule->set_tos = rule->set_tos; 2025 2026 krule->flush = rule->flush; 2027 krule->prio = rule->prio; 2028 krule->set_prio[0] = rule->set_prio[0]; 2029 krule->set_prio[1] = rule->set_prio[1]; 2030 2031 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2032 2033 return (0); 2034 } 2035 2036 int 2037 pf_ioctl_getrules(struct pfioc_rule *pr) 2038 { 2039 struct pf_kruleset *ruleset; 2040 struct pf_krule *tail; 2041 int rs_num; 2042 2043 PF_RULES_WLOCK(); 2044 ruleset = pf_find_kruleset(pr->anchor); 2045 if (ruleset == NULL) { 2046 PF_RULES_WUNLOCK(); 2047 return (EINVAL); 2048 } 2049 rs_num = pf_get_ruleset_number(pr->rule.action); 2050 if (rs_num >= PF_RULESET_MAX) { 2051 PF_RULES_WUNLOCK(); 2052 return (EINVAL); 2053 } 2054 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 2055 pf_krulequeue); 2056 if (tail) 2057 pr->nr = tail->nr + 1; 2058 else 2059 pr->nr = 0; 2060 pr->ticket = ruleset->rules[rs_num].active.ticket; 2061 PF_RULES_WUNLOCK(); 2062 2063 return (0); 2064 } 2065 2066 int 2067 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2068 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 2069 uid_t uid, pid_t pid) 2070 { 2071 struct pf_kruleset *ruleset; 2072 struct pf_krule *tail; 2073 struct pf_kpooladdr *pa; 2074 struct pfi_kkif *kif = NULL, *rcv_kif = NULL; 2075 int rs_num; 2076 int error = 0; 2077 2078 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2079 error = EINVAL; 2080 goto errout_unlocked; 2081 } 2082 2083 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2084 2085 if (rule->ifname[0]) 2086 kif = pf_kkif_create(M_WAITOK); 2087 if (rule->rcv_ifname[0]) 2088 rcv_kif = pf_kkif_create(M_WAITOK); 2089 
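	/*
	 * All sleepable (M_WAITOK) allocations for the rule are done up
	 * front, before the pf config/rules locks are taken below.
	 */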
pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2090 	for (int i = 0; i < 2; i++) {
2091 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2092 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2093 	}
2094 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2095 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2096 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2097 	rule->cuid = uid;
2098 	rule->cpid = pid;
2099 	TAILQ_INIT(&rule->rpool.list);
2100 
2101 	PF_CONFIG_LOCK();
2102 	PF_RULES_WLOCK();
2103 #ifdef PF_WANT_32_TO_64_COUNTER
2104 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2105 	MPASS(!rule->allrulelinked);
2106 	rule->allrulelinked = true;
2107 	V_pf_allrulecount++;
2108 #endif
2109 	ruleset = pf_find_kruleset(anchor);
2110 	if (ruleset == NULL)
2111 		ERROUT(EINVAL);
2112 	rs_num = pf_get_ruleset_number(rule->action);
2113 	if (rs_num >= PF_RULESET_MAX)
2114 		ERROUT(EINVAL);
2115 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2116 		DPFPRINTF(PF_DEBUG_MISC,
2117 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2118 		    ruleset->rules[rs_num].inactive.ticket));
2119 		ERROUT(EBUSY);
2120 	}
2121 	if (pool_ticket != V_ticket_pabuf) {
2122 		DPFPRINTF(PF_DEBUG_MISC,
2123 		    ("pool_ticket: %d != %d\n", pool_ticket,
2124 		    V_ticket_pabuf));
2125 		ERROUT(EBUSY);
2126 	}
2127 	/*
2128 	 * XXXMJG hack: there is no mechanism to ensure that the caller
2129 	 * actually started the transaction.  The ticket checked above may
2130 	 * happen to match by accident, even if nobody called DIOCXBEGIN,
2131 	 * let alone this process.  Partially work around this by checking
2132 	 * whether the RB tree has been allocated; see pf_begin_rules().
2133 	 */
2134 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2135 		ERROUT(EINVAL);
2136 	}
2137 
2138 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2139 	    pf_krulequeue);
2140 	if (tail)
2141 		rule->nr = tail->nr + 1;
2142 	else
2143 		rule->nr = 0;
2144 	if (rule->ifname[0]) {
2145 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2146 		kif = NULL;
2147 		pfi_kkif_ref(rule->kif);
2148 	} else
2149 		rule->kif = NULL;
2150 
2151 	if (rule->rcv_ifname[0]) {
2152 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2153 		rcv_kif = NULL;
2154 		pfi_kkif_ref(rule->rcv_kif);
2155 	} else
2156 		rule->rcv_kif = NULL;
2157 
2158 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2159 		error = EBUSY;
2160 
2161 #ifdef ALTQ
2162 	/* set queue IDs */
2163 	if (rule->qname[0] != 0) {
2164 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2165 			error = EBUSY;
2166 		else if (rule->pqname[0] != 0) {
2167 			if ((rule->pqid =
2168 			    pf_qname2qid(rule->pqname)) == 0)
2169 				error = EBUSY;
2170 		} else
2171 			rule->pqid = rule->qid;
2172 	}
2173 #endif
2174 	if (rule->tagname[0])
2175 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2176 			error = EBUSY;
2177 	if (rule->match_tagname[0])
2178 		if ((rule->match_tag =
2179 		    pf_tagname2tag(rule->match_tagname)) == 0)
2180 			error = EBUSY;
2181 	if (rule->rt && !rule->direction)
2182 		error = EINVAL;
2183 	if (!rule->log)
2184 		rule->logif = 0;
2185 	if (rule->logif >= PFLOGIFS_MAX)
2186 		error = EINVAL;
2187 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2188 		error = ENOMEM;
2189 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2190 		error = ENOMEM;
2191 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2192 		error = EINVAL;
2193 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2194 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2195 	    rule->set_prio[1] > PF_PRIO_MAX))
2196 		error = EINVAL;
2197 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2198 		if (pa->addr.type == PF_ADDR_TABLE) {
2199 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2200
pa->addr.v.tblname); 2201 if (pa->addr.p.tbl == NULL) 2202 error = ENOMEM; 2203 } 2204 2205 rule->overload_tbl = NULL; 2206 if (rule->overload_tblname[0]) { 2207 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2208 rule->overload_tblname)) == NULL) 2209 error = EINVAL; 2210 else 2211 rule->overload_tbl->pfrkt_flags |= 2212 PFR_TFLAG_ACTIVE; 2213 } 2214 2215 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2216 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2217 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2218 (rule->rt > PF_NOPFROUTE)) && 2219 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2220 error = EINVAL; 2221 2222 if (rule->action == PF_PASS && rule->rpool.opts & PF_POOL_STICKYADDR && 2223 !rule->keep_state) { 2224 error = EINVAL; 2225 } 2226 2227 if (error) { 2228 pf_free_rule(rule); 2229 rule = NULL; 2230 ERROUT(error); 2231 } 2232 2233 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2234 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2235 rule, entries); 2236 ruleset->rules[rs_num].inactive.rcount++; 2237 2238 PF_RULES_WUNLOCK(); 2239 pf_hash_rule(rule); 2240 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2241 PF_RULES_WLOCK(); 2242 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2243 ruleset->rules[rs_num].inactive.rcount--; 2244 pf_free_rule(rule); 2245 rule = NULL; 2246 ERROUT(EEXIST); 2247 } 2248 PF_CONFIG_UNLOCK(); 2249 2250 return (0); 2251 2252 #undef ERROUT 2253 errout: 2254 PF_RULES_WUNLOCK(); 2255 PF_CONFIG_UNLOCK(); 2256 errout_unlocked: 2257 pf_kkif_free(rcv_kif); 2258 pf_kkif_free(kif); 2259 pf_krule_free(rule); 2260 return (error); 2261 } 2262 2263 static bool 2264 pf_label_match(const struct pf_krule *rule, const char *label) 2265 { 2266 int i = 0; 2267 2268 while (*rule->label[i]) { 2269 if (strcmp(rule->label[i], label) == 0) 2270 return (true); 2271 i++; 2272 } 2273 2274 return (false); 2275 } 2276 2277 static unsigned int 2278 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2279 { 2280 struct pf_kstate *s; 2281 int more = 0; 2282 2283 s = pf_find_state_all(key, dir, &more); 2284 if (s == NULL) 2285 return (0); 2286 2287 if (more) { 2288 PF_STATE_UNLOCK(s); 2289 return (0); 2290 } 2291 2292 pf_unlink_state(s); 2293 return (1); 2294 } 2295 2296 static int 2297 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2298 { 2299 struct pf_kstate *s; 2300 struct pf_state_key *sk; 2301 struct pf_addr *srcaddr, *dstaddr; 2302 struct pf_state_key_cmp match_key; 2303 int idx, killed = 0; 2304 unsigned int dir; 2305 u_int16_t srcport, dstport; 2306 struct pfi_kkif *kif; 2307 2308 relock_DIOCKILLSTATES: 2309 PF_HASHROW_LOCK(ih); 2310 LIST_FOREACH(s, &ih->states, entry) { 2311 /* For floating states look at the original kif. */ 2312 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2313 2314 sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE]; 2315 if (s->direction == PF_OUT) { 2316 srcaddr = &sk->addr[1]; 2317 dstaddr = &sk->addr[0]; 2318 srcport = sk->port[1]; 2319 dstport = sk->port[0]; 2320 } else { 2321 srcaddr = &sk->addr[0]; 2322 dstaddr = &sk->addr[1]; 2323 srcport = sk->port[0]; 2324 dstport = sk->port[1]; 2325 } 2326 2327 if (psk->psk_af && sk->af != psk->psk_af) 2328 continue; 2329 2330 if (psk->psk_proto && psk->psk_proto != sk->proto) 2331 continue; 2332 2333 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2334 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2335 continue; 2336 2337 if (! 
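		    /* same negatable addr/mask match, now for the destination */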
PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2338 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2339 continue; 2340 2341 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2342 &psk->psk_rt_addr.addr.v.a.addr, 2343 &psk->psk_rt_addr.addr.v.a.mask, 2344 &s->rt_addr, sk->af)) 2345 continue; 2346 2347 if (psk->psk_src.port_op != 0 && 2348 ! pf_match_port(psk->psk_src.port_op, 2349 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2350 continue; 2351 2352 if (psk->psk_dst.port_op != 0 && 2353 ! pf_match_port(psk->psk_dst.port_op, 2354 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2355 continue; 2356 2357 if (psk->psk_label[0] && 2358 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2359 continue; 2360 2361 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2362 kif->pfik_name)) 2363 continue; 2364 2365 if (psk->psk_kill_match) { 2366 /* Create the key to find matching states, with lock 2367 * held. */ 2368 2369 bzero(&match_key, sizeof(match_key)); 2370 2371 if (s->direction == PF_OUT) { 2372 dir = PF_IN; 2373 idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK; 2374 } else { 2375 dir = PF_OUT; 2376 idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE; 2377 } 2378 2379 match_key.af = s->key[idx]->af; 2380 match_key.proto = s->key[idx]->proto; 2381 PF_ACPY(&match_key.addr[0], 2382 &s->key[idx]->addr[1], match_key.af); 2383 match_key.port[0] = s->key[idx]->port[1]; 2384 PF_ACPY(&match_key.addr[1], 2385 &s->key[idx]->addr[0], match_key.af); 2386 match_key.port[1] = s->key[idx]->port[0]; 2387 } 2388 2389 pf_unlink_state(s); 2390 killed++; 2391 2392 if (psk->psk_kill_match) 2393 killed += pf_kill_matching_state(&match_key, dir); 2394 2395 goto relock_DIOCKILLSTATES; 2396 } 2397 PF_HASHROW_UNLOCK(ih); 2398 2399 return (killed); 2400 } 2401 2402 int 2403 pf_start(void) 2404 { 2405 int error = 0; 2406 2407 sx_xlock(&V_pf_ioctl_lock); 2408 if (V_pf_status.running) 2409 error = EEXIST; 2410 else { 2411 hook_pf(); 2412 if (! 
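		    /* hook the ethernet pfil layer only if eth rules are loaded */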
TAILQ_EMPTY(V_pf_keth->active.rules)) 2413 hook_pf_eth(); 2414 V_pf_status.running = 1; 2415 V_pf_status.since = time_second; 2416 new_unrhdr64(&V_pf_stateid, time_second); 2417 2418 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2419 } 2420 sx_xunlock(&V_pf_ioctl_lock); 2421 2422 return (error); 2423 } 2424 2425 int 2426 pf_stop(void) 2427 { 2428 int error = 0; 2429 2430 sx_xlock(&V_pf_ioctl_lock); 2431 if (!V_pf_status.running) 2432 error = ENOENT; 2433 else { 2434 V_pf_status.running = 0; 2435 dehook_pf(); 2436 dehook_pf_eth(); 2437 V_pf_status.since = time_second; 2438 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2439 } 2440 sx_xunlock(&V_pf_ioctl_lock); 2441 2442 return (error); 2443 } 2444 2445 void 2446 pf_ioctl_clear_status(void) 2447 { 2448 PF_RULES_WLOCK(); 2449 for (int i = 0; i < PFRES_MAX; i++) 2450 counter_u64_zero(V_pf_status.counters[i]); 2451 for (int i = 0; i < FCNT_MAX; i++) 2452 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 2453 for (int i = 0; i < SCNT_MAX; i++) 2454 counter_u64_zero(V_pf_status.scounters[i]); 2455 for (int i = 0; i < KLCNT_MAX; i++) 2456 counter_u64_zero(V_pf_status.lcounters[i]); 2457 V_pf_status.since = time_second; 2458 if (*V_pf_status.ifname) 2459 pfi_update_status(V_pf_status.ifname, NULL); 2460 PF_RULES_WUNLOCK(); 2461 } 2462 2463 int 2464 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds) 2465 { 2466 uint32_t old; 2467 2468 if (timeout < 0 || timeout >= PFTM_MAX || 2469 seconds < 0) 2470 return (EINVAL); 2471 2472 PF_RULES_WLOCK(); 2473 old = V_pf_default_rule.timeout[timeout]; 2474 if (timeout == PFTM_INTERVAL && seconds == 0) 2475 seconds = 1; 2476 V_pf_default_rule.timeout[timeout] = seconds; 2477 if (timeout == PFTM_INTERVAL && seconds < old) 2478 wakeup(pf_purge_thread); 2479 2480 if (prev_seconds != NULL) 2481 *prev_seconds = old; 2482 2483 PF_RULES_WUNLOCK(); 2484 2485 return (0); 2486 } 2487 2488 int 2489 pf_ioctl_get_timeout(int timeout, int *seconds) 2490 { 2491 PF_RULES_RLOCK_TRACKER; 2492 2493 if (timeout < 0 || timeout >= PFTM_MAX) 2494 return (EINVAL); 2495 2496 PF_RULES_RLOCK(); 2497 *seconds = V_pf_default_rule.timeout[timeout]; 2498 PF_RULES_RUNLOCK(); 2499 2500 return (0); 2501 } 2502 2503 int 2504 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit) 2505 { 2506 2507 PF_RULES_WLOCK(); 2508 if (index < 0 || index >= PF_LIMIT_MAX || 2509 V_pf_limits[index].zone == NULL) { 2510 PF_RULES_WUNLOCK(); 2511 return (EINVAL); 2512 } 2513 uma_zone_set_max(V_pf_limits[index].zone, limit); 2514 if (old_limit != NULL) 2515 *old_limit = V_pf_limits[index].limit; 2516 V_pf_limits[index].limit = limit; 2517 PF_RULES_WUNLOCK(); 2518 2519 return (0); 2520 } 2521 2522 int 2523 pf_ioctl_get_limit(int index, unsigned int *limit) 2524 { 2525 PF_RULES_RLOCK_TRACKER; 2526 2527 if (index < 0 || index >= PF_LIMIT_MAX) 2528 return (EINVAL); 2529 2530 PF_RULES_RLOCK(); 2531 *limit = V_pf_limits[index].limit; 2532 PF_RULES_RUNLOCK(); 2533 2534 return (0); 2535 } 2536 2537 int 2538 pf_ioctl_begin_addrs(uint32_t *ticket) 2539 { 2540 PF_RULES_WLOCK(); 2541 pf_empty_kpool(&V_pf_pabuf); 2542 *ticket = ++V_ticket_pabuf; 2543 PF_RULES_WUNLOCK(); 2544 2545 return (0); 2546 } 2547 2548 int 2549 pf_ioctl_add_addr(struct pfioc_pooladdr *pp) 2550 { 2551 struct pf_kpooladdr *pa = NULL; 2552 struct pfi_kkif *kif = NULL; 2553 int error; 2554 2555 #ifndef INET 2556 if (pp->af == AF_INET) 2557 return (EAFNOSUPPORT); 2558 #endif /* INET */ 2559 #ifndef INET6 2560 if (pp->af == AF_INET6) 2561 return (EAFNOSUPPORT); 2562 #endif /* INET6 
*/ 2563 2564 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2565 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2566 pp->addr.addr.type != PF_ADDR_TABLE) 2567 return (EINVAL); 2568 2569 if (pp->addr.addr.p.dyn != NULL) 2570 return (EINVAL); 2571 2572 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 2573 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 2574 if (error != 0) 2575 goto out; 2576 if (pa->ifname[0]) 2577 kif = pf_kkif_create(M_WAITOK); 2578 PF_RULES_WLOCK(); 2579 if (pp->ticket != V_ticket_pabuf) { 2580 PF_RULES_WUNLOCK(); 2581 if (pa->ifname[0]) 2582 pf_kkif_free(kif); 2583 error = EBUSY; 2584 goto out; 2585 } 2586 if (pa->ifname[0]) { 2587 pa->kif = pfi_kkif_attach(kif, pa->ifname); 2588 kif = NULL; 2589 pfi_kkif_ref(pa->kif); 2590 } else 2591 pa->kif = NULL; 2592 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 2593 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 2594 if (pa->ifname[0]) 2595 pfi_kkif_unref(pa->kif); 2596 PF_RULES_WUNLOCK(); 2597 goto out; 2598 } 2599 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 2600 PF_RULES_WUNLOCK(); 2601 2602 return (0); 2603 2604 out: 2605 free(pa, M_PFRULE); 2606 return (error); 2607 } 2608 2609 int 2610 pf_ioctl_get_addrs(struct pfioc_pooladdr *pp) 2611 { 2612 struct pf_kpool *pool; 2613 struct pf_kpooladdr *pa; 2614 2615 PF_RULES_RLOCK_TRACKER; 2616 2617 pp->anchor[sizeof(pp->anchor) - 1] = 0; 2618 pp->nr = 0; 2619 2620 PF_RULES_RLOCK(); 2621 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 2622 pp->r_num, 0, 1, 0); 2623 if (pool == NULL) { 2624 PF_RULES_RUNLOCK(); 2625 return (EBUSY); 2626 } 2627 TAILQ_FOREACH(pa, &pool->list, entries) 2628 pp->nr++; 2629 PF_RULES_RUNLOCK(); 2630 2631 return (0); 2632 } 2633 2634 int 2635 pf_ioctl_get_addr(struct pfioc_pooladdr *pp) 2636 { 2637 struct pf_kpool *pool; 2638 struct pf_kpooladdr *pa; 2639 u_int32_t nr = 0; 2640 2641 PF_RULES_RLOCK_TRACKER; 2642 2643 pp->anchor[sizeof(pp->anchor) - 1] = 0; 2644 2645 PF_RULES_RLOCK(); 2646 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 2647 pp->r_num, 0, 1, 1); 2648 if (pool == NULL) { 2649 PF_RULES_RUNLOCK(); 2650 return (EBUSY); 2651 } 2652 pa = TAILQ_FIRST(&pool->list); 2653 while ((pa != NULL) && (nr < pp->nr)) { 2654 pa = TAILQ_NEXT(pa, entries); 2655 nr++; 2656 } 2657 if (pa == NULL) { 2658 PF_RULES_RUNLOCK(); 2659 return (EBUSY); 2660 } 2661 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 2662 pf_addr_copyout(&pp->addr.addr); 2663 PF_RULES_RUNLOCK(); 2664 2665 return (0); 2666 } 2667 2668 static int 2669 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2670 { 2671 int error = 0; 2672 PF_RULES_RLOCK_TRACKER; 2673 2674 #define ERROUT_IOCTL(target, x) \ 2675 do { \ 2676 error = (x); \ 2677 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2678 goto target; \ 2679 } while (0) 2680 2681 2682 /* XXX keep in sync with switch() below */ 2683 if (securelevel_gt(td->td_ucred, 2)) 2684 switch (cmd) { 2685 case DIOCGETRULES: 2686 case DIOCGETRULENV: 2687 case DIOCGETADDRS: 2688 case DIOCGETADDR: 2689 case DIOCGETSTATE: 2690 case DIOCGETSTATENV: 2691 case DIOCSETSTATUSIF: 2692 case DIOCGETSTATUSNV: 2693 case DIOCCLRSTATUS: 2694 case DIOCNATLOOK: 2695 case DIOCSETDEBUG: 2696 #ifdef COMPAT_FREEBSD14 2697 case DIOCGETSTATES: 2698 case DIOCGETSTATESV2: 2699 #endif 2700 case DIOCGETTIMEOUT: 2701 case DIOCCLRRULECTRS: 2702 case DIOCGETLIMIT: 2703 case DIOCGETALTQSV0: 2704 case DIOCGETALTQSV1: 2705 case DIOCGETALTQV0: 2706 case DIOCGETALTQV1: 2707 case DIOCGETQSTATSV0: 2708 case DIOCGETQSTATSV1: 2709 case DIOCGETRULESETS: 
2710 case DIOCGETRULESET: 2711 case DIOCRGETTABLES: 2712 case DIOCRGETTSTATS: 2713 case DIOCRCLRTSTATS: 2714 case DIOCRCLRADDRS: 2715 case DIOCRADDADDRS: 2716 case DIOCRDELADDRS: 2717 case DIOCRSETADDRS: 2718 case DIOCRGETADDRS: 2719 case DIOCRGETASTATS: 2720 case DIOCRCLRASTATS: 2721 case DIOCRTSTADDRS: 2722 case DIOCOSFPGET: 2723 case DIOCGETSRCNODES: 2724 case DIOCCLRSRCNODES: 2725 case DIOCGETSYNCOOKIES: 2726 case DIOCIGETIFACES: 2727 case DIOCGIFSPEEDV0: 2728 case DIOCGIFSPEEDV1: 2729 case DIOCSETIFFLAG: 2730 case DIOCCLRIFFLAG: 2731 case DIOCGETETHRULES: 2732 case DIOCGETETHRULE: 2733 case DIOCGETETHRULESETS: 2734 case DIOCGETETHRULESET: 2735 break; 2736 case DIOCRCLRTABLES: 2737 case DIOCRADDTABLES: 2738 case DIOCRDELTABLES: 2739 case DIOCRSETTFLAGS: 2740 if (((struct pfioc_table *)addr)->pfrio_flags & 2741 PFR_FLAG_DUMMY) 2742 break; /* dummy operation ok */ 2743 return (EPERM); 2744 default: 2745 return (EPERM); 2746 } 2747 2748 if (!(flags & FWRITE)) 2749 switch (cmd) { 2750 case DIOCGETRULES: 2751 case DIOCGETADDRS: 2752 case DIOCGETADDR: 2753 case DIOCGETSTATE: 2754 case DIOCGETSTATENV: 2755 case DIOCGETSTATUSNV: 2756 #ifdef COMPAT_FREEBSD14 2757 case DIOCGETSTATES: 2758 case DIOCGETSTATESV2: 2759 #endif 2760 case DIOCGETTIMEOUT: 2761 case DIOCGETLIMIT: 2762 case DIOCGETALTQSV0: 2763 case DIOCGETALTQSV1: 2764 case DIOCGETALTQV0: 2765 case DIOCGETALTQV1: 2766 case DIOCGETQSTATSV0: 2767 case DIOCGETQSTATSV1: 2768 case DIOCGETRULESETS: 2769 case DIOCGETRULESET: 2770 case DIOCNATLOOK: 2771 case DIOCRGETTABLES: 2772 case DIOCRGETTSTATS: 2773 case DIOCRGETADDRS: 2774 case DIOCRGETASTATS: 2775 case DIOCRTSTADDRS: 2776 case DIOCOSFPGET: 2777 case DIOCGETSRCNODES: 2778 case DIOCGETSYNCOOKIES: 2779 case DIOCIGETIFACES: 2780 case DIOCGIFSPEEDV1: 2781 case DIOCGIFSPEEDV0: 2782 case DIOCGETRULENV: 2783 case DIOCGETETHRULES: 2784 case DIOCGETETHRULE: 2785 case DIOCGETETHRULESETS: 2786 case DIOCGETETHRULESET: 2787 break; 2788 case DIOCRCLRTABLES: 2789 case DIOCRADDTABLES: 2790 case DIOCRDELTABLES: 2791 case DIOCRCLRTSTATS: 2792 case DIOCRCLRADDRS: 2793 case DIOCRADDADDRS: 2794 case DIOCRDELADDRS: 2795 case DIOCRSETADDRS: 2796 case DIOCRSETTFLAGS: 2797 if (((struct pfioc_table *)addr)->pfrio_flags & 2798 PFR_FLAG_DUMMY) { 2799 flags |= FWRITE; /* need write lock for dummy */ 2800 break; /* dummy operation ok */ 2801 } 2802 return (EACCES); 2803 default: 2804 return (EACCES); 2805 } 2806 2807 CURVNET_SET(TD_TO_VNET(td)); 2808 2809 switch (cmd) { 2810 #ifdef COMPAT_FREEBSD14 2811 case DIOCSTART: 2812 error = pf_start(); 2813 break; 2814 2815 case DIOCSTOP: 2816 error = pf_stop(); 2817 break; 2818 #endif 2819 2820 case DIOCGETETHRULES: { 2821 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2822 nvlist_t *nvl; 2823 void *packed; 2824 struct pf_keth_rule *tail; 2825 struct pf_keth_ruleset *rs; 2826 u_int32_t ticket, nr; 2827 const char *anchor = ""; 2828 2829 nvl = NULL; 2830 packed = NULL; 2831 2832 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2833 2834 if (nv->len > pf_ioctl_maxcount) 2835 ERROUT(ENOMEM); 2836 2837 /* Copy the request in */ 2838 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2839 error = copyin(nv->data, packed, nv->len); 2840 if (error) 2841 ERROUT(error); 2842 2843 nvl = nvlist_unpack(packed, nv->len, 0); 2844 if (nvl == NULL) 2845 ERROUT(EBADMSG); 2846 2847 if (! 
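		/* an anchor path is mandatory; the empty string selects the
		 * main ethernet ruleset */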
nvlist_exists_string(nvl, "anchor")) 2848 ERROUT(EBADMSG); 2849 2850 anchor = nvlist_get_string(nvl, "anchor"); 2851 2852 rs = pf_find_keth_ruleset(anchor); 2853 2854 nvlist_destroy(nvl); 2855 nvl = NULL; 2856 free(packed, M_NVLIST); 2857 packed = NULL; 2858 2859 if (rs == NULL) 2860 ERROUT(ENOENT); 2861 2862 /* Reply */ 2863 nvl = nvlist_create(0); 2864 if (nvl == NULL) 2865 ERROUT(ENOMEM); 2866 2867 PF_RULES_RLOCK(); 2868 2869 ticket = rs->active.ticket; 2870 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2871 if (tail) 2872 nr = tail->nr + 1; 2873 else 2874 nr = 0; 2875 2876 PF_RULES_RUNLOCK(); 2877 2878 nvlist_add_number(nvl, "ticket", ticket); 2879 nvlist_add_number(nvl, "nr", nr); 2880 2881 packed = nvlist_pack(nvl, &nv->len); 2882 if (packed == NULL) 2883 ERROUT(ENOMEM); 2884 2885 if (nv->size == 0) 2886 ERROUT(0); 2887 else if (nv->size < nv->len) 2888 ERROUT(ENOSPC); 2889 2890 error = copyout(packed, nv->data, nv->len); 2891 2892 #undef ERROUT 2893 DIOCGETETHRULES_error: 2894 free(packed, M_NVLIST); 2895 nvlist_destroy(nvl); 2896 break; 2897 } 2898 2899 case DIOCGETETHRULE: { 2900 struct epoch_tracker et; 2901 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2902 nvlist_t *nvl = NULL; 2903 void *nvlpacked = NULL; 2904 struct pf_keth_rule *rule = NULL; 2905 struct pf_keth_ruleset *rs; 2906 u_int32_t ticket, nr; 2907 bool clear = false; 2908 const char *anchor; 2909 2910 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2911 2912 if (nv->len > pf_ioctl_maxcount) 2913 ERROUT(ENOMEM); 2914 2915 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2916 error = copyin(nv->data, nvlpacked, nv->len); 2917 if (error) 2918 ERROUT(error); 2919 2920 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2921 if (nvl == NULL) 2922 ERROUT(EBADMSG); 2923 if (! nvlist_exists_number(nvl, "ticket")) 2924 ERROUT(EBADMSG); 2925 ticket = nvlist_get_number(nvl, "ticket"); 2926 if (! nvlist_exists_string(nvl, "anchor")) 2927 ERROUT(EBADMSG); 2928 anchor = nvlist_get_string(nvl, "anchor"); 2929 2930 if (nvlist_exists_bool(nvl, "clear")) 2931 clear = nvlist_get_bool(nvl, "clear"); 2932 2933 if (clear && !(flags & FWRITE)) 2934 ERROUT(EACCES); 2935 2936 if (! nvlist_exists_number(nvl, "nr")) 2937 ERROUT(EBADMSG); 2938 nr = nvlist_get_number(nvl, "nr"); 2939 2940 PF_RULES_RLOCK(); 2941 rs = pf_find_keth_ruleset(anchor); 2942 if (rs == NULL) { 2943 PF_RULES_RUNLOCK(); 2944 ERROUT(ENOENT); 2945 } 2946 if (ticket != rs->active.ticket) { 2947 PF_RULES_RUNLOCK(); 2948 ERROUT(EBUSY); 2949 } 2950 2951 nvlist_destroy(nvl); 2952 nvl = NULL; 2953 free(nvlpacked, M_NVLIST); 2954 nvlpacked = NULL; 2955 2956 rule = TAILQ_FIRST(rs->active.rules); 2957 while ((rule != NULL) && (rule->nr != nr)) 2958 rule = TAILQ_NEXT(rule, entries); 2959 if (rule == NULL) { 2960 PF_RULES_RUNLOCK(); 2961 ERROUT(ENOENT); 2962 } 2963 /* Make sure rule can't go away. 
*/
2964 		NET_EPOCH_ENTER(et);
2965 		PF_RULES_RUNLOCK();
2966 		nvl = pf_keth_rule_to_nveth_rule(rule);
2967 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
2968 			NET_EPOCH_EXIT(et);
2969 			ERROUT(EBUSY);
2970 		}
2971 		NET_EPOCH_EXIT(et);
2972 		if (nvl == NULL)
2973 			ERROUT(ENOMEM);
2974 
2975 		nvlpacked = nvlist_pack(nvl, &nv->len);
2976 		if (nvlpacked == NULL)
2977 			ERROUT(ENOMEM);
2978 
2979 		if (nv->size == 0)
2980 			ERROUT(0);
2981 		else if (nv->size < nv->len)
2982 			ERROUT(ENOSPC);
2983 
2984 		error = copyout(nvlpacked, nv->data, nv->len);
2985 		if (error == 0 && clear) {
2986 			counter_u64_zero(rule->evaluations);
2987 			for (int i = 0; i < 2; i++) {
2988 				counter_u64_zero(rule->packets[i]);
2989 				counter_u64_zero(rule->bytes[i]);
2990 			}
2991 		}
2992 
2993 #undef ERROUT
2994 DIOCGETETHRULE_error:
2995 		free(nvlpacked, M_NVLIST);
2996 		nvlist_destroy(nvl);
2997 		break;
2998 	}
2999 
3000 	case DIOCADDETHRULE: {
3001 		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
3002 		nvlist_t *nvl = NULL;
3003 		void *nvlpacked = NULL;
3004 		struct pf_keth_rule *rule = NULL, *tail = NULL;
3005 		struct pf_keth_ruleset *ruleset = NULL;
3006 		struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
3007 		const char *anchor = "", *anchor_call = "";
3008 
3009 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3010 
3011 		if (nv->len > pf_ioctl_maxcount)
3012 			ERROUT(ENOMEM);
3013 
3014 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3015 		error = copyin(nv->data, nvlpacked, nv->len);
3016 		if (error)
3017 			ERROUT(error);
3018 
3019 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3020 		if (nvl == NULL)
3021 			ERROUT(EBADMSG);
3022 
3023 		if (! nvlist_exists_number(nvl, "ticket"))
3024 			ERROUT(EBADMSG);
3025 
3026 		if (nvlist_exists_string(nvl, "anchor"))
3027 			anchor = nvlist_get_string(nvl, "anchor");
3028 		if (nvlist_exists_string(nvl, "anchor_call"))
3029 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3030 
3031 		ruleset = pf_find_keth_ruleset(anchor);
3032 		if (ruleset == NULL)
3033 			ERROUT(EINVAL);
3034 
3035 		if (nvlist_get_number(nvl, "ticket") !=
3036 		    ruleset->inactive.ticket) {
3037 			DPFPRINTF(PF_DEBUG_MISC,
3038 			    ("ticket: %d != %d\n",
3039 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3040 			    ruleset->inactive.ticket));
3041 			ERROUT(EBUSY);
3042 		}
3043 
3044 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3045 		rule->timestamp = NULL;
3046 
3047 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3048 		if (error != 0)
3049 			ERROUT(error);
3050 
3051 		if (rule->ifname[0])
3052 			kif = pf_kkif_create(M_WAITOK);
3053 		if (rule->bridge_to_name[0])
3054 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3055 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3056 		for (int i = 0; i < 2; i++) {
3057 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3058 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3059 		}
3060 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3061 		    M_WAITOK | M_ZERO);
3062 
3063 		PF_RULES_WLOCK();
3064 
3065 		if (rule->ifname[0]) {
3066 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3067 			pfi_kkif_ref(rule->kif);
3068 		} else
3069 			rule->kif = NULL;
3070 		if (rule->bridge_to_name[0]) {
3071 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3072 			    rule->bridge_to_name);
3073 			pfi_kkif_ref(rule->bridge_to);
3074 		} else
3075 			rule->bridge_to = NULL;
3076 
3077 #ifdef ALTQ
3078 		/* set queue IDs */
3079 		if (rule->qname[0] != 0) {
3080 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3081 				error = EBUSY;
3082 			/* on success pf_qname2qid() has already stored
3083 			 * the queue id in rule->qid */
3084 		}
3085 #endif
3086 		if (rule->tagname[0])
3087 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3088 				error = EBUSY;
3089 		if
(rule->match_tagname[0]) 3090 if ((rule->match_tag = pf_tagname2tag( 3091 rule->match_tagname)) == 0) 3092 error = EBUSY; 3093 3094 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 3095 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 3096 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 3097 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 3098 3099 if (error) { 3100 pf_free_eth_rule(rule); 3101 PF_RULES_WUNLOCK(); 3102 ERROUT(error); 3103 } 3104 3105 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 3106 pf_free_eth_rule(rule); 3107 PF_RULES_WUNLOCK(); 3108 ERROUT(EINVAL); 3109 } 3110 3111 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 3112 if (tail) 3113 rule->nr = tail->nr + 1; 3114 else 3115 rule->nr = 0; 3116 3117 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 3118 3119 PF_RULES_WUNLOCK(); 3120 3121 #undef ERROUT 3122 DIOCADDETHRULE_error: 3123 nvlist_destroy(nvl); 3124 free(nvlpacked, M_NVLIST); 3125 break; 3126 } 3127 3128 case DIOCGETETHRULESETS: { 3129 struct epoch_tracker et; 3130 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3131 nvlist_t *nvl = NULL; 3132 void *nvlpacked = NULL; 3133 struct pf_keth_ruleset *ruleset; 3134 struct pf_keth_anchor *anchor; 3135 int nr = 0; 3136 3137 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 3138 3139 if (nv->len > pf_ioctl_maxcount) 3140 ERROUT(ENOMEM); 3141 3142 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3143 error = copyin(nv->data, nvlpacked, nv->len); 3144 if (error) 3145 ERROUT(error); 3146 3147 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3148 if (nvl == NULL) 3149 ERROUT(EBADMSG); 3150 if (! nvlist_exists_string(nvl, "path")) 3151 ERROUT(EBADMSG); 3152 3153 NET_EPOCH_ENTER(et); 3154 3155 if ((ruleset = pf_find_keth_ruleset( 3156 nvlist_get_string(nvl, "path"))) == NULL) { 3157 NET_EPOCH_EXIT(et); 3158 ERROUT(ENOENT); 3159 } 3160 3161 if (ruleset->anchor == NULL) { 3162 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 3163 if (anchor->parent == NULL) 3164 nr++; 3165 } else { 3166 RB_FOREACH(anchor, pf_keth_anchor_node, 3167 &ruleset->anchor->children) 3168 nr++; 3169 } 3170 3171 NET_EPOCH_EXIT(et); 3172 3173 nvlist_destroy(nvl); 3174 nvl = NULL; 3175 free(nvlpacked, M_NVLIST); 3176 nvlpacked = NULL; 3177 3178 nvl = nvlist_create(0); 3179 if (nvl == NULL) 3180 ERROUT(ENOMEM); 3181 3182 nvlist_add_number(nvl, "nr", nr); 3183 3184 nvlpacked = nvlist_pack(nvl, &nv->len); 3185 if (nvlpacked == NULL) 3186 ERROUT(ENOMEM); 3187 3188 if (nv->size == 0) 3189 ERROUT(0); 3190 else if (nv->size < nv->len) 3191 ERROUT(ENOSPC); 3192 3193 error = copyout(nvlpacked, nv->data, nv->len); 3194 3195 #undef ERROUT 3196 DIOCGETETHRULESETS_error: 3197 free(nvlpacked, M_NVLIST); 3198 nvlist_destroy(nvl); 3199 break; 3200 } 3201 3202 case DIOCGETETHRULESET: { 3203 struct epoch_tracker et; 3204 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3205 nvlist_t *nvl = NULL; 3206 void *nvlpacked = NULL; 3207 struct pf_keth_ruleset *ruleset; 3208 struct pf_keth_anchor *anchor; 3209 int nr = 0, req_nr = 0; 3210 bool found = false; 3211 3212 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3213 3214 if (nv->len > pf_ioctl_maxcount) 3215 ERROUT(ENOMEM); 3216 3217 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3218 error = copyin(nv->data, nvlpacked, nv->len); 3219 if (error) 3220 ERROUT(error); 3221 3222 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3223 if (nvl == NULL) 3224 ERROUT(EBADMSG); 3225 if (! nvlist_exists_string(nvl, "path")) 3226 ERROUT(EBADMSG); 3227 if (! 
nvlist_exists_number(nvl, "nr")) 3228 ERROUT(EBADMSG); 3229 3230 req_nr = nvlist_get_number(nvl, "nr"); 3231 3232 NET_EPOCH_ENTER(et); 3233 3234 if ((ruleset = pf_find_keth_ruleset( 3235 nvlist_get_string(nvl, "path"))) == NULL) { 3236 NET_EPOCH_EXIT(et); 3237 ERROUT(ENOENT); 3238 } 3239 3240 nvlist_destroy(nvl); 3241 nvl = NULL; 3242 free(nvlpacked, M_NVLIST); 3243 nvlpacked = NULL; 3244 3245 nvl = nvlist_create(0); 3246 if (nvl == NULL) { 3247 NET_EPOCH_EXIT(et); 3248 ERROUT(ENOMEM); 3249 } 3250 3251 if (ruleset->anchor == NULL) { 3252 RB_FOREACH(anchor, pf_keth_anchor_global, 3253 &V_pf_keth_anchors) { 3254 if (anchor->parent == NULL && nr++ == req_nr) { 3255 found = true; 3256 break; 3257 } 3258 } 3259 } else { 3260 RB_FOREACH(anchor, pf_keth_anchor_node, 3261 &ruleset->anchor->children) { 3262 if (nr++ == req_nr) { 3263 found = true; 3264 break; 3265 } 3266 } 3267 } 3268 3269 NET_EPOCH_EXIT(et); 3270 if (found) { 3271 nvlist_add_number(nvl, "nr", nr); 3272 nvlist_add_string(nvl, "name", anchor->name); 3273 if (ruleset->anchor) 3274 nvlist_add_string(nvl, "path", 3275 ruleset->anchor->path); 3276 else 3277 nvlist_add_string(nvl, "path", ""); 3278 } else { 3279 ERROUT(EBUSY); 3280 } 3281 3282 nvlpacked = nvlist_pack(nvl, &nv->len); 3283 if (nvlpacked == NULL) 3284 ERROUT(ENOMEM); 3285 3286 if (nv->size == 0) 3287 ERROUT(0); 3288 else if (nv->size < nv->len) 3289 ERROUT(ENOSPC); 3290 3291 error = copyout(nvlpacked, nv->data, nv->len); 3292 3293 #undef ERROUT 3294 DIOCGETETHRULESET_error: 3295 free(nvlpacked, M_NVLIST); 3296 nvlist_destroy(nvl); 3297 break; 3298 } 3299 3300 case DIOCADDRULENV: { 3301 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3302 nvlist_t *nvl = NULL; 3303 void *nvlpacked = NULL; 3304 struct pf_krule *rule = NULL; 3305 const char *anchor = "", *anchor_call = ""; 3306 uint32_t ticket = 0, pool_ticket = 0; 3307 3308 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3309 3310 if (nv->len > pf_ioctl_maxcount) 3311 ERROUT(ENOMEM); 3312 3313 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3314 error = copyin(nv->data, nvlpacked, nv->len); 3315 if (error) 3316 ERROUT(error); 3317 3318 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3319 if (nvl == NULL) 3320 ERROUT(EBADMSG); 3321 3322 if (! nvlist_exists_number(nvl, "ticket")) 3323 ERROUT(EINVAL); 3324 ticket = nvlist_get_number(nvl, "ticket"); 3325 3326 if (! nvlist_exists_number(nvl, "pool_ticket")) 3327 ERROUT(EINVAL); 3328 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3329 3330 if (! nvlist_exists_nvlist(nvl, "rule")) 3331 ERROUT(EINVAL); 3332 3333 rule = pf_krule_alloc(); 3334 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3335 rule); 3336 if (error) 3337 ERROUT(error); 3338 3339 if (nvlist_exists_string(nvl, "anchor")) 3340 anchor = nvlist_get_string(nvl, "anchor"); 3341 if (nvlist_exists_string(nvl, "anchor_call")) 3342 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3343 3344 if ((error = nvlist_error(nvl))) 3345 ERROUT(error); 3346 3347 /* Frees rule on error */ 3348 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3349 anchor_call, td->td_ucred->cr_ruid, 3350 td->td_proc ? 
td->td_proc->p_pid : 0); 3351 3352 nvlist_destroy(nvl); 3353 free(nvlpacked, M_NVLIST); 3354 break; 3355 #undef ERROUT 3356 DIOCADDRULENV_error: 3357 pf_krule_free(rule); 3358 nvlist_destroy(nvl); 3359 free(nvlpacked, M_NVLIST); 3360 3361 break; 3362 } 3363 case DIOCADDRULE: { 3364 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3365 struct pf_krule *rule; 3366 3367 rule = pf_krule_alloc(); 3368 error = pf_rule_to_krule(&pr->rule, rule); 3369 if (error != 0) { 3370 pf_krule_free(rule); 3371 break; 3372 } 3373 3374 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3375 3376 /* Frees rule on error */ 3377 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3378 pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid, 3379 td->td_proc ? td->td_proc->p_pid : 0); 3380 break; 3381 } 3382 3383 case DIOCGETRULES: { 3384 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3385 3386 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3387 3388 error = pf_ioctl_getrules(pr); 3389 3390 break; 3391 } 3392 3393 case DIOCGETRULENV: { 3394 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3395 nvlist_t *nvrule = NULL; 3396 nvlist_t *nvl = NULL; 3397 struct pf_kruleset *ruleset; 3398 struct pf_krule *rule; 3399 void *nvlpacked = NULL; 3400 int rs_num, nr; 3401 bool clear_counter = false; 3402 3403 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3404 3405 if (nv->len > pf_ioctl_maxcount) 3406 ERROUT(ENOMEM); 3407 3408 /* Copy the request in */ 3409 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3410 error = copyin(nv->data, nvlpacked, nv->len); 3411 if (error) 3412 ERROUT(error); 3413 3414 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3415 if (nvl == NULL) 3416 ERROUT(EBADMSG); 3417 3418 if (! nvlist_exists_string(nvl, "anchor")) 3419 ERROUT(EBADMSG); 3420 if (! nvlist_exists_number(nvl, "ruleset")) 3421 ERROUT(EBADMSG); 3422 if (! nvlist_exists_number(nvl, "ticket")) 3423 ERROUT(EBADMSG); 3424 if (! 
nvlist_exists_number(nvl, "nr")) 3425 ERROUT(EBADMSG); 3426 3427 if (nvlist_exists_bool(nvl, "clear_counter")) 3428 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3429 3430 if (clear_counter && !(flags & FWRITE)) 3431 ERROUT(EACCES); 3432 3433 nr = nvlist_get_number(nvl, "nr"); 3434 3435 PF_RULES_WLOCK(); 3436 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3437 if (ruleset == NULL) { 3438 PF_RULES_WUNLOCK(); 3439 ERROUT(ENOENT); 3440 } 3441 3442 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3443 if (rs_num >= PF_RULESET_MAX) { 3444 PF_RULES_WUNLOCK(); 3445 ERROUT(EINVAL); 3446 } 3447 3448 if (nvlist_get_number(nvl, "ticket") != 3449 ruleset->rules[rs_num].active.ticket) { 3450 PF_RULES_WUNLOCK(); 3451 ERROUT(EBUSY); 3452 } 3453 3454 if ((error = nvlist_error(nvl))) { 3455 PF_RULES_WUNLOCK(); 3456 ERROUT(error); 3457 } 3458 3459 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3460 while ((rule != NULL) && (rule->nr != nr)) 3461 rule = TAILQ_NEXT(rule, entries); 3462 if (rule == NULL) { 3463 PF_RULES_WUNLOCK(); 3464 ERROUT(EBUSY); 3465 } 3466 3467 nvrule = pf_krule_to_nvrule(rule); 3468 3469 nvlist_destroy(nvl); 3470 nvl = nvlist_create(0); 3471 if (nvl == NULL) { 3472 PF_RULES_WUNLOCK(); 3473 ERROUT(ENOMEM); 3474 } 3475 nvlist_add_number(nvl, "nr", nr); 3476 nvlist_add_nvlist(nvl, "rule", nvrule); 3477 nvlist_destroy(nvrule); 3478 nvrule = NULL; 3479 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3480 PF_RULES_WUNLOCK(); 3481 ERROUT(EBUSY); 3482 } 3483 3484 free(nvlpacked, M_NVLIST); 3485 nvlpacked = nvlist_pack(nvl, &nv->len); 3486 if (nvlpacked == NULL) { 3487 PF_RULES_WUNLOCK(); 3488 ERROUT(ENOMEM); 3489 } 3490 3491 if (nv->size == 0) { 3492 PF_RULES_WUNLOCK(); 3493 ERROUT(0); 3494 } 3495 else if (nv->size < nv->len) { 3496 PF_RULES_WUNLOCK(); 3497 ERROUT(ENOSPC); 3498 } 3499 3500 if (clear_counter) 3501 pf_krule_clear_counters(rule); 3502 3503 PF_RULES_WUNLOCK(); 3504 3505 error = copyout(nvlpacked, nv->data, nv->len); 3506 3507 #undef ERROUT 3508 DIOCGETRULENV_error: 3509 free(nvlpacked, M_NVLIST); 3510 nvlist_destroy(nvrule); 3511 nvlist_destroy(nvl); 3512 3513 break; 3514 } 3515 3516 case DIOCCHANGERULE: { 3517 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3518 struct pf_kruleset *ruleset; 3519 struct pf_krule *oldrule = NULL, *newrule = NULL; 3520 struct pfi_kkif *kif = NULL; 3521 struct pf_kpooladdr *pa; 3522 u_int32_t nr = 0; 3523 int rs_num; 3524 3525 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3526 3527 if (pcr->action < PF_CHANGE_ADD_HEAD || 3528 pcr->action > PF_CHANGE_GET_TICKET) { 3529 error = EINVAL; 3530 break; 3531 } 3532 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3533 error = EINVAL; 3534 break; 3535 } 3536 3537 if (pcr->action != PF_CHANGE_REMOVE) { 3538 newrule = pf_krule_alloc(); 3539 error = pf_rule_to_krule(&pcr->rule, newrule); 3540 if (error != 0) { 3541 pf_krule_free(newrule); 3542 break; 3543 } 3544 3545 if (newrule->ifname[0]) 3546 kif = pf_kkif_create(M_WAITOK); 3547 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3548 for (int i = 0; i < 2; i++) { 3549 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3550 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3551 } 3552 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3553 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3554 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3555 newrule->cuid = td->td_ucred->cr_ruid; 3556 newrule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; 3557 TAILQ_INIT(&newrule->rpool.list); 3558 } 3559 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3560 3561 PF_CONFIG_LOCK(); 3562 PF_RULES_WLOCK(); 3563 #ifdef PF_WANT_32_TO_64_COUNTER 3564 if (newrule != NULL) { 3565 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3566 newrule->allrulelinked = true; 3567 V_pf_allrulecount++; 3568 } 3569 #endif 3570 3571 if (!(pcr->action == PF_CHANGE_REMOVE || 3572 pcr->action == PF_CHANGE_GET_TICKET) && 3573 pcr->pool_ticket != V_ticket_pabuf) 3574 ERROUT(EBUSY); 3575 3576 ruleset = pf_find_kruleset(pcr->anchor); 3577 if (ruleset == NULL) 3578 ERROUT(EINVAL); 3579 3580 rs_num = pf_get_ruleset_number(pcr->rule.action); 3581 if (rs_num >= PF_RULESET_MAX) 3582 ERROUT(EINVAL); 3583 3584 /* 3585 * XXXMJG: there is no guarantee that the ruleset was 3586 * created by the usual route of calling DIOCXBEGIN. 3587 * As a result it is possible the rule tree will not 3588 * be allocated yet. Hack around it by doing it here. 3589 * Note it is fine to let the tree persist in case of 3590 * error as it will be freed down the road on future 3591 * updates (if need be). 3592 */ 3593 if (ruleset->rules[rs_num].active.tree == NULL) { 3594 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3595 if (ruleset->rules[rs_num].active.tree == NULL) { 3596 ERROUT(ENOMEM); 3597 } 3598 } 3599 3600 if (pcr->action == PF_CHANGE_GET_TICKET) { 3601 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3602 ERROUT(0); 3603 } else if (pcr->ticket != 3604 ruleset->rules[rs_num].active.ticket) 3605 ERROUT(EINVAL); 3606 3607 if (pcr->action != PF_CHANGE_REMOVE) { 3608 if (newrule->ifname[0]) { 3609 newrule->kif = pfi_kkif_attach(kif, 3610 newrule->ifname); 3611 kif = NULL; 3612 pfi_kkif_ref(newrule->kif); 3613 } else 3614 newrule->kif = NULL; 3615 3616 if (newrule->rtableid > 0 && 3617 newrule->rtableid >= rt_numfibs) 3618 error = EBUSY; 3619 3620 #ifdef ALTQ 3621 /* set queue IDs */ 3622 if (newrule->qname[0] != 0) { 3623 if ((newrule->qid = 3624 pf_qname2qid(newrule->qname)) == 0) 3625 error = EBUSY; 3626 else if (newrule->pqname[0] != 0) { 3627 if ((newrule->pqid = 3628 pf_qname2qid(newrule->pqname)) == 0) 3629 error = EBUSY; 3630 } else 3631 newrule->pqid = newrule->qid; 3632 } 3633 #endif /* ALTQ */ 3634 if (newrule->tagname[0]) 3635 if ((newrule->tag = 3636 pf_tagname2tag(newrule->tagname)) == 0) 3637 error = EBUSY; 3638 if (newrule->match_tagname[0]) 3639 if ((newrule->match_tag = pf_tagname2tag( 3640 newrule->match_tagname)) == 0) 3641 error = EBUSY; 3642 if (newrule->rt && !newrule->direction) 3643 error = EINVAL; 3644 if (!newrule->log) 3645 newrule->logif = 0; 3646 if (newrule->logif >= PFLOGIFS_MAX) 3647 error = EINVAL; 3648 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3649 error = ENOMEM; 3650 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3651 error = ENOMEM; 3652 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3653 error = EINVAL; 3654 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3655 if (pa->addr.type == PF_ADDR_TABLE) { 3656 pa->addr.p.tbl = 3657 pfr_attach_table(ruleset, 3658 pa->addr.v.tblname); 3659 if (pa->addr.p.tbl == NULL) 3660 error = ENOMEM; 3661 } 3662 3663 newrule->overload_tbl = NULL; 3664 if (newrule->overload_tblname[0]) { 3665 if ((newrule->overload_tbl = pfr_attach_table( 3666 ruleset, newrule->overload_tblname)) == 3667 NULL) 3668 error = EINVAL; 3669 else 3670 newrule->overload_tbl->pfrkt_flags |= 3671 PFR_TFLAG_ACTIVE; 3672 } 3673 3674 pf_mv_kpool(&V_pf_pabuf, 
&newrule->rpool.list); 3675 if (((((newrule->action == PF_NAT) || 3676 (newrule->action == PF_RDR) || 3677 (newrule->action == PF_BINAT) || 3678 (newrule->rt > PF_NOPFROUTE)) && 3679 !newrule->anchor)) && 3680 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3681 error = EINVAL; 3682 3683 if (error) { 3684 pf_free_rule(newrule); 3685 PF_RULES_WUNLOCK(); 3686 PF_CONFIG_UNLOCK(); 3687 break; 3688 } 3689 3690 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3691 } 3692 pf_empty_kpool(&V_pf_pabuf); 3693 3694 if (pcr->action == PF_CHANGE_ADD_HEAD) 3695 oldrule = TAILQ_FIRST( 3696 ruleset->rules[rs_num].active.ptr); 3697 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3698 oldrule = TAILQ_LAST( 3699 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3700 else { 3701 oldrule = TAILQ_FIRST( 3702 ruleset->rules[rs_num].active.ptr); 3703 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3704 oldrule = TAILQ_NEXT(oldrule, entries); 3705 if (oldrule == NULL) { 3706 if (newrule != NULL) 3707 pf_free_rule(newrule); 3708 PF_RULES_WUNLOCK(); 3709 PF_CONFIG_UNLOCK(); 3710 error = EINVAL; 3711 break; 3712 } 3713 } 3714 3715 if (pcr->action == PF_CHANGE_REMOVE) { 3716 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3717 oldrule); 3718 RB_REMOVE(pf_krule_global, 3719 ruleset->rules[rs_num].active.tree, oldrule); 3720 ruleset->rules[rs_num].active.rcount--; 3721 } else { 3722 pf_hash_rule(newrule); 3723 if (RB_INSERT(pf_krule_global, 3724 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3725 pf_free_rule(newrule); 3726 PF_RULES_WUNLOCK(); 3727 PF_CONFIG_UNLOCK(); 3728 error = EEXIST; 3729 break; 3730 } 3731 3732 if (oldrule == NULL) 3733 TAILQ_INSERT_TAIL( 3734 ruleset->rules[rs_num].active.ptr, 3735 newrule, entries); 3736 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3737 pcr->action == PF_CHANGE_ADD_BEFORE) 3738 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3739 else 3740 TAILQ_INSERT_AFTER( 3741 ruleset->rules[rs_num].active.ptr, 3742 oldrule, newrule, entries); 3743 ruleset->rules[rs_num].active.rcount++; 3744 } 3745 3746 nr = 0; 3747 TAILQ_FOREACH(oldrule, 3748 ruleset->rules[rs_num].active.ptr, entries) 3749 oldrule->nr = nr++; 3750 3751 ruleset->rules[rs_num].active.ticket++; 3752 3753 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3754 pf_remove_if_empty_kruleset(ruleset); 3755 3756 PF_RULES_WUNLOCK(); 3757 PF_CONFIG_UNLOCK(); 3758 break; 3759 3760 #undef ERROUT 3761 DIOCCHANGERULE_error: 3762 PF_RULES_WUNLOCK(); 3763 PF_CONFIG_UNLOCK(); 3764 pf_krule_free(newrule); 3765 pf_kkif_free(kif); 3766 break; 3767 } 3768 3769 case DIOCCLRSTATESNV: { 3770 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3771 break; 3772 } 3773 3774 case DIOCKILLSTATESNV: { 3775 error = pf_killstates_nv((struct pfioc_nv *)addr); 3776 break; 3777 } 3778 3779 case DIOCADDSTATE: { 3780 struct pfioc_state *ps = (struct pfioc_state *)addr; 3781 struct pfsync_state_1301 *sp = &ps->state; 3782 3783 if (sp->timeout >= PFTM_MAX) { 3784 error = EINVAL; 3785 break; 3786 } 3787 if (V_pfsync_state_import_ptr != NULL) { 3788 PF_RULES_RLOCK(); 3789 error = V_pfsync_state_import_ptr( 3790 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, 3791 PFSYNC_MSG_VERSION_1301); 3792 PF_RULES_RUNLOCK(); 3793 } else 3794 error = EOPNOTSUPP; 3795 break; 3796 } 3797 3798 case DIOCGETSTATE: { 3799 struct pfioc_state *ps = (struct pfioc_state *)addr; 3800 struct pf_kstate *s; 3801 3802 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3803 if (s == NULL) { 3804 error = ENOENT; 3805 break; 3806 } 3807 3808 
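		/*
		 * pf_find_state_byid() returns the state locked; export it
		 * in the pfsync 13.1-era wire format, then drop the lock.
		 */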
pfsync_state_export((union pfsync_state_union*)&ps->state, 3809 s, PFSYNC_MSG_VERSION_1301); 3810 PF_STATE_UNLOCK(s); 3811 break; 3812 } 3813 3814 case DIOCGETSTATENV: { 3815 error = pf_getstate((struct pfioc_nv *)addr); 3816 break; 3817 } 3818 3819 #ifdef COMPAT_FREEBSD14 3820 case DIOCGETSTATES: { 3821 struct pfioc_states *ps = (struct pfioc_states *)addr; 3822 struct pf_kstate *s; 3823 struct pfsync_state_1301 *pstore, *p; 3824 int i, nr; 3825 size_t slice_count = 16, count; 3826 void *out; 3827 3828 if (ps->ps_len <= 0) { 3829 nr = uma_zone_get_cur(V_pf_state_z); 3830 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3831 break; 3832 } 3833 3834 out = ps->ps_states; 3835 pstore = mallocarray(slice_count, 3836 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); 3837 nr = 0; 3838 3839 for (i = 0; i <= V_pf_hashmask; i++) { 3840 struct pf_idhash *ih = &V_pf_idhash[i]; 3841 3842 DIOCGETSTATES_retry: 3843 p = pstore; 3844 3845 if (LIST_EMPTY(&ih->states)) 3846 continue; 3847 3848 PF_HASHROW_LOCK(ih); 3849 count = 0; 3850 LIST_FOREACH(s, &ih->states, entry) { 3851 if (s->timeout == PFTM_UNLINKED) 3852 continue; 3853 count++; 3854 } 3855 3856 if (count > slice_count) { 3857 PF_HASHROW_UNLOCK(ih); 3858 free(pstore, M_TEMP); 3859 slice_count = count * 2; 3860 pstore = mallocarray(slice_count, 3861 sizeof(struct pfsync_state_1301), M_TEMP, 3862 M_WAITOK | M_ZERO); 3863 goto DIOCGETSTATES_retry; 3864 } 3865 3866 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3867 PF_HASHROW_UNLOCK(ih); 3868 goto DIOCGETSTATES_full; 3869 } 3870 3871 LIST_FOREACH(s, &ih->states, entry) { 3872 if (s->timeout == PFTM_UNLINKED) 3873 continue; 3874 3875 pfsync_state_export((union pfsync_state_union*)p, 3876 s, PFSYNC_MSG_VERSION_1301); 3877 p++; 3878 nr++; 3879 } 3880 PF_HASHROW_UNLOCK(ih); 3881 error = copyout(pstore, out, 3882 sizeof(struct pfsync_state_1301) * count); 3883 if (error) 3884 break; 3885 out = ps->ps_states + nr; 3886 } 3887 DIOCGETSTATES_full: 3888 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3889 free(pstore, M_TEMP); 3890 3891 break; 3892 } 3893 3894 case DIOCGETSTATESV2: { 3895 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3896 struct pf_kstate *s; 3897 struct pf_state_export *pstore, *p; 3898 int i, nr; 3899 size_t slice_count = 16, count; 3900 void *out; 3901 3902 if (ps->ps_req_version > PF_STATE_VERSION) { 3903 error = ENOTSUP; 3904 break; 3905 } 3906 3907 if (ps->ps_len <= 0) { 3908 nr = uma_zone_get_cur(V_pf_state_z); 3909 ps->ps_len = sizeof(struct pf_state_export) * nr; 3910 break; 3911 } 3912 3913 out = ps->ps_states; 3914 pstore = mallocarray(slice_count, 3915 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3916 nr = 0; 3917 3918 for (i = 0; i <= V_pf_hashmask; i++) { 3919 struct pf_idhash *ih = &V_pf_idhash[i]; 3920 3921 DIOCGETSTATESV2_retry: 3922 p = pstore; 3923 3924 if (LIST_EMPTY(&ih->states)) 3925 continue; 3926 3927 PF_HASHROW_LOCK(ih); 3928 count = 0; 3929 LIST_FOREACH(s, &ih->states, entry) { 3930 if (s->timeout == PFTM_UNLINKED) 3931 continue; 3932 count++; 3933 } 3934 3935 if (count > slice_count) { 3936 PF_HASHROW_UNLOCK(ih); 3937 free(pstore, M_TEMP); 3938 slice_count = count * 2; 3939 pstore = mallocarray(slice_count, 3940 sizeof(struct pf_state_export), M_TEMP, 3941 M_WAITOK | M_ZERO); 3942 goto DIOCGETSTATESV2_retry; 3943 } 3944 3945 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3946 PF_HASHROW_UNLOCK(ih); 3947 goto DIOCGETSTATESV2_full; 3948 } 3949 3950 LIST_FOREACH(s, &ih->states, entry) { 3951 if (s->timeout == PFTM_UNLINKED) 
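					/* unlinked, about to be freed; skip */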
3952 continue; 3953 3954 pf_state_export(p, s); 3955 p++; 3956 nr++; 3957 } 3958 PF_HASHROW_UNLOCK(ih); 3959 error = copyout(pstore, out, 3960 sizeof(struct pf_state_export) * count); 3961 if (error) 3962 break; 3963 out = ps->ps_states + nr; 3964 } 3965 DIOCGETSTATESV2_full: 3966 ps->ps_len = nr * sizeof(struct pf_state_export); 3967 free(pstore, M_TEMP); 3968 3969 break; 3970 } 3971 #endif 3972 case DIOCGETSTATUSNV: { 3973 error = pf_getstatus((struct pfioc_nv *)addr); 3974 break; 3975 } 3976 3977 case DIOCSETSTATUSIF: { 3978 struct pfioc_if *pi = (struct pfioc_if *)addr; 3979 3980 if (pi->ifname[0] == 0) { 3981 bzero(V_pf_status.ifname, IFNAMSIZ); 3982 break; 3983 } 3984 PF_RULES_WLOCK(); 3985 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3986 PF_RULES_WUNLOCK(); 3987 break; 3988 } 3989 3990 case DIOCCLRSTATUS: { 3991 pf_ioctl_clear_status(); 3992 break; 3993 } 3994 3995 case DIOCNATLOOK: { 3996 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3997 struct pf_state_key *sk; 3998 struct pf_kstate *state; 3999 struct pf_state_key_cmp key; 4000 int m = 0, direction = pnl->direction; 4001 int sidx, didx; 4002 4003 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 4004 sidx = (direction == PF_IN) ? 1 : 0; 4005 didx = (direction == PF_IN) ? 0 : 1; 4006 4007 if (!pnl->proto || 4008 PF_AZERO(&pnl->saddr, pnl->af) || 4009 PF_AZERO(&pnl->daddr, pnl->af) || 4010 ((pnl->proto == IPPROTO_TCP || 4011 pnl->proto == IPPROTO_UDP) && 4012 (!pnl->dport || !pnl->sport))) 4013 error = EINVAL; 4014 else { 4015 bzero(&key, sizeof(key)); 4016 key.af = pnl->af; 4017 key.proto = pnl->proto; 4018 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 4019 key.port[sidx] = pnl->sport; 4020 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 4021 key.port[didx] = pnl->dport; 4022 4023 state = pf_find_state_all(&key, direction, &m); 4024 if (state == NULL) { 4025 error = ENOENT; 4026 } else { 4027 if (m > 1) { 4028 PF_STATE_UNLOCK(state); 4029 error = E2BIG; /* more than one state */ 4030 } else { 4031 sk = state->key[sidx]; 4032 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 4033 pnl->rsport = sk->port[sidx]; 4034 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 4035 pnl->rdport = sk->port[didx]; 4036 PF_STATE_UNLOCK(state); 4037 } 4038 } 4039 } 4040 break; 4041 } 4042 4043 case DIOCSETTIMEOUT: { 4044 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4045 4046 error = pf_ioctl_set_timeout(pt->timeout, pt->seconds, 4047 &pt->seconds); 4048 break; 4049 } 4050 4051 case DIOCGETTIMEOUT: { 4052 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4053 4054 error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds); 4055 break; 4056 } 4057 4058 case DIOCGETLIMIT: { 4059 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4060 4061 error = pf_ioctl_get_limit(pl->index, &pl->limit); 4062 break; 4063 } 4064 4065 case DIOCSETLIMIT: { 4066 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4067 unsigned int old_limit; 4068 4069 error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit); 4070 pl->limit = old_limit; 4071 break; 4072 } 4073 4074 case DIOCSETDEBUG: { 4075 u_int32_t *level = (u_int32_t *)addr; 4076 4077 PF_RULES_WLOCK(); 4078 V_pf_status.debug = *level; 4079 PF_RULES_WUNLOCK(); 4080 break; 4081 } 4082 4083 case DIOCCLRRULECTRS: { 4084 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4085 struct pf_kruleset *ruleset = &pf_main_ruleset; 4086 struct pf_krule *rule; 4087 4088 PF_RULES_WLOCK(); 4089 TAILQ_FOREACH(rule, 4090 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4091 
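			/* zero only evaluation/packet/byte counters here;
			 * the states_* counters are left untouched */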
pf_counter_u64_zero(&rule->evaluations); 4092 for (int i = 0; i < 2; i++) { 4093 pf_counter_u64_zero(&rule->packets[i]); 4094 pf_counter_u64_zero(&rule->bytes[i]); 4095 } 4096 } 4097 PF_RULES_WUNLOCK(); 4098 break; 4099 } 4100 4101 case DIOCGIFSPEEDV0: 4102 case DIOCGIFSPEEDV1: { 4103 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr; 4104 struct pf_ifspeed_v1 ps; 4105 struct ifnet *ifp; 4106 4107 if (psp->ifname[0] == '\0') { 4108 error = EINVAL; 4109 break; 4110 } 4111 4112 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4113 if (error != 0) 4114 break; 4115 ifp = ifunit(ps.ifname); 4116 if (ifp != NULL) { 4117 psp->baudrate32 = 4118 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4119 if (cmd == DIOCGIFSPEEDV1) 4120 psp->baudrate = ifp->if_baudrate; 4121 } else { 4122 error = EINVAL; 4123 } 4124 break; 4125 } 4126 4127 #ifdef ALTQ 4128 case DIOCSTARTALTQ: { 4129 struct pf_altq *altq; 4130 4131 PF_RULES_WLOCK(); 4132 /* enable all altq interfaces on active list */ 4133 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4134 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4135 error = pf_enable_altq(altq); 4136 if (error != 0) 4137 break; 4138 } 4139 } 4140 if (error == 0) 4141 V_pf_altq_running = 1; 4142 PF_RULES_WUNLOCK(); 4143 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4144 break; 4145 } 4146 4147 case DIOCSTOPALTQ: { 4148 struct pf_altq *altq; 4149 4150 PF_RULES_WLOCK(); 4151 /* disable all altq interfaces on active list */ 4152 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4153 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4154 error = pf_disable_altq(altq); 4155 if (error != 0) 4156 break; 4157 } 4158 } 4159 if (error == 0) 4160 V_pf_altq_running = 0; 4161 PF_RULES_WUNLOCK(); 4162 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4163 break; 4164 } 4165 4166 case DIOCADDALTQV0: 4167 case DIOCADDALTQV1: { 4168 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4169 struct pf_altq *altq, *a; 4170 struct ifnet *ifp; 4171 4172 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4173 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4174 if (error) 4175 break; 4176 altq->local_flags = 0; 4177 4178 PF_RULES_WLOCK(); 4179 if (pa->ticket != V_ticket_altqs_inactive) { 4180 PF_RULES_WUNLOCK(); 4181 free(altq, M_PFALTQ); 4182 error = EBUSY; 4183 break; 4184 } 4185 4186 /* 4187 * if this is for a queue, find the discipline and 4188 * copy the necessary fields 4189 */ 4190 if (altq->qname[0] != 0) { 4191 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4192 PF_RULES_WUNLOCK(); 4193 error = EBUSY; 4194 free(altq, M_PFALTQ); 4195 break; 4196 } 4197 altq->altq_disc = NULL; 4198 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4199 if (strncmp(a->ifname, altq->ifname, 4200 IFNAMSIZ) == 0) { 4201 altq->altq_disc = a->altq_disc; 4202 break; 4203 } 4204 } 4205 } 4206 4207 if ((ifp = ifunit(altq->ifname)) == NULL) 4208 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4209 else 4210 error = altq_add(ifp, altq); 4211 4212 if (error) { 4213 PF_RULES_WUNLOCK(); 4214 free(altq, M_PFALTQ); 4215 break; 4216 } 4217 4218 if (altq->qname[0] != 0) 4219 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4220 else 4221 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4222 /* version error check done on import above */ 4223 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4224 PF_RULES_WUNLOCK(); 4225 break; 4226 } 4227 4228 case DIOCGETALTQSV0: 4229 case DIOCGETALTQSV1: { 4230 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4231 struct 
pf_altq *altq; 4232 4233 PF_RULES_RLOCK(); 4234 pa->nr = 0; 4235 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4236 pa->nr++; 4237 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4238 pa->nr++; 4239 pa->ticket = V_ticket_altqs_active; 4240 PF_RULES_RUNLOCK(); 4241 break; 4242 } 4243 4244 case DIOCGETALTQV0: 4245 case DIOCGETALTQV1: { 4246 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4247 struct pf_altq *altq; 4248 4249 PF_RULES_RLOCK(); 4250 if (pa->ticket != V_ticket_altqs_active) { 4251 PF_RULES_RUNLOCK(); 4252 error = EBUSY; 4253 break; 4254 } 4255 altq = pf_altq_get_nth_active(pa->nr); 4256 if (altq == NULL) { 4257 PF_RULES_RUNLOCK(); 4258 error = EBUSY; 4259 break; 4260 } 4261 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4262 PF_RULES_RUNLOCK(); 4263 break; 4264 } 4265 4266 case DIOCCHANGEALTQV0: 4267 case DIOCCHANGEALTQV1: 4268 /* CHANGEALTQ not supported yet! */ 4269 error = ENODEV; 4270 break; 4271 4272 case DIOCGETQSTATSV0: 4273 case DIOCGETQSTATSV1: { 4274 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4275 struct pf_altq *altq; 4276 int nbytes; 4277 u_int32_t version; 4278 4279 PF_RULES_RLOCK(); 4280 if (pq->ticket != V_ticket_altqs_active) { 4281 PF_RULES_RUNLOCK(); 4282 error = EBUSY; 4283 break; 4284 } 4285 nbytes = pq->nbytes; 4286 altq = pf_altq_get_nth_active(pq->nr); 4287 if (altq == NULL) { 4288 PF_RULES_RUNLOCK(); 4289 error = EBUSY; 4290 break; 4291 } 4292 4293 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4294 PF_RULES_RUNLOCK(); 4295 error = ENXIO; 4296 break; 4297 } 4298 PF_RULES_RUNLOCK(); 4299 if (cmd == DIOCGETQSTATSV0) 4300 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4301 else 4302 version = pq->version; 4303 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4304 if (error == 0) { 4305 pq->scheduler = altq->scheduler; 4306 pq->nbytes = nbytes; 4307 } 4308 break; 4309 } 4310 #endif /* ALTQ */ 4311 4312 case DIOCBEGINADDRS: { 4313 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4314 4315 error = pf_ioctl_begin_addrs(&pp->ticket); 4316 break; 4317 } 4318 4319 case DIOCADDADDR: { 4320 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4321 4322 error = pf_ioctl_add_addr(pp); 4323 break; 4324 } 4325 4326 case DIOCGETADDRS: { 4327 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4328 4329 error = pf_ioctl_get_addrs(pp); 4330 break; 4331 } 4332 4333 case DIOCGETADDR: { 4334 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4335 4336 error = pf_ioctl_get_addr(pp); 4337 break; 4338 } 4339 4340 case DIOCCHANGEADDR: { 4341 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4342 struct pf_kpool *pool; 4343 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4344 struct pf_kruleset *ruleset; 4345 struct pfi_kkif *kif = NULL; 4346 4347 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4348 4349 if (pca->action < PF_CHANGE_ADD_HEAD || 4350 pca->action > PF_CHANGE_REMOVE) { 4351 error = EINVAL; 4352 break; 4353 } 4354 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4355 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4356 pca->addr.addr.type != PF_ADDR_TABLE) { 4357 error = EINVAL; 4358 break; 4359 } 4360 if (pca->addr.addr.p.dyn != NULL) { 4361 error = EINVAL; 4362 break; 4363 } 4364 4365 if (pca->action != PF_CHANGE_REMOVE) { 4366 #ifndef INET 4367 if (pca->af == AF_INET) { 4368 error = EAFNOSUPPORT; 4369 break; 4370 } 4371 #endif /* INET */ 4372 #ifndef INET6 4373 if (pca->af == AF_INET6) { 4374 error = EAFNOSUPPORT; 4375 break; 4376 } 4377 #endif /* INET6 */ 4378 newpa = 
malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4379 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4380 if (newpa->ifname[0]) 4381 kif = pf_kkif_create(M_WAITOK); 4382 newpa->kif = NULL; 4383 } 4384 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4385 PF_RULES_WLOCK(); 4386 ruleset = pf_find_kruleset(pca->anchor); 4387 if (ruleset == NULL) 4388 ERROUT(EBUSY); 4389 4390 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4391 pca->r_num, pca->r_last, 1, 1); 4392 if (pool == NULL) 4393 ERROUT(EBUSY); 4394 4395 if (pca->action != PF_CHANGE_REMOVE) { 4396 if (newpa->ifname[0]) { 4397 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4398 pfi_kkif_ref(newpa->kif); 4399 kif = NULL; 4400 } 4401 4402 switch (newpa->addr.type) { 4403 case PF_ADDR_DYNIFTL: 4404 error = pfi_dynaddr_setup(&newpa->addr, 4405 pca->af); 4406 break; 4407 case PF_ADDR_TABLE: 4408 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4409 newpa->addr.v.tblname); 4410 if (newpa->addr.p.tbl == NULL) 4411 error = ENOMEM; 4412 break; 4413 } 4414 if (error) 4415 goto DIOCCHANGEADDR_error; 4416 } 4417 4418 switch (pca->action) { 4419 case PF_CHANGE_ADD_HEAD: 4420 oldpa = TAILQ_FIRST(&pool->list); 4421 break; 4422 case PF_CHANGE_ADD_TAIL: 4423 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4424 break; 4425 default: 4426 oldpa = TAILQ_FIRST(&pool->list); 4427 for (int i = 0; oldpa && i < pca->nr; i++) 4428 oldpa = TAILQ_NEXT(oldpa, entries); 4429 4430 if (oldpa == NULL) 4431 ERROUT(EINVAL); 4432 } 4433 4434 if (pca->action == PF_CHANGE_REMOVE) { 4435 TAILQ_REMOVE(&pool->list, oldpa, entries); 4436 switch (oldpa->addr.type) { 4437 case PF_ADDR_DYNIFTL: 4438 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4439 break; 4440 case PF_ADDR_TABLE: 4441 pfr_detach_table(oldpa->addr.p.tbl); 4442 break; 4443 } 4444 if (oldpa->kif) 4445 pfi_kkif_unref(oldpa->kif); 4446 free(oldpa, M_PFRULE); 4447 } else { 4448 if (oldpa == NULL) 4449 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4450 else if (pca->action == PF_CHANGE_ADD_HEAD || 4451 pca->action == PF_CHANGE_ADD_BEFORE) 4452 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4453 else 4454 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4455 newpa, entries); 4456 } 4457 4458 pool->cur = TAILQ_FIRST(&pool->list); 4459 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4460 PF_RULES_WUNLOCK(); 4461 break; 4462 4463 #undef ERROUT 4464 DIOCCHANGEADDR_error: 4465 if (newpa != NULL) { 4466 if (newpa->kif) 4467 pfi_kkif_unref(newpa->kif); 4468 free(newpa, M_PFRULE); 4469 } 4470 PF_RULES_WUNLOCK(); 4471 pf_kkif_free(kif); 4472 break; 4473 } 4474 4475 case DIOCGETRULESETS: { 4476 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4477 struct pf_kruleset *ruleset; 4478 struct pf_kanchor *anchor; 4479 4480 pr->path[sizeof(pr->path) - 1] = 0; 4481 4482 PF_RULES_RLOCK(); 4483 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4484 PF_RULES_RUNLOCK(); 4485 error = ENOENT; 4486 break; 4487 } 4488 pr->nr = 0; 4489 if (ruleset->anchor == NULL) { 4490 /* XXX kludge for pf_main_ruleset */ 4491 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4492 if (anchor->parent == NULL) 4493 pr->nr++; 4494 } else { 4495 RB_FOREACH(anchor, pf_kanchor_node, 4496 &ruleset->anchor->children) 4497 pr->nr++; 4498 } 4499 PF_RULES_RUNLOCK(); 4500 break; 4501 } 4502 4503 case DIOCGETRULESET: { 4504 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4505 struct pf_kruleset *ruleset; 4506 struct pf_kanchor *anchor; 4507 u_int32_t nr = 0; 4508 4509 pr->path[sizeof(pr->path) - 1] = 0; 4510 4511 PF_RULES_RLOCK(); 4512 if 
((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4513 PF_RULES_RUNLOCK(); 4514 error = ENOENT; 4515 break; 4516 } 4517 pr->name[0] = 0; 4518 if (ruleset->anchor == NULL) { 4519 /* XXX kludge for pf_main_ruleset */ 4520 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4521 if (anchor->parent == NULL && nr++ == pr->nr) { 4522 strlcpy(pr->name, anchor->name, 4523 sizeof(pr->name)); 4524 break; 4525 } 4526 } else { 4527 RB_FOREACH(anchor, pf_kanchor_node, 4528 &ruleset->anchor->children) 4529 if (nr++ == pr->nr) { 4530 strlcpy(pr->name, anchor->name, 4531 sizeof(pr->name)); 4532 break; 4533 } 4534 } 4535 if (!pr->name[0]) 4536 error = EBUSY; 4537 PF_RULES_RUNLOCK(); 4538 break; 4539 } 4540 4541 case DIOCRCLRTABLES: { 4542 struct pfioc_table *io = (struct pfioc_table *)addr; 4543 4544 if (io->pfrio_esize != 0) { 4545 error = ENODEV; 4546 break; 4547 } 4548 PF_RULES_WLOCK(); 4549 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4550 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4551 PF_RULES_WUNLOCK(); 4552 break; 4553 } 4554 4555 case DIOCRADDTABLES: { 4556 struct pfioc_table *io = (struct pfioc_table *)addr; 4557 struct pfr_table *pfrts; 4558 size_t totlen; 4559 4560 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4561 error = ENODEV; 4562 break; 4563 } 4564 4565 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4566 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4567 error = ENOMEM; 4568 break; 4569 } 4570 4571 totlen = io->pfrio_size * sizeof(struct pfr_table); 4572 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4573 M_TEMP, M_WAITOK); 4574 error = copyin(io->pfrio_buffer, pfrts, totlen); 4575 if (error) { 4576 free(pfrts, M_TEMP); 4577 break; 4578 } 4579 PF_RULES_WLOCK(); 4580 error = pfr_add_tables(pfrts, io->pfrio_size, 4581 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4582 PF_RULES_WUNLOCK(); 4583 free(pfrts, M_TEMP); 4584 break; 4585 } 4586 4587 case DIOCRDELTABLES: { 4588 struct pfioc_table *io = (struct pfioc_table *)addr; 4589 struct pfr_table *pfrts; 4590 size_t totlen; 4591 4592 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4593 error = ENODEV; 4594 break; 4595 } 4596 4597 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4598 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4599 error = ENOMEM; 4600 break; 4601 } 4602 4603 totlen = io->pfrio_size * sizeof(struct pfr_table); 4604 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4605 M_TEMP, M_WAITOK); 4606 error = copyin(io->pfrio_buffer, pfrts, totlen); 4607 if (error) { 4608 free(pfrts, M_TEMP); 4609 break; 4610 } 4611 PF_RULES_WLOCK(); 4612 error = pfr_del_tables(pfrts, io->pfrio_size, 4613 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4614 PF_RULES_WUNLOCK(); 4615 free(pfrts, M_TEMP); 4616 break; 4617 } 4618 4619 case DIOCRGETTABLES: { 4620 struct pfioc_table *io = (struct pfioc_table *)addr; 4621 struct pfr_table *pfrts; 4622 size_t totlen; 4623 int n; 4624 4625 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4626 error = ENODEV; 4627 break; 4628 } 4629 PF_RULES_RLOCK(); 4630 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4631 if (n < 0) { 4632 PF_RULES_RUNLOCK(); 4633 error = EINVAL; 4634 break; 4635 } 4636 io->pfrio_size = min(io->pfrio_size, n); 4637 4638 totlen = io->pfrio_size * sizeof(struct pfr_table); 4639 4640 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4641 M_TEMP, M_NOWAIT | M_ZERO); 4642 if (pfrts == NULL) { 4643 error = ENOMEM; 4644 PF_RULES_RUNLOCK(); 4645 break; 4646 
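/* The buffer above is allocated with M_NOWAIT, so a transient
* memory shortage surfaces as ENOMEM here instead of blocking while
* the rules read lock is held. */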
} 4647 error = pfr_get_tables(&io->pfrio_table, pfrts, 4648 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4649 PF_RULES_RUNLOCK(); 4650 if (error == 0) 4651 error = copyout(pfrts, io->pfrio_buffer, totlen); 4652 free(pfrts, M_TEMP); 4653 break; 4654 } 4655 4656 case DIOCRGETTSTATS: { 4657 struct pfioc_table *io = (struct pfioc_table *)addr; 4658 struct pfr_tstats *pfrtstats; 4659 size_t totlen; 4660 int n; 4661 4662 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4663 error = ENODEV; 4664 break; 4665 } 4666 PF_TABLE_STATS_LOCK(); 4667 PF_RULES_RLOCK(); 4668 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4669 if (n < 0) { 4670 PF_RULES_RUNLOCK(); 4671 PF_TABLE_STATS_UNLOCK(); 4672 error = EINVAL; 4673 break; 4674 } 4675 io->pfrio_size = min(io->pfrio_size, n); 4676 4677 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4678 pfrtstats = mallocarray(io->pfrio_size, 4679 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4680 if (pfrtstats == NULL) { 4681 error = ENOMEM; 4682 PF_RULES_RUNLOCK(); 4683 PF_TABLE_STATS_UNLOCK(); 4684 break; 4685 } 4686 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4687 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4688 PF_RULES_RUNLOCK(); 4689 PF_TABLE_STATS_UNLOCK(); 4690 if (error == 0) 4691 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4692 free(pfrtstats, M_TEMP); 4693 break; 4694 } 4695 4696 case DIOCRCLRTSTATS: { 4697 struct pfioc_table *io = (struct pfioc_table *)addr; 4698 struct pfr_table *pfrts; 4699 size_t totlen; 4700 4701 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4702 error = ENODEV; 4703 break; 4704 } 4705 4706 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4707 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4708 /* We used to count tables and use the minimum required 4709 * size, so we didn't fail on overly large requests. 4710 * Keep doing so. 
*/ 4711 io->pfrio_size = pf_ioctl_maxcount; 4712 break; 4713 } 4714 4715 totlen = io->pfrio_size * sizeof(struct pfr_table); 4716 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4717 M_TEMP, M_WAITOK); 4718 error = copyin(io->pfrio_buffer, pfrts, totlen); 4719 if (error) { 4720 free(pfrts, M_TEMP); 4721 break; 4722 } 4723 4724 PF_TABLE_STATS_LOCK(); 4725 PF_RULES_RLOCK(); 4726 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4727 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4728 PF_RULES_RUNLOCK(); 4729 PF_TABLE_STATS_UNLOCK(); 4730 free(pfrts, M_TEMP); 4731 break; 4732 } 4733 4734 case DIOCRSETTFLAGS: { 4735 struct pfioc_table *io = (struct pfioc_table *)addr; 4736 struct pfr_table *pfrts; 4737 size_t totlen; 4738 int n; 4739 4740 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4741 error = ENODEV; 4742 break; 4743 } 4744 4745 PF_RULES_RLOCK(); 4746 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4747 if (n < 0) { 4748 PF_RULES_RUNLOCK(); 4749 error = EINVAL; 4750 break; 4751 } 4752 4753 io->pfrio_size = min(io->pfrio_size, n); 4754 PF_RULES_RUNLOCK(); 4755 4756 totlen = io->pfrio_size * sizeof(struct pfr_table); 4757 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4758 M_TEMP, M_WAITOK); 4759 error = copyin(io->pfrio_buffer, pfrts, totlen); 4760 if (error) { 4761 free(pfrts, M_TEMP); 4762 break; 4763 } 4764 PF_RULES_WLOCK(); 4765 error = pfr_set_tflags(pfrts, io->pfrio_size, 4766 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4767 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4768 PF_RULES_WUNLOCK(); 4769 free(pfrts, M_TEMP); 4770 break; 4771 } 4772 4773 case DIOCRCLRADDRS: { 4774 struct pfioc_table *io = (struct pfioc_table *)addr; 4775 4776 if (io->pfrio_esize != 0) { 4777 error = ENODEV; 4778 break; 4779 } 4780 PF_RULES_WLOCK(); 4781 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4782 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4783 PF_RULES_WUNLOCK(); 4784 break; 4785 } 4786 4787 case DIOCRADDADDRS: { 4788 struct pfioc_table *io = (struct pfioc_table *)addr; 4789 struct pfr_addr *pfras; 4790 size_t totlen; 4791 4792 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4793 error = ENODEV; 4794 break; 4795 } 4796 if (io->pfrio_size < 0 || 4797 io->pfrio_size > pf_ioctl_maxcount || 4798 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4799 error = EINVAL; 4800 break; 4801 } 4802 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4803 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4804 M_TEMP, M_WAITOK); 4805 error = copyin(io->pfrio_buffer, pfras, totlen); 4806 if (error) { 4807 free(pfras, M_TEMP); 4808 break; 4809 } 4810 PF_RULES_WLOCK(); 4811 error = pfr_add_addrs(&io->pfrio_table, pfras, 4812 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4813 PFR_FLAG_USERIOCTL); 4814 PF_RULES_WUNLOCK(); 4815 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4816 error = copyout(pfras, io->pfrio_buffer, totlen); 4817 free(pfras, M_TEMP); 4818 break; 4819 } 4820 4821 case DIOCRDELADDRS: { 4822 struct pfioc_table *io = (struct pfioc_table *)addr; 4823 struct pfr_addr *pfras; 4824 size_t totlen; 4825 4826 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4827 error = ENODEV; 4828 break; 4829 } 4830 if (io->pfrio_size < 0 || 4831 io->pfrio_size > pf_ioctl_maxcount || 4832 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4833 error = EINVAL; 4834 break; 4835 } 4836 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4837 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4838 
M_TEMP, M_WAITOK); 4839 error = copyin(io->pfrio_buffer, pfras, totlen); 4840 if (error) { 4841 free(pfras, M_TEMP); 4842 break; 4843 } 4844 PF_RULES_WLOCK(); 4845 error = pfr_del_addrs(&io->pfrio_table, pfras, 4846 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4847 PFR_FLAG_USERIOCTL); 4848 PF_RULES_WUNLOCK(); 4849 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4850 error = copyout(pfras, io->pfrio_buffer, totlen); 4851 free(pfras, M_TEMP); 4852 break; 4853 } 4854 4855 case DIOCRSETADDRS: { 4856 struct pfioc_table *io = (struct pfioc_table *)addr; 4857 struct pfr_addr *pfras; 4858 size_t totlen, count; 4859 4860 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4861 error = ENODEV; 4862 break; 4863 } 4864 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4865 error = EINVAL; 4866 break; 4867 } 4868 count = max(io->pfrio_size, io->pfrio_size2); 4869 if (count > pf_ioctl_maxcount || 4870 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4871 error = EINVAL; 4872 break; 4873 } 4874 totlen = count * sizeof(struct pfr_addr); 4875 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4876 M_WAITOK); 4877 error = copyin(io->pfrio_buffer, pfras, totlen); 4878 if (error) { 4879 free(pfras, M_TEMP); 4880 break; 4881 } 4882 PF_RULES_WLOCK(); 4883 error = pfr_set_addrs(&io->pfrio_table, pfras, 4884 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4885 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4886 PFR_FLAG_USERIOCTL, 0); 4887 PF_RULES_WUNLOCK(); 4888 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4889 error = copyout(pfras, io->pfrio_buffer, totlen); 4890 free(pfras, M_TEMP); 4891 break; 4892 } 4893 4894 case DIOCRGETADDRS: { 4895 struct pfioc_table *io = (struct pfioc_table *)addr; 4896 struct pfr_addr *pfras; 4897 size_t totlen; 4898 4899 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4900 error = ENODEV; 4901 break; 4902 } 4903 if (io->pfrio_size < 0 || 4904 io->pfrio_size > pf_ioctl_maxcount || 4905 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4906 error = EINVAL; 4907 break; 4908 } 4909 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4910 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4911 M_TEMP, M_WAITOK | M_ZERO); 4912 PF_RULES_RLOCK(); 4913 error = pfr_get_addrs(&io->pfrio_table, pfras, 4914 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4915 PF_RULES_RUNLOCK(); 4916 if (error == 0) 4917 error = copyout(pfras, io->pfrio_buffer, totlen); 4918 free(pfras, M_TEMP); 4919 break; 4920 } 4921 4922 case DIOCRGETASTATS: { 4923 struct pfioc_table *io = (struct pfioc_table *)addr; 4924 struct pfr_astats *pfrastats; 4925 size_t totlen; 4926 4927 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 4928 error = ENODEV; 4929 break; 4930 } 4931 if (io->pfrio_size < 0 || 4932 io->pfrio_size > pf_ioctl_maxcount || 4933 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 4934 error = EINVAL; 4935 break; 4936 } 4937 totlen = io->pfrio_size * sizeof(struct pfr_astats); 4938 pfrastats = mallocarray(io->pfrio_size, 4939 sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 4940 PF_RULES_RLOCK(); 4941 error = pfr_get_astats(&io->pfrio_table, pfrastats, 4942 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4943 PF_RULES_RUNLOCK(); 4944 if (error == 0) 4945 error = copyout(pfrastats, io->pfrio_buffer, totlen); 4946 free(pfrastats, M_TEMP); 4947 break; 4948 } 4949 4950 case DIOCRCLRASTATS: { 4951 struct pfioc_table *io = (struct pfioc_table *)addr; 4952 struct pfr_addr *pfras; 4953 size_t totlen; 4954 4955 if 
(io->pfrio_esize != sizeof(struct pfr_addr)) { 4956 error = ENODEV; 4957 break; 4958 } 4959 if (io->pfrio_size < 0 || 4960 io->pfrio_size > pf_ioctl_maxcount || 4961 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4962 error = EINVAL; 4963 break; 4964 } 4965 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4966 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4967 M_TEMP, M_WAITOK); 4968 error = copyin(io->pfrio_buffer, pfras, totlen); 4969 if (error) { 4970 free(pfras, M_TEMP); 4971 break; 4972 } 4973 PF_RULES_WLOCK(); 4974 error = pfr_clr_astats(&io->pfrio_table, pfras, 4975 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 4976 PFR_FLAG_USERIOCTL); 4977 PF_RULES_WUNLOCK(); 4978 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4979 error = copyout(pfras, io->pfrio_buffer, totlen); 4980 free(pfras, M_TEMP); 4981 break; 4982 } 4983 4984 case DIOCRTSTADDRS: { 4985 struct pfioc_table *io = (struct pfioc_table *)addr; 4986 struct pfr_addr *pfras; 4987 size_t totlen; 4988 4989 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4990 error = ENODEV; 4991 break; 4992 } 4993 if (io->pfrio_size < 0 || 4994 io->pfrio_size > pf_ioctl_maxcount || 4995 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4996 error = EINVAL; 4997 break; 4998 } 4999 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5000 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5001 M_TEMP, M_WAITOK); 5002 error = copyin(io->pfrio_buffer, pfras, totlen); 5003 if (error) { 5004 free(pfras, M_TEMP); 5005 break; 5006 } 5007 PF_RULES_RLOCK(); 5008 error = pfr_tst_addrs(&io->pfrio_table, pfras, 5009 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 5010 PFR_FLAG_USERIOCTL); 5011 PF_RULES_RUNLOCK(); 5012 if (error == 0) 5013 error = copyout(pfras, io->pfrio_buffer, totlen); 5014 free(pfras, M_TEMP); 5015 break; 5016 } 5017 5018 case DIOCRINADEFINE: { 5019 struct pfioc_table *io = (struct pfioc_table *)addr; 5020 struct pfr_addr *pfras; 5021 size_t totlen; 5022 5023 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5024 error = ENODEV; 5025 break; 5026 } 5027 if (io->pfrio_size < 0 || 5028 io->pfrio_size > pf_ioctl_maxcount || 5029 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5030 error = EINVAL; 5031 break; 5032 } 5033 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5034 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5035 M_TEMP, M_WAITOK); 5036 error = copyin(io->pfrio_buffer, pfras, totlen); 5037 if (error) { 5038 free(pfras, M_TEMP); 5039 break; 5040 } 5041 PF_RULES_WLOCK(); 5042 error = pfr_ina_define(&io->pfrio_table, pfras, 5043 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 5044 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5045 PF_RULES_WUNLOCK(); 5046 free(pfras, M_TEMP); 5047 break; 5048 } 5049 5050 case DIOCOSFPADD: { 5051 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5052 PF_RULES_WLOCK(); 5053 error = pf_osfp_add(io); 5054 PF_RULES_WUNLOCK(); 5055 break; 5056 } 5057 5058 case DIOCOSFPGET: { 5059 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5060 PF_RULES_RLOCK(); 5061 error = pf_osfp_get(io); 5062 PF_RULES_RUNLOCK(); 5063 break; 5064 } 5065 5066 case DIOCXBEGIN: { 5067 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5068 struct pfioc_trans_e *ioes, *ioe; 5069 size_t totlen; 5070 int i; 5071 5072 if (io->esize != sizeof(*ioe)) { 5073 error = ENODEV; 5074 break; 5075 } 5076 if (io->size < 0 || 5077 io->size > pf_ioctl_maxcount || 5078 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5079 
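/* Reject element counts whose total byte size would overflow the
* mallocarray() allocation below. */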
error = EINVAL;
5080 break;
5081 }
5082 totlen = sizeof(struct pfioc_trans_e) * io->size;
5083 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5084 M_TEMP, M_WAITOK);
5085 error = copyin(io->array, ioes, totlen);
5086 if (error) {
5087 free(ioes, M_TEMP);
5088 break;
5089 }
5090 /* Ensure there are no more Ethernet rules to clean up. */
5091 NET_EPOCH_DRAIN_CALLBACKS();
5092 PF_RULES_WLOCK();
5093 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5094 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5095 switch (ioe->rs_num) {
5096 case PF_RULESET_ETH:
5097 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5098 PF_RULES_WUNLOCK();
5099 free(ioes, M_TEMP);
5100 goto fail;
5101 }
5102 break;
5103 #ifdef ALTQ
5104 case PF_RULESET_ALTQ:
5105 if (ioe->anchor[0]) {
5106 PF_RULES_WUNLOCK();
5107 free(ioes, M_TEMP);
5108 error = EINVAL;
5109 goto fail;
5110 }
5111 if ((error = pf_begin_altq(&ioe->ticket))) {
5112 PF_RULES_WUNLOCK();
5113 free(ioes, M_TEMP);
5114 goto fail;
5115 }
5116 break;
5117 #endif /* ALTQ */
5118 case PF_RULESET_TABLE:
5119 {
5120 struct pfr_table table;
5121
5122 bzero(&table, sizeof(table));
5123 strlcpy(table.pfrt_anchor, ioe->anchor,
5124 sizeof(table.pfrt_anchor));
5125 if ((error = pfr_ina_begin(&table,
5126 &ioe->ticket, NULL, 0))) {
5127 PF_RULES_WUNLOCK();
5128 free(ioes, M_TEMP);
5129 goto fail;
5130 }
5131 break;
5132 }
5133 default:
5134 if ((error = pf_begin_rules(&ioe->ticket,
5135 ioe->rs_num, ioe->anchor))) {
5136 PF_RULES_WUNLOCK();
5137 free(ioes, M_TEMP);
5138 goto fail;
5139 }
5140 break;
5141 }
5142 }
5143 PF_RULES_WUNLOCK();
5144 error = copyout(ioes, io->array, totlen);
5145 free(ioes, M_TEMP);
5146 break;
5147 }
5148
5149 case DIOCXROLLBACK: {
5150 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5151 struct pfioc_trans_e *ioe, *ioes;
5152 size_t totlen;
5153 int i;
5154
5155 if (io->esize != sizeof(*ioe)) {
5156 error = ENODEV;
5157 break;
5158 }
5159 if (io->size < 0 ||
5160 io->size > pf_ioctl_maxcount ||
5161 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5162 error = EINVAL;
5163 break;
5164 }
5165 totlen = sizeof(struct pfioc_trans_e) * io->size;
5166 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5167 M_TEMP, M_WAITOK);
5168 error = copyin(io->array, ioes, totlen);
5169 if (error) {
5170 free(ioes, M_TEMP);
5171 break;
5172 }
5173 PF_RULES_WLOCK();
5174 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5175 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5176 switch (ioe->rs_num) {
5177 case PF_RULESET_ETH:
5178 if ((error = pf_rollback_eth(ioe->ticket,
5179 ioe->anchor))) {
5180 PF_RULES_WUNLOCK();
5181 free(ioes, M_TEMP);
5182 goto fail; /* really bad */
5183 }
5184 break;
5185 #ifdef ALTQ
5186 case PF_RULESET_ALTQ:
5187 if (ioe->anchor[0]) {
5188 PF_RULES_WUNLOCK();
5189 free(ioes, M_TEMP);
5190 error = EINVAL;
5191 goto fail;
5192 }
5193 if ((error = pf_rollback_altq(ioe->ticket))) {
5194 PF_RULES_WUNLOCK();
5195 free(ioes, M_TEMP);
5196 goto fail; /* really bad */
5197 }
5198 break;
5199 #endif /* ALTQ */
5200 case PF_RULESET_TABLE:
5201 {
5202 struct pfr_table table;
5203
5204 bzero(&table, sizeof(table));
5205 strlcpy(table.pfrt_anchor, ioe->anchor,
5206 sizeof(table.pfrt_anchor));
5207 if ((error = pfr_ina_rollback(&table,
5208 ioe->ticket, NULL, 0))) {
5209 PF_RULES_WUNLOCK();
5210 free(ioes, M_TEMP);
5211 goto fail; /* really bad */
5212 }
5213 break;
5214 }
5215 default:
5216 if ((error = pf_rollback_rules(ioe->ticket,
5217 ioe->rs_num, ioe->anchor))) {
5218 PF_RULES_WUNLOCK();
5219 free(ioes,
M_TEMP);
5220 goto fail; /* really bad */
5221 }
5222 break;
5223 }
5224 }
5225 PF_RULES_WUNLOCK();
5226 free(ioes, M_TEMP);
5227 break;
5228 }
5229
5230 case DIOCXCOMMIT: {
5231 struct pfioc_trans *io = (struct pfioc_trans *)addr;
5232 struct pfioc_trans_e *ioe, *ioes;
5233 struct pf_kruleset *rs;
5234 struct pf_keth_ruleset *ers;
5235 size_t totlen;
5236 int i;
5237
5238 if (io->esize != sizeof(*ioe)) {
5239 error = ENODEV;
5240 break;
5241 }
5242
5243 if (io->size < 0 ||
5244 io->size > pf_ioctl_maxcount ||
5245 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5246 error = EINVAL;
5247 break;
5248 }
5249
5250 totlen = sizeof(struct pfioc_trans_e) * io->size;
5251 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5252 M_TEMP, M_WAITOK);
5253 error = copyin(io->array, ioes, totlen);
5254 if (error) {
5255 free(ioes, M_TEMP);
5256 break;
5257 }
5258 PF_RULES_WLOCK();
5259 /* First make sure everything will succeed. */
5260 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5261 ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5262 switch (ioe->rs_num) {
5263 case PF_RULESET_ETH:
5264 ers = pf_find_keth_ruleset(ioe->anchor);
5265 if (ers == NULL || ioe->ticket == 0 ||
5266 ioe->ticket != ers->inactive.ticket) {
5267 PF_RULES_WUNLOCK();
5268 free(ioes, M_TEMP);
5269 error = EINVAL;
5270 goto fail;
5271 }
5272 break;
5273 #ifdef ALTQ
5274 case PF_RULESET_ALTQ:
5275 if (ioe->anchor[0]) {
5276 PF_RULES_WUNLOCK();
5277 free(ioes, M_TEMP);
5278 error = EINVAL;
5279 goto fail;
5280 }
5281 if (!V_altqs_inactive_open || ioe->ticket !=
5282 V_ticket_altqs_inactive) {
5283 PF_RULES_WUNLOCK();
5284 free(ioes, M_TEMP);
5285 error = EBUSY;
5286 goto fail;
5287 }
5288 break;
5289 #endif /* ALTQ */
5290 case PF_RULESET_TABLE:
5291 rs = pf_find_kruleset(ioe->anchor);
5292 if (rs == NULL || !rs->topen || ioe->ticket !=
5293 rs->tticket) {
5294 PF_RULES_WUNLOCK();
5295 free(ioes, M_TEMP);
5296 error = EBUSY;
5297 goto fail;
5298 }
5299 break;
5300 default:
5301 if (ioe->rs_num < 0 || ioe->rs_num >=
5302 PF_RULESET_MAX) {
5303 PF_RULES_WUNLOCK();
5304 free(ioes, M_TEMP);
5305 error = EINVAL;
5306 goto fail;
5307 }
5308 rs = pf_find_kruleset(ioe->anchor);
5309 if (rs == NULL ||
5310 !rs->rules[ioe->rs_num].inactive.open ||
5311 rs->rules[ioe->rs_num].inactive.ticket !=
5312 ioe->ticket) {
5313 PF_RULES_WUNLOCK();
5314 free(ioes, M_TEMP);
5315 error = EBUSY;
5316 goto fail;
5317 }
5318 break;
5319 }
5320 }
5321 /* Now do the commit - no errors should happen here.
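* All tickets were validated in the first pass above, so each
* commit below is expected to succeed; a failure here would leave
* the rulesets only partially switched.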
*/
5322 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5323 switch (ioe->rs_num) {
5324 case PF_RULESET_ETH:
5325 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5326 PF_RULES_WUNLOCK();
5327 free(ioes, M_TEMP);
5328 goto fail; /* really bad */
5329 }
5330 break;
5331 #ifdef ALTQ
5332 case PF_RULESET_ALTQ:
5333 if ((error = pf_commit_altq(ioe->ticket))) {
5334 PF_RULES_WUNLOCK();
5335 free(ioes, M_TEMP);
5336 goto fail; /* really bad */
5337 }
5338 break;
5339 #endif /* ALTQ */
5340 case PF_RULESET_TABLE:
5341 {
5342 struct pfr_table table;
5343
5344 bzero(&table, sizeof(table));
5345 (void)strlcpy(table.pfrt_anchor, ioe->anchor,
5346 sizeof(table.pfrt_anchor));
5347 if ((error = pfr_ina_commit(&table,
5348 ioe->ticket, NULL, NULL, 0))) {
5349 PF_RULES_WUNLOCK();
5350 free(ioes, M_TEMP);
5351 goto fail; /* really bad */
5352 }
5353 break;
5354 }
5355 default:
5356 if ((error = pf_commit_rules(ioe->ticket,
5357 ioe->rs_num, ioe->anchor))) {
5358 PF_RULES_WUNLOCK();
5359 free(ioes, M_TEMP);
5360 goto fail; /* really bad */
5361 }
5362 break;
5363 }
5364 }
5365 PF_RULES_WUNLOCK();
5366
5367 /* Only hook into Ethernet traffic if we've got rules for it. */
5368 if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5369 hook_pf_eth();
5370 else
5371 dehook_pf_eth();
5372
5373 free(ioes, M_TEMP);
5374 break;
5375 }
5376
5377 case DIOCGETSRCNODES: {
5378 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
5379 struct pf_srchash *sh;
5380 struct pf_ksrc_node *n;
5381 struct pf_src_node *p, *pstore;
5382 uint32_t i, nr = 0;
5383
5384 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5385 i++, sh++) {
5386 PF_HASHROW_LOCK(sh);
5387 LIST_FOREACH(n, &sh->nodes, entry)
5388 nr++;
5389 PF_HASHROW_UNLOCK(sh);
5390 }
5391
5392 psn->psn_len = min(psn->psn_len,
5393 sizeof(struct pf_src_node) * nr);
5394
5395 if (psn->psn_len == 0) {
5396 psn->psn_len = sizeof(struct pf_src_node) * nr;
5397 break;
5398 }
5399
5400 nr = 0;
5401
5402 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5403 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5404 i++, sh++) {
5405 PF_HASHROW_LOCK(sh);
5406 LIST_FOREACH(n, &sh->nodes, entry) {
5407
5408 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5409 break;
5410
5411 pf_src_node_copy(n, p);
5412
5413 p++;
5414 nr++;
5415 }
5416 PF_HASHROW_UNLOCK(sh);
5417 }
5418 error = copyout(pstore, psn->psn_src_nodes,
5419 sizeof(struct pf_src_node) * nr);
5420 if (error) {
5421 free(pstore, M_TEMP);
5422 break;
5423 }
5424 psn->psn_len = sizeof(struct pf_src_node) * nr;
5425 free(pstore, M_TEMP);
5426 break;
5427 }
5428
5429 case DIOCCLRSRCNODES: {
5430 pf_clear_srcnodes(NULL);
5431 pf_purge_expired_src_nodes();
5432 break;
5433 }
5434
5435 case DIOCKILLSRCNODES:
5436 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5437 break;
5438
5439 #ifdef COMPAT_FREEBSD13
5440 case DIOCKEEPCOUNTERS_FREEBSD13:
5441 #endif
5442 case DIOCKEEPCOUNTERS:
5443 error = pf_keepcounters((struct pfioc_nv *)addr);
5444 break;
5445
5446 case DIOCGETSYNCOOKIES:
5447 error = pf_get_syncookies((struct pfioc_nv *)addr);
5448 break;
5449
5450 case DIOCSETSYNCOOKIES:
5451 error = pf_set_syncookies((struct pfioc_nv *)addr);
5452 break;
5453
5454 case DIOCSETHOSTID: {
5455 u_int32_t *hostid = (u_int32_t *)addr;
5456
5457 PF_RULES_WLOCK();
5458 if (*hostid == 0)
5459 V_pf_status.hostid = arc4random();
5460 else
5461 V_pf_status.hostid = *hostid;
5462 PF_RULES_WUNLOCK();
5463 break;
5464 }
5465
5466 case DIOCOSFPFLUSH:
5467 PF_RULES_WLOCK();
5468 pf_osfp_flush();
5469
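/* Note: this empties the passive OS fingerprint list; it is the
* path taken by pfctl -F osfp. */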
PF_RULES_WUNLOCK(); 5470 break; 5471 5472 case DIOCIGETIFACES: { 5473 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5474 struct pfi_kif *ifstore; 5475 size_t bufsiz; 5476 5477 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5478 error = ENODEV; 5479 break; 5480 } 5481 5482 if (io->pfiio_size < 0 || 5483 io->pfiio_size > pf_ioctl_maxcount || 5484 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5485 error = EINVAL; 5486 break; 5487 } 5488 5489 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5490 5491 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5492 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5493 M_TEMP, M_WAITOK | M_ZERO); 5494 5495 PF_RULES_RLOCK(); 5496 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5497 PF_RULES_RUNLOCK(); 5498 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5499 free(ifstore, M_TEMP); 5500 break; 5501 } 5502 5503 case DIOCSETIFFLAG: { 5504 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5505 5506 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5507 5508 PF_RULES_WLOCK(); 5509 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5510 PF_RULES_WUNLOCK(); 5511 break; 5512 } 5513 5514 case DIOCCLRIFFLAG: { 5515 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5516 5517 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5518 5519 PF_RULES_WLOCK(); 5520 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5521 PF_RULES_WUNLOCK(); 5522 break; 5523 } 5524 5525 case DIOCSETREASS: { 5526 u_int32_t *reass = (u_int32_t *)addr; 5527 5528 V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF); 5529 /* Removal of DF flag without reassembly enabled is not a 5530 * valid combination. Disable reassembly in such case. */ 5531 if (!(V_pf_status.reass & PF_REASS_ENABLED)) 5532 V_pf_status.reass = 0; 5533 break; 5534 } 5535 5536 default: 5537 error = ENODEV; 5538 break; 5539 } 5540 fail: 5541 CURVNET_RESTORE(); 5542 5543 #undef ERROUT_IOCTL 5544 5545 return (error); 5546 } 5547 5548 void 5549 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version) 5550 { 5551 bzero(sp, sizeof(union pfsync_state_union)); 5552 5553 /* copy from state key */ 5554 sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5555 sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5556 sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5557 sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5558 sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5559 sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5560 sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5561 sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5562 sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto; 5563 sp->pfs_1301.af = st->key[PF_SK_WIRE]->af; 5564 5565 /* copy from state */ 5566 strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname)); 5567 bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr)); 5568 sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000)); 5569 sp->pfs_1301.expire = pf_state_expires(st); 5570 if (sp->pfs_1301.expire <= time_uptime) 5571 sp->pfs_1301.expire = htonl(0); 5572 else 5573 sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime); 5574 5575 sp->pfs_1301.direction = st->direction; 5576 sp->pfs_1301.log = st->act.log; 5577 sp->pfs_1301.timeout = st->timeout; 5578 5579 switch (msg_version) { 5580 case 
PFSYNC_MSG_VERSION_1301: 5581 sp->pfs_1301.state_flags = st->state_flags; 5582 break; 5583 case PFSYNC_MSG_VERSION_1400: 5584 sp->pfs_1400.state_flags = htons(st->state_flags); 5585 sp->pfs_1400.qid = htons(st->act.qid); 5586 sp->pfs_1400.pqid = htons(st->act.pqid); 5587 sp->pfs_1400.dnpipe = htons(st->act.dnpipe); 5588 sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe); 5589 sp->pfs_1400.rtableid = htonl(st->act.rtableid); 5590 sp->pfs_1400.min_ttl = st->act.min_ttl; 5591 sp->pfs_1400.set_tos = st->act.set_tos; 5592 sp->pfs_1400.max_mss = htons(st->act.max_mss); 5593 sp->pfs_1400.set_prio[0] = st->act.set_prio[0]; 5594 sp->pfs_1400.set_prio[1] = st->act.set_prio[1]; 5595 sp->pfs_1400.rt = st->rt; 5596 if (st->rt_kif) 5597 strlcpy(sp->pfs_1400.rt_ifname, 5598 st->rt_kif->pfik_name, 5599 sizeof(sp->pfs_1400.rt_ifname)); 5600 break; 5601 default: 5602 panic("%s: Unsupported pfsync_msg_version %d", 5603 __func__, msg_version); 5604 } 5605 5606 if (st->src_node) 5607 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE; 5608 if (st->nat_src_node) 5609 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5610 5611 sp->pfs_1301.id = st->id; 5612 sp->pfs_1301.creatorid = st->creatorid; 5613 pf_state_peer_hton(&st->src, &sp->pfs_1301.src); 5614 pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst); 5615 5616 if (st->rule.ptr == NULL) 5617 sp->pfs_1301.rule = htonl(-1); 5618 else 5619 sp->pfs_1301.rule = htonl(st->rule.ptr->nr); 5620 if (st->anchor.ptr == NULL) 5621 sp->pfs_1301.anchor = htonl(-1); 5622 else 5623 sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr); 5624 if (st->nat_rule.ptr == NULL) 5625 sp->pfs_1301.nat_rule = htonl(-1); 5626 else 5627 sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr); 5628 5629 pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]); 5630 pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]); 5631 pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]); 5632 pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]); 5633 } 5634 5635 void 5636 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5637 { 5638 bzero(sp, sizeof(*sp)); 5639 5640 sp->version = PF_STATE_VERSION; 5641 5642 /* copy from state key */ 5643 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5644 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5645 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5646 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5647 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5648 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5649 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5650 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5651 sp->proto = st->key[PF_SK_WIRE]->proto; 5652 sp->af = st->key[PF_SK_WIRE]->af; 5653 5654 /* copy from state */ 5655 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5656 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5657 sizeof(sp->orig_ifname)); 5658 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5659 sp->creation = htonl(time_uptime - (st->creation / 1000)); 5660 sp->expire = pf_state_expires(st); 5661 if (sp->expire <= time_uptime) 5662 sp->expire = htonl(0); 5663 else 5664 sp->expire = htonl(sp->expire - time_uptime); 5665 5666 sp->direction = st->direction; 5667 sp->log = st->act.log; 5668 sp->timeout = st->timeout; 5669 /* 8 bits for the old libpfctl, 16 bits for the new libpfctl */ 5670 sp->state_flags_compat = st->state_flags; 5671 sp->state_flags = htons(st->state_flags); 5672 if 
(st->src_node) 5673 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5674 if (st->nat_src_node) 5675 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5676 5677 sp->id = st->id; 5678 sp->creatorid = st->creatorid; 5679 pf_state_peer_hton(&st->src, &sp->src); 5680 pf_state_peer_hton(&st->dst, &sp->dst); 5681 5682 if (st->rule.ptr == NULL) 5683 sp->rule = htonl(-1); 5684 else 5685 sp->rule = htonl(st->rule.ptr->nr); 5686 if (st->anchor.ptr == NULL) 5687 sp->anchor = htonl(-1); 5688 else 5689 sp->anchor = htonl(st->anchor.ptr->nr); 5690 if (st->nat_rule.ptr == NULL) 5691 sp->nat_rule = htonl(-1); 5692 else 5693 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5694 5695 sp->packets[0] = st->packets[0]; 5696 sp->packets[1] = st->packets[1]; 5697 sp->bytes[0] = st->bytes[0]; 5698 sp->bytes[1] = st->bytes[1]; 5699 5700 sp->qid = htons(st->act.qid); 5701 sp->pqid = htons(st->act.pqid); 5702 sp->dnpipe = htons(st->act.dnpipe); 5703 sp->dnrpipe = htons(st->act.dnrpipe); 5704 sp->rtableid = htonl(st->act.rtableid); 5705 sp->min_ttl = st->act.min_ttl; 5706 sp->set_tos = st->act.set_tos; 5707 sp->max_mss = htons(st->act.max_mss); 5708 sp->rt = st->rt; 5709 if (st->rt_kif) 5710 strlcpy(sp->rt_ifname, st->rt_kif->pfik_name, 5711 sizeof(sp->rt_ifname)); 5712 sp->set_prio[0] = st->act.set_prio[0]; 5713 sp->set_prio[1] = st->act.set_prio[1]; 5714 5715 } 5716 5717 static void 5718 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5719 { 5720 struct pfr_ktable *kt; 5721 5722 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5723 5724 kt = aw->p.tbl; 5725 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5726 kt = kt->pfrkt_root; 5727 aw->p.tbl = NULL; 5728 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 5729 kt->pfrkt_cnt : -1; 5730 } 5731 5732 static int 5733 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5734 size_t number, char **names) 5735 { 5736 nvlist_t *nvc; 5737 5738 nvc = nvlist_create(0); 5739 if (nvc == NULL) 5740 return (ENOMEM); 5741 5742 for (int i = 0; i < number; i++) { 5743 nvlist_append_number_array(nvc, "counters", 5744 counter_u64_fetch(counters[i])); 5745 nvlist_append_string_array(nvc, "names", 5746 names[i]); 5747 nvlist_append_number_array(nvc, "ids", 5748 i); 5749 } 5750 nvlist_add_nvlist(nvl, name, nvc); 5751 nvlist_destroy(nvc); 5752 5753 return (0); 5754 } 5755 5756 static int 5757 pf_getstatus(struct pfioc_nv *nv) 5758 { 5759 nvlist_t *nvl = NULL, *nvc = NULL; 5760 void *nvlpacked = NULL; 5761 int error; 5762 struct pf_status s; 5763 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5764 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5765 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5766 PF_RULES_RLOCK_TRACKER; 5767 5768 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5769 5770 PF_RULES_RLOCK(); 5771 5772 nvl = nvlist_create(0); 5773 if (nvl == NULL) 5774 ERROUT(ENOMEM); 5775 5776 nvlist_add_bool(nvl, "running", V_pf_status.running); 5777 nvlist_add_number(nvl, "since", V_pf_status.since); 5778 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5779 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5780 nvlist_add_number(nvl, "states", V_pf_status.states); 5781 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5782 nvlist_add_number(nvl, "reass", V_pf_status.reass); 5783 nvlist_add_bool(nvl, "syncookies_active", 5784 V_pf_status.syncookies_active); 5785 nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen); 5786 5787 /* counters */ 5788 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 
5789 PFRES_MAX, pf_reasons); 5790 if (error != 0) 5791 ERROUT(error); 5792 5793 /* lcounters */ 5794 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5795 KLCNT_MAX, pf_lcounter); 5796 if (error != 0) 5797 ERROUT(error); 5798 5799 /* fcounters */ 5800 nvc = nvlist_create(0); 5801 if (nvc == NULL) 5802 ERROUT(ENOMEM); 5803 5804 for (int i = 0; i < FCNT_MAX; i++) { 5805 nvlist_append_number_array(nvc, "counters", 5806 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5807 nvlist_append_string_array(nvc, "names", 5808 pf_fcounter[i]); 5809 nvlist_append_number_array(nvc, "ids", 5810 i); 5811 } 5812 nvlist_add_nvlist(nvl, "fcounters", nvc); 5813 nvlist_destroy(nvc); 5814 nvc = NULL; 5815 5816 /* scounters */ 5817 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5818 SCNT_MAX, pf_fcounter); 5819 if (error != 0) 5820 ERROUT(error); 5821 5822 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5823 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5824 PF_MD5_DIGEST_LENGTH); 5825 5826 pfi_update_status(V_pf_status.ifname, &s); 5827 5828 /* pcounters / bcounters */ 5829 for (int i = 0; i < 2; i++) { 5830 for (int j = 0; j < 2; j++) { 5831 for (int k = 0; k < 2; k++) { 5832 nvlist_append_number_array(nvl, "pcounters", 5833 s.pcounters[i][j][k]); 5834 } 5835 nvlist_append_number_array(nvl, "bcounters", 5836 s.bcounters[i][j]); 5837 } 5838 } 5839 5840 nvlpacked = nvlist_pack(nvl, &nv->len); 5841 if (nvlpacked == NULL) 5842 ERROUT(ENOMEM); 5843 5844 if (nv->size == 0) 5845 ERROUT(0); 5846 else if (nv->size < nv->len) 5847 ERROUT(ENOSPC); 5848 5849 PF_RULES_RUNLOCK(); 5850 error = copyout(nvlpacked, nv->data, nv->len); 5851 goto done; 5852 5853 #undef ERROUT 5854 errout: 5855 PF_RULES_RUNLOCK(); 5856 done: 5857 free(nvlpacked, M_NVLIST); 5858 nvlist_destroy(nvc); 5859 nvlist_destroy(nvl); 5860 5861 return (error); 5862 } 5863 5864 /* 5865 * XXX - Check for version mismatch!!! 5866 */ 5867 static void 5868 pf_clear_all_states(void) 5869 { 5870 struct epoch_tracker et; 5871 struct pf_kstate *s; 5872 u_int i; 5873 5874 NET_EPOCH_ENTER(et); 5875 for (i = 0; i <= V_pf_hashmask; i++) { 5876 struct pf_idhash *ih = &V_pf_idhash[i]; 5877 relock: 5878 PF_HASHROW_LOCK(ih); 5879 LIST_FOREACH(s, &ih->states, entry) { 5880 s->timeout = PFTM_PURGE; 5881 /* Don't send out individual delete messages. 
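* Flagging each state PFSTATE_NOSYNC before unlinking it keeps
* pfsync from announcing a separate delete for every entry while
* the whole state table is being flushed.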
*/ 5882 s->state_flags |= PFSTATE_NOSYNC; 5883 pf_unlink_state(s); 5884 goto relock; 5885 } 5886 PF_HASHROW_UNLOCK(ih); 5887 } 5888 NET_EPOCH_EXIT(et); 5889 } 5890 5891 static int 5892 pf_clear_tables(void) 5893 { 5894 struct pfioc_table io; 5895 int error; 5896 5897 bzero(&io, sizeof(io)); 5898 io.pfrio_flags |= PFR_FLAG_ALLRSETS; 5899 5900 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5901 io.pfrio_flags); 5902 5903 return (error); 5904 } 5905 5906 static void 5907 pf_clear_srcnodes(struct pf_ksrc_node *n) 5908 { 5909 struct pf_kstate *s; 5910 int i; 5911 5912 for (i = 0; i <= V_pf_hashmask; i++) { 5913 struct pf_idhash *ih = &V_pf_idhash[i]; 5914 5915 PF_HASHROW_LOCK(ih); 5916 LIST_FOREACH(s, &ih->states, entry) { 5917 if (n == NULL || n == s->src_node) 5918 s->src_node = NULL; 5919 if (n == NULL || n == s->nat_src_node) 5920 s->nat_src_node = NULL; 5921 } 5922 PF_HASHROW_UNLOCK(ih); 5923 } 5924 5925 if (n == NULL) { 5926 struct pf_srchash *sh; 5927 5928 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; 5929 i++, sh++) { 5930 PF_HASHROW_LOCK(sh); 5931 LIST_FOREACH(n, &sh->nodes, entry) { 5932 n->expire = 1; 5933 n->states = 0; 5934 } 5935 PF_HASHROW_UNLOCK(sh); 5936 } 5937 } else { 5938 /* XXX: hash slot should already be locked here. */ 5939 n->expire = 1; 5940 n->states = 0; 5941 } 5942 } 5943 5944 static void 5945 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5946 { 5947 struct pf_ksrc_node_list kill; 5948 5949 LIST_INIT(&kill); 5950 for (int i = 0; i <= V_pf_srchashmask; i++) { 5951 struct pf_srchash *sh = &V_pf_srchash[i]; 5952 struct pf_ksrc_node *sn, *tmp; 5953 5954 PF_HASHROW_LOCK(sh); 5955 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5956 if (PF_MATCHA(psnk->psnk_src.neg, 5957 &psnk->psnk_src.addr.v.a.addr, 5958 &psnk->psnk_src.addr.v.a.mask, 5959 &sn->addr, sn->af) && 5960 PF_MATCHA(psnk->psnk_dst.neg, 5961 &psnk->psnk_dst.addr.v.a.addr, 5962 &psnk->psnk_dst.addr.v.a.mask, 5963 &sn->raddr, sn->af)) { 5964 pf_unlink_src_node(sn); 5965 LIST_INSERT_HEAD(&kill, sn, entry); 5966 sn->expire = 1; 5967 } 5968 PF_HASHROW_UNLOCK(sh); 5969 } 5970 5971 for (int i = 0; i <= V_pf_hashmask; i++) { 5972 struct pf_idhash *ih = &V_pf_idhash[i]; 5973 struct pf_kstate *s; 5974 5975 PF_HASHROW_LOCK(ih); 5976 LIST_FOREACH(s, &ih->states, entry) { 5977 if (s->src_node && s->src_node->expire == 1) 5978 s->src_node = NULL; 5979 if (s->nat_src_node && s->nat_src_node->expire == 1) 5980 s->nat_src_node = NULL; 5981 } 5982 PF_HASHROW_UNLOCK(ih); 5983 } 5984 5985 psnk->psnk_killed = pf_free_src_nodes(&kill); 5986 } 5987 5988 static int 5989 pf_keepcounters(struct pfioc_nv *nv) 5990 { 5991 nvlist_t *nvl = NULL; 5992 void *nvlpacked = NULL; 5993 int error = 0; 5994 5995 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5996 5997 if (nv->len > pf_ioctl_maxcount) 5998 ERROUT(ENOMEM); 5999 6000 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6001 error = copyin(nv->data, nvlpacked, nv->len); 6002 if (error) 6003 ERROUT(error); 6004 6005 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6006 if (nvl == NULL) 6007 ERROUT(EBADMSG); 6008 6009 if (! 
nvlist_exists_bool(nvl, "keep_counters")) 6010 ERROUT(EBADMSG); 6011 6012 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 6013 6014 on_error: 6015 nvlist_destroy(nvl); 6016 free(nvlpacked, M_NVLIST); 6017 return (error); 6018 } 6019 6020 unsigned int 6021 pf_clear_states(const struct pf_kstate_kill *kill) 6022 { 6023 struct pf_state_key_cmp match_key; 6024 struct pf_kstate *s; 6025 struct pfi_kkif *kif; 6026 int idx; 6027 unsigned int killed = 0, dir; 6028 6029 NET_EPOCH_ASSERT(); 6030 6031 for (unsigned int i = 0; i <= V_pf_hashmask; i++) { 6032 struct pf_idhash *ih = &V_pf_idhash[i]; 6033 6034 relock_DIOCCLRSTATES: 6035 PF_HASHROW_LOCK(ih); 6036 LIST_FOREACH(s, &ih->states, entry) { 6037 /* For floating states look at the original kif. */ 6038 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 6039 6040 if (kill->psk_ifname[0] && 6041 strcmp(kill->psk_ifname, 6042 kif->pfik_name)) 6043 continue; 6044 6045 if (kill->psk_kill_match) { 6046 bzero(&match_key, sizeof(match_key)); 6047 6048 if (s->direction == PF_OUT) { 6049 dir = PF_IN; 6050 idx = PF_SK_STACK; 6051 } else { 6052 dir = PF_OUT; 6053 idx = PF_SK_WIRE; 6054 } 6055 6056 match_key.af = s->key[idx]->af; 6057 match_key.proto = s->key[idx]->proto; 6058 PF_ACPY(&match_key.addr[0], 6059 &s->key[idx]->addr[1], match_key.af); 6060 match_key.port[0] = s->key[idx]->port[1]; 6061 PF_ACPY(&match_key.addr[1], 6062 &s->key[idx]->addr[0], match_key.af); 6063 match_key.port[1] = s->key[idx]->port[0]; 6064 } 6065 6066 /* 6067 * Don't send out individual 6068 * delete messages. 6069 */ 6070 s->state_flags |= PFSTATE_NOSYNC; 6071 pf_unlink_state(s); 6072 killed++; 6073 6074 if (kill->psk_kill_match) 6075 killed += pf_kill_matching_state(&match_key, 6076 dir); 6077 6078 goto relock_DIOCCLRSTATES; 6079 } 6080 PF_HASHROW_UNLOCK(ih); 6081 } 6082 6083 if (V_pfsync_clear_states_ptr != NULL) 6084 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6085 6086 return (killed); 6087 } 6088 6089 void 6090 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6091 { 6092 struct pf_kstate *s; 6093 6094 NET_EPOCH_ASSERT(); 6095 if (kill->psk_pfcmp.id) { 6096 if (kill->psk_pfcmp.creatorid == 0) 6097 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6098 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6099 kill->psk_pfcmp.creatorid))) { 6100 pf_unlink_state(s); 6101 *killed = 1; 6102 } 6103 return; 6104 } 6105 6106 for (unsigned int i = 0; i <= V_pf_hashmask; i++) 6107 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6108 } 6109 6110 static int 6111 pf_killstates_nv(struct pfioc_nv *nv) 6112 { 6113 struct pf_kstate_kill kill; 6114 struct epoch_tracker et; 6115 nvlist_t *nvl = NULL; 6116 void *nvlpacked = NULL; 6117 int error = 0; 6118 unsigned int killed = 0; 6119 6120 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6121 6122 if (nv->len > pf_ioctl_maxcount) 6123 ERROUT(ENOMEM); 6124 6125 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6126 error = copyin(nv->data, nvlpacked, nv->len); 6127 if (error) 6128 ERROUT(error); 6129 6130 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6131 if (nvl == NULL) 6132 ERROUT(EBADMSG); 6133 6134 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6135 if (error) 6136 ERROUT(error); 6137 6138 NET_EPOCH_ENTER(et); 6139 pf_killstates(&kill, &killed); 6140 NET_EPOCH_EXIT(et); 6141 6142 free(nvlpacked, M_NVLIST); 6143 nvlpacked = NULL; 6144 nvlist_destroy(nvl); 6145 nvl = nvlist_create(0); 6146 if (nvl == NULL) 6147 ERROUT(ENOMEM); 6148 6149 nvlist_add_number(nvl, "killed", killed); 6150 6151 
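/* Pack the reply nvlist: a caller passing nv->size == 0 only
* learns the required length via nv->len, and a buffer smaller
* than the packed data gets ENOSPC. */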
nvlpacked = nvlist_pack(nvl, &nv->len); 6152 if (nvlpacked == NULL) 6153 ERROUT(ENOMEM); 6154 6155 if (nv->size == 0) 6156 ERROUT(0); 6157 else if (nv->size < nv->len) 6158 ERROUT(ENOSPC); 6159 6160 error = copyout(nvlpacked, nv->data, nv->len); 6161 6162 on_error: 6163 nvlist_destroy(nvl); 6164 free(nvlpacked, M_NVLIST); 6165 return (error); 6166 } 6167 6168 static int 6169 pf_clearstates_nv(struct pfioc_nv *nv) 6170 { 6171 struct pf_kstate_kill kill; 6172 struct epoch_tracker et; 6173 nvlist_t *nvl = NULL; 6174 void *nvlpacked = NULL; 6175 int error = 0; 6176 unsigned int killed; 6177 6178 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6179 6180 if (nv->len > pf_ioctl_maxcount) 6181 ERROUT(ENOMEM); 6182 6183 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6184 error = copyin(nv->data, nvlpacked, nv->len); 6185 if (error) 6186 ERROUT(error); 6187 6188 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6189 if (nvl == NULL) 6190 ERROUT(EBADMSG); 6191 6192 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6193 if (error) 6194 ERROUT(error); 6195 6196 NET_EPOCH_ENTER(et); 6197 killed = pf_clear_states(&kill); 6198 NET_EPOCH_EXIT(et); 6199 6200 free(nvlpacked, M_NVLIST); 6201 nvlpacked = NULL; 6202 nvlist_destroy(nvl); 6203 nvl = nvlist_create(0); 6204 if (nvl == NULL) 6205 ERROUT(ENOMEM); 6206 6207 nvlist_add_number(nvl, "killed", killed); 6208 6209 nvlpacked = nvlist_pack(nvl, &nv->len); 6210 if (nvlpacked == NULL) 6211 ERROUT(ENOMEM); 6212 6213 if (nv->size == 0) 6214 ERROUT(0); 6215 else if (nv->size < nv->len) 6216 ERROUT(ENOSPC); 6217 6218 error = copyout(nvlpacked, nv->data, nv->len); 6219 6220 #undef ERROUT 6221 on_error: 6222 nvlist_destroy(nvl); 6223 free(nvlpacked, M_NVLIST); 6224 return (error); 6225 } 6226 6227 static int 6228 pf_getstate(struct pfioc_nv *nv) 6229 { 6230 nvlist_t *nvl = NULL, *nvls; 6231 void *nvlpacked = NULL; 6232 struct pf_kstate *s = NULL; 6233 int error = 0; 6234 uint64_t id, creatorid; 6235 6236 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 6237 6238 if (nv->len > pf_ioctl_maxcount) 6239 ERROUT(ENOMEM); 6240 6241 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6242 error = copyin(nv->data, nvlpacked, nv->len); 6243 if (error) 6244 ERROUT(error); 6245 6246 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6247 if (nvl == NULL) 6248 ERROUT(EBADMSG); 6249 6250 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 6251 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 6252 6253 s = pf_find_state_byid(id, creatorid); 6254 if (s == NULL) 6255 ERROUT(ENOENT); 6256 6257 free(nvlpacked, M_NVLIST); 6258 nvlpacked = NULL; 6259 nvlist_destroy(nvl); 6260 nvl = nvlist_create(0); 6261 if (nvl == NULL) 6262 ERROUT(ENOMEM); 6263 6264 nvls = pf_state_to_nvstate(s); 6265 if (nvls == NULL) 6266 ERROUT(ENOMEM); 6267 6268 nvlist_add_nvlist(nvl, "state", nvls); 6269 nvlist_destroy(nvls); 6270 6271 nvlpacked = nvlist_pack(nvl, &nv->len); 6272 if (nvlpacked == NULL) 6273 ERROUT(ENOMEM); 6274 6275 if (nv->size == 0) 6276 ERROUT(0); 6277 else if (nv->size < nv->len) 6278 ERROUT(ENOSPC); 6279 6280 error = copyout(nvlpacked, nv->data, nv->len); 6281 6282 #undef ERROUT 6283 errout: 6284 if (s != NULL) 6285 PF_STATE_UNLOCK(s); 6286 free(nvlpacked, M_NVLIST); 6287 nvlist_destroy(nvl); 6288 return (error); 6289 } 6290 6291 /* 6292 * XXX - Check for version mismatch!!! 6293 */ 6294 6295 /* 6296 * Duplicate pfctl -Fa operation to get rid of as much as we can. 
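* Rules in every anchor, Ethernet rules, tables, states and source
* nodes are all flushed below; fingerprints and interfaces have
* their own cleanup code.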
6297 */ 6298 static int 6299 shutdown_pf(void) 6300 { 6301 int error = 0; 6302 u_int32_t t[5]; 6303 char nn = '\0'; 6304 struct pf_kanchor *anchor; 6305 struct pf_keth_anchor *eth_anchor; 6306 int rs_num; 6307 6308 do { 6309 /* Unlink rules of all user defined anchors */ 6310 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) { 6311 /* Wildcard based anchors may not have a respective 6312 * explicit anchor rule or they may be left empty 6313 * without rules. It leads to anchor.refcnt=0, and the 6314 * rest of the logic does not expect it. */ 6315 if (anchor->refcnt == 0) 6316 anchor->refcnt = 1; 6317 for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) { 6318 if ((error = pf_begin_rules(&t[rs_num], rs_num, 6319 anchor->path)) != 0) { 6320 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: " 6321 "anchor.path=%s rs_num=%d\n", 6322 anchor->path, rs_num)); 6323 goto error; /* XXX: rollback? */ 6324 } 6325 } 6326 for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) { 6327 error = pf_commit_rules(t[rs_num], rs_num, 6328 anchor->path); 6329 MPASS(error == 0); 6330 } 6331 } 6332 6333 /* Unlink rules of all user defined ether anchors */ 6334 RB_FOREACH(eth_anchor, pf_keth_anchor_global, 6335 &V_pf_keth_anchors) { 6336 /* Wildcard based anchors may not have a respective 6337 * explicit anchor rule or they may be left empty 6338 * without rules. It leads to anchor.refcnt=0, and the 6339 * rest of the logic does not expect it. */ 6340 if (eth_anchor->refcnt == 0) 6341 eth_anchor->refcnt = 1; 6342 if ((error = pf_begin_eth(&t[0], eth_anchor->path)) 6343 != 0) { 6344 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth " 6345 "anchor.path=%s\n", eth_anchor->path)); 6346 goto error; 6347 } 6348 error = pf_commit_eth(t[0], eth_anchor->path); 6349 MPASS(error == 0); 6350 } 6351 6352 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6353 != 0) { 6354 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6355 break; 6356 } 6357 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6358 != 0) { 6359 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6360 break; /* XXX: rollback? */ 6361 } 6362 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6363 != 0) { 6364 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6365 break; /* XXX: rollback? */ 6366 } 6367 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6368 != 0) { 6369 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6370 break; /* XXX: rollback? */ 6371 } 6372 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6373 != 0) { 6374 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6375 break; /* XXX: rollback? 
static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * For loopback traffic IPv6 uses the real interface in order to
	 * support scoped addresses.  To support stateful filtering we have
	 * to change this to lo0, as is already the case for IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */
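/*
 * The pfil hook cookies are per-vnet, so every vnet links and unlinks
 * its own hooks.  The V_pf_pfil_hooked and V_pf_pfil_eth_hooked flags
 * keep hook_pf()/hook_pf_eth() and their dehook counterparts idempotent
 * within a vnet.
 */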
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}
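/*
 * Registration follows the two-step pfil(9) pattern: pfil_add_hook()
 * creates the hook, then pfil_link() attaches it to a head, both passed
 * by pointer (PFIL_HEADPTR | PFIL_HOOKPTR).  For inet/inet6 the output
 * hook is additionally linked to the corresponding "local" head when
 * V_pf_filter_local is set, so that locally originated packets are
 * filtered too.
 */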
static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet_local_pfil_head;
		pla.pa_hook = V_pf_ip4_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet6_local_pfil_head;
		pla.pa_hook = V_pf_ip6_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}
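/*
 * Initialization is split in two: pf_load() below runs once at MOD_LOAD
 * time and sets up global state (the /dev/pf node, the purge thread,
 * the interface layer), while pf_load_vnet() above runs for every vnet
 * via VNET_SYSINIT.  Teardown mirrors this split in pf_unload_vnet()
 * and pf_unload().
 */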
static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	pf_nl_unregister();

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		pf_nl_register();
		break;
	case MOD_UNLOAD:
		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
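/*
 * Unload ordering relies on the SYSUNINIT priorities above: the
 * per-vnet vnet_pf_uninit() calls at SI_ORDER_THIRD run before the
 * global pf_unload() at SI_ORDER_SECOND, which is why MOD_UNLOAD itself
 * does nothing here.
 */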
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_DEPEND(pf, netlink, 1, 1, 1);
MODULE_VERSION(pf, PF_MODVER);