/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nl.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int		 pf_krule_compare(struct pf_krule *,
				    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z	 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define	V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

#ifdef PF_DEFAULT_TO_DROP
VNET_DEFINE_STATIC(bool, default_to_drop) = true;
#else
VNET_DEFINE_STATIC(bool, default_to_drop);
#endif
#define	V_default_to_drop	VNET(default_to_drop)
SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
    &VNET_NAME(default_to_drop), false,
    "Make the default rule drop all packets.");

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *,
			    const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define	V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define	V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define	V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled
	 * by default.  In FreeBSD we expect people to still use scrub rules
	 * and switch to the new syntax later.  Only when they switch must
	 * they explicitly enable reassembly.  We could change the default
	 * once the scrub rule functionality is hopefully removed some day
	 * in the future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}
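
/*
 * Note on lifetime (a sketch of the expected behaviour): neither unlink
 * function frees the rule.  It is tagged with PFRULE_REFS and parked on
 * V_pf_unlinked_rules; the pf purge thread is expected to free it later,
 * once no state entry references it anymore.
 */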

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
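
/*
 * Example of the dual-hash layout, assuming the default table size of 128
 * buckets: ts->mask is 0x7f, a name is located via
 * murmur3_32_hash(name, len, seed) & 0x7f in ts->namehash, and a numeric
 * tag via (tag & 0x7f) in ts->taghash.  Each pf_tagname is linked into
 * both tables, which is why pf_cleanup_tagset() below only has to walk
 * one of them.
 */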
650 */ 651 hashsize = ts->mask + 1; 652 for (i = 0; i < hashsize; i++) 653 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp) 654 uma_zfree(V_pf_tag_z, t); 655 656 free(ts->namehash, M_PFHASH); 657 free(ts->taghash, M_PFHASH); 658 } 659 660 static uint16_t 661 tagname2hashindex(const struct pf_tagset *ts, const char *tagname) 662 { 663 size_t len; 664 665 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1); 666 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask); 667 } 668 669 static uint16_t 670 tag2hashindex(const struct pf_tagset *ts, uint16_t tag) 671 { 672 673 return (tag & ts->mask); 674 } 675 676 static u_int16_t 677 tagname2tag(struct pf_tagset *ts, const char *tagname) 678 { 679 struct pf_tagname *tag; 680 u_int32_t index; 681 u_int16_t new_tagid; 682 683 PF_RULES_WASSERT(); 684 685 index = tagname2hashindex(ts, tagname); 686 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries) 687 if (strcmp(tagname, tag->name) == 0) { 688 tag->ref++; 689 return (tag->tag); 690 } 691 692 /* 693 * new entry 694 * 695 * to avoid fragmentation, we do a linear search from the beginning 696 * and take the first free slot we find. 697 */ 698 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail); 699 /* 700 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX]. 701 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits 702 * set. It may also return a bit number greater than TAGID_MAX due 703 * to rounding of the number of bits in the vector up to a multiple 704 * of the vector word size at declaration/allocation time. 705 */ 706 if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) 707 return (0); 708 709 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */ 710 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); 711 712 /* allocate and fill new struct pf_tagname */ 713 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT); 714 if (tag == NULL) 715 return (0); 716 strlcpy(tag->name, tagname, sizeof(tag->name)); 717 tag->tag = new_tagid; 718 tag->ref = 1; 719 720 /* Insert into namehash */ 721 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); 722 723 /* Insert into taghash */ 724 index = tag2hashindex(ts, new_tagid); 725 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); 726 727 return (tag->tag); 728 } 729 730 static void 731 tag_unref(struct pf_tagset *ts, u_int16_t tag) 732 { 733 struct pf_tagname *t; 734 uint16_t index; 735 736 PF_RULES_WASSERT(); 737 738 index = tag2hashindex(ts, tag); 739 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) 740 if (tag == t->tag) { 741 if (--t->ref == 0) { 742 TAILQ_REMOVE(&ts->taghash[index], t, 743 taghash_entries); 744 index = tagname2hashindex(ts, t->name); 745 TAILQ_REMOVE(&ts->namehash[index], t, 746 namehash_entries); 747 /* Bits are 0-based for BIT_SET() */ 748 BIT_SET(TAGID_MAX, tag - 1, &ts->avail); 749 uma_zfree(V_pf_tag_z, t); 750 } 751 break; 752 } 753 } 754 755 static uint16_t 756 pf_tagname2tag(const char *tagname) 757 { 758 return (tagname2tag(&V_pf_tags, tagname)); 759 } 760 761 static int 762 pf_begin_eth(uint32_t *ticket, const char *anchor) 763 { 764 struct pf_keth_rule *rule, *tmp; 765 struct pf_keth_ruleset *rs; 766 767 PF_RULES_WASSERT(); 768 769 rs = pf_find_or_create_keth_ruleset(anchor); 770 if (rs == NULL) 771 return (EINVAL); 772 773 /* Purge old inactive rules. 
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

/*
 * Calculate skip steps: for each field, link together runs of consecutive
 * rules with an identical value in that field, so that rule evaluation
 * can jump over the whole run as soon as the field fails to match.
 */
static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
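	/*
	 * Readers traverse the rule list under the net epoch, and the swap
	 * above published the new list with ck_pr_store_ptr(), so the
	 * deferred NET_EPOCH_CALL() below fires only after every thread
	 * that may still see the old list has left its epoch section.
	 */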
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}
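
/*
 * Ruleset transactions, as driven from userland (a sketch; see pfctl(8)
 * for the real consumer):
 *
 *	DIOCXBEGIN  -> pf_begin_rules()    flush the inactive list, get a
 *	                                   ticket
 *	DIOCADDRULE -> pf_ioctl_addrule()  stage rules against that ticket
 *	DIOCXCOMMIT -> pf_commit_rules()   swap inactive/active atomically
 *	                                   under the rules write lock
 *
 * A stale ticket makes the commit fail with EBUSY; an abandoned
 * transaction is torn down by pf_rollback_rules().
 */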

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global	*tree;
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}
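
/*
 * Incrementally hash the identifying fields of a rule.  The resulting
 * digest serves two purposes: pf_setup_pfsync_matching() folds every rule
 * into the overall ruleset checksum (which pfsync can use to verify that
 * peers run matching rulesets), and pf_commit_rules() keys its RB tree on
 * the per-rule digest to recognize unchanged rules across a reload.
 */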
static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global	*old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
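	/*
	 * The old ruleset was indexed into an RB tree keyed by
	 * pf_krule_compare(), i.e. by MD5 digest, so a rule that hashes
	 * identically to a predecessor inherits that predecessor's
	 * evaluation, packet and byte counters across the reload.
	 */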
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
			    sizeof(struct pf_rule **),
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

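	/*
	 * Worked example for the rate decay below: a conn_rate.count of
	 * 30 over a 10 second window, read 4 seconds after the last
	 * update, is reduced by 30 * 4 / 10 = 12 connections.
	 */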
	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define	ASSIGN(x) exported_q->x = q->x
#define	COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define	SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define	SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define	ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define	ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
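
/*
 * Example of the v0 saturation above: struct pf_altq_v0 carries
 * ifbandwidth in 32 bits, so a 64-bit value such as 10 Gbit/s
 * (10000000000) is exported to a v0 consumer as UINT_MAX (4294967295)
 * by SATU32().
 */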
1697 */ 1698 static int 1699 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1700 { 1701 u_int32_t version; 1702 1703 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1704 version = 0; 1705 else 1706 version = pa->version; 1707 1708 if (version > PFIOC_ALTQ_VERSION) 1709 return (EINVAL); 1710 1711 #define ASSIGN(x) q->x = imported_q->x 1712 #define COPY(x) \ 1713 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1714 1715 switch (version) { 1716 case 0: { 1717 struct pf_altq_v0 *imported_q = 1718 &((struct pfioc_altq_v0 *)pa)->altq; 1719 1720 COPY(ifname); 1721 1722 ASSIGN(scheduler); 1723 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1724 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1725 1726 COPY(qname); 1727 COPY(parent); 1728 ASSIGN(parent_qid); 1729 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1730 ASSIGN(priority); 1731 ASSIGN(local_flags); 1732 1733 ASSIGN(qlimit); 1734 ASSIGN(flags); 1735 1736 if (imported_q->scheduler == ALTQT_HFSC) { 1737 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1738 1739 /* 1740 * The m1 and m2 parameters are being copied from 1741 * 32-bit to 64-bit. 1742 */ 1743 ASSIGN_OPT(rtsc_m1); 1744 ASSIGN_OPT(rtsc_d); 1745 ASSIGN_OPT(rtsc_m2); 1746 1747 ASSIGN_OPT(lssc_m1); 1748 ASSIGN_OPT(lssc_d); 1749 ASSIGN_OPT(lssc_m2); 1750 1751 ASSIGN_OPT(ulsc_m1); 1752 ASSIGN_OPT(ulsc_d); 1753 ASSIGN_OPT(ulsc_m2); 1754 1755 ASSIGN_OPT(flags); 1756 1757 #undef ASSIGN_OPT 1758 } else 1759 COPY(pq_u); 1760 1761 ASSIGN(qid); 1762 break; 1763 } 1764 case 1: { 1765 struct pf_altq_v1 *imported_q = 1766 &((struct pfioc_altq_v1 *)pa)->altq; 1767 1768 COPY(ifname); 1769 1770 ASSIGN(scheduler); 1771 ASSIGN(tbrsize); 1772 ASSIGN(ifbandwidth); 1773 1774 COPY(qname); 1775 COPY(parent); 1776 ASSIGN(parent_qid); 1777 ASSIGN(bandwidth); 1778 ASSIGN(priority); 1779 ASSIGN(local_flags); 1780 1781 ASSIGN(qlimit); 1782 ASSIGN(flags); 1783 COPY(pq_u); 1784 1785 ASSIGN(qid); 1786 break; 1787 } 1788 default: 1789 panic("%s: unhandled struct pfioc_altq version", __func__); 1790 break; 1791 } 1792 1793 #undef ASSIGN 1794 #undef COPY 1795 1796 return (0); 1797 } 1798 1799 static struct pf_altq * 1800 pf_altq_get_nth_active(u_int32_t n) 1801 { 1802 struct pf_altq *altq; 1803 u_int32_t nr; 1804 1805 nr = 0; 1806 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1807 if (nr == n) 1808 return (altq); 1809 nr++; 1810 } 1811 1812 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1813 if (nr == n) 1814 return (altq); 1815 nr++; 1816 } 1817 1818 return (NULL); 1819 } 1820 #endif /* ALTQ */ 1821 1822 struct pf_krule * 1823 pf_krule_alloc(void) 1824 { 1825 struct pf_krule *rule; 1826 1827 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1828 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1829 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 1830 M_WAITOK | M_ZERO); 1831 return (rule); 1832 } 1833 1834 void 1835 pf_krule_free(struct pf_krule *rule) 1836 { 1837 #ifdef PF_WANT_32_TO_64_COUNTER 1838 bool wowned; 1839 #endif 1840 1841 if (rule == NULL) 1842 return; 1843 1844 #ifdef PF_WANT_32_TO_64_COUNTER 1845 if (rule->allrulelinked) { 1846 wowned = PF_RULES_WOWNED(); 1847 if (!wowned) 1848 PF_RULES_WLOCK(); 1849 LIST_REMOVE(rule, allrulelist); 1850 V_pf_allrulecount--; 1851 if (!wowned) 1852 PF_RULES_WUNLOCK(); 1853 } 1854 #endif 1855 1856 pf_counter_u64_deinit(&rule->evaluations); 1857 for (int i = 0; i < 2; i++) { 1858 pf_counter_u64_deinit(&rule->packets[i]); 1859 pf_counter_u64_deinit(&rule->bytes[i]); 
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

void
pf_krule_clear_counters(struct pf_krule *rule)
{
	pf_counter_u64_zero(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_zero(&rule->packets[i]);
		pf_counter_u64_zero(&rule->bytes[i]);
	}
	counter_u64_zero(rule->states_tot);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */
*/ 1974 1975 krule->os_fingerprint = rule->os_fingerprint; 1976 1977 krule->rtableid = rule->rtableid; 1978 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 1979 krule->max_states = rule->max_states; 1980 krule->max_src_nodes = rule->max_src_nodes; 1981 krule->max_src_states = rule->max_src_states; 1982 krule->max_src_conn = rule->max_src_conn; 1983 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 1984 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 1985 krule->qid = rule->qid; 1986 krule->pqid = rule->pqid; 1987 krule->nr = rule->nr; 1988 krule->prob = rule->prob; 1989 krule->cuid = rule->cuid; 1990 krule->cpid = rule->cpid; 1991 1992 krule->return_icmp = rule->return_icmp; 1993 krule->return_icmp6 = rule->return_icmp6; 1994 krule->max_mss = rule->max_mss; 1995 krule->tag = rule->tag; 1996 krule->match_tag = rule->match_tag; 1997 krule->scrub_flags = rule->scrub_flags; 1998 1999 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2000 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2001 2002 krule->rule_flag = rule->rule_flag; 2003 krule->action = rule->action; 2004 krule->direction = rule->direction; 2005 krule->log = rule->log; 2006 krule->logif = rule->logif; 2007 krule->quick = rule->quick; 2008 krule->ifnot = rule->ifnot; 2009 krule->match_tag_not = rule->match_tag_not; 2010 krule->natpass = rule->natpass; 2011 2012 krule->keep_state = rule->keep_state; 2013 krule->af = rule->af; 2014 krule->proto = rule->proto; 2015 krule->type = rule->type; 2016 krule->code = rule->code; 2017 krule->flags = rule->flags; 2018 krule->flagset = rule->flagset; 2019 krule->min_ttl = rule->min_ttl; 2020 krule->allow_opts = rule->allow_opts; 2021 krule->rt = rule->rt; 2022 krule->return_ttl = rule->return_ttl; 2023 krule->tos = rule->tos; 2024 krule->set_tos = rule->set_tos; 2025 2026 krule->flush = rule->flush; 2027 krule->prio = rule->prio; 2028 krule->set_prio[0] = rule->set_prio[0]; 2029 krule->set_prio[1] = rule->set_prio[1]; 2030 2031 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2032 2033 return (0); 2034 } 2035 2036 int 2037 pf_ioctl_getrules(struct pfioc_rule *pr) 2038 { 2039 struct pf_kruleset *ruleset; 2040 struct pf_krule *tail; 2041 int rs_num; 2042 2043 PF_RULES_WLOCK(); 2044 ruleset = pf_find_kruleset(pr->anchor); 2045 if (ruleset == NULL) { 2046 PF_RULES_WUNLOCK(); 2047 return (EINVAL); 2048 } 2049 rs_num = pf_get_ruleset_number(pr->rule.action); 2050 if (rs_num >= PF_RULESET_MAX) { 2051 PF_RULES_WUNLOCK(); 2052 return (EINVAL); 2053 } 2054 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 2055 pf_krulequeue); 2056 if (tail) 2057 pr->nr = tail->nr + 1; 2058 else 2059 pr->nr = 0; 2060 pr->ticket = ruleset->rules[rs_num].active.ticket; 2061 PF_RULES_WUNLOCK(); 2062 2063 return (0); 2064 } 2065 2066 int 2067 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2068 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 2069 uid_t uid, pid_t pid) 2070 { 2071 struct pf_kruleset *ruleset; 2072 struct pf_krule *tail; 2073 struct pf_kpooladdr *pa; 2074 struct pfi_kkif *kif = NULL; 2075 int rs_num; 2076 int error = 0; 2077 2078 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2079 error = EINVAL; 2080 goto errout_unlocked; 2081 } 2082 2083 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2084 2085 if (rule->ifname[0]) 2086 kif = pf_kkif_create(M_WAITOK); 2087 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2088 for (int i = 0; i < 2; i++) { 2089 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2090 
pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2091 } 2092 rule->states_cur = counter_u64_alloc(M_WAITOK); 2093 rule->states_tot = counter_u64_alloc(M_WAITOK); 2094 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2095 rule->cuid = uid; 2096 rule->cpid = pid; 2097 TAILQ_INIT(&rule->rpool.list); 2098 2099 PF_CONFIG_LOCK(); 2100 PF_RULES_WLOCK(); 2101 #ifdef PF_WANT_32_TO_64_COUNTER 2102 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2103 MPASS(!rule->allrulelinked); 2104 rule->allrulelinked = true; 2105 V_pf_allrulecount++; 2106 #endif 2107 ruleset = pf_find_kruleset(anchor); 2108 if (ruleset == NULL) 2109 ERROUT(EINVAL); 2110 rs_num = pf_get_ruleset_number(rule->action); 2111 if (rs_num >= PF_RULESET_MAX) 2112 ERROUT(EINVAL); 2113 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2114 DPFPRINTF(PF_DEBUG_MISC, 2115 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2116 ruleset->rules[rs_num].inactive.ticket)); 2117 ERROUT(EBUSY); 2118 } 2119 if (pool_ticket != V_ticket_pabuf) { 2120 DPFPRINTF(PF_DEBUG_MISC, 2121 ("pool_ticket: %d != %d\n", pool_ticket, 2122 V_ticket_pabuf)); 2123 ERROUT(EBUSY); 2124 } 2125 /* 2126 * XXXMJG hack: there is no mechanism to ensure they started the 2127 * transaction. Ticket checked above may happen to match by accident, 2128 * even if nobody called DIOCXBEGIN, let alone this process. 2129 * Partially work around it by checking if the RB tree got allocated, 2130 * see pf_begin_rules. 2131 */ 2132 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2133 ERROUT(EINVAL); 2134 } 2135 2136 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2137 pf_krulequeue); 2138 if (tail) 2139 rule->nr = tail->nr + 1; 2140 else 2141 rule->nr = 0; 2142 if (rule->ifname[0]) { 2143 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2144 kif = NULL; 2145 pfi_kkif_ref(rule->kif); 2146 } else 2147 rule->kif = NULL; 2148 2149 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2150 error = EBUSY; 2151 2152 #ifdef ALTQ 2153 /* set queue IDs */ 2154 if (rule->qname[0] != 0) { 2155 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2156 error = EBUSY; 2157 else if (rule->pqname[0] != 0) { 2158 if ((rule->pqid = 2159 pf_qname2qid(rule->pqname)) == 0) 2160 error = EBUSY; 2161 } else 2162 rule->pqid = rule->qid; 2163 } 2164 #endif 2165 if (rule->tagname[0]) 2166 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2167 error = EBUSY; 2168 if (rule->match_tagname[0]) 2169 if ((rule->match_tag = 2170 pf_tagname2tag(rule->match_tagname)) == 0) 2171 error = EBUSY; 2172 if (rule->rt && !rule->direction) 2173 error = EINVAL; 2174 if (!rule->log) 2175 rule->logif = 0; 2176 if (rule->logif >= PFLOGIFS_MAX) 2177 error = EINVAL; 2178 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2179 error = ENOMEM; 2180 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2181 error = ENOMEM; 2182 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2183 error = EINVAL; 2184 if (rule->scrub_flags & PFSTATE_SETPRIO && 2185 (rule->set_prio[0] > PF_PRIO_MAX || 2186 rule->set_prio[1] > PF_PRIO_MAX)) 2187 error = EINVAL; 2188 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2189 if (pa->addr.type == PF_ADDR_TABLE) { 2190 pa->addr.p.tbl = pfr_attach_table(ruleset, 2191 pa->addr.v.tblname); 2192 if (pa->addr.p.tbl == NULL) 2193 error = ENOMEM; 2194 } 2195 2196 rule->overload_tbl = NULL; 2197 if (rule->overload_tblname[0]) { 2198 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2199 rule->overload_tblname)) == NULL) 2200 error = EINVAL; 2201 else 2202 rule->overload_tbl->pfrkt_flags |= 2203 PFR_TFLAG_ACTIVE; 2204 
} 2205 2206 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2207 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2208 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2209 (rule->rt > PF_NOPFROUTE)) && 2210 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2211 error = EINVAL; 2212 2213 if (error) { 2214 pf_free_rule(rule); 2215 rule = NULL; 2216 ERROUT(error); 2217 } 2218 2219 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2220 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2221 rule, entries); 2222 ruleset->rules[rs_num].inactive.rcount++; 2223 2224 PF_RULES_WUNLOCK(); 2225 pf_hash_rule(rule); 2226 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2227 PF_RULES_WLOCK(); 2228 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2229 ruleset->rules[rs_num].inactive.rcount--; 2230 pf_free_rule(rule); 2231 rule = NULL; 2232 ERROUT(EEXIST); 2233 } 2234 PF_CONFIG_UNLOCK(); 2235 2236 return (0); 2237 2238 #undef ERROUT 2239 errout: 2240 PF_RULES_WUNLOCK(); 2241 PF_CONFIG_UNLOCK(); 2242 errout_unlocked: 2243 pf_kkif_free(kif); 2244 pf_krule_free(rule); 2245 return (error); 2246 } 2247 2248 static bool 2249 pf_label_match(const struct pf_krule *rule, const char *label) 2250 { 2251 int i = 0; 2252 2253 while (*rule->label[i]) { 2254 if (strcmp(rule->label[i], label) == 0) 2255 return (true); 2256 i++; 2257 } 2258 2259 return (false); 2260 } 2261 2262 static unsigned int 2263 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2264 { 2265 struct pf_kstate *s; 2266 int more = 0; 2267 2268 s = pf_find_state_all(key, dir, &more); 2269 if (s == NULL) 2270 return (0); 2271 2272 if (more) { 2273 PF_STATE_UNLOCK(s); 2274 return (0); 2275 } 2276 2277 pf_unlink_state(s); 2278 return (1); 2279 } 2280 2281 static int 2282 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2283 { 2284 struct pf_kstate *s; 2285 struct pf_state_key *sk; 2286 struct pf_addr *srcaddr, *dstaddr; 2287 struct pf_state_key_cmp match_key; 2288 int idx, killed = 0; 2289 unsigned int dir; 2290 u_int16_t srcport, dstport; 2291 struct pfi_kkif *kif; 2292 2293 relock_DIOCKILLSTATES: 2294 PF_HASHROW_LOCK(ih); 2295 LIST_FOREACH(s, &ih->states, entry) { 2296 /* For floating states look at the original kif. */ 2297 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2298 2299 sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE]; 2300 if (s->direction == PF_OUT) { 2301 srcaddr = &sk->addr[1]; 2302 dstaddr = &sk->addr[0]; 2303 srcport = sk->port[1]; 2304 dstport = sk->port[0]; 2305 } else { 2306 srcaddr = &sk->addr[0]; 2307 dstaddr = &sk->addr[1]; 2308 srcport = sk->port[0]; 2309 dstport = sk->port[1]; 2310 } 2311 2312 if (psk->psk_af && sk->af != psk->psk_af) 2313 continue; 2314 2315 if (psk->psk_proto && psk->psk_proto != sk->proto) 2316 continue; 2317 2318 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2319 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2320 continue; 2321 2322 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2323 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2324 continue; 2325 2326 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2327 &psk->psk_rt_addr.addr.v.a.addr, 2328 &psk->psk_rt_addr.addr.v.a.mask, 2329 &s->rt_addr, sk->af)) 2330 continue; 2331 2332 if (psk->psk_src.port_op != 0 && 2333 ! pf_match_port(psk->psk_src.port_op, 2334 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2335 continue; 2336 2337 if (psk->psk_dst.port_op != 0 && 2338 ! 
pf_match_port(psk->psk_dst.port_op, 2339 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2340 continue; 2341 2342 if (psk->psk_label[0] && 2343 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2344 continue; 2345 2346 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2347 kif->pfik_name)) 2348 continue; 2349 2350 if (psk->psk_kill_match) { 2351 /* Create the key to find matching states, with lock 2352 * held. */ 2353 2354 bzero(&match_key, sizeof(match_key)); 2355 2356 if (s->direction == PF_OUT) { 2357 dir = PF_IN; 2358 idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK; 2359 } else { 2360 dir = PF_OUT; 2361 idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE; 2362 } 2363 2364 match_key.af = s->key[idx]->af; 2365 match_key.proto = s->key[idx]->proto; 2366 PF_ACPY(&match_key.addr[0], 2367 &s->key[idx]->addr[1], match_key.af); 2368 match_key.port[0] = s->key[idx]->port[1]; 2369 PF_ACPY(&match_key.addr[1], 2370 &s->key[idx]->addr[0], match_key.af); 2371 match_key.port[1] = s->key[idx]->port[0]; 2372 } 2373 2374 pf_unlink_state(s); 2375 killed++; 2376 2377 if (psk->psk_kill_match) 2378 killed += pf_kill_matching_state(&match_key, dir); 2379 2380 goto relock_DIOCKILLSTATES; 2381 } 2382 PF_HASHROW_UNLOCK(ih); 2383 2384 return (killed); 2385 } 2386 2387 int 2388 pf_start(void) 2389 { 2390 int error = 0; 2391 2392 sx_xlock(&V_pf_ioctl_lock); 2393 if (V_pf_status.running) 2394 error = EEXIST; 2395 else { 2396 hook_pf(); 2397 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2398 hook_pf_eth(); 2399 V_pf_status.running = 1; 2400 V_pf_status.since = time_second; 2401 new_unrhdr64(&V_pf_stateid, time_second); 2402 2403 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2404 } 2405 sx_xunlock(&V_pf_ioctl_lock); 2406 2407 return (error); 2408 } 2409 2410 int 2411 pf_stop(void) 2412 { 2413 int error = 0; 2414 2415 sx_xlock(&V_pf_ioctl_lock); 2416 if (!V_pf_status.running) 2417 error = ENOENT; 2418 else { 2419 V_pf_status.running = 0; 2420 dehook_pf(); 2421 dehook_pf_eth(); 2422 V_pf_status.since = time_second; 2423 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2424 } 2425 sx_xunlock(&V_pf_ioctl_lock); 2426 2427 return (error); 2428 } 2429 2430 static int 2431 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2432 { 2433 int error = 0; 2434 PF_RULES_RLOCK_TRACKER; 2435 2436 #define ERROUT_IOCTL(target, x) \ 2437 do { \ 2438 error = (x); \ 2439 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2440 goto target; \ 2441 } while (0) 2442 2443 2444 /* XXX keep in sync with switch() below */ 2445 if (securelevel_gt(td->td_ucred, 2)) 2446 switch (cmd) { 2447 case DIOCGETRULES: 2448 case DIOCGETRULENV: 2449 case DIOCGETADDRS: 2450 case DIOCGETADDR: 2451 case DIOCGETSTATE: 2452 case DIOCGETSTATENV: 2453 case DIOCSETSTATUSIF: 2454 case DIOCGETSTATUSNV: 2455 case DIOCCLRSTATUS: 2456 case DIOCNATLOOK: 2457 case DIOCSETDEBUG: 2458 #ifdef COMPAT_FREEBSD14 2459 case DIOCGETSTATES: 2460 case DIOCGETSTATESV2: 2461 #endif 2462 case DIOCGETTIMEOUT: 2463 case DIOCCLRRULECTRS: 2464 case DIOCGETLIMIT: 2465 case DIOCGETALTQSV0: 2466 case DIOCGETALTQSV1: 2467 case DIOCGETALTQV0: 2468 case DIOCGETALTQV1: 2469 case DIOCGETQSTATSV0: 2470 case DIOCGETQSTATSV1: 2471 case DIOCGETRULESETS: 2472 case DIOCGETRULESET: 2473 case DIOCRGETTABLES: 2474 case DIOCRGETTSTATS: 2475 case DIOCRCLRTSTATS: 2476 case DIOCRCLRADDRS: 2477 case DIOCRADDADDRS: 2478 case DIOCRDELADDRS: 2479 case DIOCRSETADDRS: 2480 case DIOCRGETADDRS: 2481 case DIOCRGETASTATS: 2482 case DIOCRCLRASTATS: 2483 case DIOCRTSTADDRS: 2484 case DIOCOSFPGET: 
2485 case DIOCGETSRCNODES: 2486 case DIOCCLRSRCNODES: 2487 case DIOCGETSYNCOOKIES: 2488 case DIOCIGETIFACES: 2489 case DIOCGIFSPEEDV0: 2490 case DIOCGIFSPEEDV1: 2491 case DIOCSETIFFLAG: 2492 case DIOCCLRIFFLAG: 2493 case DIOCGETETHRULES: 2494 case DIOCGETETHRULE: 2495 case DIOCGETETHRULESETS: 2496 case DIOCGETETHRULESET: 2497 break; 2498 case DIOCRCLRTABLES: 2499 case DIOCRADDTABLES: 2500 case DIOCRDELTABLES: 2501 case DIOCRSETTFLAGS: 2502 if (((struct pfioc_table *)addr)->pfrio_flags & 2503 PFR_FLAG_DUMMY) 2504 break; /* dummy operation ok */ 2505 return (EPERM); 2506 default: 2507 return (EPERM); 2508 } 2509 2510 if (!(flags & FWRITE)) 2511 switch (cmd) { 2512 case DIOCGETRULES: 2513 case DIOCGETADDRS: 2514 case DIOCGETADDR: 2515 case DIOCGETSTATE: 2516 case DIOCGETSTATENV: 2517 case DIOCGETSTATUSNV: 2518 #ifdef COMPAT_FREEBSD14 2519 case DIOCGETSTATES: 2520 case DIOCGETSTATESV2: 2521 #endif 2522 case DIOCGETTIMEOUT: 2523 case DIOCGETLIMIT: 2524 case DIOCGETALTQSV0: 2525 case DIOCGETALTQSV1: 2526 case DIOCGETALTQV0: 2527 case DIOCGETALTQV1: 2528 case DIOCGETQSTATSV0: 2529 case DIOCGETQSTATSV1: 2530 case DIOCGETRULESETS: 2531 case DIOCGETRULESET: 2532 case DIOCNATLOOK: 2533 case DIOCRGETTABLES: 2534 case DIOCRGETTSTATS: 2535 case DIOCRGETADDRS: 2536 case DIOCRGETASTATS: 2537 case DIOCRTSTADDRS: 2538 case DIOCOSFPGET: 2539 case DIOCGETSRCNODES: 2540 case DIOCGETSYNCOOKIES: 2541 case DIOCIGETIFACES: 2542 case DIOCGIFSPEEDV1: 2543 case DIOCGIFSPEEDV0: 2544 case DIOCGETRULENV: 2545 case DIOCGETETHRULES: 2546 case DIOCGETETHRULE: 2547 case DIOCGETETHRULESETS: 2548 case DIOCGETETHRULESET: 2549 break; 2550 case DIOCRCLRTABLES: 2551 case DIOCRADDTABLES: 2552 case DIOCRDELTABLES: 2553 case DIOCRCLRTSTATS: 2554 case DIOCRCLRADDRS: 2555 case DIOCRADDADDRS: 2556 case DIOCRDELADDRS: 2557 case DIOCRSETADDRS: 2558 case DIOCRSETTFLAGS: 2559 if (((struct pfioc_table *)addr)->pfrio_flags & 2560 PFR_FLAG_DUMMY) { 2561 flags |= FWRITE; /* need write lock for dummy */ 2562 break; /* dummy operation ok */ 2563 } 2564 return (EACCES); 2565 default: 2566 return (EACCES); 2567 } 2568 2569 CURVNET_SET(TD_TO_VNET(td)); 2570 2571 switch (cmd) { 2572 #ifdef COMPAT_FREEBSD14 2573 case DIOCSTART: 2574 error = pf_start(); 2575 break; 2576 2577 case DIOCSTOP: 2578 error = pf_stop(); 2579 break; 2580 #endif 2581 2582 case DIOCGETETHRULES: { 2583 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2584 nvlist_t *nvl; 2585 void *packed; 2586 struct pf_keth_rule *tail; 2587 struct pf_keth_ruleset *rs; 2588 u_int32_t ticket, nr; 2589 const char *anchor = ""; 2590 2591 nvl = NULL; 2592 packed = NULL; 2593 2594 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2595 2596 if (nv->len > pf_ioctl_maxcount) 2597 ERROUT(ENOMEM); 2598 2599 /* Copy the request in */ 2600 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2601 if (packed == NULL) 2602 ERROUT(ENOMEM); 2603 2604 error = copyin(nv->data, packed, nv->len); 2605 if (error) 2606 ERROUT(error); 2607 2608 nvl = nvlist_unpack(packed, nv->len, 0); 2609 if (nvl == NULL) 2610 ERROUT(EBADMSG); 2611 2612 if (! 
nvlist_exists_string(nvl, "anchor")) 2613 ERROUT(EBADMSG); 2614 2615 anchor = nvlist_get_string(nvl, "anchor"); 2616 2617 rs = pf_find_keth_ruleset(anchor); 2618 2619 nvlist_destroy(nvl); 2620 nvl = NULL; 2621 free(packed, M_NVLIST); 2622 packed = NULL; 2623 2624 if (rs == NULL) 2625 ERROUT(ENOENT); 2626 2627 /* Reply */ 2628 nvl = nvlist_create(0); 2629 if (nvl == NULL) 2630 ERROUT(ENOMEM); 2631 2632 PF_RULES_RLOCK(); 2633 2634 ticket = rs->active.ticket; 2635 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2636 if (tail) 2637 nr = tail->nr + 1; 2638 else 2639 nr = 0; 2640 2641 PF_RULES_RUNLOCK(); 2642 2643 nvlist_add_number(nvl, "ticket", ticket); 2644 nvlist_add_number(nvl, "nr", nr); 2645 2646 packed = nvlist_pack(nvl, &nv->len); 2647 if (packed == NULL) 2648 ERROUT(ENOMEM); 2649 2650 if (nv->size == 0) 2651 ERROUT(0); 2652 else if (nv->size < nv->len) 2653 ERROUT(ENOSPC); 2654 2655 error = copyout(packed, nv->data, nv->len); 2656 2657 #undef ERROUT 2658 DIOCGETETHRULES_error: 2659 free(packed, M_NVLIST); 2660 nvlist_destroy(nvl); 2661 break; 2662 } 2663 2664 case DIOCGETETHRULE: { 2665 struct epoch_tracker et; 2666 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2667 nvlist_t *nvl = NULL; 2668 void *nvlpacked = NULL; 2669 struct pf_keth_rule *rule = NULL; 2670 struct pf_keth_ruleset *rs; 2671 u_int32_t ticket, nr; 2672 bool clear = false; 2673 const char *anchor; 2674 2675 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2676 2677 if (nv->len > pf_ioctl_maxcount) 2678 ERROUT(ENOMEM); 2679 2680 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2681 if (nvlpacked == NULL) 2682 ERROUT(ENOMEM); 2683 2684 error = copyin(nv->data, nvlpacked, nv->len); 2685 if (error) 2686 ERROUT(error); 2687 2688 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2689 if (nvl == NULL) 2690 ERROUT(EBADMSG); 2691 if (! nvlist_exists_number(nvl, "ticket")) 2692 ERROUT(EBADMSG); 2693 ticket = nvlist_get_number(nvl, "ticket"); 2694 if (! nvlist_exists_string(nvl, "anchor")) 2695 ERROUT(EBADMSG); 2696 anchor = nvlist_get_string(nvl, "anchor"); 2697 2698 if (nvlist_exists_bool(nvl, "clear")) 2699 clear = nvlist_get_bool(nvl, "clear"); 2700 2701 if (clear && !(flags & FWRITE)) 2702 ERROUT(EACCES); 2703 2704 if (! nvlist_exists_number(nvl, "nr")) 2705 ERROUT(EBADMSG); 2706 nr = nvlist_get_number(nvl, "nr"); 2707 2708 PF_RULES_RLOCK(); 2709 rs = pf_find_keth_ruleset(anchor); 2710 if (rs == NULL) { 2711 PF_RULES_RUNLOCK(); 2712 ERROUT(ENOENT); 2713 } 2714 if (ticket != rs->active.ticket) { 2715 PF_RULES_RUNLOCK(); 2716 ERROUT(EBUSY); 2717 } 2718 2719 nvlist_destroy(nvl); 2720 nvl = NULL; 2721 free(nvlpacked, M_NVLIST); 2722 nvlpacked = NULL; 2723 2724 rule = TAILQ_FIRST(rs->active.rules); 2725 while ((rule != NULL) && (rule->nr != nr)) 2726 rule = TAILQ_NEXT(rule, entries); 2727 if (rule == NULL) { 2728 PF_RULES_RUNLOCK(); 2729 ERROUT(ENOENT); 2730 } 2731 /* Make sure rule can't go away. 
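		 * The rules read lock is dropped below while the reply
		 * nvlist is built, so hold the net epoch to keep the
		 * rule's memory valid until NET_EPOCH_EXIT().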
		 */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != %d\n",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket));
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		if (rule == NULL)
			ERROUT(ENOMEM);
		rule->timestamp = NULL;

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0) {
			free(rule, M_PFRULE);
			rule = NULL;
			ERROUT(error);
		}

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		if (rule->bridge_to_name[0])
			bridge_to_kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}
		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
		    M_WAITOK | M_ZERO);

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;
		if (rule->bridge_to_name[0]) {
			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
			    rule->bridge_to_name);
			pfi_kkif_ref(rule->bridge_to);
		} else
			rule->bridge_to = NULL;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag =
pf_tagname2tag(rule->tagname)) == 0) 2859 error = EBUSY; 2860 if (rule->match_tagname[0]) 2861 if ((rule->match_tag = pf_tagname2tag( 2862 rule->match_tagname)) == 0) 2863 error = EBUSY; 2864 2865 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2866 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2867 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2868 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2869 2870 if (error) { 2871 pf_free_eth_rule(rule); 2872 PF_RULES_WUNLOCK(); 2873 ERROUT(error); 2874 } 2875 2876 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2877 pf_free_eth_rule(rule); 2878 PF_RULES_WUNLOCK(); 2879 ERROUT(EINVAL); 2880 } 2881 2882 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2883 if (tail) 2884 rule->nr = tail->nr + 1; 2885 else 2886 rule->nr = 0; 2887 2888 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2889 2890 PF_RULES_WUNLOCK(); 2891 2892 #undef ERROUT 2893 DIOCADDETHRULE_error: 2894 nvlist_destroy(nvl); 2895 free(nvlpacked, M_NVLIST); 2896 break; 2897 } 2898 2899 case DIOCGETETHRULESETS: { 2900 struct epoch_tracker et; 2901 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2902 nvlist_t *nvl = NULL; 2903 void *nvlpacked = NULL; 2904 struct pf_keth_ruleset *ruleset; 2905 struct pf_keth_anchor *anchor; 2906 int nr = 0; 2907 2908 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2909 2910 if (nv->len > pf_ioctl_maxcount) 2911 ERROUT(ENOMEM); 2912 2913 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2914 if (nvlpacked == NULL) 2915 ERROUT(ENOMEM); 2916 2917 error = copyin(nv->data, nvlpacked, nv->len); 2918 if (error) 2919 ERROUT(error); 2920 2921 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2922 if (nvl == NULL) 2923 ERROUT(EBADMSG); 2924 if (! nvlist_exists_string(nvl, "path")) 2925 ERROUT(EBADMSG); 2926 2927 NET_EPOCH_ENTER(et); 2928 2929 if ((ruleset = pf_find_keth_ruleset( 2930 nvlist_get_string(nvl, "path"))) == NULL) { 2931 NET_EPOCH_EXIT(et); 2932 ERROUT(ENOENT); 2933 } 2934 2935 if (ruleset->anchor == NULL) { 2936 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2937 if (anchor->parent == NULL) 2938 nr++; 2939 } else { 2940 RB_FOREACH(anchor, pf_keth_anchor_node, 2941 &ruleset->anchor->children) 2942 nr++; 2943 } 2944 2945 NET_EPOCH_EXIT(et); 2946 2947 nvlist_destroy(nvl); 2948 nvl = NULL; 2949 free(nvlpacked, M_NVLIST); 2950 nvlpacked = NULL; 2951 2952 nvl = nvlist_create(0); 2953 if (nvl == NULL) 2954 ERROUT(ENOMEM); 2955 2956 nvlist_add_number(nvl, "nr", nr); 2957 2958 nvlpacked = nvlist_pack(nvl, &nv->len); 2959 if (nvlpacked == NULL) 2960 ERROUT(ENOMEM); 2961 2962 if (nv->size == 0) 2963 ERROUT(0); 2964 else if (nv->size < nv->len) 2965 ERROUT(ENOSPC); 2966 2967 error = copyout(nvlpacked, nv->data, nv->len); 2968 2969 #undef ERROUT 2970 DIOCGETETHRULESETS_error: 2971 free(nvlpacked, M_NVLIST); 2972 nvlist_destroy(nvl); 2973 break; 2974 } 2975 2976 case DIOCGETETHRULESET: { 2977 struct epoch_tracker et; 2978 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2979 nvlist_t *nvl = NULL; 2980 void *nvlpacked = NULL; 2981 struct pf_keth_ruleset *ruleset; 2982 struct pf_keth_anchor *anchor; 2983 int nr = 0, req_nr = 0; 2984 bool found = false; 2985 2986 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 2987 2988 if (nv->len > pf_ioctl_maxcount) 2989 ERROUT(ENOMEM); 2990 2991 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2992 if (nvlpacked == NULL) 2993 ERROUT(ENOMEM); 2994 2995 error = copyin(nv->data, nvlpacked, nv->len); 2996 if (error) 2997 ERROUT(error); 2998 2999 
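		/*
		 * The packed request must carry the anchor "path" and the
		 * child index "nr".  A minimal userspace sketch (hypothetical,
		 * libnv-based) of building such a request:
		 *
		 *	nvlist_t *req = nvlist_create(0);
		 *	nvlist_add_string(req, "path", "/");
		 *	nvlist_add_number(req, "nr", 0);
		 *	nv.data = nvlist_pack(req, &nv.len);
		 *	nv.size = ...;	(reply buffer size)
		 *	ioctl(dev, DIOCGETETHRULESET, &nv);
		 */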
nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3000 if (nvl == NULL) 3001 ERROUT(EBADMSG); 3002 if (! nvlist_exists_string(nvl, "path")) 3003 ERROUT(EBADMSG); 3004 if (! nvlist_exists_number(nvl, "nr")) 3005 ERROUT(EBADMSG); 3006 3007 req_nr = nvlist_get_number(nvl, "nr"); 3008 3009 NET_EPOCH_ENTER(et); 3010 3011 if ((ruleset = pf_find_keth_ruleset( 3012 nvlist_get_string(nvl, "path"))) == NULL) { 3013 NET_EPOCH_EXIT(et); 3014 ERROUT(ENOENT); 3015 } 3016 3017 nvlist_destroy(nvl); 3018 nvl = NULL; 3019 free(nvlpacked, M_NVLIST); 3020 nvlpacked = NULL; 3021 3022 nvl = nvlist_create(0); 3023 if (nvl == NULL) { 3024 NET_EPOCH_EXIT(et); 3025 ERROUT(ENOMEM); 3026 } 3027 3028 if (ruleset->anchor == NULL) { 3029 RB_FOREACH(anchor, pf_keth_anchor_global, 3030 &V_pf_keth_anchors) { 3031 if (anchor->parent == NULL && nr++ == req_nr) { 3032 found = true; 3033 break; 3034 } 3035 } 3036 } else { 3037 RB_FOREACH(anchor, pf_keth_anchor_node, 3038 &ruleset->anchor->children) { 3039 if (nr++ == req_nr) { 3040 found = true; 3041 break; 3042 } 3043 } 3044 } 3045 3046 NET_EPOCH_EXIT(et); 3047 if (found) { 3048 nvlist_add_number(nvl, "nr", nr); 3049 nvlist_add_string(nvl, "name", anchor->name); 3050 if (ruleset->anchor) 3051 nvlist_add_string(nvl, "path", 3052 ruleset->anchor->path); 3053 else 3054 nvlist_add_string(nvl, "path", ""); 3055 } else { 3056 ERROUT(EBUSY); 3057 } 3058 3059 nvlpacked = nvlist_pack(nvl, &nv->len); 3060 if (nvlpacked == NULL) 3061 ERROUT(ENOMEM); 3062 3063 if (nv->size == 0) 3064 ERROUT(0); 3065 else if (nv->size < nv->len) 3066 ERROUT(ENOSPC); 3067 3068 error = copyout(nvlpacked, nv->data, nv->len); 3069 3070 #undef ERROUT 3071 DIOCGETETHRULESET_error: 3072 free(nvlpacked, M_NVLIST); 3073 nvlist_destroy(nvl); 3074 break; 3075 } 3076 3077 case DIOCADDRULENV: { 3078 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3079 nvlist_t *nvl = NULL; 3080 void *nvlpacked = NULL; 3081 struct pf_krule *rule = NULL; 3082 const char *anchor = "", *anchor_call = ""; 3083 uint32_t ticket = 0, pool_ticket = 0; 3084 3085 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3086 3087 if (nv->len > pf_ioctl_maxcount) 3088 ERROUT(ENOMEM); 3089 3090 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3091 error = copyin(nv->data, nvlpacked, nv->len); 3092 if (error) 3093 ERROUT(error); 3094 3095 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3096 if (nvl == NULL) 3097 ERROUT(EBADMSG); 3098 3099 if (! nvlist_exists_number(nvl, "ticket")) 3100 ERROUT(EINVAL); 3101 ticket = nvlist_get_number(nvl, "ticket"); 3102 3103 if (! nvlist_exists_number(nvl, "pool_ticket")) 3104 ERROUT(EINVAL); 3105 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3106 3107 if (! nvlist_exists_nvlist(nvl, "rule")) 3108 ERROUT(EINVAL); 3109 3110 rule = pf_krule_alloc(); 3111 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3112 rule); 3113 if (error) 3114 ERROUT(error); 3115 3116 if (nvlist_exists_string(nvl, "anchor")) 3117 anchor = nvlist_get_string(nvl, "anchor"); 3118 if (nvlist_exists_string(nvl, "anchor_call")) 3119 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3120 3121 if ((error = nvlist_error(nvl))) 3122 ERROUT(error); 3123 3124 /* Frees rule on error */ 3125 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3126 anchor_call, td->td_ucred->cr_ruid, 3127 td->td_proc ? 
td->td_proc->p_pid : 0); 3128 3129 nvlist_destroy(nvl); 3130 free(nvlpacked, M_NVLIST); 3131 break; 3132 #undef ERROUT 3133 DIOCADDRULENV_error: 3134 pf_krule_free(rule); 3135 nvlist_destroy(nvl); 3136 free(nvlpacked, M_NVLIST); 3137 3138 break; 3139 } 3140 case DIOCADDRULE: { 3141 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3142 struct pf_krule *rule; 3143 3144 rule = pf_krule_alloc(); 3145 error = pf_rule_to_krule(&pr->rule, rule); 3146 if (error != 0) { 3147 pf_krule_free(rule); 3148 break; 3149 } 3150 3151 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3152 3153 /* Frees rule on error */ 3154 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3155 pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid, 3156 td->td_proc ? td->td_proc->p_pid : 0); 3157 break; 3158 } 3159 3160 case DIOCGETRULES: { 3161 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3162 3163 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3164 3165 error = pf_ioctl_getrules(pr); 3166 3167 break; 3168 } 3169 3170 case DIOCGETRULENV: { 3171 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3172 nvlist_t *nvrule = NULL; 3173 nvlist_t *nvl = NULL; 3174 struct pf_kruleset *ruleset; 3175 struct pf_krule *rule; 3176 void *nvlpacked = NULL; 3177 int rs_num, nr; 3178 bool clear_counter = false; 3179 3180 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3181 3182 if (nv->len > pf_ioctl_maxcount) 3183 ERROUT(ENOMEM); 3184 3185 /* Copy the request in */ 3186 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3187 if (nvlpacked == NULL) 3188 ERROUT(ENOMEM); 3189 3190 error = copyin(nv->data, nvlpacked, nv->len); 3191 if (error) 3192 ERROUT(error); 3193 3194 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3195 if (nvl == NULL) 3196 ERROUT(EBADMSG); 3197 3198 if (! nvlist_exists_string(nvl, "anchor")) 3199 ERROUT(EBADMSG); 3200 if (! nvlist_exists_number(nvl, "ruleset")) 3201 ERROUT(EBADMSG); 3202 if (! nvlist_exists_number(nvl, "ticket")) 3203 ERROUT(EBADMSG); 3204 if (! 
nvlist_exists_number(nvl, "nr")) 3205 ERROUT(EBADMSG); 3206 3207 if (nvlist_exists_bool(nvl, "clear_counter")) 3208 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3209 3210 if (clear_counter && !(flags & FWRITE)) 3211 ERROUT(EACCES); 3212 3213 nr = nvlist_get_number(nvl, "nr"); 3214 3215 PF_RULES_WLOCK(); 3216 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3217 if (ruleset == NULL) { 3218 PF_RULES_WUNLOCK(); 3219 ERROUT(ENOENT); 3220 } 3221 3222 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3223 if (rs_num >= PF_RULESET_MAX) { 3224 PF_RULES_WUNLOCK(); 3225 ERROUT(EINVAL); 3226 } 3227 3228 if (nvlist_get_number(nvl, "ticket") != 3229 ruleset->rules[rs_num].active.ticket) { 3230 PF_RULES_WUNLOCK(); 3231 ERROUT(EBUSY); 3232 } 3233 3234 if ((error = nvlist_error(nvl))) { 3235 PF_RULES_WUNLOCK(); 3236 ERROUT(error); 3237 } 3238 3239 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3240 while ((rule != NULL) && (rule->nr != nr)) 3241 rule = TAILQ_NEXT(rule, entries); 3242 if (rule == NULL) { 3243 PF_RULES_WUNLOCK(); 3244 ERROUT(EBUSY); 3245 } 3246 3247 nvrule = pf_krule_to_nvrule(rule); 3248 3249 nvlist_destroy(nvl); 3250 nvl = nvlist_create(0); 3251 if (nvl == NULL) { 3252 PF_RULES_WUNLOCK(); 3253 ERROUT(ENOMEM); 3254 } 3255 nvlist_add_number(nvl, "nr", nr); 3256 nvlist_add_nvlist(nvl, "rule", nvrule); 3257 nvlist_destroy(nvrule); 3258 nvrule = NULL; 3259 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3260 PF_RULES_WUNLOCK(); 3261 ERROUT(EBUSY); 3262 } 3263 3264 free(nvlpacked, M_NVLIST); 3265 nvlpacked = nvlist_pack(nvl, &nv->len); 3266 if (nvlpacked == NULL) { 3267 PF_RULES_WUNLOCK(); 3268 ERROUT(ENOMEM); 3269 } 3270 3271 if (nv->size == 0) { 3272 PF_RULES_WUNLOCK(); 3273 ERROUT(0); 3274 } 3275 else if (nv->size < nv->len) { 3276 PF_RULES_WUNLOCK(); 3277 ERROUT(ENOSPC); 3278 } 3279 3280 if (clear_counter) 3281 pf_krule_clear_counters(rule); 3282 3283 PF_RULES_WUNLOCK(); 3284 3285 error = copyout(nvlpacked, nv->data, nv->len); 3286 3287 #undef ERROUT 3288 DIOCGETRULENV_error: 3289 free(nvlpacked, M_NVLIST); 3290 nvlist_destroy(nvrule); 3291 nvlist_destroy(nvl); 3292 3293 break; 3294 } 3295 3296 case DIOCCHANGERULE: { 3297 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3298 struct pf_kruleset *ruleset; 3299 struct pf_krule *oldrule = NULL, *newrule = NULL; 3300 struct pfi_kkif *kif = NULL; 3301 struct pf_kpooladdr *pa; 3302 u_int32_t nr = 0; 3303 int rs_num; 3304 3305 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3306 3307 if (pcr->action < PF_CHANGE_ADD_HEAD || 3308 pcr->action > PF_CHANGE_GET_TICKET) { 3309 error = EINVAL; 3310 break; 3311 } 3312 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3313 error = EINVAL; 3314 break; 3315 } 3316 3317 if (pcr->action != PF_CHANGE_REMOVE) { 3318 newrule = pf_krule_alloc(); 3319 error = pf_rule_to_krule(&pcr->rule, newrule); 3320 if (error != 0) { 3321 pf_krule_free(newrule); 3322 break; 3323 } 3324 3325 if (newrule->ifname[0]) 3326 kif = pf_kkif_create(M_WAITOK); 3327 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3328 for (int i = 0; i < 2; i++) { 3329 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3330 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3331 } 3332 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3333 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3334 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3335 newrule->cuid = td->td_ucred->cr_ruid; 3336 newrule->cpid = td->td_proc ? 
td->td_proc->p_pid : 0; 3337 TAILQ_INIT(&newrule->rpool.list); 3338 } 3339 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3340 3341 PF_CONFIG_LOCK(); 3342 PF_RULES_WLOCK(); 3343 #ifdef PF_WANT_32_TO_64_COUNTER 3344 if (newrule != NULL) { 3345 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3346 newrule->allrulelinked = true; 3347 V_pf_allrulecount++; 3348 } 3349 #endif 3350 3351 if (!(pcr->action == PF_CHANGE_REMOVE || 3352 pcr->action == PF_CHANGE_GET_TICKET) && 3353 pcr->pool_ticket != V_ticket_pabuf) 3354 ERROUT(EBUSY); 3355 3356 ruleset = pf_find_kruleset(pcr->anchor); 3357 if (ruleset == NULL) 3358 ERROUT(EINVAL); 3359 3360 rs_num = pf_get_ruleset_number(pcr->rule.action); 3361 if (rs_num >= PF_RULESET_MAX) 3362 ERROUT(EINVAL); 3363 3364 /* 3365 * XXXMJG: there is no guarantee that the ruleset was 3366 * created by the usual route of calling DIOCXBEGIN. 3367 * As a result it is possible the rule tree will not 3368 * be allocated yet. Hack around it by doing it here. 3369 * Note it is fine to let the tree persist in case of 3370 * error as it will be freed down the road on future 3371 * updates (if need be). 3372 */ 3373 if (ruleset->rules[rs_num].active.tree == NULL) { 3374 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3375 if (ruleset->rules[rs_num].active.tree == NULL) { 3376 ERROUT(ENOMEM); 3377 } 3378 } 3379 3380 if (pcr->action == PF_CHANGE_GET_TICKET) { 3381 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3382 ERROUT(0); 3383 } else if (pcr->ticket != 3384 ruleset->rules[rs_num].active.ticket) 3385 ERROUT(EINVAL); 3386 3387 if (pcr->action != PF_CHANGE_REMOVE) { 3388 if (newrule->ifname[0]) { 3389 newrule->kif = pfi_kkif_attach(kif, 3390 newrule->ifname); 3391 kif = NULL; 3392 pfi_kkif_ref(newrule->kif); 3393 } else 3394 newrule->kif = NULL; 3395 3396 if (newrule->rtableid > 0 && 3397 newrule->rtableid >= rt_numfibs) 3398 error = EBUSY; 3399 3400 #ifdef ALTQ 3401 /* set queue IDs */ 3402 if (newrule->qname[0] != 0) { 3403 if ((newrule->qid = 3404 pf_qname2qid(newrule->qname)) == 0) 3405 error = EBUSY; 3406 else if (newrule->pqname[0] != 0) { 3407 if ((newrule->pqid = 3408 pf_qname2qid(newrule->pqname)) == 0) 3409 error = EBUSY; 3410 } else 3411 newrule->pqid = newrule->qid; 3412 } 3413 #endif /* ALTQ */ 3414 if (newrule->tagname[0]) 3415 if ((newrule->tag = 3416 pf_tagname2tag(newrule->tagname)) == 0) 3417 error = EBUSY; 3418 if (newrule->match_tagname[0]) 3419 if ((newrule->match_tag = pf_tagname2tag( 3420 newrule->match_tagname)) == 0) 3421 error = EBUSY; 3422 if (newrule->rt && !newrule->direction) 3423 error = EINVAL; 3424 if (!newrule->log) 3425 newrule->logif = 0; 3426 if (newrule->logif >= PFLOGIFS_MAX) 3427 error = EINVAL; 3428 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3429 error = ENOMEM; 3430 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3431 error = ENOMEM; 3432 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3433 error = EINVAL; 3434 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3435 if (pa->addr.type == PF_ADDR_TABLE) { 3436 pa->addr.p.tbl = 3437 pfr_attach_table(ruleset, 3438 pa->addr.v.tblname); 3439 if (pa->addr.p.tbl == NULL) 3440 error = ENOMEM; 3441 } 3442 3443 newrule->overload_tbl = NULL; 3444 if (newrule->overload_tblname[0]) { 3445 if ((newrule->overload_tbl = pfr_attach_table( 3446 ruleset, newrule->overload_tblname)) == 3447 NULL) 3448 error = EINVAL; 3449 else 3450 newrule->overload_tbl->pfrkt_flags |= 3451 PFR_TFLAG_ACTIVE; 3452 } 3453 3454 pf_mv_kpool(&V_pf_pabuf, 
&newrule->rpool.list); 3455 if (((((newrule->action == PF_NAT) || 3456 (newrule->action == PF_RDR) || 3457 (newrule->action == PF_BINAT) || 3458 (newrule->rt > PF_NOPFROUTE)) && 3459 !newrule->anchor)) && 3460 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3461 error = EINVAL; 3462 3463 if (error) { 3464 pf_free_rule(newrule); 3465 PF_RULES_WUNLOCK(); 3466 PF_CONFIG_UNLOCK(); 3467 break; 3468 } 3469 3470 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3471 } 3472 pf_empty_kpool(&V_pf_pabuf); 3473 3474 if (pcr->action == PF_CHANGE_ADD_HEAD) 3475 oldrule = TAILQ_FIRST( 3476 ruleset->rules[rs_num].active.ptr); 3477 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3478 oldrule = TAILQ_LAST( 3479 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3480 else { 3481 oldrule = TAILQ_FIRST( 3482 ruleset->rules[rs_num].active.ptr); 3483 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3484 oldrule = TAILQ_NEXT(oldrule, entries); 3485 if (oldrule == NULL) { 3486 if (newrule != NULL) 3487 pf_free_rule(newrule); 3488 PF_RULES_WUNLOCK(); 3489 PF_CONFIG_UNLOCK(); 3490 error = EINVAL; 3491 break; 3492 } 3493 } 3494 3495 if (pcr->action == PF_CHANGE_REMOVE) { 3496 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3497 oldrule); 3498 RB_REMOVE(pf_krule_global, 3499 ruleset->rules[rs_num].active.tree, oldrule); 3500 ruleset->rules[rs_num].active.rcount--; 3501 } else { 3502 pf_hash_rule(newrule); 3503 if (RB_INSERT(pf_krule_global, 3504 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3505 pf_free_rule(newrule); 3506 PF_RULES_WUNLOCK(); 3507 PF_CONFIG_UNLOCK(); 3508 error = EEXIST; 3509 break; 3510 } 3511 3512 if (oldrule == NULL) 3513 TAILQ_INSERT_TAIL( 3514 ruleset->rules[rs_num].active.ptr, 3515 newrule, entries); 3516 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3517 pcr->action == PF_CHANGE_ADD_BEFORE) 3518 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3519 else 3520 TAILQ_INSERT_AFTER( 3521 ruleset->rules[rs_num].active.ptr, 3522 oldrule, newrule, entries); 3523 ruleset->rules[rs_num].active.rcount++; 3524 } 3525 3526 nr = 0; 3527 TAILQ_FOREACH(oldrule, 3528 ruleset->rules[rs_num].active.ptr, entries) 3529 oldrule->nr = nr++; 3530 3531 ruleset->rules[rs_num].active.ticket++; 3532 3533 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3534 pf_remove_if_empty_kruleset(ruleset); 3535 3536 PF_RULES_WUNLOCK(); 3537 PF_CONFIG_UNLOCK(); 3538 break; 3539 3540 #undef ERROUT 3541 DIOCCHANGERULE_error: 3542 PF_RULES_WUNLOCK(); 3543 PF_CONFIG_UNLOCK(); 3544 pf_krule_free(newrule); 3545 pf_kkif_free(kif); 3546 break; 3547 } 3548 3549 case DIOCCLRSTATESNV: { 3550 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3551 break; 3552 } 3553 3554 case DIOCKILLSTATESNV: { 3555 error = pf_killstates_nv((struct pfioc_nv *)addr); 3556 break; 3557 } 3558 3559 case DIOCADDSTATE: { 3560 struct pfioc_state *ps = (struct pfioc_state *)addr; 3561 struct pfsync_state_1301 *sp = &ps->state; 3562 3563 if (sp->timeout >= PFTM_MAX) { 3564 error = EINVAL; 3565 break; 3566 } 3567 if (V_pfsync_state_import_ptr != NULL) { 3568 PF_RULES_RLOCK(); 3569 error = V_pfsync_state_import_ptr( 3570 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, 3571 PFSYNC_MSG_VERSION_1301); 3572 PF_RULES_RUNLOCK(); 3573 } else 3574 error = EOPNOTSUPP; 3575 break; 3576 } 3577 3578 case DIOCGETSTATE: { 3579 struct pfioc_state *ps = (struct pfioc_state *)addr; 3580 struct pf_kstate *s; 3581 3582 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3583 if (s == NULL) { 3584 error = ENOENT; 3585 break; 3586 } 3587 3588 
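		/*
		 * pf_find_state_byid() returns the state locked; export it
		 * in the legacy pfsync_state_1301 layout that struct
		 * pfioc_state carries, then drop the lock.
		 */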
pfsync_state_export((union pfsync_state_union*)&ps->state, 3589 s, PFSYNC_MSG_VERSION_1301); 3590 PF_STATE_UNLOCK(s); 3591 break; 3592 } 3593 3594 case DIOCGETSTATENV: { 3595 error = pf_getstate((struct pfioc_nv *)addr); 3596 break; 3597 } 3598 3599 #ifdef COMPAT_FREEBSD14 3600 case DIOCGETSTATES: { 3601 struct pfioc_states *ps = (struct pfioc_states *)addr; 3602 struct pf_kstate *s; 3603 struct pfsync_state_1301 *pstore, *p; 3604 int i, nr; 3605 size_t slice_count = 16, count; 3606 void *out; 3607 3608 if (ps->ps_len <= 0) { 3609 nr = uma_zone_get_cur(V_pf_state_z); 3610 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3611 break; 3612 } 3613 3614 out = ps->ps_states; 3615 pstore = mallocarray(slice_count, 3616 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); 3617 nr = 0; 3618 3619 for (i = 0; i <= pf_hashmask; i++) { 3620 struct pf_idhash *ih = &V_pf_idhash[i]; 3621 3622 DIOCGETSTATES_retry: 3623 p = pstore; 3624 3625 if (LIST_EMPTY(&ih->states)) 3626 continue; 3627 3628 PF_HASHROW_LOCK(ih); 3629 count = 0; 3630 LIST_FOREACH(s, &ih->states, entry) { 3631 if (s->timeout == PFTM_UNLINKED) 3632 continue; 3633 count++; 3634 } 3635 3636 if (count > slice_count) { 3637 PF_HASHROW_UNLOCK(ih); 3638 free(pstore, M_TEMP); 3639 slice_count = count * 2; 3640 pstore = mallocarray(slice_count, 3641 sizeof(struct pfsync_state_1301), M_TEMP, 3642 M_WAITOK | M_ZERO); 3643 goto DIOCGETSTATES_retry; 3644 } 3645 3646 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3647 PF_HASHROW_UNLOCK(ih); 3648 goto DIOCGETSTATES_full; 3649 } 3650 3651 LIST_FOREACH(s, &ih->states, entry) { 3652 if (s->timeout == PFTM_UNLINKED) 3653 continue; 3654 3655 pfsync_state_export((union pfsync_state_union*)p, 3656 s, PFSYNC_MSG_VERSION_1301); 3657 p++; 3658 nr++; 3659 } 3660 PF_HASHROW_UNLOCK(ih); 3661 error = copyout(pstore, out, 3662 sizeof(struct pfsync_state_1301) * count); 3663 if (error) 3664 break; 3665 out = ps->ps_states + nr; 3666 } 3667 DIOCGETSTATES_full: 3668 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3669 free(pstore, M_TEMP); 3670 3671 break; 3672 } 3673 3674 case DIOCGETSTATESV2: { 3675 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3676 struct pf_kstate *s; 3677 struct pf_state_export *pstore, *p; 3678 int i, nr; 3679 size_t slice_count = 16, count; 3680 void *out; 3681 3682 if (ps->ps_req_version > PF_STATE_VERSION) { 3683 error = ENOTSUP; 3684 break; 3685 } 3686 3687 if (ps->ps_len <= 0) { 3688 nr = uma_zone_get_cur(V_pf_state_z); 3689 ps->ps_len = sizeof(struct pf_state_export) * nr; 3690 break; 3691 } 3692 3693 out = ps->ps_states; 3694 pstore = mallocarray(slice_count, 3695 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3696 nr = 0; 3697 3698 for (i = 0; i <= pf_hashmask; i++) { 3699 struct pf_idhash *ih = &V_pf_idhash[i]; 3700 3701 DIOCGETSTATESV2_retry: 3702 p = pstore; 3703 3704 if (LIST_EMPTY(&ih->states)) 3705 continue; 3706 3707 PF_HASHROW_LOCK(ih); 3708 count = 0; 3709 LIST_FOREACH(s, &ih->states, entry) { 3710 if (s->timeout == PFTM_UNLINKED) 3711 continue; 3712 count++; 3713 } 3714 3715 if (count > slice_count) { 3716 PF_HASHROW_UNLOCK(ih); 3717 free(pstore, M_TEMP); 3718 slice_count = count * 2; 3719 pstore = mallocarray(slice_count, 3720 sizeof(struct pf_state_export), M_TEMP, 3721 M_WAITOK | M_ZERO); 3722 goto DIOCGETSTATESV2_retry; 3723 } 3724 3725 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3726 PF_HASHROW_UNLOCK(ih); 3727 goto DIOCGETSTATESV2_full; 3728 } 3729 3730 LIST_FOREACH(s, &ih->states, entry) { 3731 if (s->timeout == PFTM_UNLINKED) 3732 
continue; 3733 3734 pf_state_export(p, s); 3735 p++; 3736 nr++; 3737 } 3738 PF_HASHROW_UNLOCK(ih); 3739 error = copyout(pstore, out, 3740 sizeof(struct pf_state_export) * count); 3741 if (error) 3742 break; 3743 out = ps->ps_states + nr; 3744 } 3745 DIOCGETSTATESV2_full: 3746 ps->ps_len = nr * sizeof(struct pf_state_export); 3747 free(pstore, M_TEMP); 3748 3749 break; 3750 } 3751 #endif 3752 case DIOCGETSTATUSNV: { 3753 error = pf_getstatus((struct pfioc_nv *)addr); 3754 break; 3755 } 3756 3757 case DIOCSETSTATUSIF: { 3758 struct pfioc_if *pi = (struct pfioc_if *)addr; 3759 3760 if (pi->ifname[0] == 0) { 3761 bzero(V_pf_status.ifname, IFNAMSIZ); 3762 break; 3763 } 3764 PF_RULES_WLOCK(); 3765 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3766 PF_RULES_WUNLOCK(); 3767 break; 3768 } 3769 3770 case DIOCCLRSTATUS: { 3771 PF_RULES_WLOCK(); 3772 for (int i = 0; i < PFRES_MAX; i++) 3773 counter_u64_zero(V_pf_status.counters[i]); 3774 for (int i = 0; i < FCNT_MAX; i++) 3775 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3776 for (int i = 0; i < SCNT_MAX; i++) 3777 counter_u64_zero(V_pf_status.scounters[i]); 3778 for (int i = 0; i < KLCNT_MAX; i++) 3779 counter_u64_zero(V_pf_status.lcounters[i]); 3780 V_pf_status.since = time_second; 3781 if (*V_pf_status.ifname) 3782 pfi_update_status(V_pf_status.ifname, NULL); 3783 PF_RULES_WUNLOCK(); 3784 break; 3785 } 3786 3787 case DIOCNATLOOK: { 3788 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3789 struct pf_state_key *sk; 3790 struct pf_kstate *state; 3791 struct pf_state_key_cmp key; 3792 int m = 0, direction = pnl->direction; 3793 int sidx, didx; 3794 3795 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3796 sidx = (direction == PF_IN) ? 1 : 0; 3797 didx = (direction == PF_IN) ? 
0 : 1; 3798 3799 if (!pnl->proto || 3800 PF_AZERO(&pnl->saddr, pnl->af) || 3801 PF_AZERO(&pnl->daddr, pnl->af) || 3802 ((pnl->proto == IPPROTO_TCP || 3803 pnl->proto == IPPROTO_UDP) && 3804 (!pnl->dport || !pnl->sport))) 3805 error = EINVAL; 3806 else { 3807 bzero(&key, sizeof(key)); 3808 key.af = pnl->af; 3809 key.proto = pnl->proto; 3810 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3811 key.port[sidx] = pnl->sport; 3812 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3813 key.port[didx] = pnl->dport; 3814 3815 state = pf_find_state_all(&key, direction, &m); 3816 if (state == NULL) { 3817 error = ENOENT; 3818 } else { 3819 if (m > 1) { 3820 PF_STATE_UNLOCK(state); 3821 error = E2BIG; /* more than one state */ 3822 } else { 3823 sk = state->key[sidx]; 3824 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3825 pnl->rsport = sk->port[sidx]; 3826 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3827 pnl->rdport = sk->port[didx]; 3828 PF_STATE_UNLOCK(state); 3829 } 3830 } 3831 } 3832 break; 3833 } 3834 3835 case DIOCSETTIMEOUT: { 3836 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3837 int old; 3838 3839 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 3840 pt->seconds < 0) { 3841 error = EINVAL; 3842 break; 3843 } 3844 PF_RULES_WLOCK(); 3845 old = V_pf_default_rule.timeout[pt->timeout]; 3846 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 3847 pt->seconds = 1; 3848 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 3849 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 3850 wakeup(pf_purge_thread); 3851 pt->seconds = old; 3852 PF_RULES_WUNLOCK(); 3853 break; 3854 } 3855 3856 case DIOCGETTIMEOUT: { 3857 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3858 3859 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 3860 error = EINVAL; 3861 break; 3862 } 3863 PF_RULES_RLOCK(); 3864 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 3865 PF_RULES_RUNLOCK(); 3866 break; 3867 } 3868 3869 case DIOCGETLIMIT: { 3870 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3871 3872 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 3873 error = EINVAL; 3874 break; 3875 } 3876 PF_RULES_RLOCK(); 3877 pl->limit = V_pf_limits[pl->index].limit; 3878 PF_RULES_RUNLOCK(); 3879 break; 3880 } 3881 3882 case DIOCSETLIMIT: { 3883 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3884 int old_limit; 3885 3886 PF_RULES_WLOCK(); 3887 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 3888 V_pf_limits[pl->index].zone == NULL) { 3889 PF_RULES_WUNLOCK(); 3890 error = EINVAL; 3891 break; 3892 } 3893 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 3894 old_limit = V_pf_limits[pl->index].limit; 3895 V_pf_limits[pl->index].limit = pl->limit; 3896 pl->limit = old_limit; 3897 PF_RULES_WUNLOCK(); 3898 break; 3899 } 3900 3901 case DIOCSETDEBUG: { 3902 u_int32_t *level = (u_int32_t *)addr; 3903 3904 PF_RULES_WLOCK(); 3905 V_pf_status.debug = *level; 3906 PF_RULES_WUNLOCK(); 3907 break; 3908 } 3909 3910 case DIOCCLRRULECTRS: { 3911 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 3912 struct pf_kruleset *ruleset = &pf_main_ruleset; 3913 struct pf_krule *rule; 3914 3915 PF_RULES_WLOCK(); 3916 TAILQ_FOREACH(rule, 3917 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 3918 pf_counter_u64_zero(&rule->evaluations); 3919 for (int i = 0; i < 2; i++) { 3920 pf_counter_u64_zero(&rule->packets[i]); 3921 pf_counter_u64_zero(&rule->bytes[i]); 3922 } 3923 } 3924 PF_RULES_WUNLOCK(); 3925 break; 3926 } 3927 3928 case DIOCGIFSPEEDV0: 3929 case DIOCGIFSPEEDV1: { 3930 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 3931 struct pf_ifspeed_v1 ps; 3932 struct ifnet *ifp; 3933 3934 if (psp->ifname[0] == '\0') { 3935 error = EINVAL; 3936 break; 3937 } 3938 3939 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 3940 if (error != 0) 3941 break; 3942 ifp = ifunit(ps.ifname); 3943 if (ifp != NULL) { 3944 psp->baudrate32 = 3945 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 3946 if (cmd == DIOCGIFSPEEDV1) 3947 psp->baudrate = ifp->if_baudrate; 3948 } else { 3949 error = EINVAL; 3950 } 3951 break; 3952 } 3953 3954 #ifdef ALTQ 3955 case DIOCSTARTALTQ: { 3956 struct pf_altq *altq; 3957 3958 PF_RULES_WLOCK(); 3959 /* enable all altq interfaces on active list */ 3960 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 3961 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 3962 error = pf_enable_altq(altq); 3963 if (error != 0) 3964 break; 3965 } 3966 } 3967 if (error == 0) 3968 V_pf_altq_running = 1; 3969 PF_RULES_WUNLOCK(); 3970 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 3971 break; 3972 } 3973 3974 case DIOCSTOPALTQ: { 3975 struct pf_altq *altq; 3976 3977 PF_RULES_WLOCK(); 3978 /* disable all altq interfaces on active list */ 3979 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 3980 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 3981 error = pf_disable_altq(altq); 3982 if (error != 0) 3983 break; 3984 } 3985 } 3986 if (error == 0) 3987 V_pf_altq_running = 0; 3988 PF_RULES_WUNLOCK(); 3989 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 3990 break; 3991 } 3992 3993 case DIOCADDALTQV0: 3994 case DIOCADDALTQV1: { 3995 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 3996 struct pf_altq *altq, *a; 3997 struct ifnet *ifp; 3998 3999 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4000 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4001 if (error) 4002 break; 4003 altq->local_flags = 0; 4004 4005 PF_RULES_WLOCK(); 4006 if (pa->ticket != V_ticket_altqs_inactive) { 4007 PF_RULES_WUNLOCK(); 4008 free(altq, M_PFALTQ); 4009 error = EBUSY; 4010 break; 4011 } 4012 4013 /* 4014 * if this is for a queue, find the discipline and 4015 * copy the necessary fields 4016 */ 4017 if (altq->qname[0] != 0) { 4018 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4019 PF_RULES_WUNLOCK(); 4020 error = EBUSY; 4021 free(altq, M_PFALTQ); 4022 break; 4023 } 4024 altq->altq_disc = NULL; 4025 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4026 if (strncmp(a->ifname, altq->ifname, 4027 IFNAMSIZ) == 0) { 4028 altq->altq_disc = a->altq_disc; 4029 break; 4030 } 4031 } 4032 } 4033 4034 if ((ifp = ifunit(altq->ifname)) == NULL) 4035 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4036 else 4037 error = altq_add(ifp, altq); 4038 4039 if (error) { 4040 PF_RULES_WUNLOCK(); 4041 free(altq, M_PFALTQ); 4042 break; 4043 } 4044 4045 if (altq->qname[0] != 0) 4046 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4047 else 4048 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4049 /* version error check done on import above */ 4050 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4051 PF_RULES_WUNLOCK(); 4052 break; 4053 } 4054 4055 case DIOCGETALTQSV0: 4056 case DIOCGETALTQSV1: { 4057 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4058 struct pf_altq *altq; 4059 4060 PF_RULES_RLOCK(); 4061 pa->nr = 0; 4062 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4063 pa->nr++; 4064 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4065 pa->nr++; 4066 pa->ticket = V_ticket_altqs_active; 4067 PF_RULES_RUNLOCK(); 4068 break; 4069 } 4070 4071 case DIOCGETALTQV0: 4072 
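	/*
	 * V0 and V1 share one handler; pf_export_kaltq() uses
	 * IOCPARM_LEN(cmd) to emit the layout the caller expects.
	 */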
case DIOCGETALTQV1: { 4073 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4074 struct pf_altq *altq; 4075 4076 PF_RULES_RLOCK(); 4077 if (pa->ticket != V_ticket_altqs_active) { 4078 PF_RULES_RUNLOCK(); 4079 error = EBUSY; 4080 break; 4081 } 4082 altq = pf_altq_get_nth_active(pa->nr); 4083 if (altq == NULL) { 4084 PF_RULES_RUNLOCK(); 4085 error = EBUSY; 4086 break; 4087 } 4088 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4089 PF_RULES_RUNLOCK(); 4090 break; 4091 } 4092 4093 case DIOCCHANGEALTQV0: 4094 case DIOCCHANGEALTQV1: 4095 /* CHANGEALTQ not supported yet! */ 4096 error = ENODEV; 4097 break; 4098 4099 case DIOCGETQSTATSV0: 4100 case DIOCGETQSTATSV1: { 4101 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4102 struct pf_altq *altq; 4103 int nbytes; 4104 u_int32_t version; 4105 4106 PF_RULES_RLOCK(); 4107 if (pq->ticket != V_ticket_altqs_active) { 4108 PF_RULES_RUNLOCK(); 4109 error = EBUSY; 4110 break; 4111 } 4112 nbytes = pq->nbytes; 4113 altq = pf_altq_get_nth_active(pq->nr); 4114 if (altq == NULL) { 4115 PF_RULES_RUNLOCK(); 4116 error = EBUSY; 4117 break; 4118 } 4119 4120 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4121 PF_RULES_RUNLOCK(); 4122 error = ENXIO; 4123 break; 4124 } 4125 PF_RULES_RUNLOCK(); 4126 if (cmd == DIOCGETQSTATSV0) 4127 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4128 else 4129 version = pq->version; 4130 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4131 if (error == 0) { 4132 pq->scheduler = altq->scheduler; 4133 pq->nbytes = nbytes; 4134 } 4135 break; 4136 } 4137 #endif /* ALTQ */ 4138 4139 case DIOCBEGINADDRS: { 4140 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4141 4142 PF_RULES_WLOCK(); 4143 pf_empty_kpool(&V_pf_pabuf); 4144 pp->ticket = ++V_ticket_pabuf; 4145 PF_RULES_WUNLOCK(); 4146 break; 4147 } 4148 4149 case DIOCADDADDR: { 4150 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4151 struct pf_kpooladdr *pa; 4152 struct pfi_kkif *kif = NULL; 4153 4154 #ifndef INET 4155 if (pp->af == AF_INET) { 4156 error = EAFNOSUPPORT; 4157 break; 4158 } 4159 #endif /* INET */ 4160 #ifndef INET6 4161 if (pp->af == AF_INET6) { 4162 error = EAFNOSUPPORT; 4163 break; 4164 } 4165 #endif /* INET6 */ 4166 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4167 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4168 pp->addr.addr.type != PF_ADDR_TABLE) { 4169 error = EINVAL; 4170 break; 4171 } 4172 if (pp->addr.addr.p.dyn != NULL) { 4173 error = EINVAL; 4174 break; 4175 } 4176 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4177 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4178 if (error != 0) 4179 break; 4180 if (pa->ifname[0]) 4181 kif = pf_kkif_create(M_WAITOK); 4182 PF_RULES_WLOCK(); 4183 if (pp->ticket != V_ticket_pabuf) { 4184 PF_RULES_WUNLOCK(); 4185 if (pa->ifname[0]) 4186 pf_kkif_free(kif); 4187 free(pa, M_PFRULE); 4188 error = EBUSY; 4189 break; 4190 } 4191 if (pa->ifname[0]) { 4192 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4193 kif = NULL; 4194 pfi_kkif_ref(pa->kif); 4195 } else 4196 pa->kif = NULL; 4197 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4198 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4199 if (pa->ifname[0]) 4200 pfi_kkif_unref(pa->kif); 4201 PF_RULES_WUNLOCK(); 4202 free(pa, M_PFRULE); 4203 break; 4204 } 4205 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4206 PF_RULES_WUNLOCK(); 4207 break; 4208 } 4209 4210 case DIOCGETADDRS: { 4211 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4212 struct pf_kpool *pool; 4213 struct pf_kpooladdr *pa; 4214 4215 
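		/* NUL-terminate the user-supplied anchor path before use. */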
pp->anchor[sizeof(pp->anchor) - 1] = 0; 4216 pp->nr = 0; 4217 4218 PF_RULES_RLOCK(); 4219 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4220 pp->r_num, 0, 1, 0); 4221 if (pool == NULL) { 4222 PF_RULES_RUNLOCK(); 4223 error = EBUSY; 4224 break; 4225 } 4226 TAILQ_FOREACH(pa, &pool->list, entries) 4227 pp->nr++; 4228 PF_RULES_RUNLOCK(); 4229 break; 4230 } 4231 4232 case DIOCGETADDR: { 4233 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4234 struct pf_kpool *pool; 4235 struct pf_kpooladdr *pa; 4236 u_int32_t nr = 0; 4237 4238 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4239 4240 PF_RULES_RLOCK(); 4241 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4242 pp->r_num, 0, 1, 1); 4243 if (pool == NULL) { 4244 PF_RULES_RUNLOCK(); 4245 error = EBUSY; 4246 break; 4247 } 4248 pa = TAILQ_FIRST(&pool->list); 4249 while ((pa != NULL) && (nr < pp->nr)) { 4250 pa = TAILQ_NEXT(pa, entries); 4251 nr++; 4252 } 4253 if (pa == NULL) { 4254 PF_RULES_RUNLOCK(); 4255 error = EBUSY; 4256 break; 4257 } 4258 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4259 pf_addr_copyout(&pp->addr.addr); 4260 PF_RULES_RUNLOCK(); 4261 break; 4262 } 4263 4264 case DIOCCHANGEADDR: { 4265 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4266 struct pf_kpool *pool; 4267 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4268 struct pf_kruleset *ruleset; 4269 struct pfi_kkif *kif = NULL; 4270 4271 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4272 4273 if (pca->action < PF_CHANGE_ADD_HEAD || 4274 pca->action > PF_CHANGE_REMOVE) { 4275 error = EINVAL; 4276 break; 4277 } 4278 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4279 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4280 pca->addr.addr.type != PF_ADDR_TABLE) { 4281 error = EINVAL; 4282 break; 4283 } 4284 if (pca->addr.addr.p.dyn != NULL) { 4285 error = EINVAL; 4286 break; 4287 } 4288 4289 if (pca->action != PF_CHANGE_REMOVE) { 4290 #ifndef INET 4291 if (pca->af == AF_INET) { 4292 error = EAFNOSUPPORT; 4293 break; 4294 } 4295 #endif /* INET */ 4296 #ifndef INET6 4297 if (pca->af == AF_INET6) { 4298 error = EAFNOSUPPORT; 4299 break; 4300 } 4301 #endif /* INET6 */ 4302 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4303 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4304 if (newpa->ifname[0]) 4305 kif = pf_kkif_create(M_WAITOK); 4306 newpa->kif = NULL; 4307 } 4308 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4309 PF_RULES_WLOCK(); 4310 ruleset = pf_find_kruleset(pca->anchor); 4311 if (ruleset == NULL) 4312 ERROUT(EBUSY); 4313 4314 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4315 pca->r_num, pca->r_last, 1, 1); 4316 if (pool == NULL) 4317 ERROUT(EBUSY); 4318 4319 if (pca->action != PF_CHANGE_REMOVE) { 4320 if (newpa->ifname[0]) { 4321 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4322 pfi_kkif_ref(newpa->kif); 4323 kif = NULL; 4324 } 4325 4326 switch (newpa->addr.type) { 4327 case PF_ADDR_DYNIFTL: 4328 error = pfi_dynaddr_setup(&newpa->addr, 4329 pca->af); 4330 break; 4331 case PF_ADDR_TABLE: 4332 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4333 newpa->addr.v.tblname); 4334 if (newpa->addr.p.tbl == NULL) 4335 error = ENOMEM; 4336 break; 4337 } 4338 if (error) 4339 goto DIOCCHANGEADDR_error; 4340 } 4341 4342 switch (pca->action) { 4343 case PF_CHANGE_ADD_HEAD: 4344 oldpa = TAILQ_FIRST(&pool->list); 4345 break; 4346 case PF_CHANGE_ADD_TAIL: 4347 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4348 break; 4349 default: 4350 oldpa = TAILQ_FIRST(&pool->list); 4351 for (int i = 0; oldpa && i < pca->nr; i++) 4352 oldpa = 
TAILQ_NEXT(oldpa, entries); 4353 4354 if (oldpa == NULL) 4355 ERROUT(EINVAL); 4356 } 4357 4358 if (pca->action == PF_CHANGE_REMOVE) { 4359 TAILQ_REMOVE(&pool->list, oldpa, entries); 4360 switch (oldpa->addr.type) { 4361 case PF_ADDR_DYNIFTL: 4362 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4363 break; 4364 case PF_ADDR_TABLE: 4365 pfr_detach_table(oldpa->addr.p.tbl); 4366 break; 4367 } 4368 if (oldpa->kif) 4369 pfi_kkif_unref(oldpa->kif); 4370 free(oldpa, M_PFRULE); 4371 } else { 4372 if (oldpa == NULL) 4373 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4374 else if (pca->action == PF_CHANGE_ADD_HEAD || 4375 pca->action == PF_CHANGE_ADD_BEFORE) 4376 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4377 else 4378 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4379 newpa, entries); 4380 } 4381 4382 pool->cur = TAILQ_FIRST(&pool->list); 4383 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4384 PF_RULES_WUNLOCK(); 4385 break; 4386 4387 #undef ERROUT 4388 DIOCCHANGEADDR_error: 4389 if (newpa != NULL) { 4390 if (newpa->kif) 4391 pfi_kkif_unref(newpa->kif); 4392 free(newpa, M_PFRULE); 4393 } 4394 PF_RULES_WUNLOCK(); 4395 pf_kkif_free(kif); 4396 break; 4397 } 4398 4399 case DIOCGETRULESETS: { 4400 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4401 struct pf_kruleset *ruleset; 4402 struct pf_kanchor *anchor; 4403 4404 pr->path[sizeof(pr->path) - 1] = 0; 4405 4406 PF_RULES_RLOCK(); 4407 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4408 PF_RULES_RUNLOCK(); 4409 error = ENOENT; 4410 break; 4411 } 4412 pr->nr = 0; 4413 if (ruleset->anchor == NULL) { 4414 /* XXX kludge for pf_main_ruleset */ 4415 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4416 if (anchor->parent == NULL) 4417 pr->nr++; 4418 } else { 4419 RB_FOREACH(anchor, pf_kanchor_node, 4420 &ruleset->anchor->children) 4421 pr->nr++; 4422 } 4423 PF_RULES_RUNLOCK(); 4424 break; 4425 } 4426 4427 case DIOCGETRULESET: { 4428 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4429 struct pf_kruleset *ruleset; 4430 struct pf_kanchor *anchor; 4431 u_int32_t nr = 0; 4432 4433 pr->path[sizeof(pr->path) - 1] = 0; 4434 4435 PF_RULES_RLOCK(); 4436 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4437 PF_RULES_RUNLOCK(); 4438 error = ENOENT; 4439 break; 4440 } 4441 pr->name[0] = 0; 4442 if (ruleset->anchor == NULL) { 4443 /* XXX kludge for pf_main_ruleset */ 4444 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4445 if (anchor->parent == NULL && nr++ == pr->nr) { 4446 strlcpy(pr->name, anchor->name, 4447 sizeof(pr->name)); 4448 break; 4449 } 4450 } else { 4451 RB_FOREACH(anchor, pf_kanchor_node, 4452 &ruleset->anchor->children) 4453 if (nr++ == pr->nr) { 4454 strlcpy(pr->name, anchor->name, 4455 sizeof(pr->name)); 4456 break; 4457 } 4458 } 4459 if (!pr->name[0]) 4460 error = EBUSY; 4461 PF_RULES_RUNLOCK(); 4462 break; 4463 } 4464 4465 case DIOCRCLRTABLES: { 4466 struct pfioc_table *io = (struct pfioc_table *)addr; 4467 4468 if (io->pfrio_esize != 0) { 4469 error = ENODEV; 4470 break; 4471 } 4472 PF_RULES_WLOCK(); 4473 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4474 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4475 PF_RULES_WUNLOCK(); 4476 break; 4477 } 4478 4479 case DIOCRADDTABLES: { 4480 struct pfioc_table *io = (struct pfioc_table *)addr; 4481 struct pfr_table *pfrts; 4482 size_t totlen; 4483 4484 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4485 error = ENODEV; 4486 break; 4487 } 4488 4489 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4490 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct 
pfr_table))) { 4491 error = ENOMEM; 4492 break; 4493 } 4494 4495 totlen = io->pfrio_size * sizeof(struct pfr_table); 4496 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4497 M_TEMP, M_WAITOK); 4498 error = copyin(io->pfrio_buffer, pfrts, totlen); 4499 if (error) { 4500 free(pfrts, M_TEMP); 4501 break; 4502 } 4503 PF_RULES_WLOCK(); 4504 error = pfr_add_tables(pfrts, io->pfrio_size, 4505 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4506 PF_RULES_WUNLOCK(); 4507 free(pfrts, M_TEMP); 4508 break; 4509 } 4510 4511 case DIOCRDELTABLES: { 4512 struct pfioc_table *io = (struct pfioc_table *)addr; 4513 struct pfr_table *pfrts; 4514 size_t totlen; 4515 4516 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4517 error = ENODEV; 4518 break; 4519 } 4520 4521 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4522 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4523 error = ENOMEM; 4524 break; 4525 } 4526 4527 totlen = io->pfrio_size * sizeof(struct pfr_table); 4528 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4529 M_TEMP, M_WAITOK); 4530 error = copyin(io->pfrio_buffer, pfrts, totlen); 4531 if (error) { 4532 free(pfrts, M_TEMP); 4533 break; 4534 } 4535 PF_RULES_WLOCK(); 4536 error = pfr_del_tables(pfrts, io->pfrio_size, 4537 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4538 PF_RULES_WUNLOCK(); 4539 free(pfrts, M_TEMP); 4540 break; 4541 } 4542 4543 case DIOCRGETTABLES: { 4544 struct pfioc_table *io = (struct pfioc_table *)addr; 4545 struct pfr_table *pfrts; 4546 size_t totlen; 4547 int n; 4548 4549 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4550 error = ENODEV; 4551 break; 4552 } 4553 PF_RULES_RLOCK(); 4554 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4555 if (n < 0) { 4556 PF_RULES_RUNLOCK(); 4557 error = EINVAL; 4558 break; 4559 } 4560 io->pfrio_size = min(io->pfrio_size, n); 4561 4562 totlen = io->pfrio_size * sizeof(struct pfr_table); 4563 4564 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4565 M_TEMP, M_NOWAIT | M_ZERO); 4566 if (pfrts == NULL) { 4567 error = ENOMEM; 4568 PF_RULES_RUNLOCK(); 4569 break; 4570 } 4571 error = pfr_get_tables(&io->pfrio_table, pfrts, 4572 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4573 PF_RULES_RUNLOCK(); 4574 if (error == 0) 4575 error = copyout(pfrts, io->pfrio_buffer, totlen); 4576 free(pfrts, M_TEMP); 4577 break; 4578 } 4579 4580 case DIOCRGETTSTATS: { 4581 struct pfioc_table *io = (struct pfioc_table *)addr; 4582 struct pfr_tstats *pfrtstats; 4583 size_t totlen; 4584 int n; 4585 4586 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4587 error = ENODEV; 4588 break; 4589 } 4590 PF_TABLE_STATS_LOCK(); 4591 PF_RULES_RLOCK(); 4592 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4593 if (n < 0) { 4594 PF_RULES_RUNLOCK(); 4595 PF_TABLE_STATS_UNLOCK(); 4596 error = EINVAL; 4597 break; 4598 } 4599 io->pfrio_size = min(io->pfrio_size, n); 4600 4601 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4602 pfrtstats = mallocarray(io->pfrio_size, 4603 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4604 if (pfrtstats == NULL) { 4605 error = ENOMEM; 4606 PF_RULES_RUNLOCK(); 4607 PF_TABLE_STATS_UNLOCK(); 4608 break; 4609 } 4610 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4611 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4612 PF_RULES_RUNLOCK(); 4613 PF_TABLE_STATS_UNLOCK(); 4614 if (error == 0) 4615 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4616 free(pfrtstats, M_TEMP); 4617 break; 4618 } 4619 4620 case 
DIOCRCLRTSTATS: { 4621 struct pfioc_table *io = (struct pfioc_table *)addr; 4622 struct pfr_table *pfrts; 4623 size_t totlen; 4624 4625 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4626 error = ENODEV; 4627 break; 4628 } 4629 4630 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4631 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4632 /* We used to count tables and use the minimum required 4633 * size, so we didn't fail on overly large requests. 4634 * Keep doing so. */ 4635 io->pfrio_size = pf_ioctl_maxcount; 4636 break; 4637 } 4638 4639 totlen = io->pfrio_size * sizeof(struct pfr_table); 4640 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4641 M_TEMP, M_WAITOK); 4642 error = copyin(io->pfrio_buffer, pfrts, totlen); 4643 if (error) { 4644 free(pfrts, M_TEMP); 4645 break; 4646 } 4647 4648 PF_TABLE_STATS_LOCK(); 4649 PF_RULES_RLOCK(); 4650 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4651 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4652 PF_RULES_RUNLOCK(); 4653 PF_TABLE_STATS_UNLOCK(); 4654 free(pfrts, M_TEMP); 4655 break; 4656 } 4657 4658 case DIOCRSETTFLAGS: { 4659 struct pfioc_table *io = (struct pfioc_table *)addr; 4660 struct pfr_table *pfrts; 4661 size_t totlen; 4662 int n; 4663 4664 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4665 error = ENODEV; 4666 break; 4667 } 4668 4669 PF_RULES_RLOCK(); 4670 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4671 if (n < 0) { 4672 PF_RULES_RUNLOCK(); 4673 error = EINVAL; 4674 break; 4675 } 4676 4677 io->pfrio_size = min(io->pfrio_size, n); 4678 PF_RULES_RUNLOCK(); 4679 4680 totlen = io->pfrio_size * sizeof(struct pfr_table); 4681 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4682 M_TEMP, M_WAITOK); 4683 error = copyin(io->pfrio_buffer, pfrts, totlen); 4684 if (error) { 4685 free(pfrts, M_TEMP); 4686 break; 4687 } 4688 PF_RULES_WLOCK(); 4689 error = pfr_set_tflags(pfrts, io->pfrio_size, 4690 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4691 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4692 PF_RULES_WUNLOCK(); 4693 free(pfrts, M_TEMP); 4694 break; 4695 } 4696 4697 case DIOCRCLRADDRS: { 4698 struct pfioc_table *io = (struct pfioc_table *)addr; 4699 4700 if (io->pfrio_esize != 0) { 4701 error = ENODEV; 4702 break; 4703 } 4704 PF_RULES_WLOCK(); 4705 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4706 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4707 PF_RULES_WUNLOCK(); 4708 break; 4709 } 4710 4711 case DIOCRADDADDRS: { 4712 struct pfioc_table *io = (struct pfioc_table *)addr; 4713 struct pfr_addr *pfras; 4714 size_t totlen; 4715 4716 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4717 error = ENODEV; 4718 break; 4719 } 4720 if (io->pfrio_size < 0 || 4721 io->pfrio_size > pf_ioctl_maxcount || 4722 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4723 error = EINVAL; 4724 break; 4725 } 4726 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4727 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4728 M_TEMP, M_WAITOK); 4729 error = copyin(io->pfrio_buffer, pfras, totlen); 4730 if (error) { 4731 free(pfras, M_TEMP); 4732 break; 4733 } 4734 PF_RULES_WLOCK(); 4735 error = pfr_add_addrs(&io->pfrio_table, pfras, 4736 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4737 PFR_FLAG_USERIOCTL); 4738 PF_RULES_WUNLOCK(); 4739 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4740 error = copyout(pfras, io->pfrio_buffer, totlen); 4741 free(pfras, M_TEMP); 4742 break; 4743 } 4744 4745 case DIOCRDELADDRS: { 4746 struct 
pfioc_table *io = (struct pfioc_table *)addr; 4747 struct pfr_addr *pfras; 4748 size_t totlen; 4749 4750 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4751 error = ENODEV; 4752 break; 4753 } 4754 if (io->pfrio_size < 0 || 4755 io->pfrio_size > pf_ioctl_maxcount || 4756 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4757 error = EINVAL; 4758 break; 4759 } 4760 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4761 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4762 M_TEMP, M_WAITOK); 4763 error = copyin(io->pfrio_buffer, pfras, totlen); 4764 if (error) { 4765 free(pfras, M_TEMP); 4766 break; 4767 } 4768 PF_RULES_WLOCK(); 4769 error = pfr_del_addrs(&io->pfrio_table, pfras, 4770 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4771 PFR_FLAG_USERIOCTL); 4772 PF_RULES_WUNLOCK(); 4773 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4774 error = copyout(pfras, io->pfrio_buffer, totlen); 4775 free(pfras, M_TEMP); 4776 break; 4777 } 4778 4779 case DIOCRSETADDRS: { 4780 struct pfioc_table *io = (struct pfioc_table *)addr; 4781 struct pfr_addr *pfras; 4782 size_t totlen, count; 4783 4784 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4785 error = ENODEV; 4786 break; 4787 } 4788 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4789 error = EINVAL; 4790 break; 4791 } 4792 count = max(io->pfrio_size, io->pfrio_size2); 4793 if (count > pf_ioctl_maxcount || 4794 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4795 error = EINVAL; 4796 break; 4797 } 4798 totlen = count * sizeof(struct pfr_addr); 4799 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4800 M_WAITOK); 4801 error = copyin(io->pfrio_buffer, pfras, totlen); 4802 if (error) { 4803 free(pfras, M_TEMP); 4804 break; 4805 } 4806 PF_RULES_WLOCK(); 4807 error = pfr_set_addrs(&io->pfrio_table, pfras, 4808 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4809 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4810 PFR_FLAG_USERIOCTL, 0); 4811 PF_RULES_WUNLOCK(); 4812 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4813 error = copyout(pfras, io->pfrio_buffer, totlen); 4814 free(pfras, M_TEMP); 4815 break; 4816 } 4817 4818 case DIOCRGETADDRS: { 4819 struct pfioc_table *io = (struct pfioc_table *)addr; 4820 struct pfr_addr *pfras; 4821 size_t totlen; 4822 4823 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4824 error = ENODEV; 4825 break; 4826 } 4827 if (io->pfrio_size < 0 || 4828 io->pfrio_size > pf_ioctl_maxcount || 4829 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4830 error = EINVAL; 4831 break; 4832 } 4833 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4834 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4835 M_TEMP, M_WAITOK | M_ZERO); 4836 PF_RULES_RLOCK(); 4837 error = pfr_get_addrs(&io->pfrio_table, pfras, 4838 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4839 PF_RULES_RUNLOCK(); 4840 if (error == 0) 4841 error = copyout(pfras, io->pfrio_buffer, totlen); 4842 free(pfras, M_TEMP); 4843 break; 4844 } 4845 4846 case DIOCRGETASTATS: { 4847 struct pfioc_table *io = (struct pfioc_table *)addr; 4848 struct pfr_astats *pfrastats; 4849 size_t totlen; 4850 4851 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 4852 error = ENODEV; 4853 break; 4854 } 4855 if (io->pfrio_size < 0 || 4856 io->pfrio_size > pf_ioctl_maxcount || 4857 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 4858 error = EINVAL; 4859 break; 4860 } 4861 totlen = io->pfrio_size * sizeof(struct pfr_astats); 4862 pfrastats = mallocarray(io->pfrio_size, 4863 
sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 4864 PF_RULES_RLOCK(); 4865 error = pfr_get_astats(&io->pfrio_table, pfrastats, 4866 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4867 PF_RULES_RUNLOCK(); 4868 if (error == 0) 4869 error = copyout(pfrastats, io->pfrio_buffer, totlen); 4870 free(pfrastats, M_TEMP); 4871 break; 4872 } 4873 4874 case DIOCRCLRASTATS: { 4875 struct pfioc_table *io = (struct pfioc_table *)addr; 4876 struct pfr_addr *pfras; 4877 size_t totlen; 4878 4879 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4880 error = ENODEV; 4881 break; 4882 } 4883 if (io->pfrio_size < 0 || 4884 io->pfrio_size > pf_ioctl_maxcount || 4885 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4886 error = EINVAL; 4887 break; 4888 } 4889 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4890 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4891 M_TEMP, M_WAITOK); 4892 error = copyin(io->pfrio_buffer, pfras, totlen); 4893 if (error) { 4894 free(pfras, M_TEMP); 4895 break; 4896 } 4897 PF_RULES_WLOCK(); 4898 error = pfr_clr_astats(&io->pfrio_table, pfras, 4899 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 4900 PFR_FLAG_USERIOCTL); 4901 PF_RULES_WUNLOCK(); 4902 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4903 error = copyout(pfras, io->pfrio_buffer, totlen); 4904 free(pfras, M_TEMP); 4905 break; 4906 } 4907 4908 case DIOCRTSTADDRS: { 4909 struct pfioc_table *io = (struct pfioc_table *)addr; 4910 struct pfr_addr *pfras; 4911 size_t totlen; 4912 4913 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4914 error = ENODEV; 4915 break; 4916 } 4917 if (io->pfrio_size < 0 || 4918 io->pfrio_size > pf_ioctl_maxcount || 4919 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4920 error = EINVAL; 4921 break; 4922 } 4923 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4924 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4925 M_TEMP, M_WAITOK); 4926 error = copyin(io->pfrio_buffer, pfras, totlen); 4927 if (error) { 4928 free(pfras, M_TEMP); 4929 break; 4930 } 4931 PF_RULES_RLOCK(); 4932 error = pfr_tst_addrs(&io->pfrio_table, pfras, 4933 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 4934 PFR_FLAG_USERIOCTL); 4935 PF_RULES_RUNLOCK(); 4936 if (error == 0) 4937 error = copyout(pfras, io->pfrio_buffer, totlen); 4938 free(pfras, M_TEMP); 4939 break; 4940 } 4941 4942 case DIOCRINADEFINE: { 4943 struct pfioc_table *io = (struct pfioc_table *)addr; 4944 struct pfr_addr *pfras; 4945 size_t totlen; 4946 4947 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4948 error = ENODEV; 4949 break; 4950 } 4951 if (io->pfrio_size < 0 || 4952 io->pfrio_size > pf_ioctl_maxcount || 4953 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4954 error = EINVAL; 4955 break; 4956 } 4957 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4958 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4959 M_TEMP, M_WAITOK); 4960 error = copyin(io->pfrio_buffer, pfras, totlen); 4961 if (error) { 4962 free(pfras, M_TEMP); 4963 break; 4964 } 4965 PF_RULES_WLOCK(); 4966 error = pfr_ina_define(&io->pfrio_table, pfras, 4967 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 4968 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4969 PF_RULES_WUNLOCK(); 4970 free(pfras, M_TEMP); 4971 break; 4972 } 4973 4974 case DIOCOSFPADD: { 4975 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 4976 PF_RULES_WLOCK(); 4977 error = pf_osfp_add(io); 4978 PF_RULES_WUNLOCK(); 4979 break; 4980 } 4981 4982 case DIOCOSFPGET: { 4983 struct pf_osfp_ioctl *io 
= (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* Ensure there are no more Ethernet rules to clean up. */
		NET_EPOCH_DRAIN_CALLBACKS();
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_rollback_eth(ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
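			/*
			 * Table transactions are keyed by anchor path: the
			 * rollback below hands the anchor to pfr_ina_rollback()
			 * wrapped in a pfr_table.
			 */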
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		struct pf_kruleset	*rs;
		struct pf_keth_ruleset	*ers;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				ers = pf_find_keth_ruleset(ioe->anchor);
				if (ers == NULL || ioe->ticket == 0 ||
				    ioe->ticket != ers->inactive.ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();

		/* Only hook into Ethernet traffic if we've got rules for it. */
		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		else
			dehook_pf_eth();

		free(ioes, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash	*sh;
		struct pf_ksrc_node	*n;
		struct pf_src_node	*p, *pstore;
		uint32_t		 i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {

				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

#ifdef COMPAT_FREEBSD13
	case DIOCKEEPCOUNTERS_FREEBSD13:
#endif
	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t	*hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
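		/*
		 * This empties the whole OS fingerprint list; a fresh set is
		 * typically reloaded afterwards via DIOCOSFPADD (e.g. when
		 * pfctl reloads pf.os).
		 */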
PF_RULES_WUNLOCK(); 5394 break; 5395 5396 case DIOCIGETIFACES: { 5397 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5398 struct pfi_kif *ifstore; 5399 size_t bufsiz; 5400 5401 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5402 error = ENODEV; 5403 break; 5404 } 5405 5406 if (io->pfiio_size < 0 || 5407 io->pfiio_size > pf_ioctl_maxcount || 5408 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5409 error = EINVAL; 5410 break; 5411 } 5412 5413 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5414 5415 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5416 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5417 M_TEMP, M_WAITOK | M_ZERO); 5418 5419 PF_RULES_RLOCK(); 5420 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5421 PF_RULES_RUNLOCK(); 5422 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5423 free(ifstore, M_TEMP); 5424 break; 5425 } 5426 5427 case DIOCSETIFFLAG: { 5428 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5429 5430 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5431 5432 PF_RULES_WLOCK(); 5433 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5434 PF_RULES_WUNLOCK(); 5435 break; 5436 } 5437 5438 case DIOCCLRIFFLAG: { 5439 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5440 5441 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5442 5443 PF_RULES_WLOCK(); 5444 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5445 PF_RULES_WUNLOCK(); 5446 break; 5447 } 5448 5449 case DIOCSETREASS: { 5450 u_int32_t *reass = (u_int32_t *)addr; 5451 5452 V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF); 5453 /* Removal of DF flag without reassembly enabled is not a 5454 * valid combination. Disable reassembly in such case. */ 5455 if (!(V_pf_status.reass & PF_REASS_ENABLED)) 5456 V_pf_status.reass = 0; 5457 break; 5458 } 5459 5460 default: 5461 error = ENODEV; 5462 break; 5463 } 5464 fail: 5465 CURVNET_RESTORE(); 5466 5467 #undef ERROUT_IOCTL 5468 5469 return (error); 5470 } 5471 5472 void 5473 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version) 5474 { 5475 bzero(sp, sizeof(union pfsync_state_union)); 5476 5477 /* copy from state key */ 5478 sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5479 sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5480 sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5481 sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5482 sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5483 sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5484 sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5485 sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5486 sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto; 5487 sp->pfs_1301.af = st->key[PF_SK_WIRE]->af; 5488 5489 /* copy from state */ 5490 strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname)); 5491 bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr)); 5492 sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000)); 5493 sp->pfs_1301.expire = pf_state_expires(st); 5494 if (sp->pfs_1301.expire <= time_uptime) 5495 sp->pfs_1301.expire = htonl(0); 5496 else 5497 sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime); 5498 5499 sp->pfs_1301.direction = st->direction; 5500 sp->pfs_1301.log = st->act.log; 5501 sp->pfs_1301.timeout = st->timeout; 5502 5503 switch (msg_version) { 5504 case 
PFSYNC_MSG_VERSION_1301: 5505 sp->pfs_1301.state_flags = st->state_flags; 5506 break; 5507 case PFSYNC_MSG_VERSION_1400: 5508 sp->pfs_1400.state_flags = htons(st->state_flags); 5509 sp->pfs_1400.qid = htons(st->act.qid); 5510 sp->pfs_1400.pqid = htons(st->act.pqid); 5511 sp->pfs_1400.dnpipe = htons(st->act.dnpipe); 5512 sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe); 5513 sp->pfs_1400.rtableid = htonl(st->act.rtableid); 5514 sp->pfs_1400.min_ttl = st->act.min_ttl; 5515 sp->pfs_1400.set_tos = st->act.set_tos; 5516 sp->pfs_1400.max_mss = htons(st->act.max_mss); 5517 sp->pfs_1400.set_prio[0] = st->act.set_prio[0]; 5518 sp->pfs_1400.set_prio[1] = st->act.set_prio[1]; 5519 sp->pfs_1400.rt = st->rt; 5520 if (st->rt_kif) 5521 strlcpy(sp->pfs_1400.rt_ifname, 5522 st->rt_kif->pfik_name, 5523 sizeof(sp->pfs_1400.rt_ifname)); 5524 break; 5525 default: 5526 panic("%s: Unsupported pfsync_msg_version %d", 5527 __func__, msg_version); 5528 } 5529 5530 if (st->src_node) 5531 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE; 5532 if (st->nat_src_node) 5533 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5534 5535 sp->pfs_1301.id = st->id; 5536 sp->pfs_1301.creatorid = st->creatorid; 5537 pf_state_peer_hton(&st->src, &sp->pfs_1301.src); 5538 pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst); 5539 5540 if (st->rule.ptr == NULL) 5541 sp->pfs_1301.rule = htonl(-1); 5542 else 5543 sp->pfs_1301.rule = htonl(st->rule.ptr->nr); 5544 if (st->anchor.ptr == NULL) 5545 sp->pfs_1301.anchor = htonl(-1); 5546 else 5547 sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr); 5548 if (st->nat_rule.ptr == NULL) 5549 sp->pfs_1301.nat_rule = htonl(-1); 5550 else 5551 sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr); 5552 5553 pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]); 5554 pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]); 5555 pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]); 5556 pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]); 5557 } 5558 5559 void 5560 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5561 { 5562 bzero(sp, sizeof(*sp)); 5563 5564 sp->version = PF_STATE_VERSION; 5565 5566 /* copy from state key */ 5567 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5568 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5569 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5570 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5571 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5572 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5573 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5574 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5575 sp->proto = st->key[PF_SK_WIRE]->proto; 5576 sp->af = st->key[PF_SK_WIRE]->af; 5577 5578 /* copy from state */ 5579 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5580 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5581 sizeof(sp->orig_ifname)); 5582 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5583 sp->creation = htonl(time_uptime - (st->creation / 1000)); 5584 sp->expire = pf_state_expires(st); 5585 if (sp->expire <= time_uptime) 5586 sp->expire = htonl(0); 5587 else 5588 sp->expire = htonl(sp->expire - time_uptime); 5589 5590 sp->direction = st->direction; 5591 sp->log = st->act.log; 5592 sp->timeout = st->timeout; 5593 /* 8 bits for the old libpfctl, 16 bits for the new libpfctl */ 5594 sp->state_flags_compat = st->state_flags; 5595 sp->state_flags = htons(st->state_flags); 5596 if 
(st->src_node) 5597 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5598 if (st->nat_src_node) 5599 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5600 5601 sp->id = st->id; 5602 sp->creatorid = st->creatorid; 5603 pf_state_peer_hton(&st->src, &sp->src); 5604 pf_state_peer_hton(&st->dst, &sp->dst); 5605 5606 if (st->rule.ptr == NULL) 5607 sp->rule = htonl(-1); 5608 else 5609 sp->rule = htonl(st->rule.ptr->nr); 5610 if (st->anchor.ptr == NULL) 5611 sp->anchor = htonl(-1); 5612 else 5613 sp->anchor = htonl(st->anchor.ptr->nr); 5614 if (st->nat_rule.ptr == NULL) 5615 sp->nat_rule = htonl(-1); 5616 else 5617 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5618 5619 sp->packets[0] = st->packets[0]; 5620 sp->packets[1] = st->packets[1]; 5621 sp->bytes[0] = st->bytes[0]; 5622 sp->bytes[1] = st->bytes[1]; 5623 5624 sp->qid = htons(st->act.qid); 5625 sp->pqid = htons(st->act.pqid); 5626 sp->dnpipe = htons(st->act.dnpipe); 5627 sp->dnrpipe = htons(st->act.dnrpipe); 5628 sp->rtableid = htonl(st->act.rtableid); 5629 sp->min_ttl = st->act.min_ttl; 5630 sp->set_tos = st->act.set_tos; 5631 sp->max_mss = htons(st->act.max_mss); 5632 sp->rt = st->rt; 5633 if (st->rt_kif) 5634 strlcpy(sp->rt_ifname, st->rt_kif->pfik_name, 5635 sizeof(sp->rt_ifname)); 5636 sp->set_prio[0] = st->act.set_prio[0]; 5637 sp->set_prio[1] = st->act.set_prio[1]; 5638 5639 } 5640 5641 static void 5642 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5643 { 5644 struct pfr_ktable *kt; 5645 5646 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5647 5648 kt = aw->p.tbl; 5649 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5650 kt = kt->pfrkt_root; 5651 aw->p.tbl = NULL; 5652 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 5653 kt->pfrkt_cnt : -1; 5654 } 5655 5656 static int 5657 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5658 size_t number, char **names) 5659 { 5660 nvlist_t *nvc; 5661 5662 nvc = nvlist_create(0); 5663 if (nvc == NULL) 5664 return (ENOMEM); 5665 5666 for (int i = 0; i < number; i++) { 5667 nvlist_append_number_array(nvc, "counters", 5668 counter_u64_fetch(counters[i])); 5669 nvlist_append_string_array(nvc, "names", 5670 names[i]); 5671 nvlist_append_number_array(nvc, "ids", 5672 i); 5673 } 5674 nvlist_add_nvlist(nvl, name, nvc); 5675 nvlist_destroy(nvc); 5676 5677 return (0); 5678 } 5679 5680 static int 5681 pf_getstatus(struct pfioc_nv *nv) 5682 { 5683 nvlist_t *nvl = NULL, *nvc = NULL; 5684 void *nvlpacked = NULL; 5685 int error; 5686 struct pf_status s; 5687 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5688 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5689 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5690 PF_RULES_RLOCK_TRACKER; 5691 5692 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5693 5694 PF_RULES_RLOCK(); 5695 5696 nvl = nvlist_create(0); 5697 if (nvl == NULL) 5698 ERROUT(ENOMEM); 5699 5700 nvlist_add_bool(nvl, "running", V_pf_status.running); 5701 nvlist_add_number(nvl, "since", V_pf_status.since); 5702 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5703 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5704 nvlist_add_number(nvl, "states", V_pf_status.states); 5705 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5706 nvlist_add_number(nvl, "reass", V_pf_status.reass); 5707 nvlist_add_bool(nvl, "syncookies_active", 5708 V_pf_status.syncookies_active); 5709 nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen); 5710 5711 /* counters */ 5712 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 
5713 PFRES_MAX, pf_reasons); 5714 if (error != 0) 5715 ERROUT(error); 5716 5717 /* lcounters */ 5718 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5719 KLCNT_MAX, pf_lcounter); 5720 if (error != 0) 5721 ERROUT(error); 5722 5723 /* fcounters */ 5724 nvc = nvlist_create(0); 5725 if (nvc == NULL) 5726 ERROUT(ENOMEM); 5727 5728 for (int i = 0; i < FCNT_MAX; i++) { 5729 nvlist_append_number_array(nvc, "counters", 5730 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5731 nvlist_append_string_array(nvc, "names", 5732 pf_fcounter[i]); 5733 nvlist_append_number_array(nvc, "ids", 5734 i); 5735 } 5736 nvlist_add_nvlist(nvl, "fcounters", nvc); 5737 nvlist_destroy(nvc); 5738 nvc = NULL; 5739 5740 /* scounters */ 5741 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5742 SCNT_MAX, pf_fcounter); 5743 if (error != 0) 5744 ERROUT(error); 5745 5746 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5747 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5748 PF_MD5_DIGEST_LENGTH); 5749 5750 pfi_update_status(V_pf_status.ifname, &s); 5751 5752 /* pcounters / bcounters */ 5753 for (int i = 0; i < 2; i++) { 5754 for (int j = 0; j < 2; j++) { 5755 for (int k = 0; k < 2; k++) { 5756 nvlist_append_number_array(nvl, "pcounters", 5757 s.pcounters[i][j][k]); 5758 } 5759 nvlist_append_number_array(nvl, "bcounters", 5760 s.bcounters[i][j]); 5761 } 5762 } 5763 5764 nvlpacked = nvlist_pack(nvl, &nv->len); 5765 if (nvlpacked == NULL) 5766 ERROUT(ENOMEM); 5767 5768 if (nv->size == 0) 5769 ERROUT(0); 5770 else if (nv->size < nv->len) 5771 ERROUT(ENOSPC); 5772 5773 PF_RULES_RUNLOCK(); 5774 error = copyout(nvlpacked, nv->data, nv->len); 5775 goto done; 5776 5777 #undef ERROUT 5778 errout: 5779 PF_RULES_RUNLOCK(); 5780 done: 5781 free(nvlpacked, M_NVLIST); 5782 nvlist_destroy(nvc); 5783 nvlist_destroy(nvl); 5784 5785 return (error); 5786 } 5787 5788 /* 5789 * XXX - Check for version mismatch!!! 5790 */ 5791 static void 5792 pf_clear_all_states(void) 5793 { 5794 struct epoch_tracker et; 5795 struct pf_kstate *s; 5796 u_int i; 5797 5798 NET_EPOCH_ENTER(et); 5799 for (i = 0; i <= pf_hashmask; i++) { 5800 struct pf_idhash *ih = &V_pf_idhash[i]; 5801 relock: 5802 PF_HASHROW_LOCK(ih); 5803 LIST_FOREACH(s, &ih->states, entry) { 5804 s->timeout = PFTM_PURGE; 5805 /* Don't send out individual delete messages. 
*/ 5806 s->state_flags |= PFSTATE_NOSYNC; 5807 pf_unlink_state(s); 5808 goto relock; 5809 } 5810 PF_HASHROW_UNLOCK(ih); 5811 } 5812 NET_EPOCH_EXIT(et); 5813 } 5814 5815 static int 5816 pf_clear_tables(void) 5817 { 5818 struct pfioc_table io; 5819 int error; 5820 5821 bzero(&io, sizeof(io)); 5822 io.pfrio_flags |= PFR_FLAG_ALLRSETS; 5823 5824 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5825 io.pfrio_flags); 5826 5827 return (error); 5828 } 5829 5830 static void 5831 pf_clear_srcnodes(struct pf_ksrc_node *n) 5832 { 5833 struct pf_kstate *s; 5834 int i; 5835 5836 for (i = 0; i <= pf_hashmask; i++) { 5837 struct pf_idhash *ih = &V_pf_idhash[i]; 5838 5839 PF_HASHROW_LOCK(ih); 5840 LIST_FOREACH(s, &ih->states, entry) { 5841 if (n == NULL || n == s->src_node) 5842 s->src_node = NULL; 5843 if (n == NULL || n == s->nat_src_node) 5844 s->nat_src_node = NULL; 5845 } 5846 PF_HASHROW_UNLOCK(ih); 5847 } 5848 5849 if (n == NULL) { 5850 struct pf_srchash *sh; 5851 5852 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5853 i++, sh++) { 5854 PF_HASHROW_LOCK(sh); 5855 LIST_FOREACH(n, &sh->nodes, entry) { 5856 n->expire = 1; 5857 n->states = 0; 5858 } 5859 PF_HASHROW_UNLOCK(sh); 5860 } 5861 } else { 5862 /* XXX: hash slot should already be locked here. */ 5863 n->expire = 1; 5864 n->states = 0; 5865 } 5866 } 5867 5868 static void 5869 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5870 { 5871 struct pf_ksrc_node_list kill; 5872 5873 LIST_INIT(&kill); 5874 for (int i = 0; i <= pf_srchashmask; i++) { 5875 struct pf_srchash *sh = &V_pf_srchash[i]; 5876 struct pf_ksrc_node *sn, *tmp; 5877 5878 PF_HASHROW_LOCK(sh); 5879 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5880 if (PF_MATCHA(psnk->psnk_src.neg, 5881 &psnk->psnk_src.addr.v.a.addr, 5882 &psnk->psnk_src.addr.v.a.mask, 5883 &sn->addr, sn->af) && 5884 PF_MATCHA(psnk->psnk_dst.neg, 5885 &psnk->psnk_dst.addr.v.a.addr, 5886 &psnk->psnk_dst.addr.v.a.mask, 5887 &sn->raddr, sn->af)) { 5888 pf_unlink_src_node(sn); 5889 LIST_INSERT_HEAD(&kill, sn, entry); 5890 sn->expire = 1; 5891 } 5892 PF_HASHROW_UNLOCK(sh); 5893 } 5894 5895 for (int i = 0; i <= pf_hashmask; i++) { 5896 struct pf_idhash *ih = &V_pf_idhash[i]; 5897 struct pf_kstate *s; 5898 5899 PF_HASHROW_LOCK(ih); 5900 LIST_FOREACH(s, &ih->states, entry) { 5901 if (s->src_node && s->src_node->expire == 1) 5902 s->src_node = NULL; 5903 if (s->nat_src_node && s->nat_src_node->expire == 1) 5904 s->nat_src_node = NULL; 5905 } 5906 PF_HASHROW_UNLOCK(ih); 5907 } 5908 5909 psnk->psnk_killed = pf_free_src_nodes(&kill); 5910 } 5911 5912 static int 5913 pf_keepcounters(struct pfioc_nv *nv) 5914 { 5915 nvlist_t *nvl = NULL; 5916 void *nvlpacked = NULL; 5917 int error = 0; 5918 5919 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5920 5921 if (nv->len > pf_ioctl_maxcount) 5922 ERROUT(ENOMEM); 5923 5924 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5925 if (nvlpacked == NULL) 5926 ERROUT(ENOMEM); 5927 5928 error = copyin(nv->data, nvlpacked, nv->len); 5929 if (error) 5930 ERROUT(error); 5931 5932 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5933 if (nvl == NULL) 5934 ERROUT(EBADMSG); 5935 5936 if (! 
nvlist_exists_bool(nvl, "keep_counters")) 5937 ERROUT(EBADMSG); 5938 5939 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 5940 5941 on_error: 5942 nvlist_destroy(nvl); 5943 free(nvlpacked, M_NVLIST); 5944 return (error); 5945 } 5946 5947 static unsigned int 5948 pf_clear_states(const struct pf_kstate_kill *kill) 5949 { 5950 struct pf_state_key_cmp match_key; 5951 struct pf_kstate *s; 5952 struct pfi_kkif *kif; 5953 int idx; 5954 unsigned int killed = 0, dir; 5955 5956 NET_EPOCH_ASSERT(); 5957 5958 for (unsigned int i = 0; i <= pf_hashmask; i++) { 5959 struct pf_idhash *ih = &V_pf_idhash[i]; 5960 5961 relock_DIOCCLRSTATES: 5962 PF_HASHROW_LOCK(ih); 5963 LIST_FOREACH(s, &ih->states, entry) { 5964 /* For floating states look at the original kif. */ 5965 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 5966 5967 if (kill->psk_ifname[0] && 5968 strcmp(kill->psk_ifname, 5969 kif->pfik_name)) 5970 continue; 5971 5972 if (kill->psk_kill_match) { 5973 bzero(&match_key, sizeof(match_key)); 5974 5975 if (s->direction == PF_OUT) { 5976 dir = PF_IN; 5977 idx = PF_SK_STACK; 5978 } else { 5979 dir = PF_OUT; 5980 idx = PF_SK_WIRE; 5981 } 5982 5983 match_key.af = s->key[idx]->af; 5984 match_key.proto = s->key[idx]->proto; 5985 PF_ACPY(&match_key.addr[0], 5986 &s->key[idx]->addr[1], match_key.af); 5987 match_key.port[0] = s->key[idx]->port[1]; 5988 PF_ACPY(&match_key.addr[1], 5989 &s->key[idx]->addr[0], match_key.af); 5990 match_key.port[1] = s->key[idx]->port[0]; 5991 } 5992 5993 /* 5994 * Don't send out individual 5995 * delete messages. 5996 */ 5997 s->state_flags |= PFSTATE_NOSYNC; 5998 pf_unlink_state(s); 5999 killed++; 6000 6001 if (kill->psk_kill_match) 6002 killed += pf_kill_matching_state(&match_key, 6003 dir); 6004 6005 goto relock_DIOCCLRSTATES; 6006 } 6007 PF_HASHROW_UNLOCK(ih); 6008 } 6009 6010 if (V_pfsync_clear_states_ptr != NULL) 6011 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6012 6013 return (killed); 6014 } 6015 6016 static void 6017 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6018 { 6019 struct pf_kstate *s; 6020 6021 NET_EPOCH_ASSERT(); 6022 if (kill->psk_pfcmp.id) { 6023 if (kill->psk_pfcmp.creatorid == 0) 6024 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6025 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6026 kill->psk_pfcmp.creatorid))) { 6027 pf_unlink_state(s); 6028 *killed = 1; 6029 } 6030 return; 6031 } 6032 6033 for (unsigned int i = 0; i <= pf_hashmask; i++) 6034 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6035 } 6036 6037 static int 6038 pf_killstates_nv(struct pfioc_nv *nv) 6039 { 6040 struct pf_kstate_kill kill; 6041 struct epoch_tracker et; 6042 nvlist_t *nvl = NULL; 6043 void *nvlpacked = NULL; 6044 int error = 0; 6045 unsigned int killed = 0; 6046 6047 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6048 6049 if (nv->len > pf_ioctl_maxcount) 6050 ERROUT(ENOMEM); 6051 6052 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6053 if (nvlpacked == NULL) 6054 ERROUT(ENOMEM); 6055 6056 error = copyin(nv->data, nvlpacked, nv->len); 6057 if (error) 6058 ERROUT(error); 6059 6060 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6061 if (nvl == NULL) 6062 ERROUT(EBADMSG); 6063 6064 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6065 if (error) 6066 ERROUT(error); 6067 6068 NET_EPOCH_ENTER(et); 6069 pf_killstates(&kill, &killed); 6070 NET_EPOCH_EXIT(et); 6071 6072 free(nvlpacked, M_NVLIST); 6073 nvlpacked = NULL; 6074 nvlist_destroy(nvl); 6075 nvl = nvlist_create(0); 6076 if (nvl == NULL) 6077 ERROUT(ENOMEM); 
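	/* Build the reply: report the number of states killed. */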
6078 6079 nvlist_add_number(nvl, "killed", killed); 6080 6081 nvlpacked = nvlist_pack(nvl, &nv->len); 6082 if (nvlpacked == NULL) 6083 ERROUT(ENOMEM); 6084 6085 if (nv->size == 0) 6086 ERROUT(0); 6087 else if (nv->size < nv->len) 6088 ERROUT(ENOSPC); 6089 6090 error = copyout(nvlpacked, nv->data, nv->len); 6091 6092 on_error: 6093 nvlist_destroy(nvl); 6094 free(nvlpacked, M_NVLIST); 6095 return (error); 6096 } 6097 6098 static int 6099 pf_clearstates_nv(struct pfioc_nv *nv) 6100 { 6101 struct pf_kstate_kill kill; 6102 struct epoch_tracker et; 6103 nvlist_t *nvl = NULL; 6104 void *nvlpacked = NULL; 6105 int error = 0; 6106 unsigned int killed; 6107 6108 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6109 6110 if (nv->len > pf_ioctl_maxcount) 6111 ERROUT(ENOMEM); 6112 6113 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6114 if (nvlpacked == NULL) 6115 ERROUT(ENOMEM); 6116 6117 error = copyin(nv->data, nvlpacked, nv->len); 6118 if (error) 6119 ERROUT(error); 6120 6121 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6122 if (nvl == NULL) 6123 ERROUT(EBADMSG); 6124 6125 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6126 if (error) 6127 ERROUT(error); 6128 6129 NET_EPOCH_ENTER(et); 6130 killed = pf_clear_states(&kill); 6131 NET_EPOCH_EXIT(et); 6132 6133 free(nvlpacked, M_NVLIST); 6134 nvlpacked = NULL; 6135 nvlist_destroy(nvl); 6136 nvl = nvlist_create(0); 6137 if (nvl == NULL) 6138 ERROUT(ENOMEM); 6139 6140 nvlist_add_number(nvl, "killed", killed); 6141 6142 nvlpacked = nvlist_pack(nvl, &nv->len); 6143 if (nvlpacked == NULL) 6144 ERROUT(ENOMEM); 6145 6146 if (nv->size == 0) 6147 ERROUT(0); 6148 else if (nv->size < nv->len) 6149 ERROUT(ENOSPC); 6150 6151 error = copyout(nvlpacked, nv->data, nv->len); 6152 6153 #undef ERROUT 6154 on_error: 6155 nvlist_destroy(nvl); 6156 free(nvlpacked, M_NVLIST); 6157 return (error); 6158 } 6159 6160 static int 6161 pf_getstate(struct pfioc_nv *nv) 6162 { 6163 nvlist_t *nvl = NULL, *nvls; 6164 void *nvlpacked = NULL; 6165 struct pf_kstate *s = NULL; 6166 int error = 0; 6167 uint64_t id, creatorid; 6168 6169 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 6170 6171 if (nv->len > pf_ioctl_maxcount) 6172 ERROUT(ENOMEM); 6173 6174 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6175 if (nvlpacked == NULL) 6176 ERROUT(ENOMEM); 6177 6178 error = copyin(nv->data, nvlpacked, nv->len); 6179 if (error) 6180 ERROUT(error); 6181 6182 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6183 if (nvl == NULL) 6184 ERROUT(EBADMSG); 6185 6186 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 6187 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 6188 6189 s = pf_find_state_byid(id, creatorid); 6190 if (s == NULL) 6191 ERROUT(ENOENT); 6192 6193 free(nvlpacked, M_NVLIST); 6194 nvlpacked = NULL; 6195 nvlist_destroy(nvl); 6196 nvl = nvlist_create(0); 6197 if (nvl == NULL) 6198 ERROUT(ENOMEM); 6199 6200 nvls = pf_state_to_nvstate(s); 6201 if (nvls == NULL) 6202 ERROUT(ENOMEM); 6203 6204 nvlist_add_nvlist(nvl, "state", nvls); 6205 nvlist_destroy(nvls); 6206 6207 nvlpacked = nvlist_pack(nvl, &nv->len); 6208 if (nvlpacked == NULL) 6209 ERROUT(ENOMEM); 6210 6211 if (nv->size == 0) 6212 ERROUT(0); 6213 else if (nv->size < nv->len) 6214 ERROUT(ENOSPC); 6215 6216 error = copyout(nvlpacked, nv->data, nv->len); 6217 6218 #undef ERROUT 6219 errout: 6220 if (s != NULL) 6221 PF_STATE_UNLOCK(s); 6222 free(nvlpacked, M_NVLIST); 6223 nvlist_destroy(nvl); 6224 return (error); 6225 } 6226 6227 /* 6228 * XXX - Check for version mismatch!!! 
6229 */ 6230 6231 /* 6232 * Duplicate pfctl -Fa operation to get rid of as much as we can. 6233 */ 6234 static int 6235 shutdown_pf(void) 6236 { 6237 int error = 0; 6238 u_int32_t t[5]; 6239 char nn = '\0'; 6240 struct pf_kanchor *anchor; 6241 struct pf_keth_anchor *eth_anchor; 6242 int rs_num; 6243 6244 do { 6245 /* Unlink rules of all user defined anchors */ 6246 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) { 6247 /* Wildcard based anchors may not have a respective 6248 * explicit anchor rule or they may be left empty 6249 * without rules. It leads to anchor.refcnt=0, and the 6250 * rest of the logic does not expect it. */ 6251 if (anchor->refcnt == 0) 6252 anchor->refcnt = 1; 6253 for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) { 6254 if ((error = pf_begin_rules(&t[rs_num], rs_num, 6255 anchor->path)) != 0) { 6256 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: " 6257 "anchor.path=%s rs_num=%d\n", 6258 anchor->path, rs_num)); 6259 goto error; /* XXX: rollback? */ 6260 } 6261 } 6262 for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) { 6263 error = pf_commit_rules(t[rs_num], rs_num, 6264 anchor->path); 6265 MPASS(error == 0); 6266 } 6267 } 6268 6269 /* Unlink rules of all user defined ether anchors */ 6270 RB_FOREACH(eth_anchor, pf_keth_anchor_global, 6271 &V_pf_keth_anchors) { 6272 /* Wildcard based anchors may not have a respective 6273 * explicit anchor rule or they may be left empty 6274 * without rules. It leads to anchor.refcnt=0, and the 6275 * rest of the logic does not expect it. */ 6276 if (eth_anchor->refcnt == 0) 6277 eth_anchor->refcnt = 1; 6278 if ((error = pf_begin_eth(&t[0], eth_anchor->path)) 6279 != 0) { 6280 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth " 6281 "anchor.path=%s\n", eth_anchor->path)); 6282 goto error; 6283 } 6284 error = pf_commit_eth(t[0], eth_anchor->path); 6285 MPASS(error == 0); 6286 } 6287 6288 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6289 != 0) { 6290 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6291 break; 6292 } 6293 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6294 != 0) { 6295 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6296 break; /* XXX: rollback? */ 6297 } 6298 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6299 != 0) { 6300 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6301 break; /* XXX: rollback? */ 6302 } 6303 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6304 != 0) { 6305 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6306 break; /* XXX: rollback? */ 6307 } 6308 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6309 != 0) { 6310 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6311 break; /* XXX: rollback? 

		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
		MPASS(error == 0);

		if ((error = pf_clear_tables()) != 0)
			break;

		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
			break;
		}
		error = pf_commit_eth(t[0], &nn);
		MPASS(error == 0);

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloc(9)ed memory, so there is no
		 * need to clean it up */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

error:
	return (error);
}

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}
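/*
 * The wrappers below adapt pf's verdicts to the pfil(9) hook
 * interface: PF_PASS maps to PFIL_PASS, but if pf cleared the mbuf
 * pointer while passing the packet it has taken ownership of it, which
 * pfil(9) is told via PFIL_CONSUMED; any other verdict frees the mbuf
 * and reports PFIL_DROPPED.
 */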

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * In the case of loopback traffic IPv6 uses the real interface
	 * in order to support scoped addresses.  In order to support
	 * stateful filtering we have to change this to lo0, as is the
	 * case with IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}
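/*
 * Hook registration follows the same two-step pfil(9) pattern
 * throughout: pfil_add_hook() creates the hook object, and pfil_link()
 * attaches it to a packet filtering head; PFIL_HEADPTR and PFIL_HOOKPTR
 * indicate that pa_head and pa_hook are passed as pointers rather than
 * looked up by name.  The V_pf_pfil_hooked/V_pf_pfil_eth_hooked flags
 * make the hook/dehook functions idempotent, so repeated calls are
 * harmless.
 */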

static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet_local_pfil_head;
		pla.pa_hook = V_pf_ip4_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet6_local_pfil_head;
		pla.pa_hook = V_pf_ip6_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0,
	    "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}
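/*
 * Per-vnet teardown happens in a deliberate order: the pfil hooks are
 * removed first so that no new packets enter pf, the rulesets and
 * state are then flushed under the rules lock, and the net epoch is
 * drained so that deferred callbacks (in particular the ethernet rule
 * cleanup) cannot reference memory that is about to be freed.
 * Counters are released last because the shutdown path itself still
 * updates them.
 */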

static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	pf_nl_unregister();

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);
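/*
 * Teardown is driven by the SYSUNINITs above rather than by the
 * MOD_UNLOAD module event: uninitializers run in the reverse of
 * SYSINIT order, so the per-vnet vnet_pf_uninit() handlers
 * (SI_ORDER_THIRD) complete before pf_unload() (SI_ORDER_SECOND)
 * performs the global cleanup.
 */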

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		pf_nl_register();
		break;
	case MOD_UNLOAD:
		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_DEPEND(pf, netlink, 1, 1, 1);
MODULE_VERSION(pf, PF_MODVER);
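/*
 * The MODULE_DEPEND() above pins the netlink dependency to interface
 * version 1 (minimum, preferred and maximum), matching the
 * pf_nl_register()/pf_nl_unregister() calls in the load and unload
 * paths.
 */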