1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002,2003 Henning Brauer 6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * - Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * - Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Effort sponsored in part by the Defense Advanced Research Projects 34 * Agency (DARPA) and Air Force Research Laboratory, Air Force 35 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 36 * 37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ 38 */ 39 40 #include <sys/cdefs.h> 41 #include "opt_inet.h" 42 #include "opt_inet6.h" 43 #include "opt_bpf.h" 44 #include "opt_pf.h" 45 46 #include <sys/param.h> 47 #include <sys/_bitset.h> 48 #include <sys/bitset.h> 49 #include <sys/bus.h> 50 #include <sys/conf.h> 51 #include <sys/endian.h> 52 #include <sys/fcntl.h> 53 #include <sys/filio.h> 54 #include <sys/hash.h> 55 #include <sys/interrupt.h> 56 #include <sys/jail.h> 57 #include <sys/kernel.h> 58 #include <sys/kthread.h> 59 #include <sys/lock.h> 60 #include <sys/mbuf.h> 61 #include <sys/module.h> 62 #include <sys/nv.h> 63 #include <sys/proc.h> 64 #include <sys/sdt.h> 65 #include <sys/smp.h> 66 #include <sys/socket.h> 67 #include <sys/sysctl.h> 68 #include <sys/md5.h> 69 #include <sys/ucred.h> 70 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_private.h> 74 #include <net/vnet.h> 75 #include <net/route.h> 76 #include <net/pfil.h> 77 #include <net/pfvar.h> 78 #include <net/if_pfsync.h> 79 #include <net/if_pflog.h> 80 81 #include <netinet/in.h> 82 #include <netinet/ip.h> 83 #include <netinet/ip_var.h> 84 #include <netinet6/ip6_var.h> 85 #include <netinet/ip_icmp.h> 86 #include <netpfil/pf/pf_nl.h> 87 #include <netpfil/pf/pf_nv.h> 88 89 #ifdef INET6 90 #include <netinet/ip6.h> 91 #endif /* INET6 */ 92 93 #ifdef ALTQ 94 #include <net/altq/altq.h> 95 #endif 96 97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int"); 98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int"); 99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int"); 100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int"); 101 102 static struct pf_kpool *pf_get_kpool(const char *, u_int32_t, u_int8_t, 103 u_int32_t, u_int8_t, u_int8_t, u_int8_t); 104 105 static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *); 106 static void pf_empty_kpool(struct pf_kpalist *); 107 static int pfioctl(struct cdev *, u_long, caddr_t, int, 108 struct thread *); 109 static int pf_begin_eth(uint32_t *, const char *); 110 static void pf_rollback_eth_cb(struct epoch_context *); 111 static int pf_rollback_eth(uint32_t, const char *); 112 static int pf_commit_eth(uint32_t, const char *); 113 static void pf_free_eth_rule(struct pf_keth_rule *); 114 #ifdef ALTQ 115 static int pf_begin_altq(u_int32_t *); 116 static int pf_rollback_altq(u_int32_t); 117 static int pf_commit_altq(u_int32_t); 118 static int pf_enable_altq(struct pf_altq *); 119 static int pf_disable_altq(struct pf_altq *); 120 static uint16_t pf_qname2qid(const char *); 121 static void pf_qid_unref(uint16_t); 122 #endif /* ALTQ */ 123 static int pf_begin_rules(u_int32_t *, int, const char *); 124 static int pf_rollback_rules(u_int32_t, int, char *); 125 static int pf_setup_pfsync_matching(struct pf_kruleset *); 126 static void pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *); 127 static void pf_hash_rule(struct pf_krule *); 128 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 129 static int pf_commit_rules(u_int32_t, int, char *); 130 static int pf_addr_setup(struct pf_kruleset *, 131 struct pf_addr_wrap *, sa_family_t); 132 static void pf_addr_copyout(struct pf_addr_wrap *); 133 static void pf_src_node_copy(const struct pf_ksrc_node *, 134 struct pf_src_node *); 135 #ifdef ALTQ 136 static int pf_export_kaltq(struct pf_altq *, 137 struct pfioc_altq_v1 *, size_t); 138 static int pf_import_kaltq(struct pfioc_altq_v1 *, 139 struct pf_altq *, size_t); 140 #endif /* ALTQ */ 141 142 VNET_DEFINE(struct pf_krule, pf_default_rule); 143 144 static __inline int pf_krule_compare(struct pf_krule *, 145 struct pf_krule *); 146 147 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare); 148 149 #ifdef ALTQ 150 VNET_DEFINE_STATIC(int, pf_altq_running); 151 #define V_pf_altq_running VNET(pf_altq_running) 152 #endif 153 154 #define TAGID_MAX 50000 155 struct pf_tagname { 156 TAILQ_ENTRY(pf_tagname) namehash_entries; 157 TAILQ_ENTRY(pf_tagname) taghash_entries; 158 char name[PF_TAG_NAME_SIZE]; 159 uint16_t tag; 160 int ref; 161 }; 162 163 struct pf_tagset { 164 TAILQ_HEAD(, pf_tagname) *namehash; 165 TAILQ_HEAD(, pf_tagname) *taghash; 166 unsigned int mask; 167 uint32_t seed; 168 BITSET_DEFINE(, TAGID_MAX) avail; 169 }; 170 171 VNET_DEFINE(struct pf_tagset, pf_tags); 172 #define V_pf_tags VNET(pf_tags) 173 static unsigned int pf_rule_tag_hashsize; 174 #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128 175 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN, 176 &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT, 177 "Size of pf(4) rule tag hashtable"); 178 179 #ifdef ALTQ 180 VNET_DEFINE(struct pf_tagset, pf_qids); 181 #define V_pf_qids VNET(pf_qids) 182 static unsigned int pf_queue_tag_hashsize; 183 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128 184 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN, 185 &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT, 186 "Size of pf(4) queue tag hashtable"); 187 #endif 188 VNET_DEFINE(uma_zone_t, pf_tag_z); 189 #define V_pf_tag_z VNET(pf_tag_z) 190 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db"); 191 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules"); 192 193 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 194 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 195 #endif 196 197 VNET_DEFINE_STATIC(bool, pf_filter_local) = false; 198 #define V_pf_filter_local VNET(pf_filter_local) 199 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW, 200 &VNET_NAME(pf_filter_local), false, 201 "Enable filtering for packets delivered to local network stack"); 202 203 #ifdef PF_DEFAULT_TO_DROP 204 VNET_DEFINE_STATIC(bool, default_to_drop) = true; 205 #else 206 VNET_DEFINE_STATIC(bool, default_to_drop); 207 #endif 208 #define V_default_to_drop VNET(default_to_drop) 209 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET, 210 &VNET_NAME(default_to_drop), false, 211 "Make the default rule drop all packets."); 212 213 static void pf_init_tagset(struct pf_tagset *, unsigned int *, 214 unsigned int); 215 static void pf_cleanup_tagset(struct pf_tagset *); 216 static uint16_t tagname2hashindex(const struct pf_tagset *, const char *); 217 static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t); 218 static u_int16_t tagname2tag(struct pf_tagset *, const char *); 219 static u_int16_t pf_tagname2tag(const char *); 220 static void tag_unref(struct pf_tagset *, u_int16_t); 221 222 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 223 224 struct cdev *pf_dev; 225 226 /* 227 * XXX - These are new and need to be checked when moveing to a new version 228 */ 229 static void pf_clear_all_states(void); 230 static unsigned int pf_clear_states(const struct pf_kstate_kill *); 231 static void pf_killstates(struct pf_kstate_kill *, 232 unsigned int *); 233 static int pf_killstates_row(struct pf_kstate_kill *, 234 struct pf_idhash *); 235 static int pf_killstates_nv(struct pfioc_nv *); 236 static int pf_clearstates_nv(struct pfioc_nv *); 237 static int pf_getstate(struct pfioc_nv *); 238 static int pf_getstatus(struct pfioc_nv *); 239 static int pf_clear_tables(void); 240 static void pf_clear_srcnodes(struct pf_ksrc_node *); 241 static void pf_kill_srcnodes(struct pfioc_src_node_kill *); 242 static int pf_keepcounters(struct pfioc_nv *); 243 static void pf_tbladdr_copyout(struct pf_addr_wrap *); 244 245 /* 246 * Wrapper functions for pfil(9) hooks 247 */ 248 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, 249 int flags, void *ruleset __unused, struct inpcb *inp); 250 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, 251 int flags, void *ruleset __unused, struct inpcb *inp); 252 #ifdef INET 253 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp, 254 int flags, void *ruleset __unused, struct inpcb *inp); 255 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp, 256 int flags, void *ruleset __unused, struct inpcb *inp); 257 #endif 258 #ifdef INET6 259 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp, 260 int flags, void *ruleset __unused, struct inpcb *inp); 261 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp, 262 int flags, void *ruleset __unused, struct inpcb *inp); 263 #endif 264 265 static void hook_pf_eth(void); 266 static void hook_pf(void); 267 static void dehook_pf_eth(void); 268 static void dehook_pf(void); 269 static int shutdown_pf(void); 270 static int pf_load(void); 271 static void pf_unload(void); 272 273 static struct cdevsw pf_cdevsw = { 274 .d_ioctl = pfioctl, 275 .d_name = PF_NAME, 276 .d_version = D_VERSION, 277 }; 278 279 VNET_DEFINE_STATIC(bool, pf_pfil_hooked); 280 #define V_pf_pfil_hooked VNET(pf_pfil_hooked) 281 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked); 282 #define V_pf_pfil_eth_hooked VNET(pf_pfil_eth_hooked) 283 284 /* 285 * We need a flag that is neither hooked nor running to know when 286 * the VNET is "valid". We primarily need this to control (global) 287 * external event, e.g., eventhandlers. 288 */ 289 VNET_DEFINE(int, pf_vnet_active); 290 #define V_pf_vnet_active VNET(pf_vnet_active) 291 292 int pf_end_threads; 293 struct proc *pf_purge_proc; 294 295 VNET_DEFINE(struct rmlock, pf_rules_lock); 296 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock); 297 #define V_pf_ioctl_lock VNET(pf_ioctl_lock) 298 struct sx pf_end_lock; 299 300 /* pfsync */ 301 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr); 302 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr); 303 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr); 304 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr); 305 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr); 306 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr); 307 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr); 308 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr; 309 310 /* pflog */ 311 pflog_packet_t *pflog_packet_ptr = NULL; 312 313 /* 314 * Copy a user-provided string, returning an error if truncation would occur. 315 * Avoid scanning past "sz" bytes in the source string since there's no 316 * guarantee that it's nul-terminated. 317 */ 318 static int 319 pf_user_strcpy(char *dst, const char *src, size_t sz) 320 { 321 if (strnlen(src, sz) == sz) 322 return (EINVAL); 323 (void)strlcpy(dst, src, sz); 324 return (0); 325 } 326 327 static void 328 pfattach_vnet(void) 329 { 330 u_int32_t *my_timeout = V_pf_default_rule.timeout; 331 332 bzero(&V_pf_status, sizeof(V_pf_status)); 333 334 pf_initialize(); 335 pfr_initialize(); 336 pfi_initialize_vnet(); 337 pf_normalize_init(); 338 pf_syncookies_init(); 339 340 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 341 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 342 343 RB_INIT(&V_pf_anchors); 344 pf_init_kruleset(&pf_main_ruleset); 345 346 pf_init_keth(V_pf_keth); 347 348 /* default rule should never be garbage collected */ 349 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; 350 V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS; 351 V_pf_default_rule.nr = -1; 352 V_pf_default_rule.rtableid = -1; 353 354 pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK); 355 for (int i = 0; i < 2; i++) { 356 pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK); 357 pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK); 358 } 359 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK); 360 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK); 361 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK); 362 363 V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 364 M_WAITOK | M_ZERO); 365 366 #ifdef PF_WANT_32_TO_64_COUNTER 367 V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO); 368 V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO); 369 PF_RULES_WLOCK(); 370 LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); 371 LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist); 372 V_pf_allrulecount++; 373 LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); 374 PF_RULES_WUNLOCK(); 375 #endif 376 377 /* initialize default timeouts */ 378 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 379 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 380 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 381 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 382 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 383 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 384 my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 385 my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL; 386 my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 387 my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL; 388 my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL; 389 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 390 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 391 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 392 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 393 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 394 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 395 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 396 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 397 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 398 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 399 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 400 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 401 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 402 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 403 404 V_pf_status.debug = PF_DEBUG_URGENT; 405 /* 406 * XXX This is different than in OpenBSD where reassembly is enabled by 407 * defult. In FreeBSD we expect people to still use scrub rules and 408 * switch to the new syntax later. Only when they switch they must 409 * explicitly enable reassemle. We could change the default once the 410 * scrub rule functionality is hopefully removed some day in future. 411 */ 412 V_pf_status.reass = 0; 413 414 V_pf_pfil_hooked = false; 415 V_pf_pfil_eth_hooked = false; 416 417 /* XXX do our best to avoid a conflict */ 418 V_pf_status.hostid = arc4random(); 419 420 for (int i = 0; i < PFRES_MAX; i++) 421 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK); 422 for (int i = 0; i < KLCNT_MAX; i++) 423 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK); 424 for (int i = 0; i < FCNT_MAX; i++) 425 pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK); 426 for (int i = 0; i < SCNT_MAX; i++) 427 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK); 428 429 if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET, 430 INTR_MPSAFE, &V_pf_swi_cookie) != 0) 431 /* XXXGL: leaked all above. */ 432 return; 433 } 434 435 static struct pf_kpool * 436 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action, 437 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 438 u_int8_t check_ticket) 439 { 440 struct pf_kruleset *ruleset; 441 struct pf_krule *rule; 442 int rs_num; 443 444 ruleset = pf_find_kruleset(anchor); 445 if (ruleset == NULL) 446 return (NULL); 447 rs_num = pf_get_ruleset_number(rule_action); 448 if (rs_num >= PF_RULESET_MAX) 449 return (NULL); 450 if (active) { 451 if (check_ticket && ticket != 452 ruleset->rules[rs_num].active.ticket) 453 return (NULL); 454 if (r_last) 455 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 456 pf_krulequeue); 457 else 458 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 459 } else { 460 if (check_ticket && ticket != 461 ruleset->rules[rs_num].inactive.ticket) 462 return (NULL); 463 if (r_last) 464 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 465 pf_krulequeue); 466 else 467 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 468 } 469 if (!r_last) { 470 while ((rule != NULL) && (rule->nr != rule_number)) 471 rule = TAILQ_NEXT(rule, entries); 472 } 473 if (rule == NULL) 474 return (NULL); 475 476 return (&rule->rpool); 477 } 478 479 static void 480 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb) 481 { 482 struct pf_kpooladdr *mv_pool_pa; 483 484 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 485 TAILQ_REMOVE(poola, mv_pool_pa, entries); 486 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 487 } 488 } 489 490 static void 491 pf_empty_kpool(struct pf_kpalist *poola) 492 { 493 struct pf_kpooladdr *pa; 494 495 while ((pa = TAILQ_FIRST(poola)) != NULL) { 496 switch (pa->addr.type) { 497 case PF_ADDR_DYNIFTL: 498 pfi_dynaddr_remove(pa->addr.p.dyn); 499 break; 500 case PF_ADDR_TABLE: 501 /* XXX: this could be unfinished pooladdr on pabuf */ 502 if (pa->addr.p.tbl != NULL) 503 pfr_detach_table(pa->addr.p.tbl); 504 break; 505 } 506 if (pa->kif) 507 pfi_kkif_unref(pa->kif); 508 TAILQ_REMOVE(poola, pa, entries); 509 free(pa, M_PFRULE); 510 } 511 } 512 513 static void 514 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule) 515 { 516 517 PF_RULES_WASSERT(); 518 PF_UNLNKDRULES_ASSERT(); 519 520 TAILQ_REMOVE(rulequeue, rule, entries); 521 522 rule->rule_ref |= PFRULE_REFS; 523 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries); 524 } 525 526 static void 527 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule) 528 { 529 530 PF_RULES_WASSERT(); 531 532 PF_UNLNKDRULES_LOCK(); 533 pf_unlink_rule_locked(rulequeue, rule); 534 PF_UNLNKDRULES_UNLOCK(); 535 } 536 537 static void 538 pf_free_eth_rule(struct pf_keth_rule *rule) 539 { 540 PF_RULES_WASSERT(); 541 542 if (rule == NULL) 543 return; 544 545 if (rule->tag) 546 tag_unref(&V_pf_tags, rule->tag); 547 if (rule->match_tag) 548 tag_unref(&V_pf_tags, rule->match_tag); 549 #ifdef ALTQ 550 pf_qid_unref(rule->qid); 551 #endif 552 553 if (rule->bridge_to) 554 pfi_kkif_unref(rule->bridge_to); 555 if (rule->kif) 556 pfi_kkif_unref(rule->kif); 557 558 if (rule->ipsrc.addr.type == PF_ADDR_TABLE) 559 pfr_detach_table(rule->ipsrc.addr.p.tbl); 560 if (rule->ipdst.addr.type == PF_ADDR_TABLE) 561 pfr_detach_table(rule->ipdst.addr.p.tbl); 562 563 counter_u64_free(rule->evaluations); 564 for (int i = 0; i < 2; i++) { 565 counter_u64_free(rule->packets[i]); 566 counter_u64_free(rule->bytes[i]); 567 } 568 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); 569 pf_keth_anchor_remove(rule); 570 571 free(rule, M_PFRULE); 572 } 573 574 void 575 pf_free_rule(struct pf_krule *rule) 576 { 577 578 PF_RULES_WASSERT(); 579 PF_CONFIG_ASSERT(); 580 581 if (rule->tag) 582 tag_unref(&V_pf_tags, rule->tag); 583 if (rule->match_tag) 584 tag_unref(&V_pf_tags, rule->match_tag); 585 #ifdef ALTQ 586 if (rule->pqid != rule->qid) 587 pf_qid_unref(rule->pqid); 588 pf_qid_unref(rule->qid); 589 #endif 590 switch (rule->src.addr.type) { 591 case PF_ADDR_DYNIFTL: 592 pfi_dynaddr_remove(rule->src.addr.p.dyn); 593 break; 594 case PF_ADDR_TABLE: 595 pfr_detach_table(rule->src.addr.p.tbl); 596 break; 597 } 598 switch (rule->dst.addr.type) { 599 case PF_ADDR_DYNIFTL: 600 pfi_dynaddr_remove(rule->dst.addr.p.dyn); 601 break; 602 case PF_ADDR_TABLE: 603 pfr_detach_table(rule->dst.addr.p.tbl); 604 break; 605 } 606 if (rule->overload_tbl) 607 pfr_detach_table(rule->overload_tbl); 608 if (rule->kif) 609 pfi_kkif_unref(rule->kif); 610 pf_kanchor_remove(rule); 611 pf_empty_kpool(&rule->rpool.list); 612 613 pf_krule_free(rule); 614 } 615 616 static void 617 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size, 618 unsigned int default_size) 619 { 620 unsigned int i; 621 unsigned int hashsize; 622 623 if (*tunable_size == 0 || !powerof2(*tunable_size)) 624 *tunable_size = default_size; 625 626 hashsize = *tunable_size; 627 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH, 628 M_WAITOK); 629 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH, 630 M_WAITOK); 631 ts->mask = hashsize - 1; 632 ts->seed = arc4random(); 633 for (i = 0; i < hashsize; i++) { 634 TAILQ_INIT(&ts->namehash[i]); 635 TAILQ_INIT(&ts->taghash[i]); 636 } 637 BIT_FILL(TAGID_MAX, &ts->avail); 638 } 639 640 static void 641 pf_cleanup_tagset(struct pf_tagset *ts) 642 { 643 unsigned int i; 644 unsigned int hashsize; 645 struct pf_tagname *t, *tmp; 646 647 /* 648 * Only need to clean up one of the hashes as each tag is hashed 649 * into each table. 650 */ 651 hashsize = ts->mask + 1; 652 for (i = 0; i < hashsize; i++) 653 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp) 654 uma_zfree(V_pf_tag_z, t); 655 656 free(ts->namehash, M_PFHASH); 657 free(ts->taghash, M_PFHASH); 658 } 659 660 static uint16_t 661 tagname2hashindex(const struct pf_tagset *ts, const char *tagname) 662 { 663 size_t len; 664 665 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1); 666 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask); 667 } 668 669 static uint16_t 670 tag2hashindex(const struct pf_tagset *ts, uint16_t tag) 671 { 672 673 return (tag & ts->mask); 674 } 675 676 static u_int16_t 677 tagname2tag(struct pf_tagset *ts, const char *tagname) 678 { 679 struct pf_tagname *tag; 680 u_int32_t index; 681 u_int16_t new_tagid; 682 683 PF_RULES_WASSERT(); 684 685 index = tagname2hashindex(ts, tagname); 686 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries) 687 if (strcmp(tagname, tag->name) == 0) { 688 tag->ref++; 689 return (tag->tag); 690 } 691 692 /* 693 * new entry 694 * 695 * to avoid fragmentation, we do a linear search from the beginning 696 * and take the first free slot we find. 697 */ 698 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail); 699 /* 700 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX]. 701 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits 702 * set. It may also return a bit number greater than TAGID_MAX due 703 * to rounding of the number of bits in the vector up to a multiple 704 * of the vector word size at declaration/allocation time. 705 */ 706 if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) 707 return (0); 708 709 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */ 710 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); 711 712 /* allocate and fill new struct pf_tagname */ 713 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT); 714 if (tag == NULL) 715 return (0); 716 strlcpy(tag->name, tagname, sizeof(tag->name)); 717 tag->tag = new_tagid; 718 tag->ref = 1; 719 720 /* Insert into namehash */ 721 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); 722 723 /* Insert into taghash */ 724 index = tag2hashindex(ts, new_tagid); 725 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); 726 727 return (tag->tag); 728 } 729 730 static void 731 tag_unref(struct pf_tagset *ts, u_int16_t tag) 732 { 733 struct pf_tagname *t; 734 uint16_t index; 735 736 PF_RULES_WASSERT(); 737 738 index = tag2hashindex(ts, tag); 739 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) 740 if (tag == t->tag) { 741 if (--t->ref == 0) { 742 TAILQ_REMOVE(&ts->taghash[index], t, 743 taghash_entries); 744 index = tagname2hashindex(ts, t->name); 745 TAILQ_REMOVE(&ts->namehash[index], t, 746 namehash_entries); 747 /* Bits are 0-based for BIT_SET() */ 748 BIT_SET(TAGID_MAX, tag - 1, &ts->avail); 749 uma_zfree(V_pf_tag_z, t); 750 } 751 break; 752 } 753 } 754 755 static uint16_t 756 pf_tagname2tag(const char *tagname) 757 { 758 return (tagname2tag(&V_pf_tags, tagname)); 759 } 760 761 static int 762 pf_begin_eth(uint32_t *ticket, const char *anchor) 763 { 764 struct pf_keth_rule *rule, *tmp; 765 struct pf_keth_ruleset *rs; 766 767 PF_RULES_WASSERT(); 768 769 rs = pf_find_or_create_keth_ruleset(anchor); 770 if (rs == NULL) 771 return (EINVAL); 772 773 /* Purge old inactive rules. */ 774 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, 775 tmp) { 776 TAILQ_REMOVE(rs->inactive.rules, rule, 777 entries); 778 pf_free_eth_rule(rule); 779 } 780 781 *ticket = ++rs->inactive.ticket; 782 rs->inactive.open = 1; 783 784 return (0); 785 } 786 787 static void 788 pf_rollback_eth_cb(struct epoch_context *ctx) 789 { 790 struct pf_keth_ruleset *rs; 791 792 rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx); 793 794 CURVNET_SET(rs->vnet); 795 796 PF_RULES_WLOCK(); 797 pf_rollback_eth(rs->inactive.ticket, 798 rs->anchor ? rs->anchor->path : ""); 799 PF_RULES_WUNLOCK(); 800 801 CURVNET_RESTORE(); 802 } 803 804 static int 805 pf_rollback_eth(uint32_t ticket, const char *anchor) 806 { 807 struct pf_keth_rule *rule, *tmp; 808 struct pf_keth_ruleset *rs; 809 810 PF_RULES_WASSERT(); 811 812 rs = pf_find_keth_ruleset(anchor); 813 if (rs == NULL) 814 return (EINVAL); 815 816 if (!rs->inactive.open || 817 ticket != rs->inactive.ticket) 818 return (0); 819 820 /* Purge old inactive rules. */ 821 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, 822 tmp) { 823 TAILQ_REMOVE(rs->inactive.rules, rule, entries); 824 pf_free_eth_rule(rule); 825 } 826 827 rs->inactive.open = 0; 828 829 pf_remove_if_empty_keth_ruleset(rs); 830 831 return (0); 832 } 833 834 #define PF_SET_SKIP_STEPS(i) \ 835 do { \ 836 while (head[i] != cur) { \ 837 head[i]->skip[i].ptr = cur; \ 838 head[i] = TAILQ_NEXT(head[i], entries); \ 839 } \ 840 } while (0) 841 842 static void 843 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules) 844 { 845 struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT]; 846 int i; 847 848 cur = TAILQ_FIRST(rules); 849 prev = cur; 850 for (i = 0; i < PFE_SKIP_COUNT; ++i) 851 head[i] = cur; 852 while (cur != NULL) { 853 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) 854 PF_SET_SKIP_STEPS(PFE_SKIP_IFP); 855 if (cur->direction != prev->direction) 856 PF_SET_SKIP_STEPS(PFE_SKIP_DIR); 857 if (cur->proto != prev->proto) 858 PF_SET_SKIP_STEPS(PFE_SKIP_PROTO); 859 if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0) 860 PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR); 861 if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0) 862 PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR); 863 if (cur->ipsrc.neg != prev->ipsrc.neg || 864 pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr)) 865 PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR); 866 if (cur->ipdst.neg != prev->ipdst.neg || 867 pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr)) 868 PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR); 869 870 prev = cur; 871 cur = TAILQ_NEXT(cur, entries); 872 } 873 for (i = 0; i < PFE_SKIP_COUNT; ++i) 874 PF_SET_SKIP_STEPS(i); 875 } 876 877 static int 878 pf_commit_eth(uint32_t ticket, const char *anchor) 879 { 880 struct pf_keth_ruleq *rules; 881 struct pf_keth_ruleset *rs; 882 883 rs = pf_find_keth_ruleset(anchor); 884 if (rs == NULL) { 885 return (EINVAL); 886 } 887 888 if (!rs->inactive.open || 889 ticket != rs->inactive.ticket) 890 return (EBUSY); 891 892 PF_RULES_WASSERT(); 893 894 pf_eth_calc_skip_steps(rs->inactive.rules); 895 896 rules = rs->active.rules; 897 ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules); 898 rs->inactive.rules = rules; 899 rs->inactive.ticket = rs->active.ticket; 900 901 /* Clean up inactive rules (i.e. previously active rules), only when 902 * we're sure they're no longer used. */ 903 NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx); 904 905 return (0); 906 } 907 908 #ifdef ALTQ 909 static uint16_t 910 pf_qname2qid(const char *qname) 911 { 912 return (tagname2tag(&V_pf_qids, qname)); 913 } 914 915 static void 916 pf_qid_unref(uint16_t qid) 917 { 918 tag_unref(&V_pf_qids, qid); 919 } 920 921 static int 922 pf_begin_altq(u_int32_t *ticket) 923 { 924 struct pf_altq *altq, *tmp; 925 int error = 0; 926 927 PF_RULES_WASSERT(); 928 929 /* Purge the old altq lists */ 930 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { 931 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 932 /* detach and destroy the discipline */ 933 error = altq_remove(altq); 934 } 935 free(altq, M_PFALTQ); 936 } 937 TAILQ_INIT(V_pf_altq_ifs_inactive); 938 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { 939 pf_qid_unref(altq->qid); 940 free(altq, M_PFALTQ); 941 } 942 TAILQ_INIT(V_pf_altqs_inactive); 943 if (error) 944 return (error); 945 *ticket = ++V_ticket_altqs_inactive; 946 V_altqs_inactive_open = 1; 947 return (0); 948 } 949 950 static int 951 pf_rollback_altq(u_int32_t ticket) 952 { 953 struct pf_altq *altq, *tmp; 954 int error = 0; 955 956 PF_RULES_WASSERT(); 957 958 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) 959 return (0); 960 /* Purge the old altq lists */ 961 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { 962 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 963 /* detach and destroy the discipline */ 964 error = altq_remove(altq); 965 } 966 free(altq, M_PFALTQ); 967 } 968 TAILQ_INIT(V_pf_altq_ifs_inactive); 969 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { 970 pf_qid_unref(altq->qid); 971 free(altq, M_PFALTQ); 972 } 973 TAILQ_INIT(V_pf_altqs_inactive); 974 V_altqs_inactive_open = 0; 975 return (error); 976 } 977 978 static int 979 pf_commit_altq(u_int32_t ticket) 980 { 981 struct pf_altqqueue *old_altqs, *old_altq_ifs; 982 struct pf_altq *altq, *tmp; 983 int err, error = 0; 984 985 PF_RULES_WASSERT(); 986 987 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) 988 return (EBUSY); 989 990 /* swap altqs, keep the old. */ 991 old_altqs = V_pf_altqs_active; 992 old_altq_ifs = V_pf_altq_ifs_active; 993 V_pf_altqs_active = V_pf_altqs_inactive; 994 V_pf_altq_ifs_active = V_pf_altq_ifs_inactive; 995 V_pf_altqs_inactive = old_altqs; 996 V_pf_altq_ifs_inactive = old_altq_ifs; 997 V_ticket_altqs_active = V_ticket_altqs_inactive; 998 999 /* Attach new disciplines */ 1000 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1001 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 1002 /* attach the discipline */ 1003 error = altq_pfattach(altq); 1004 if (error == 0 && V_pf_altq_running) 1005 error = pf_enable_altq(altq); 1006 if (error != 0) 1007 return (error); 1008 } 1009 } 1010 1011 /* Purge the old altq lists */ 1012 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { 1013 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 1014 /* detach and destroy the discipline */ 1015 if (V_pf_altq_running) 1016 error = pf_disable_altq(altq); 1017 err = altq_pfdetach(altq); 1018 if (err != 0 && error == 0) 1019 error = err; 1020 err = altq_remove(altq); 1021 if (err != 0 && error == 0) 1022 error = err; 1023 } 1024 free(altq, M_PFALTQ); 1025 } 1026 TAILQ_INIT(V_pf_altq_ifs_inactive); 1027 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { 1028 pf_qid_unref(altq->qid); 1029 free(altq, M_PFALTQ); 1030 } 1031 TAILQ_INIT(V_pf_altqs_inactive); 1032 1033 V_altqs_inactive_open = 0; 1034 return (error); 1035 } 1036 1037 static int 1038 pf_enable_altq(struct pf_altq *altq) 1039 { 1040 struct ifnet *ifp; 1041 struct tb_profile tb; 1042 int error = 0; 1043 1044 if ((ifp = ifunit(altq->ifname)) == NULL) 1045 return (EINVAL); 1046 1047 if (ifp->if_snd.altq_type != ALTQT_NONE) 1048 error = altq_enable(&ifp->if_snd); 1049 1050 /* set tokenbucket regulator */ 1051 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { 1052 tb.rate = altq->ifbandwidth; 1053 tb.depth = altq->tbrsize; 1054 error = tbr_set(&ifp->if_snd, &tb); 1055 } 1056 1057 return (error); 1058 } 1059 1060 static int 1061 pf_disable_altq(struct pf_altq *altq) 1062 { 1063 struct ifnet *ifp; 1064 struct tb_profile tb; 1065 int error; 1066 1067 if ((ifp = ifunit(altq->ifname)) == NULL) 1068 return (EINVAL); 1069 1070 /* 1071 * when the discipline is no longer referenced, it was overridden 1072 * by a new one. if so, just return. 1073 */ 1074 if (altq->altq_disc != ifp->if_snd.altq_disc) 1075 return (0); 1076 1077 error = altq_disable(&ifp->if_snd); 1078 1079 if (error == 0) { 1080 /* clear tokenbucket regulator */ 1081 tb.rate = 0; 1082 error = tbr_set(&ifp->if_snd, &tb); 1083 } 1084 1085 return (error); 1086 } 1087 1088 static int 1089 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket, 1090 struct pf_altq *altq) 1091 { 1092 struct ifnet *ifp1; 1093 int error = 0; 1094 1095 /* Deactivate the interface in question */ 1096 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; 1097 if ((ifp1 = ifunit(altq->ifname)) == NULL || 1098 (remove && ifp1 == ifp)) { 1099 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 1100 } else { 1101 error = altq_add(ifp1, altq); 1102 1103 if (ticket != V_ticket_altqs_inactive) 1104 error = EBUSY; 1105 1106 if (error) 1107 free(altq, M_PFALTQ); 1108 } 1109 1110 return (error); 1111 } 1112 1113 void 1114 pf_altq_ifnet_event(struct ifnet *ifp, int remove) 1115 { 1116 struct pf_altq *a1, *a2, *a3; 1117 u_int32_t ticket; 1118 int error = 0; 1119 1120 /* 1121 * No need to re-evaluate the configuration for events on interfaces 1122 * that do not support ALTQ, as it's not possible for such 1123 * interfaces to be part of the configuration. 1124 */ 1125 if (!ALTQ_IS_READY(&ifp->if_snd)) 1126 return; 1127 1128 /* Interrupt userland queue modifications */ 1129 if (V_altqs_inactive_open) 1130 pf_rollback_altq(V_ticket_altqs_inactive); 1131 1132 /* Start new altq ruleset */ 1133 if (pf_begin_altq(&ticket)) 1134 return; 1135 1136 /* Copy the current active set */ 1137 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { 1138 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1139 if (a2 == NULL) { 1140 error = ENOMEM; 1141 break; 1142 } 1143 bcopy(a1, a2, sizeof(struct pf_altq)); 1144 1145 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1146 if (error) 1147 break; 1148 1149 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); 1150 } 1151 if (error) 1152 goto out; 1153 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { 1154 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1155 if (a2 == NULL) { 1156 error = ENOMEM; 1157 break; 1158 } 1159 bcopy(a1, a2, sizeof(struct pf_altq)); 1160 1161 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 1162 error = EBUSY; 1163 free(a2, M_PFALTQ); 1164 break; 1165 } 1166 a2->altq_disc = NULL; 1167 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { 1168 if (strncmp(a3->ifname, a2->ifname, 1169 IFNAMSIZ) == 0) { 1170 a2->altq_disc = a3->altq_disc; 1171 break; 1172 } 1173 } 1174 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1175 if (error) 1176 break; 1177 1178 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); 1179 } 1180 1181 out: 1182 if (error != 0) 1183 pf_rollback_altq(ticket); 1184 else 1185 pf_commit_altq(ticket); 1186 } 1187 #endif /* ALTQ */ 1188 1189 static struct pf_krule_global * 1190 pf_rule_tree_alloc(int flags) 1191 { 1192 struct pf_krule_global *tree; 1193 1194 tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags); 1195 if (tree == NULL) 1196 return (NULL); 1197 RB_INIT(tree); 1198 return (tree); 1199 } 1200 1201 static void 1202 pf_rule_tree_free(struct pf_krule_global *tree) 1203 { 1204 1205 free(tree, M_TEMP); 1206 } 1207 1208 static int 1209 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1210 { 1211 struct pf_krule_global *tree; 1212 struct pf_kruleset *rs; 1213 struct pf_krule *rule; 1214 1215 PF_RULES_WASSERT(); 1216 1217 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1218 return (EINVAL); 1219 tree = pf_rule_tree_alloc(M_NOWAIT); 1220 if (tree == NULL) 1221 return (ENOMEM); 1222 rs = pf_find_or_create_kruleset(anchor); 1223 if (rs == NULL) { 1224 free(tree, M_TEMP); 1225 return (EINVAL); 1226 } 1227 pf_rule_tree_free(rs->rules[rs_num].inactive.tree); 1228 rs->rules[rs_num].inactive.tree = tree; 1229 1230 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1231 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1232 rs->rules[rs_num].inactive.rcount--; 1233 } 1234 *ticket = ++rs->rules[rs_num].inactive.ticket; 1235 rs->rules[rs_num].inactive.open = 1; 1236 return (0); 1237 } 1238 1239 static int 1240 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 1241 { 1242 struct pf_kruleset *rs; 1243 struct pf_krule *rule; 1244 1245 PF_RULES_WASSERT(); 1246 1247 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1248 return (EINVAL); 1249 rs = pf_find_kruleset(anchor); 1250 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1251 rs->rules[rs_num].inactive.ticket != ticket) 1252 return (0); 1253 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1254 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1255 rs->rules[rs_num].inactive.rcount--; 1256 } 1257 rs->rules[rs_num].inactive.open = 0; 1258 return (0); 1259 } 1260 1261 #define PF_MD5_UPD(st, elm) \ 1262 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 1263 1264 #define PF_MD5_UPD_STR(st, elm) \ 1265 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 1266 1267 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 1268 (stor) = htonl((st)->elm); \ 1269 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 1270 } while (0) 1271 1272 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 1273 (stor) = htons((st)->elm); \ 1274 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 1275 } while (0) 1276 1277 static void 1278 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 1279 { 1280 PF_MD5_UPD(pfr, addr.type); 1281 switch (pfr->addr.type) { 1282 case PF_ADDR_DYNIFTL: 1283 PF_MD5_UPD(pfr, addr.v.ifname); 1284 PF_MD5_UPD(pfr, addr.iflags); 1285 break; 1286 case PF_ADDR_TABLE: 1287 PF_MD5_UPD(pfr, addr.v.tblname); 1288 break; 1289 case PF_ADDR_ADDRMASK: 1290 /* XXX ignore af? */ 1291 PF_MD5_UPD(pfr, addr.v.a.addr.addr32); 1292 PF_MD5_UPD(pfr, addr.v.a.mask.addr32); 1293 break; 1294 } 1295 1296 PF_MD5_UPD(pfr, port[0]); 1297 PF_MD5_UPD(pfr, port[1]); 1298 PF_MD5_UPD(pfr, neg); 1299 PF_MD5_UPD(pfr, port_op); 1300 } 1301 1302 static void 1303 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule) 1304 { 1305 u_int16_t x; 1306 u_int32_t y; 1307 1308 pf_hash_rule_addr(ctx, &rule->src); 1309 pf_hash_rule_addr(ctx, &rule->dst); 1310 for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) 1311 PF_MD5_UPD_STR(rule, label[i]); 1312 PF_MD5_UPD_STR(rule, ifname); 1313 PF_MD5_UPD_STR(rule, match_tagname); 1314 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ 1315 PF_MD5_UPD_HTONL(rule, os_fingerprint, y); 1316 PF_MD5_UPD_HTONL(rule, prob, y); 1317 PF_MD5_UPD_HTONL(rule, uid.uid[0], y); 1318 PF_MD5_UPD_HTONL(rule, uid.uid[1], y); 1319 PF_MD5_UPD(rule, uid.op); 1320 PF_MD5_UPD_HTONL(rule, gid.gid[0], y); 1321 PF_MD5_UPD_HTONL(rule, gid.gid[1], y); 1322 PF_MD5_UPD(rule, gid.op); 1323 PF_MD5_UPD_HTONL(rule, rule_flag, y); 1324 PF_MD5_UPD(rule, action); 1325 PF_MD5_UPD(rule, direction); 1326 PF_MD5_UPD(rule, af); 1327 PF_MD5_UPD(rule, quick); 1328 PF_MD5_UPD(rule, ifnot); 1329 PF_MD5_UPD(rule, match_tag_not); 1330 PF_MD5_UPD(rule, natpass); 1331 PF_MD5_UPD(rule, keep_state); 1332 PF_MD5_UPD(rule, proto); 1333 PF_MD5_UPD(rule, type); 1334 PF_MD5_UPD(rule, code); 1335 PF_MD5_UPD(rule, flags); 1336 PF_MD5_UPD(rule, flagset); 1337 PF_MD5_UPD(rule, allow_opts); 1338 PF_MD5_UPD(rule, rt); 1339 PF_MD5_UPD(rule, tos); 1340 PF_MD5_UPD(rule, scrub_flags); 1341 PF_MD5_UPD(rule, min_ttl); 1342 PF_MD5_UPD(rule, set_tos); 1343 if (rule->anchor != NULL) 1344 PF_MD5_UPD_STR(rule, anchor->path); 1345 } 1346 1347 static void 1348 pf_hash_rule(struct pf_krule *rule) 1349 { 1350 MD5_CTX ctx; 1351 1352 MD5Init(&ctx); 1353 pf_hash_rule_rolling(&ctx, rule); 1354 MD5Final(rule->md5sum, &ctx); 1355 } 1356 1357 static int 1358 pf_krule_compare(struct pf_krule *a, struct pf_krule *b) 1359 { 1360 1361 return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH)); 1362 } 1363 1364 static int 1365 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) 1366 { 1367 struct pf_kruleset *rs; 1368 struct pf_krule *rule, **old_array, *old_rule; 1369 struct pf_krulequeue *old_rules; 1370 struct pf_krule_global *old_tree; 1371 int error; 1372 u_int32_t old_rcount; 1373 1374 PF_RULES_WASSERT(); 1375 1376 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1377 return (EINVAL); 1378 rs = pf_find_kruleset(anchor); 1379 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1380 ticket != rs->rules[rs_num].inactive.ticket) 1381 return (EBUSY); 1382 1383 /* Calculate checksum for the main ruleset */ 1384 if (rs == &pf_main_ruleset) { 1385 error = pf_setup_pfsync_matching(rs); 1386 if (error != 0) 1387 return (error); 1388 } 1389 1390 /* Swap rules, keep the old. */ 1391 old_rules = rs->rules[rs_num].active.ptr; 1392 old_rcount = rs->rules[rs_num].active.rcount; 1393 old_array = rs->rules[rs_num].active.ptr_array; 1394 old_tree = rs->rules[rs_num].active.tree; 1395 1396 rs->rules[rs_num].active.ptr = 1397 rs->rules[rs_num].inactive.ptr; 1398 rs->rules[rs_num].active.ptr_array = 1399 rs->rules[rs_num].inactive.ptr_array; 1400 rs->rules[rs_num].active.tree = 1401 rs->rules[rs_num].inactive.tree; 1402 rs->rules[rs_num].active.rcount = 1403 rs->rules[rs_num].inactive.rcount; 1404 1405 /* Attempt to preserve counter information. */ 1406 if (V_pf_status.keep_counters && old_tree != NULL) { 1407 TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr, 1408 entries) { 1409 old_rule = RB_FIND(pf_krule_global, old_tree, rule); 1410 if (old_rule == NULL) { 1411 continue; 1412 } 1413 pf_counter_u64_critical_enter(); 1414 pf_counter_u64_add_protected(&rule->evaluations, 1415 pf_counter_u64_fetch(&old_rule->evaluations)); 1416 pf_counter_u64_add_protected(&rule->packets[0], 1417 pf_counter_u64_fetch(&old_rule->packets[0])); 1418 pf_counter_u64_add_protected(&rule->packets[1], 1419 pf_counter_u64_fetch(&old_rule->packets[1])); 1420 pf_counter_u64_add_protected(&rule->bytes[0], 1421 pf_counter_u64_fetch(&old_rule->bytes[0])); 1422 pf_counter_u64_add_protected(&rule->bytes[1], 1423 pf_counter_u64_fetch(&old_rule->bytes[1])); 1424 pf_counter_u64_critical_exit(); 1425 } 1426 } 1427 1428 rs->rules[rs_num].inactive.ptr = old_rules; 1429 rs->rules[rs_num].inactive.ptr_array = old_array; 1430 rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */ 1431 rs->rules[rs_num].inactive.rcount = old_rcount; 1432 1433 rs->rules[rs_num].active.ticket = 1434 rs->rules[rs_num].inactive.ticket; 1435 pf_calc_skip_steps(rs->rules[rs_num].active.ptr); 1436 1437 /* Purge the old rule list. */ 1438 PF_UNLNKDRULES_LOCK(); 1439 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 1440 pf_unlink_rule_locked(old_rules, rule); 1441 PF_UNLNKDRULES_UNLOCK(); 1442 if (rs->rules[rs_num].inactive.ptr_array) 1443 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP); 1444 rs->rules[rs_num].inactive.ptr_array = NULL; 1445 rs->rules[rs_num].inactive.rcount = 0; 1446 rs->rules[rs_num].inactive.open = 0; 1447 pf_remove_if_empty_kruleset(rs); 1448 free(old_tree, M_TEMP); 1449 1450 return (0); 1451 } 1452 1453 static int 1454 pf_setup_pfsync_matching(struct pf_kruleset *rs) 1455 { 1456 MD5_CTX ctx; 1457 struct pf_krule *rule; 1458 int rs_cnt; 1459 u_int8_t digest[PF_MD5_DIGEST_LENGTH]; 1460 1461 MD5Init(&ctx); 1462 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { 1463 /* XXX PF_RULESET_SCRUB as well? */ 1464 if (rs_cnt == PF_RULESET_SCRUB) 1465 continue; 1466 1467 if (rs->rules[rs_cnt].inactive.ptr_array) 1468 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); 1469 rs->rules[rs_cnt].inactive.ptr_array = NULL; 1470 1471 if (rs->rules[rs_cnt].inactive.rcount) { 1472 rs->rules[rs_cnt].inactive.ptr_array = 1473 mallocarray(rs->rules[rs_cnt].inactive.rcount, 1474 sizeof(struct pf_rule **), 1475 M_TEMP, M_NOWAIT); 1476 1477 if (!rs->rules[rs_cnt].inactive.ptr_array) 1478 return (ENOMEM); 1479 } 1480 1481 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, 1482 entries) { 1483 pf_hash_rule_rolling(&ctx, rule); 1484 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; 1485 } 1486 } 1487 1488 MD5Final(digest, &ctx); 1489 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum)); 1490 return (0); 1491 } 1492 1493 static int 1494 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr) 1495 { 1496 int error = 0; 1497 1498 switch (addr->type) { 1499 case PF_ADDR_TABLE: 1500 addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname); 1501 if (addr->p.tbl == NULL) 1502 error = ENOMEM; 1503 break; 1504 default: 1505 error = EINVAL; 1506 } 1507 1508 return (error); 1509 } 1510 1511 static int 1512 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr, 1513 sa_family_t af) 1514 { 1515 int error = 0; 1516 1517 switch (addr->type) { 1518 case PF_ADDR_TABLE: 1519 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname); 1520 if (addr->p.tbl == NULL) 1521 error = ENOMEM; 1522 break; 1523 case PF_ADDR_DYNIFTL: 1524 error = pfi_dynaddr_setup(addr, af); 1525 break; 1526 } 1527 1528 return (error); 1529 } 1530 1531 static void 1532 pf_addr_copyout(struct pf_addr_wrap *addr) 1533 { 1534 1535 switch (addr->type) { 1536 case PF_ADDR_DYNIFTL: 1537 pfi_dynaddr_copyout(addr); 1538 break; 1539 case PF_ADDR_TABLE: 1540 pf_tbladdr_copyout(addr); 1541 break; 1542 } 1543 } 1544 1545 static void 1546 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out) 1547 { 1548 int secs = time_uptime, diff; 1549 1550 bzero(out, sizeof(struct pf_src_node)); 1551 1552 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr)); 1553 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr)); 1554 1555 if (in->rule.ptr != NULL) 1556 out->rule.nr = in->rule.ptr->nr; 1557 1558 for (int i = 0; i < 2; i++) { 1559 out->bytes[i] = counter_u64_fetch(in->bytes[i]); 1560 out->packets[i] = counter_u64_fetch(in->packets[i]); 1561 } 1562 1563 out->states = in->states; 1564 out->conn = in->conn; 1565 out->af = in->af; 1566 out->ruletype = in->ruletype; 1567 1568 out->creation = secs - in->creation; 1569 if (out->expire > secs) 1570 out->expire -= secs; 1571 else 1572 out->expire = 0; 1573 1574 /* Adjust the connection rate estimate. */ 1575 diff = secs - in->conn_rate.last; 1576 if (diff >= in->conn_rate.seconds) 1577 out->conn_rate.count = 0; 1578 else 1579 out->conn_rate.count -= 1580 in->conn_rate.count * diff / 1581 in->conn_rate.seconds; 1582 } 1583 1584 #ifdef ALTQ 1585 /* 1586 * Handle export of struct pf_kaltq to user binaries that may be using any 1587 * version of struct pf_altq. 1588 */ 1589 static int 1590 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) 1591 { 1592 u_int32_t version; 1593 1594 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1595 version = 0; 1596 else 1597 version = pa->version; 1598 1599 if (version > PFIOC_ALTQ_VERSION) 1600 return (EINVAL); 1601 1602 #define ASSIGN(x) exported_q->x = q->x 1603 #define COPY(x) \ 1604 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) 1605 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) 1606 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) 1607 1608 switch (version) { 1609 case 0: { 1610 struct pf_altq_v0 *exported_q = 1611 &((struct pfioc_altq_v0 *)pa)->altq; 1612 1613 COPY(ifname); 1614 1615 ASSIGN(scheduler); 1616 ASSIGN(tbrsize); 1617 exported_q->tbrsize = SATU16(q->tbrsize); 1618 exported_q->ifbandwidth = SATU32(q->ifbandwidth); 1619 1620 COPY(qname); 1621 COPY(parent); 1622 ASSIGN(parent_qid); 1623 exported_q->bandwidth = SATU32(q->bandwidth); 1624 ASSIGN(priority); 1625 ASSIGN(local_flags); 1626 1627 ASSIGN(qlimit); 1628 ASSIGN(flags); 1629 1630 if (q->scheduler == ALTQT_HFSC) { 1631 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x 1632 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ 1633 SATU32(q->pq_u.hfsc_opts.x) 1634 1635 ASSIGN_OPT_SATU32(rtsc_m1); 1636 ASSIGN_OPT(rtsc_d); 1637 ASSIGN_OPT_SATU32(rtsc_m2); 1638 1639 ASSIGN_OPT_SATU32(lssc_m1); 1640 ASSIGN_OPT(lssc_d); 1641 ASSIGN_OPT_SATU32(lssc_m2); 1642 1643 ASSIGN_OPT_SATU32(ulsc_m1); 1644 ASSIGN_OPT(ulsc_d); 1645 ASSIGN_OPT_SATU32(ulsc_m2); 1646 1647 ASSIGN_OPT(flags); 1648 1649 #undef ASSIGN_OPT 1650 #undef ASSIGN_OPT_SATU32 1651 } else 1652 COPY(pq_u); 1653 1654 ASSIGN(qid); 1655 break; 1656 } 1657 case 1: { 1658 struct pf_altq_v1 *exported_q = 1659 &((struct pfioc_altq_v1 *)pa)->altq; 1660 1661 COPY(ifname); 1662 1663 ASSIGN(scheduler); 1664 ASSIGN(tbrsize); 1665 ASSIGN(ifbandwidth); 1666 1667 COPY(qname); 1668 COPY(parent); 1669 ASSIGN(parent_qid); 1670 ASSIGN(bandwidth); 1671 ASSIGN(priority); 1672 ASSIGN(local_flags); 1673 1674 ASSIGN(qlimit); 1675 ASSIGN(flags); 1676 COPY(pq_u); 1677 1678 ASSIGN(qid); 1679 break; 1680 } 1681 default: 1682 panic("%s: unhandled struct pfioc_altq version", __func__); 1683 break; 1684 } 1685 1686 #undef ASSIGN 1687 #undef COPY 1688 #undef SATU16 1689 #undef SATU32 1690 1691 return (0); 1692 } 1693 1694 /* 1695 * Handle import to struct pf_kaltq of struct pf_altq from user binaries 1696 * that may be using any version of it. 1697 */ 1698 static int 1699 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1700 { 1701 u_int32_t version; 1702 1703 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1704 version = 0; 1705 else 1706 version = pa->version; 1707 1708 if (version > PFIOC_ALTQ_VERSION) 1709 return (EINVAL); 1710 1711 #define ASSIGN(x) q->x = imported_q->x 1712 #define COPY(x) \ 1713 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1714 1715 switch (version) { 1716 case 0: { 1717 struct pf_altq_v0 *imported_q = 1718 &((struct pfioc_altq_v0 *)pa)->altq; 1719 1720 COPY(ifname); 1721 1722 ASSIGN(scheduler); 1723 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1724 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1725 1726 COPY(qname); 1727 COPY(parent); 1728 ASSIGN(parent_qid); 1729 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1730 ASSIGN(priority); 1731 ASSIGN(local_flags); 1732 1733 ASSIGN(qlimit); 1734 ASSIGN(flags); 1735 1736 if (imported_q->scheduler == ALTQT_HFSC) { 1737 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1738 1739 /* 1740 * The m1 and m2 parameters are being copied from 1741 * 32-bit to 64-bit. 1742 */ 1743 ASSIGN_OPT(rtsc_m1); 1744 ASSIGN_OPT(rtsc_d); 1745 ASSIGN_OPT(rtsc_m2); 1746 1747 ASSIGN_OPT(lssc_m1); 1748 ASSIGN_OPT(lssc_d); 1749 ASSIGN_OPT(lssc_m2); 1750 1751 ASSIGN_OPT(ulsc_m1); 1752 ASSIGN_OPT(ulsc_d); 1753 ASSIGN_OPT(ulsc_m2); 1754 1755 ASSIGN_OPT(flags); 1756 1757 #undef ASSIGN_OPT 1758 } else 1759 COPY(pq_u); 1760 1761 ASSIGN(qid); 1762 break; 1763 } 1764 case 1: { 1765 struct pf_altq_v1 *imported_q = 1766 &((struct pfioc_altq_v1 *)pa)->altq; 1767 1768 COPY(ifname); 1769 1770 ASSIGN(scheduler); 1771 ASSIGN(tbrsize); 1772 ASSIGN(ifbandwidth); 1773 1774 COPY(qname); 1775 COPY(parent); 1776 ASSIGN(parent_qid); 1777 ASSIGN(bandwidth); 1778 ASSIGN(priority); 1779 ASSIGN(local_flags); 1780 1781 ASSIGN(qlimit); 1782 ASSIGN(flags); 1783 COPY(pq_u); 1784 1785 ASSIGN(qid); 1786 break; 1787 } 1788 default: 1789 panic("%s: unhandled struct pfioc_altq version", __func__); 1790 break; 1791 } 1792 1793 #undef ASSIGN 1794 #undef COPY 1795 1796 return (0); 1797 } 1798 1799 static struct pf_altq * 1800 pf_altq_get_nth_active(u_int32_t n) 1801 { 1802 struct pf_altq *altq; 1803 u_int32_t nr; 1804 1805 nr = 0; 1806 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1807 if (nr == n) 1808 return (altq); 1809 nr++; 1810 } 1811 1812 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1813 if (nr == n) 1814 return (altq); 1815 nr++; 1816 } 1817 1818 return (NULL); 1819 } 1820 #endif /* ALTQ */ 1821 1822 struct pf_krule * 1823 pf_krule_alloc(void) 1824 { 1825 struct pf_krule *rule; 1826 1827 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1828 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1829 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 1830 M_WAITOK | M_ZERO); 1831 return (rule); 1832 } 1833 1834 void 1835 pf_krule_free(struct pf_krule *rule) 1836 { 1837 #ifdef PF_WANT_32_TO_64_COUNTER 1838 bool wowned; 1839 #endif 1840 1841 if (rule == NULL) 1842 return; 1843 1844 #ifdef PF_WANT_32_TO_64_COUNTER 1845 if (rule->allrulelinked) { 1846 wowned = PF_RULES_WOWNED(); 1847 if (!wowned) 1848 PF_RULES_WLOCK(); 1849 LIST_REMOVE(rule, allrulelist); 1850 V_pf_allrulecount--; 1851 if (!wowned) 1852 PF_RULES_WUNLOCK(); 1853 } 1854 #endif 1855 1856 pf_counter_u64_deinit(&rule->evaluations); 1857 for (int i = 0; i < 2; i++) { 1858 pf_counter_u64_deinit(&rule->packets[i]); 1859 pf_counter_u64_deinit(&rule->bytes[i]); 1860 } 1861 counter_u64_free(rule->states_cur); 1862 counter_u64_free(rule->states_tot); 1863 counter_u64_free(rule->src_nodes); 1864 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); 1865 1866 mtx_destroy(&rule->rpool.mtx); 1867 free(rule, M_PFRULE); 1868 } 1869 1870 static void 1871 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool, 1872 struct pf_pooladdr *pool) 1873 { 1874 1875 bzero(pool, sizeof(*pool)); 1876 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); 1877 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); 1878 } 1879 1880 static int 1881 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool, 1882 struct pf_kpooladdr *kpool) 1883 { 1884 int ret; 1885 1886 bzero(kpool, sizeof(*kpool)); 1887 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); 1888 ret = pf_user_strcpy(kpool->ifname, pool->ifname, 1889 sizeof(kpool->ifname)); 1890 return (ret); 1891 } 1892 1893 static void 1894 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) 1895 { 1896 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); 1897 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); 1898 1899 bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); 1900 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); 1901 1902 kpool->tblidx = pool->tblidx; 1903 kpool->proxy_port[0] = pool->proxy_port[0]; 1904 kpool->proxy_port[1] = pool->proxy_port[1]; 1905 kpool->opts = pool->opts; 1906 } 1907 1908 static int 1909 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule) 1910 { 1911 int ret; 1912 1913 #ifndef INET 1914 if (rule->af == AF_INET) { 1915 return (EAFNOSUPPORT); 1916 } 1917 #endif /* INET */ 1918 #ifndef INET6 1919 if (rule->af == AF_INET6) { 1920 return (EAFNOSUPPORT); 1921 } 1922 #endif /* INET6 */ 1923 1924 ret = pf_check_rule_addr(&rule->src); 1925 if (ret != 0) 1926 return (ret); 1927 ret = pf_check_rule_addr(&rule->dst); 1928 if (ret != 0) 1929 return (ret); 1930 1931 bcopy(&rule->src, &krule->src, sizeof(rule->src)); 1932 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); 1933 1934 ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label)); 1935 if (ret != 0) 1936 return (ret); 1937 ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); 1938 if (ret != 0) 1939 return (ret); 1940 ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); 1941 if (ret != 0) 1942 return (ret); 1943 ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); 1944 if (ret != 0) 1945 return (ret); 1946 ret = pf_user_strcpy(krule->tagname, rule->tagname, 1947 sizeof(rule->tagname)); 1948 if (ret != 0) 1949 return (ret); 1950 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 1951 sizeof(rule->match_tagname)); 1952 if (ret != 0) 1953 return (ret); 1954 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 1955 sizeof(rule->overload_tblname)); 1956 if (ret != 0) 1957 return (ret); 1958 1959 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 1960 1961 /* Don't allow userspace to set evaluations, packets or bytes. */ 1962 /* kif, anchor, overload_tbl are not copied over. */ 1963 1964 krule->os_fingerprint = rule->os_fingerprint; 1965 1966 krule->rtableid = rule->rtableid; 1967 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 1968 krule->max_states = rule->max_states; 1969 krule->max_src_nodes = rule->max_src_nodes; 1970 krule->max_src_states = rule->max_src_states; 1971 krule->max_src_conn = rule->max_src_conn; 1972 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 1973 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 1974 krule->qid = rule->qid; 1975 krule->pqid = rule->pqid; 1976 krule->nr = rule->nr; 1977 krule->prob = rule->prob; 1978 krule->cuid = rule->cuid; 1979 krule->cpid = rule->cpid; 1980 1981 krule->return_icmp = rule->return_icmp; 1982 krule->return_icmp6 = rule->return_icmp6; 1983 krule->max_mss = rule->max_mss; 1984 krule->tag = rule->tag; 1985 krule->match_tag = rule->match_tag; 1986 krule->scrub_flags = rule->scrub_flags; 1987 1988 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 1989 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 1990 1991 krule->rule_flag = rule->rule_flag; 1992 krule->action = rule->action; 1993 krule->direction = rule->direction; 1994 krule->log = rule->log; 1995 krule->logif = rule->logif; 1996 krule->quick = rule->quick; 1997 krule->ifnot = rule->ifnot; 1998 krule->match_tag_not = rule->match_tag_not; 1999 krule->natpass = rule->natpass; 2000 2001 krule->keep_state = rule->keep_state; 2002 krule->af = rule->af; 2003 krule->proto = rule->proto; 2004 krule->type = rule->type; 2005 krule->code = rule->code; 2006 krule->flags = rule->flags; 2007 krule->flagset = rule->flagset; 2008 krule->min_ttl = rule->min_ttl; 2009 krule->allow_opts = rule->allow_opts; 2010 krule->rt = rule->rt; 2011 krule->return_ttl = rule->return_ttl; 2012 krule->tos = rule->tos; 2013 krule->set_tos = rule->set_tos; 2014 2015 krule->flush = rule->flush; 2016 krule->prio = rule->prio; 2017 krule->set_prio[0] = rule->set_prio[0]; 2018 krule->set_prio[1] = rule->set_prio[1]; 2019 2020 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2021 2022 return (0); 2023 } 2024 2025 int 2026 pf_ioctl_getrules(struct pfioc_rule *pr) 2027 { 2028 struct pf_kruleset *ruleset; 2029 struct pf_krule *tail; 2030 int rs_num; 2031 2032 PF_RULES_WLOCK(); 2033 ruleset = pf_find_kruleset(pr->anchor); 2034 if (ruleset == NULL) { 2035 PF_RULES_WUNLOCK(); 2036 return (EINVAL); 2037 } 2038 rs_num = pf_get_ruleset_number(pr->rule.action); 2039 if (rs_num >= PF_RULESET_MAX) { 2040 PF_RULES_WUNLOCK(); 2041 return (EINVAL); 2042 } 2043 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 2044 pf_krulequeue); 2045 if (tail) 2046 pr->nr = tail->nr + 1; 2047 else 2048 pr->nr = 0; 2049 pr->ticket = ruleset->rules[rs_num].active.ticket; 2050 PF_RULES_WUNLOCK(); 2051 2052 return (0); 2053 } 2054 2055 int 2056 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2057 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 2058 uid_t uid, pid_t pid) 2059 { 2060 struct pf_kruleset *ruleset; 2061 struct pf_krule *tail; 2062 struct pf_kpooladdr *pa; 2063 struct pfi_kkif *kif = NULL; 2064 int rs_num; 2065 int error = 0; 2066 2067 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2068 error = EINVAL; 2069 goto errout_unlocked; 2070 } 2071 2072 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2073 2074 if (rule->ifname[0]) 2075 kif = pf_kkif_create(M_WAITOK); 2076 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2077 for (int i = 0; i < 2; i++) { 2078 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2079 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2080 } 2081 rule->states_cur = counter_u64_alloc(M_WAITOK); 2082 rule->states_tot = counter_u64_alloc(M_WAITOK); 2083 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2084 rule->cuid = uid; 2085 rule->cpid = pid; 2086 TAILQ_INIT(&rule->rpool.list); 2087 2088 PF_CONFIG_LOCK(); 2089 PF_RULES_WLOCK(); 2090 #ifdef PF_WANT_32_TO_64_COUNTER 2091 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2092 MPASS(!rule->allrulelinked); 2093 rule->allrulelinked = true; 2094 V_pf_allrulecount++; 2095 #endif 2096 ruleset = pf_find_kruleset(anchor); 2097 if (ruleset == NULL) 2098 ERROUT(EINVAL); 2099 rs_num = pf_get_ruleset_number(rule->action); 2100 if (rs_num >= PF_RULESET_MAX) 2101 ERROUT(EINVAL); 2102 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2103 DPFPRINTF(PF_DEBUG_MISC, 2104 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2105 ruleset->rules[rs_num].inactive.ticket)); 2106 ERROUT(EBUSY); 2107 } 2108 if (pool_ticket != V_ticket_pabuf) { 2109 DPFPRINTF(PF_DEBUG_MISC, 2110 ("pool_ticket: %d != %d\n", pool_ticket, 2111 V_ticket_pabuf)); 2112 ERROUT(EBUSY); 2113 } 2114 /* 2115 * XXXMJG hack: there is no mechanism to ensure they started the 2116 * transaction. Ticket checked above may happen to match by accident, 2117 * even if nobody called DIOCXBEGIN, let alone this process. 2118 * Partially work around it by checking if the RB tree got allocated, 2119 * see pf_begin_rules. 2120 */ 2121 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2122 ERROUT(EINVAL); 2123 } 2124 2125 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2126 pf_krulequeue); 2127 if (tail) 2128 rule->nr = tail->nr + 1; 2129 else 2130 rule->nr = 0; 2131 if (rule->ifname[0]) { 2132 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2133 kif = NULL; 2134 pfi_kkif_ref(rule->kif); 2135 } else 2136 rule->kif = NULL; 2137 2138 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2139 error = EBUSY; 2140 2141 #ifdef ALTQ 2142 /* set queue IDs */ 2143 if (rule->qname[0] != 0) { 2144 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2145 error = EBUSY; 2146 else if (rule->pqname[0] != 0) { 2147 if ((rule->pqid = 2148 pf_qname2qid(rule->pqname)) == 0) 2149 error = EBUSY; 2150 } else 2151 rule->pqid = rule->qid; 2152 } 2153 #endif 2154 if (rule->tagname[0]) 2155 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2156 error = EBUSY; 2157 if (rule->match_tagname[0]) 2158 if ((rule->match_tag = 2159 pf_tagname2tag(rule->match_tagname)) == 0) 2160 error = EBUSY; 2161 if (rule->rt && !rule->direction) 2162 error = EINVAL; 2163 if (!rule->log) 2164 rule->logif = 0; 2165 if (rule->logif >= PFLOGIFS_MAX) 2166 error = EINVAL; 2167 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2168 error = ENOMEM; 2169 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2170 error = ENOMEM; 2171 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2172 error = EINVAL; 2173 if (rule->scrub_flags & PFSTATE_SETPRIO && 2174 (rule->set_prio[0] > PF_PRIO_MAX || 2175 rule->set_prio[1] > PF_PRIO_MAX)) 2176 error = EINVAL; 2177 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2178 if (pa->addr.type == PF_ADDR_TABLE) { 2179 pa->addr.p.tbl = pfr_attach_table(ruleset, 2180 pa->addr.v.tblname); 2181 if (pa->addr.p.tbl == NULL) 2182 error = ENOMEM; 2183 } 2184 2185 rule->overload_tbl = NULL; 2186 if (rule->overload_tblname[0]) { 2187 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2188 rule->overload_tblname)) == NULL) 2189 error = EINVAL; 2190 else 2191 rule->overload_tbl->pfrkt_flags |= 2192 PFR_TFLAG_ACTIVE; 2193 } 2194 2195 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2196 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2197 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2198 (rule->rt > PF_NOPFROUTE)) && 2199 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2200 error = EINVAL; 2201 2202 if (error) { 2203 pf_free_rule(rule); 2204 rule = NULL; 2205 ERROUT(error); 2206 } 2207 2208 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2209 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2210 rule, entries); 2211 ruleset->rules[rs_num].inactive.rcount++; 2212 2213 PF_RULES_WUNLOCK(); 2214 pf_hash_rule(rule); 2215 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2216 PF_RULES_WLOCK(); 2217 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2218 ruleset->rules[rs_num].inactive.rcount--; 2219 pf_free_rule(rule); 2220 rule = NULL; 2221 ERROUT(EEXIST); 2222 } 2223 PF_CONFIG_UNLOCK(); 2224 2225 return (0); 2226 2227 #undef ERROUT 2228 errout: 2229 PF_RULES_WUNLOCK(); 2230 PF_CONFIG_UNLOCK(); 2231 errout_unlocked: 2232 pf_kkif_free(kif); 2233 pf_krule_free(rule); 2234 return (error); 2235 } 2236 2237 static bool 2238 pf_label_match(const struct pf_krule *rule, const char *label) 2239 { 2240 int i = 0; 2241 2242 while (*rule->label[i]) { 2243 if (strcmp(rule->label[i], label) == 0) 2244 return (true); 2245 i++; 2246 } 2247 2248 return (false); 2249 } 2250 2251 static unsigned int 2252 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2253 { 2254 struct pf_kstate *s; 2255 int more = 0; 2256 2257 s = pf_find_state_all(key, dir, &more); 2258 if (s == NULL) 2259 return (0); 2260 2261 if (more) { 2262 PF_STATE_UNLOCK(s); 2263 return (0); 2264 } 2265 2266 pf_unlink_state(s); 2267 return (1); 2268 } 2269 2270 static int 2271 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2272 { 2273 struct pf_kstate *s; 2274 struct pf_state_key *sk; 2275 struct pf_addr *srcaddr, *dstaddr; 2276 struct pf_state_key_cmp match_key; 2277 int idx, killed = 0; 2278 unsigned int dir; 2279 u_int16_t srcport, dstport; 2280 struct pfi_kkif *kif; 2281 2282 relock_DIOCKILLSTATES: 2283 PF_HASHROW_LOCK(ih); 2284 LIST_FOREACH(s, &ih->states, entry) { 2285 /* For floating states look at the original kif. */ 2286 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2287 2288 sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE]; 2289 if (s->direction == PF_OUT) { 2290 srcaddr = &sk->addr[1]; 2291 dstaddr = &sk->addr[0]; 2292 srcport = sk->port[1]; 2293 dstport = sk->port[0]; 2294 } else { 2295 srcaddr = &sk->addr[0]; 2296 dstaddr = &sk->addr[1]; 2297 srcport = sk->port[0]; 2298 dstport = sk->port[1]; 2299 } 2300 2301 if (psk->psk_af && sk->af != psk->psk_af) 2302 continue; 2303 2304 if (psk->psk_proto && psk->psk_proto != sk->proto) 2305 continue; 2306 2307 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2308 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2309 continue; 2310 2311 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2312 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2313 continue; 2314 2315 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2316 &psk->psk_rt_addr.addr.v.a.addr, 2317 &psk->psk_rt_addr.addr.v.a.mask, 2318 &s->rt_addr, sk->af)) 2319 continue; 2320 2321 if (psk->psk_src.port_op != 0 && 2322 ! pf_match_port(psk->psk_src.port_op, 2323 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2324 continue; 2325 2326 if (psk->psk_dst.port_op != 0 && 2327 ! pf_match_port(psk->psk_dst.port_op, 2328 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2329 continue; 2330 2331 if (psk->psk_label[0] && 2332 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2333 continue; 2334 2335 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2336 kif->pfik_name)) 2337 continue; 2338 2339 if (psk->psk_kill_match) { 2340 /* Create the key to find matching states, with lock 2341 * held. */ 2342 2343 bzero(&match_key, sizeof(match_key)); 2344 2345 if (s->direction == PF_OUT) { 2346 dir = PF_IN; 2347 idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK; 2348 } else { 2349 dir = PF_OUT; 2350 idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE; 2351 } 2352 2353 match_key.af = s->key[idx]->af; 2354 match_key.proto = s->key[idx]->proto; 2355 PF_ACPY(&match_key.addr[0], 2356 &s->key[idx]->addr[1], match_key.af); 2357 match_key.port[0] = s->key[idx]->port[1]; 2358 PF_ACPY(&match_key.addr[1], 2359 &s->key[idx]->addr[0], match_key.af); 2360 match_key.port[1] = s->key[idx]->port[0]; 2361 } 2362 2363 pf_unlink_state(s); 2364 killed++; 2365 2366 if (psk->psk_kill_match) 2367 killed += pf_kill_matching_state(&match_key, dir); 2368 2369 goto relock_DIOCKILLSTATES; 2370 } 2371 PF_HASHROW_UNLOCK(ih); 2372 2373 return (killed); 2374 } 2375 2376 int 2377 pf_start(void) 2378 { 2379 int error = 0; 2380 2381 sx_xlock(&V_pf_ioctl_lock); 2382 if (V_pf_status.running) 2383 error = EEXIST; 2384 else { 2385 hook_pf(); 2386 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2387 hook_pf_eth(); 2388 V_pf_status.running = 1; 2389 V_pf_status.since = time_second; 2390 new_unrhdr64(&V_pf_stateid, time_second); 2391 2392 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2393 } 2394 sx_xunlock(&V_pf_ioctl_lock); 2395 2396 return (error); 2397 } 2398 2399 int 2400 pf_stop(void) 2401 { 2402 int error = 0; 2403 2404 sx_xlock(&V_pf_ioctl_lock); 2405 if (!V_pf_status.running) 2406 error = ENOENT; 2407 else { 2408 V_pf_status.running = 0; 2409 dehook_pf(); 2410 dehook_pf_eth(); 2411 V_pf_status.since = time_second; 2412 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2413 } 2414 sx_xunlock(&V_pf_ioctl_lock); 2415 2416 return (error); 2417 } 2418 2419 static int 2420 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2421 { 2422 int error = 0; 2423 PF_RULES_RLOCK_TRACKER; 2424 2425 #define ERROUT_IOCTL(target, x) \ 2426 do { \ 2427 error = (x); \ 2428 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2429 goto target; \ 2430 } while (0) 2431 2432 2433 /* XXX keep in sync with switch() below */ 2434 if (securelevel_gt(td->td_ucred, 2)) 2435 switch (cmd) { 2436 case DIOCGETRULES: 2437 case DIOCGETRULENV: 2438 case DIOCGETADDRS: 2439 case DIOCGETADDR: 2440 case DIOCGETSTATE: 2441 case DIOCGETSTATENV: 2442 case DIOCSETSTATUSIF: 2443 case DIOCGETSTATUSNV: 2444 case DIOCCLRSTATUS: 2445 case DIOCNATLOOK: 2446 case DIOCSETDEBUG: 2447 #ifdef COMPAT_FREEBSD14 2448 case DIOCGETSTATES: 2449 case DIOCGETSTATESV2: 2450 #endif 2451 case DIOCGETTIMEOUT: 2452 case DIOCCLRRULECTRS: 2453 case DIOCGETLIMIT: 2454 case DIOCGETALTQSV0: 2455 case DIOCGETALTQSV1: 2456 case DIOCGETALTQV0: 2457 case DIOCGETALTQV1: 2458 case DIOCGETQSTATSV0: 2459 case DIOCGETQSTATSV1: 2460 case DIOCGETRULESETS: 2461 case DIOCGETRULESET: 2462 case DIOCRGETTABLES: 2463 case DIOCRGETTSTATS: 2464 case DIOCRCLRTSTATS: 2465 case DIOCRCLRADDRS: 2466 case DIOCRADDADDRS: 2467 case DIOCRDELADDRS: 2468 case DIOCRSETADDRS: 2469 case DIOCRGETADDRS: 2470 case DIOCRGETASTATS: 2471 case DIOCRCLRASTATS: 2472 case DIOCRTSTADDRS: 2473 case DIOCOSFPGET: 2474 case DIOCGETSRCNODES: 2475 case DIOCCLRSRCNODES: 2476 case DIOCGETSYNCOOKIES: 2477 case DIOCIGETIFACES: 2478 case DIOCGIFSPEEDV0: 2479 case DIOCGIFSPEEDV1: 2480 case DIOCSETIFFLAG: 2481 case DIOCCLRIFFLAG: 2482 case DIOCGETETHRULES: 2483 case DIOCGETETHRULE: 2484 case DIOCGETETHRULESETS: 2485 case DIOCGETETHRULESET: 2486 break; 2487 case DIOCRCLRTABLES: 2488 case DIOCRADDTABLES: 2489 case DIOCRDELTABLES: 2490 case DIOCRSETTFLAGS: 2491 if (((struct pfioc_table *)addr)->pfrio_flags & 2492 PFR_FLAG_DUMMY) 2493 break; /* dummy operation ok */ 2494 return (EPERM); 2495 default: 2496 return (EPERM); 2497 } 2498 2499 if (!(flags & FWRITE)) 2500 switch (cmd) { 2501 case DIOCGETRULES: 2502 case DIOCGETADDRS: 2503 case DIOCGETADDR: 2504 case DIOCGETSTATE: 2505 case DIOCGETSTATENV: 2506 case DIOCGETSTATUSNV: 2507 #ifdef COMPAT_FREEBSD14 2508 case DIOCGETSTATES: 2509 case DIOCGETSTATESV2: 2510 #endif 2511 case DIOCGETTIMEOUT: 2512 case DIOCGETLIMIT: 2513 case DIOCGETALTQSV0: 2514 case DIOCGETALTQSV1: 2515 case DIOCGETALTQV0: 2516 case DIOCGETALTQV1: 2517 case DIOCGETQSTATSV0: 2518 case DIOCGETQSTATSV1: 2519 case DIOCGETRULESETS: 2520 case DIOCGETRULESET: 2521 case DIOCNATLOOK: 2522 case DIOCRGETTABLES: 2523 case DIOCRGETTSTATS: 2524 case DIOCRGETADDRS: 2525 case DIOCRGETASTATS: 2526 case DIOCRTSTADDRS: 2527 case DIOCOSFPGET: 2528 case DIOCGETSRCNODES: 2529 case DIOCGETSYNCOOKIES: 2530 case DIOCIGETIFACES: 2531 case DIOCGIFSPEEDV1: 2532 case DIOCGIFSPEEDV0: 2533 case DIOCGETRULENV: 2534 case DIOCGETETHRULES: 2535 case DIOCGETETHRULE: 2536 case DIOCGETETHRULESETS: 2537 case DIOCGETETHRULESET: 2538 break; 2539 case DIOCRCLRTABLES: 2540 case DIOCRADDTABLES: 2541 case DIOCRDELTABLES: 2542 case DIOCRCLRTSTATS: 2543 case DIOCRCLRADDRS: 2544 case DIOCRADDADDRS: 2545 case DIOCRDELADDRS: 2546 case DIOCRSETADDRS: 2547 case DIOCRSETTFLAGS: 2548 if (((struct pfioc_table *)addr)->pfrio_flags & 2549 PFR_FLAG_DUMMY) { 2550 flags |= FWRITE; /* need write lock for dummy */ 2551 break; /* dummy operation ok */ 2552 } 2553 return (EACCES); 2554 default: 2555 return (EACCES); 2556 } 2557 2558 CURVNET_SET(TD_TO_VNET(td)); 2559 2560 switch (cmd) { 2561 #ifdef COMPAT_FREEBSD14 2562 case DIOCSTART: 2563 error = pf_start(); 2564 break; 2565 2566 case DIOCSTOP: 2567 error = pf_stop(); 2568 break; 2569 #endif 2570 2571 case DIOCGETETHRULES: { 2572 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2573 nvlist_t *nvl; 2574 void *packed; 2575 struct pf_keth_rule *tail; 2576 struct pf_keth_ruleset *rs; 2577 u_int32_t ticket, nr; 2578 const char *anchor = ""; 2579 2580 nvl = NULL; 2581 packed = NULL; 2582 2583 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2584 2585 if (nv->len > pf_ioctl_maxcount) 2586 ERROUT(ENOMEM); 2587 2588 /* Copy the request in */ 2589 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2590 if (packed == NULL) 2591 ERROUT(ENOMEM); 2592 2593 error = copyin(nv->data, packed, nv->len); 2594 if (error) 2595 ERROUT(error); 2596 2597 nvl = nvlist_unpack(packed, nv->len, 0); 2598 if (nvl == NULL) 2599 ERROUT(EBADMSG); 2600 2601 if (! nvlist_exists_string(nvl, "anchor")) 2602 ERROUT(EBADMSG); 2603 2604 anchor = nvlist_get_string(nvl, "anchor"); 2605 2606 rs = pf_find_keth_ruleset(anchor); 2607 2608 nvlist_destroy(nvl); 2609 nvl = NULL; 2610 free(packed, M_NVLIST); 2611 packed = NULL; 2612 2613 if (rs == NULL) 2614 ERROUT(ENOENT); 2615 2616 /* Reply */ 2617 nvl = nvlist_create(0); 2618 if (nvl == NULL) 2619 ERROUT(ENOMEM); 2620 2621 PF_RULES_RLOCK(); 2622 2623 ticket = rs->active.ticket; 2624 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2625 if (tail) 2626 nr = tail->nr + 1; 2627 else 2628 nr = 0; 2629 2630 PF_RULES_RUNLOCK(); 2631 2632 nvlist_add_number(nvl, "ticket", ticket); 2633 nvlist_add_number(nvl, "nr", nr); 2634 2635 packed = nvlist_pack(nvl, &nv->len); 2636 if (packed == NULL) 2637 ERROUT(ENOMEM); 2638 2639 if (nv->size == 0) 2640 ERROUT(0); 2641 else if (nv->size < nv->len) 2642 ERROUT(ENOSPC); 2643 2644 error = copyout(packed, nv->data, nv->len); 2645 2646 #undef ERROUT 2647 DIOCGETETHRULES_error: 2648 free(packed, M_NVLIST); 2649 nvlist_destroy(nvl); 2650 break; 2651 } 2652 2653 case DIOCGETETHRULE: { 2654 struct epoch_tracker et; 2655 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2656 nvlist_t *nvl = NULL; 2657 void *nvlpacked = NULL; 2658 struct pf_keth_rule *rule = NULL; 2659 struct pf_keth_ruleset *rs; 2660 u_int32_t ticket, nr; 2661 bool clear = false; 2662 const char *anchor; 2663 2664 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2665 2666 if (nv->len > pf_ioctl_maxcount) 2667 ERROUT(ENOMEM); 2668 2669 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2670 if (nvlpacked == NULL) 2671 ERROUT(ENOMEM); 2672 2673 error = copyin(nv->data, nvlpacked, nv->len); 2674 if (error) 2675 ERROUT(error); 2676 2677 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2678 if (nvl == NULL) 2679 ERROUT(EBADMSG); 2680 if (! nvlist_exists_number(nvl, "ticket")) 2681 ERROUT(EBADMSG); 2682 ticket = nvlist_get_number(nvl, "ticket"); 2683 if (! nvlist_exists_string(nvl, "anchor")) 2684 ERROUT(EBADMSG); 2685 anchor = nvlist_get_string(nvl, "anchor"); 2686 2687 if (nvlist_exists_bool(nvl, "clear")) 2688 clear = nvlist_get_bool(nvl, "clear"); 2689 2690 if (clear && !(flags & FWRITE)) 2691 ERROUT(EACCES); 2692 2693 if (! nvlist_exists_number(nvl, "nr")) 2694 ERROUT(EBADMSG); 2695 nr = nvlist_get_number(nvl, "nr"); 2696 2697 PF_RULES_RLOCK(); 2698 rs = pf_find_keth_ruleset(anchor); 2699 if (rs == NULL) { 2700 PF_RULES_RUNLOCK(); 2701 ERROUT(ENOENT); 2702 } 2703 if (ticket != rs->active.ticket) { 2704 PF_RULES_RUNLOCK(); 2705 ERROUT(EBUSY); 2706 } 2707 2708 nvlist_destroy(nvl); 2709 nvl = NULL; 2710 free(nvlpacked, M_NVLIST); 2711 nvlpacked = NULL; 2712 2713 rule = TAILQ_FIRST(rs->active.rules); 2714 while ((rule != NULL) && (rule->nr != nr)) 2715 rule = TAILQ_NEXT(rule, entries); 2716 if (rule == NULL) { 2717 PF_RULES_RUNLOCK(); 2718 ERROUT(ENOENT); 2719 } 2720 /* Make sure rule can't go away. */ 2721 NET_EPOCH_ENTER(et); 2722 PF_RULES_RUNLOCK(); 2723 nvl = pf_keth_rule_to_nveth_rule(rule); 2724 if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) 2725 ERROUT(EBUSY); 2726 NET_EPOCH_EXIT(et); 2727 if (nvl == NULL) 2728 ERROUT(ENOMEM); 2729 2730 nvlpacked = nvlist_pack(nvl, &nv->len); 2731 if (nvlpacked == NULL) 2732 ERROUT(ENOMEM); 2733 2734 if (nv->size == 0) 2735 ERROUT(0); 2736 else if (nv->size < nv->len) 2737 ERROUT(ENOSPC); 2738 2739 error = copyout(nvlpacked, nv->data, nv->len); 2740 if (error == 0 && clear) { 2741 counter_u64_zero(rule->evaluations); 2742 for (int i = 0; i < 2; i++) { 2743 counter_u64_zero(rule->packets[i]); 2744 counter_u64_zero(rule->bytes[i]); 2745 } 2746 } 2747 2748 #undef ERROUT 2749 DIOCGETETHRULE_error: 2750 free(nvlpacked, M_NVLIST); 2751 nvlist_destroy(nvl); 2752 break; 2753 } 2754 2755 case DIOCADDETHRULE: { 2756 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2757 nvlist_t *nvl = NULL; 2758 void *nvlpacked = NULL; 2759 struct pf_keth_rule *rule = NULL, *tail = NULL; 2760 struct pf_keth_ruleset *ruleset = NULL; 2761 struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL; 2762 const char *anchor = "", *anchor_call = ""; 2763 2764 #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) 2765 2766 if (nv->len > pf_ioctl_maxcount) 2767 ERROUT(ENOMEM); 2768 2769 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2770 if (nvlpacked == NULL) 2771 ERROUT(ENOMEM); 2772 2773 error = copyin(nv->data, nvlpacked, nv->len); 2774 if (error) 2775 ERROUT(error); 2776 2777 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2778 if (nvl == NULL) 2779 ERROUT(EBADMSG); 2780 2781 if (! nvlist_exists_number(nvl, "ticket")) 2782 ERROUT(EBADMSG); 2783 2784 if (nvlist_exists_string(nvl, "anchor")) 2785 anchor = nvlist_get_string(nvl, "anchor"); 2786 if (nvlist_exists_string(nvl, "anchor_call")) 2787 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2788 2789 ruleset = pf_find_keth_ruleset(anchor); 2790 if (ruleset == NULL) 2791 ERROUT(EINVAL); 2792 2793 if (nvlist_get_number(nvl, "ticket") != 2794 ruleset->inactive.ticket) { 2795 DPFPRINTF(PF_DEBUG_MISC, 2796 ("ticket: %d != %d\n", 2797 (u_int32_t)nvlist_get_number(nvl, "ticket"), 2798 ruleset->inactive.ticket)); 2799 ERROUT(EBUSY); 2800 } 2801 2802 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); 2803 if (rule == NULL) 2804 ERROUT(ENOMEM); 2805 rule->timestamp = NULL; 2806 2807 error = pf_nveth_rule_to_keth_rule(nvl, rule); 2808 if (error != 0) 2809 ERROUT(error); 2810 2811 if (rule->ifname[0]) 2812 kif = pf_kkif_create(M_WAITOK); 2813 if (rule->bridge_to_name[0]) 2814 bridge_to_kif = pf_kkif_create(M_WAITOK); 2815 rule->evaluations = counter_u64_alloc(M_WAITOK); 2816 for (int i = 0; i < 2; i++) { 2817 rule->packets[i] = counter_u64_alloc(M_WAITOK); 2818 rule->bytes[i] = counter_u64_alloc(M_WAITOK); 2819 } 2820 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 2821 M_WAITOK | M_ZERO); 2822 2823 PF_RULES_WLOCK(); 2824 2825 if (rule->ifname[0]) { 2826 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2827 pfi_kkif_ref(rule->kif); 2828 } else 2829 rule->kif = NULL; 2830 if (rule->bridge_to_name[0]) { 2831 rule->bridge_to = pfi_kkif_attach(bridge_to_kif, 2832 rule->bridge_to_name); 2833 pfi_kkif_ref(rule->bridge_to); 2834 } else 2835 rule->bridge_to = NULL; 2836 2837 #ifdef ALTQ 2838 /* set queue IDs */ 2839 if (rule->qname[0] != 0) { 2840 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2841 error = EBUSY; 2842 else 2843 rule->qid = rule->qid; 2844 } 2845 #endif 2846 if (rule->tagname[0]) 2847 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2848 error = EBUSY; 2849 if (rule->match_tagname[0]) 2850 if ((rule->match_tag = pf_tagname2tag( 2851 rule->match_tagname)) == 0) 2852 error = EBUSY; 2853 2854 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2855 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2856 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2857 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2858 2859 if (error) { 2860 pf_free_eth_rule(rule); 2861 PF_RULES_WUNLOCK(); 2862 ERROUT(error); 2863 } 2864 2865 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2866 pf_free_eth_rule(rule); 2867 PF_RULES_WUNLOCK(); 2868 ERROUT(EINVAL); 2869 } 2870 2871 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2872 if (tail) 2873 rule->nr = tail->nr + 1; 2874 else 2875 rule->nr = 0; 2876 2877 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2878 2879 PF_RULES_WUNLOCK(); 2880 2881 #undef ERROUT 2882 DIOCADDETHRULE_error: 2883 nvlist_destroy(nvl); 2884 free(nvlpacked, M_NVLIST); 2885 break; 2886 } 2887 2888 case DIOCGETETHRULESETS: { 2889 struct epoch_tracker et; 2890 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2891 nvlist_t *nvl = NULL; 2892 void *nvlpacked = NULL; 2893 struct pf_keth_ruleset *ruleset; 2894 struct pf_keth_anchor *anchor; 2895 int nr = 0; 2896 2897 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2898 2899 if (nv->len > pf_ioctl_maxcount) 2900 ERROUT(ENOMEM); 2901 2902 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2903 if (nvlpacked == NULL) 2904 ERROUT(ENOMEM); 2905 2906 error = copyin(nv->data, nvlpacked, nv->len); 2907 if (error) 2908 ERROUT(error); 2909 2910 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2911 if (nvl == NULL) 2912 ERROUT(EBADMSG); 2913 if (! nvlist_exists_string(nvl, "path")) 2914 ERROUT(EBADMSG); 2915 2916 NET_EPOCH_ENTER(et); 2917 2918 if ((ruleset = pf_find_keth_ruleset( 2919 nvlist_get_string(nvl, "path"))) == NULL) { 2920 NET_EPOCH_EXIT(et); 2921 ERROUT(ENOENT); 2922 } 2923 2924 if (ruleset->anchor == NULL) { 2925 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2926 if (anchor->parent == NULL) 2927 nr++; 2928 } else { 2929 RB_FOREACH(anchor, pf_keth_anchor_node, 2930 &ruleset->anchor->children) 2931 nr++; 2932 } 2933 2934 NET_EPOCH_EXIT(et); 2935 2936 nvlist_destroy(nvl); 2937 nvl = NULL; 2938 free(nvlpacked, M_NVLIST); 2939 nvlpacked = NULL; 2940 2941 nvl = nvlist_create(0); 2942 if (nvl == NULL) 2943 ERROUT(ENOMEM); 2944 2945 nvlist_add_number(nvl, "nr", nr); 2946 2947 nvlpacked = nvlist_pack(nvl, &nv->len); 2948 if (nvlpacked == NULL) 2949 ERROUT(ENOMEM); 2950 2951 if (nv->size == 0) 2952 ERROUT(0); 2953 else if (nv->size < nv->len) 2954 ERROUT(ENOSPC); 2955 2956 error = copyout(nvlpacked, nv->data, nv->len); 2957 2958 #undef ERROUT 2959 DIOCGETETHRULESETS_error: 2960 free(nvlpacked, M_NVLIST); 2961 nvlist_destroy(nvl); 2962 break; 2963 } 2964 2965 case DIOCGETETHRULESET: { 2966 struct epoch_tracker et; 2967 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2968 nvlist_t *nvl = NULL; 2969 void *nvlpacked = NULL; 2970 struct pf_keth_ruleset *ruleset; 2971 struct pf_keth_anchor *anchor; 2972 int nr = 0, req_nr = 0; 2973 bool found = false; 2974 2975 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 2976 2977 if (nv->len > pf_ioctl_maxcount) 2978 ERROUT(ENOMEM); 2979 2980 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2981 if (nvlpacked == NULL) 2982 ERROUT(ENOMEM); 2983 2984 error = copyin(nv->data, nvlpacked, nv->len); 2985 if (error) 2986 ERROUT(error); 2987 2988 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2989 if (nvl == NULL) 2990 ERROUT(EBADMSG); 2991 if (! nvlist_exists_string(nvl, "path")) 2992 ERROUT(EBADMSG); 2993 if (! nvlist_exists_number(nvl, "nr")) 2994 ERROUT(EBADMSG); 2995 2996 req_nr = nvlist_get_number(nvl, "nr"); 2997 2998 NET_EPOCH_ENTER(et); 2999 3000 if ((ruleset = pf_find_keth_ruleset( 3001 nvlist_get_string(nvl, "path"))) == NULL) { 3002 NET_EPOCH_EXIT(et); 3003 ERROUT(ENOENT); 3004 } 3005 3006 nvlist_destroy(nvl); 3007 nvl = NULL; 3008 free(nvlpacked, M_NVLIST); 3009 nvlpacked = NULL; 3010 3011 nvl = nvlist_create(0); 3012 if (nvl == NULL) { 3013 NET_EPOCH_EXIT(et); 3014 ERROUT(ENOMEM); 3015 } 3016 3017 if (ruleset->anchor == NULL) { 3018 RB_FOREACH(anchor, pf_keth_anchor_global, 3019 &V_pf_keth_anchors) { 3020 if (anchor->parent == NULL && nr++ == req_nr) { 3021 found = true; 3022 break; 3023 } 3024 } 3025 } else { 3026 RB_FOREACH(anchor, pf_keth_anchor_node, 3027 &ruleset->anchor->children) { 3028 if (nr++ == req_nr) { 3029 found = true; 3030 break; 3031 } 3032 } 3033 } 3034 3035 NET_EPOCH_EXIT(et); 3036 if (found) { 3037 nvlist_add_number(nvl, "nr", nr); 3038 nvlist_add_string(nvl, "name", anchor->name); 3039 if (ruleset->anchor) 3040 nvlist_add_string(nvl, "path", 3041 ruleset->anchor->path); 3042 else 3043 nvlist_add_string(nvl, "path", ""); 3044 } else { 3045 ERROUT(EBUSY); 3046 } 3047 3048 nvlpacked = nvlist_pack(nvl, &nv->len); 3049 if (nvlpacked == NULL) 3050 ERROUT(ENOMEM); 3051 3052 if (nv->size == 0) 3053 ERROUT(0); 3054 else if (nv->size < nv->len) 3055 ERROUT(ENOSPC); 3056 3057 error = copyout(nvlpacked, nv->data, nv->len); 3058 3059 #undef ERROUT 3060 DIOCGETETHRULESET_error: 3061 free(nvlpacked, M_NVLIST); 3062 nvlist_destroy(nvl); 3063 break; 3064 } 3065 3066 case DIOCADDRULENV: { 3067 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3068 nvlist_t *nvl = NULL; 3069 void *nvlpacked = NULL; 3070 struct pf_krule *rule = NULL; 3071 const char *anchor = "", *anchor_call = ""; 3072 uint32_t ticket = 0, pool_ticket = 0; 3073 3074 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3075 3076 if (nv->len > pf_ioctl_maxcount) 3077 ERROUT(ENOMEM); 3078 3079 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3080 error = copyin(nv->data, nvlpacked, nv->len); 3081 if (error) 3082 ERROUT(error); 3083 3084 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3085 if (nvl == NULL) 3086 ERROUT(EBADMSG); 3087 3088 if (! nvlist_exists_number(nvl, "ticket")) 3089 ERROUT(EINVAL); 3090 ticket = nvlist_get_number(nvl, "ticket"); 3091 3092 if (! nvlist_exists_number(nvl, "pool_ticket")) 3093 ERROUT(EINVAL); 3094 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3095 3096 if (! nvlist_exists_nvlist(nvl, "rule")) 3097 ERROUT(EINVAL); 3098 3099 rule = pf_krule_alloc(); 3100 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3101 rule); 3102 if (error) 3103 ERROUT(error); 3104 3105 if (nvlist_exists_string(nvl, "anchor")) 3106 anchor = nvlist_get_string(nvl, "anchor"); 3107 if (nvlist_exists_string(nvl, "anchor_call")) 3108 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3109 3110 if ((error = nvlist_error(nvl))) 3111 ERROUT(error); 3112 3113 /* Frees rule on error */ 3114 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3115 anchor_call, td->td_ucred->cr_ruid, 3116 td->td_proc ? td->td_proc->p_pid : 0); 3117 3118 nvlist_destroy(nvl); 3119 free(nvlpacked, M_NVLIST); 3120 break; 3121 #undef ERROUT 3122 DIOCADDRULENV_error: 3123 pf_krule_free(rule); 3124 nvlist_destroy(nvl); 3125 free(nvlpacked, M_NVLIST); 3126 3127 break; 3128 } 3129 case DIOCADDRULE: { 3130 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3131 struct pf_krule *rule; 3132 3133 rule = pf_krule_alloc(); 3134 error = pf_rule_to_krule(&pr->rule, rule); 3135 if (error != 0) { 3136 pf_krule_free(rule); 3137 break; 3138 } 3139 3140 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3141 3142 /* Frees rule on error */ 3143 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3144 pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid, 3145 td->td_proc ? td->td_proc->p_pid : 0); 3146 break; 3147 } 3148 3149 case DIOCGETRULES: { 3150 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3151 3152 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3153 3154 error = pf_ioctl_getrules(pr); 3155 3156 break; 3157 } 3158 3159 case DIOCGETRULENV: { 3160 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3161 nvlist_t *nvrule = NULL; 3162 nvlist_t *nvl = NULL; 3163 struct pf_kruleset *ruleset; 3164 struct pf_krule *rule; 3165 void *nvlpacked = NULL; 3166 int rs_num, nr; 3167 bool clear_counter = false; 3168 3169 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3170 3171 if (nv->len > pf_ioctl_maxcount) 3172 ERROUT(ENOMEM); 3173 3174 /* Copy the request in */ 3175 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3176 if (nvlpacked == NULL) 3177 ERROUT(ENOMEM); 3178 3179 error = copyin(nv->data, nvlpacked, nv->len); 3180 if (error) 3181 ERROUT(error); 3182 3183 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3184 if (nvl == NULL) 3185 ERROUT(EBADMSG); 3186 3187 if (! nvlist_exists_string(nvl, "anchor")) 3188 ERROUT(EBADMSG); 3189 if (! nvlist_exists_number(nvl, "ruleset")) 3190 ERROUT(EBADMSG); 3191 if (! nvlist_exists_number(nvl, "ticket")) 3192 ERROUT(EBADMSG); 3193 if (! nvlist_exists_number(nvl, "nr")) 3194 ERROUT(EBADMSG); 3195 3196 if (nvlist_exists_bool(nvl, "clear_counter")) 3197 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3198 3199 if (clear_counter && !(flags & FWRITE)) 3200 ERROUT(EACCES); 3201 3202 nr = nvlist_get_number(nvl, "nr"); 3203 3204 PF_RULES_WLOCK(); 3205 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3206 if (ruleset == NULL) { 3207 PF_RULES_WUNLOCK(); 3208 ERROUT(ENOENT); 3209 } 3210 3211 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3212 if (rs_num >= PF_RULESET_MAX) { 3213 PF_RULES_WUNLOCK(); 3214 ERROUT(EINVAL); 3215 } 3216 3217 if (nvlist_get_number(nvl, "ticket") != 3218 ruleset->rules[rs_num].active.ticket) { 3219 PF_RULES_WUNLOCK(); 3220 ERROUT(EBUSY); 3221 } 3222 3223 if ((error = nvlist_error(nvl))) { 3224 PF_RULES_WUNLOCK(); 3225 ERROUT(error); 3226 } 3227 3228 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3229 while ((rule != NULL) && (rule->nr != nr)) 3230 rule = TAILQ_NEXT(rule, entries); 3231 if (rule == NULL) { 3232 PF_RULES_WUNLOCK(); 3233 ERROUT(EBUSY); 3234 } 3235 3236 nvrule = pf_krule_to_nvrule(rule); 3237 3238 nvlist_destroy(nvl); 3239 nvl = nvlist_create(0); 3240 if (nvl == NULL) { 3241 PF_RULES_WUNLOCK(); 3242 ERROUT(ENOMEM); 3243 } 3244 nvlist_add_number(nvl, "nr", nr); 3245 nvlist_add_nvlist(nvl, "rule", nvrule); 3246 nvlist_destroy(nvrule); 3247 nvrule = NULL; 3248 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3249 PF_RULES_WUNLOCK(); 3250 ERROUT(EBUSY); 3251 } 3252 3253 free(nvlpacked, M_NVLIST); 3254 nvlpacked = nvlist_pack(nvl, &nv->len); 3255 if (nvlpacked == NULL) { 3256 PF_RULES_WUNLOCK(); 3257 ERROUT(ENOMEM); 3258 } 3259 3260 if (nv->size == 0) { 3261 PF_RULES_WUNLOCK(); 3262 ERROUT(0); 3263 } 3264 else if (nv->size < nv->len) { 3265 PF_RULES_WUNLOCK(); 3266 ERROUT(ENOSPC); 3267 } 3268 3269 if (clear_counter) { 3270 pf_counter_u64_zero(&rule->evaluations); 3271 for (int i = 0; i < 2; i++) { 3272 pf_counter_u64_zero(&rule->packets[i]); 3273 pf_counter_u64_zero(&rule->bytes[i]); 3274 } 3275 counter_u64_zero(rule->states_tot); 3276 } 3277 PF_RULES_WUNLOCK(); 3278 3279 error = copyout(nvlpacked, nv->data, nv->len); 3280 3281 #undef ERROUT 3282 DIOCGETRULENV_error: 3283 free(nvlpacked, M_NVLIST); 3284 nvlist_destroy(nvrule); 3285 nvlist_destroy(nvl); 3286 3287 break; 3288 } 3289 3290 case DIOCCHANGERULE: { 3291 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3292 struct pf_kruleset *ruleset; 3293 struct pf_krule *oldrule = NULL, *newrule = NULL; 3294 struct pfi_kkif *kif = NULL; 3295 struct pf_kpooladdr *pa; 3296 u_int32_t nr = 0; 3297 int rs_num; 3298 3299 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3300 3301 if (pcr->action < PF_CHANGE_ADD_HEAD || 3302 pcr->action > PF_CHANGE_GET_TICKET) { 3303 error = EINVAL; 3304 break; 3305 } 3306 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3307 error = EINVAL; 3308 break; 3309 } 3310 3311 if (pcr->action != PF_CHANGE_REMOVE) { 3312 newrule = pf_krule_alloc(); 3313 error = pf_rule_to_krule(&pcr->rule, newrule); 3314 if (error != 0) { 3315 pf_krule_free(newrule); 3316 break; 3317 } 3318 3319 if (newrule->ifname[0]) 3320 kif = pf_kkif_create(M_WAITOK); 3321 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3322 for (int i = 0; i < 2; i++) { 3323 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3324 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3325 } 3326 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3327 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3328 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3329 newrule->cuid = td->td_ucred->cr_ruid; 3330 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3331 TAILQ_INIT(&newrule->rpool.list); 3332 } 3333 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3334 3335 PF_CONFIG_LOCK(); 3336 PF_RULES_WLOCK(); 3337 #ifdef PF_WANT_32_TO_64_COUNTER 3338 if (newrule != NULL) { 3339 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3340 newrule->allrulelinked = true; 3341 V_pf_allrulecount++; 3342 } 3343 #endif 3344 3345 if (!(pcr->action == PF_CHANGE_REMOVE || 3346 pcr->action == PF_CHANGE_GET_TICKET) && 3347 pcr->pool_ticket != V_ticket_pabuf) 3348 ERROUT(EBUSY); 3349 3350 ruleset = pf_find_kruleset(pcr->anchor); 3351 if (ruleset == NULL) 3352 ERROUT(EINVAL); 3353 3354 rs_num = pf_get_ruleset_number(pcr->rule.action); 3355 if (rs_num >= PF_RULESET_MAX) 3356 ERROUT(EINVAL); 3357 3358 /* 3359 * XXXMJG: there is no guarantee that the ruleset was 3360 * created by the usual route of calling DIOCXBEGIN. 3361 * As a result it is possible the rule tree will not 3362 * be allocated yet. Hack around it by doing it here. 3363 * Note it is fine to let the tree persist in case of 3364 * error as it will be freed down the road on future 3365 * updates (if need be). 3366 */ 3367 if (ruleset->rules[rs_num].active.tree == NULL) { 3368 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3369 if (ruleset->rules[rs_num].active.tree == NULL) { 3370 ERROUT(ENOMEM); 3371 } 3372 } 3373 3374 if (pcr->action == PF_CHANGE_GET_TICKET) { 3375 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3376 ERROUT(0); 3377 } else if (pcr->ticket != 3378 ruleset->rules[rs_num].active.ticket) 3379 ERROUT(EINVAL); 3380 3381 if (pcr->action != PF_CHANGE_REMOVE) { 3382 if (newrule->ifname[0]) { 3383 newrule->kif = pfi_kkif_attach(kif, 3384 newrule->ifname); 3385 kif = NULL; 3386 pfi_kkif_ref(newrule->kif); 3387 } else 3388 newrule->kif = NULL; 3389 3390 if (newrule->rtableid > 0 && 3391 newrule->rtableid >= rt_numfibs) 3392 error = EBUSY; 3393 3394 #ifdef ALTQ 3395 /* set queue IDs */ 3396 if (newrule->qname[0] != 0) { 3397 if ((newrule->qid = 3398 pf_qname2qid(newrule->qname)) == 0) 3399 error = EBUSY; 3400 else if (newrule->pqname[0] != 0) { 3401 if ((newrule->pqid = 3402 pf_qname2qid(newrule->pqname)) == 0) 3403 error = EBUSY; 3404 } else 3405 newrule->pqid = newrule->qid; 3406 } 3407 #endif /* ALTQ */ 3408 if (newrule->tagname[0]) 3409 if ((newrule->tag = 3410 pf_tagname2tag(newrule->tagname)) == 0) 3411 error = EBUSY; 3412 if (newrule->match_tagname[0]) 3413 if ((newrule->match_tag = pf_tagname2tag( 3414 newrule->match_tagname)) == 0) 3415 error = EBUSY; 3416 if (newrule->rt && !newrule->direction) 3417 error = EINVAL; 3418 if (!newrule->log) 3419 newrule->logif = 0; 3420 if (newrule->logif >= PFLOGIFS_MAX) 3421 error = EINVAL; 3422 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3423 error = ENOMEM; 3424 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3425 error = ENOMEM; 3426 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3427 error = EINVAL; 3428 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3429 if (pa->addr.type == PF_ADDR_TABLE) { 3430 pa->addr.p.tbl = 3431 pfr_attach_table(ruleset, 3432 pa->addr.v.tblname); 3433 if (pa->addr.p.tbl == NULL) 3434 error = ENOMEM; 3435 } 3436 3437 newrule->overload_tbl = NULL; 3438 if (newrule->overload_tblname[0]) { 3439 if ((newrule->overload_tbl = pfr_attach_table( 3440 ruleset, newrule->overload_tblname)) == 3441 NULL) 3442 error = EINVAL; 3443 else 3444 newrule->overload_tbl->pfrkt_flags |= 3445 PFR_TFLAG_ACTIVE; 3446 } 3447 3448 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3449 if (((((newrule->action == PF_NAT) || 3450 (newrule->action == PF_RDR) || 3451 (newrule->action == PF_BINAT) || 3452 (newrule->rt > PF_NOPFROUTE)) && 3453 !newrule->anchor)) && 3454 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3455 error = EINVAL; 3456 3457 if (error) { 3458 pf_free_rule(newrule); 3459 PF_RULES_WUNLOCK(); 3460 PF_CONFIG_UNLOCK(); 3461 break; 3462 } 3463 3464 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3465 } 3466 pf_empty_kpool(&V_pf_pabuf); 3467 3468 if (pcr->action == PF_CHANGE_ADD_HEAD) 3469 oldrule = TAILQ_FIRST( 3470 ruleset->rules[rs_num].active.ptr); 3471 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3472 oldrule = TAILQ_LAST( 3473 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3474 else { 3475 oldrule = TAILQ_FIRST( 3476 ruleset->rules[rs_num].active.ptr); 3477 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3478 oldrule = TAILQ_NEXT(oldrule, entries); 3479 if (oldrule == NULL) { 3480 if (newrule != NULL) 3481 pf_free_rule(newrule); 3482 PF_RULES_WUNLOCK(); 3483 PF_CONFIG_UNLOCK(); 3484 error = EINVAL; 3485 break; 3486 } 3487 } 3488 3489 if (pcr->action == PF_CHANGE_REMOVE) { 3490 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3491 oldrule); 3492 RB_REMOVE(pf_krule_global, 3493 ruleset->rules[rs_num].active.tree, oldrule); 3494 ruleset->rules[rs_num].active.rcount--; 3495 } else { 3496 pf_hash_rule(newrule); 3497 if (RB_INSERT(pf_krule_global, 3498 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3499 pf_free_rule(newrule); 3500 PF_RULES_WUNLOCK(); 3501 PF_CONFIG_UNLOCK(); 3502 error = EEXIST; 3503 break; 3504 } 3505 3506 if (oldrule == NULL) 3507 TAILQ_INSERT_TAIL( 3508 ruleset->rules[rs_num].active.ptr, 3509 newrule, entries); 3510 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3511 pcr->action == PF_CHANGE_ADD_BEFORE) 3512 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3513 else 3514 TAILQ_INSERT_AFTER( 3515 ruleset->rules[rs_num].active.ptr, 3516 oldrule, newrule, entries); 3517 ruleset->rules[rs_num].active.rcount++; 3518 } 3519 3520 nr = 0; 3521 TAILQ_FOREACH(oldrule, 3522 ruleset->rules[rs_num].active.ptr, entries) 3523 oldrule->nr = nr++; 3524 3525 ruleset->rules[rs_num].active.ticket++; 3526 3527 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3528 pf_remove_if_empty_kruleset(ruleset); 3529 3530 PF_RULES_WUNLOCK(); 3531 PF_CONFIG_UNLOCK(); 3532 break; 3533 3534 #undef ERROUT 3535 DIOCCHANGERULE_error: 3536 PF_RULES_WUNLOCK(); 3537 PF_CONFIG_UNLOCK(); 3538 pf_krule_free(newrule); 3539 pf_kkif_free(kif); 3540 break; 3541 } 3542 3543 case DIOCCLRSTATESNV: { 3544 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3545 break; 3546 } 3547 3548 case DIOCKILLSTATESNV: { 3549 error = pf_killstates_nv((struct pfioc_nv *)addr); 3550 break; 3551 } 3552 3553 case DIOCADDSTATE: { 3554 struct pfioc_state *ps = (struct pfioc_state *)addr; 3555 struct pfsync_state_1301 *sp = &ps->state; 3556 3557 if (sp->timeout >= PFTM_MAX) { 3558 error = EINVAL; 3559 break; 3560 } 3561 if (V_pfsync_state_import_ptr != NULL) { 3562 PF_RULES_RLOCK(); 3563 error = V_pfsync_state_import_ptr( 3564 (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL, 3565 PFSYNC_MSG_VERSION_1301); 3566 PF_RULES_RUNLOCK(); 3567 } else 3568 error = EOPNOTSUPP; 3569 break; 3570 } 3571 3572 case DIOCGETSTATE: { 3573 struct pfioc_state *ps = (struct pfioc_state *)addr; 3574 struct pf_kstate *s; 3575 3576 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3577 if (s == NULL) { 3578 error = ENOENT; 3579 break; 3580 } 3581 3582 pfsync_state_export((union pfsync_state_union*)&ps->state, 3583 s, PFSYNC_MSG_VERSION_1301); 3584 PF_STATE_UNLOCK(s); 3585 break; 3586 } 3587 3588 case DIOCGETSTATENV: { 3589 error = pf_getstate((struct pfioc_nv *)addr); 3590 break; 3591 } 3592 3593 #ifdef COMPAT_FREEBSD14 3594 case DIOCGETSTATES: { 3595 struct pfioc_states *ps = (struct pfioc_states *)addr; 3596 struct pf_kstate *s; 3597 struct pfsync_state_1301 *pstore, *p; 3598 int i, nr; 3599 size_t slice_count = 16, count; 3600 void *out; 3601 3602 if (ps->ps_len <= 0) { 3603 nr = uma_zone_get_cur(V_pf_state_z); 3604 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3605 break; 3606 } 3607 3608 out = ps->ps_states; 3609 pstore = mallocarray(slice_count, 3610 sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO); 3611 nr = 0; 3612 3613 for (i = 0; i <= pf_hashmask; i++) { 3614 struct pf_idhash *ih = &V_pf_idhash[i]; 3615 3616 DIOCGETSTATES_retry: 3617 p = pstore; 3618 3619 if (LIST_EMPTY(&ih->states)) 3620 continue; 3621 3622 PF_HASHROW_LOCK(ih); 3623 count = 0; 3624 LIST_FOREACH(s, &ih->states, entry) { 3625 if (s->timeout == PFTM_UNLINKED) 3626 continue; 3627 count++; 3628 } 3629 3630 if (count > slice_count) { 3631 PF_HASHROW_UNLOCK(ih); 3632 free(pstore, M_TEMP); 3633 slice_count = count * 2; 3634 pstore = mallocarray(slice_count, 3635 sizeof(struct pfsync_state_1301), M_TEMP, 3636 M_WAITOK | M_ZERO); 3637 goto DIOCGETSTATES_retry; 3638 } 3639 3640 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3641 PF_HASHROW_UNLOCK(ih); 3642 goto DIOCGETSTATES_full; 3643 } 3644 3645 LIST_FOREACH(s, &ih->states, entry) { 3646 if (s->timeout == PFTM_UNLINKED) 3647 continue; 3648 3649 pfsync_state_export((union pfsync_state_union*)p, 3650 s, PFSYNC_MSG_VERSION_1301); 3651 p++; 3652 nr++; 3653 } 3654 PF_HASHROW_UNLOCK(ih); 3655 error = copyout(pstore, out, 3656 sizeof(struct pfsync_state_1301) * count); 3657 if (error) 3658 break; 3659 out = ps->ps_states + nr; 3660 } 3661 DIOCGETSTATES_full: 3662 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; 3663 free(pstore, M_TEMP); 3664 3665 break; 3666 } 3667 3668 case DIOCGETSTATESV2: { 3669 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3670 struct pf_kstate *s; 3671 struct pf_state_export *pstore, *p; 3672 int i, nr; 3673 size_t slice_count = 16, count; 3674 void *out; 3675 3676 if (ps->ps_req_version > PF_STATE_VERSION) { 3677 error = ENOTSUP; 3678 break; 3679 } 3680 3681 if (ps->ps_len <= 0) { 3682 nr = uma_zone_get_cur(V_pf_state_z); 3683 ps->ps_len = sizeof(struct pf_state_export) * nr; 3684 break; 3685 } 3686 3687 out = ps->ps_states; 3688 pstore = mallocarray(slice_count, 3689 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3690 nr = 0; 3691 3692 for (i = 0; i <= pf_hashmask; i++) { 3693 struct pf_idhash *ih = &V_pf_idhash[i]; 3694 3695 DIOCGETSTATESV2_retry: 3696 p = pstore; 3697 3698 if (LIST_EMPTY(&ih->states)) 3699 continue; 3700 3701 PF_HASHROW_LOCK(ih); 3702 count = 0; 3703 LIST_FOREACH(s, &ih->states, entry) { 3704 if (s->timeout == PFTM_UNLINKED) 3705 continue; 3706 count++; 3707 } 3708 3709 if (count > slice_count) { 3710 PF_HASHROW_UNLOCK(ih); 3711 free(pstore, M_TEMP); 3712 slice_count = count * 2; 3713 pstore = mallocarray(slice_count, 3714 sizeof(struct pf_state_export), M_TEMP, 3715 M_WAITOK | M_ZERO); 3716 goto DIOCGETSTATESV2_retry; 3717 } 3718 3719 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3720 PF_HASHROW_UNLOCK(ih); 3721 goto DIOCGETSTATESV2_full; 3722 } 3723 3724 LIST_FOREACH(s, &ih->states, entry) { 3725 if (s->timeout == PFTM_UNLINKED) 3726 continue; 3727 3728 pf_state_export(p, s); 3729 p++; 3730 nr++; 3731 } 3732 PF_HASHROW_UNLOCK(ih); 3733 error = copyout(pstore, out, 3734 sizeof(struct pf_state_export) * count); 3735 if (error) 3736 break; 3737 out = ps->ps_states + nr; 3738 } 3739 DIOCGETSTATESV2_full: 3740 ps->ps_len = nr * sizeof(struct pf_state_export); 3741 free(pstore, M_TEMP); 3742 3743 break; 3744 } 3745 #endif 3746 case DIOCGETSTATUSNV: { 3747 error = pf_getstatus((struct pfioc_nv *)addr); 3748 break; 3749 } 3750 3751 case DIOCSETSTATUSIF: { 3752 struct pfioc_if *pi = (struct pfioc_if *)addr; 3753 3754 if (pi->ifname[0] == 0) { 3755 bzero(V_pf_status.ifname, IFNAMSIZ); 3756 break; 3757 } 3758 PF_RULES_WLOCK(); 3759 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3760 PF_RULES_WUNLOCK(); 3761 break; 3762 } 3763 3764 case DIOCCLRSTATUS: { 3765 PF_RULES_WLOCK(); 3766 for (int i = 0; i < PFRES_MAX; i++) 3767 counter_u64_zero(V_pf_status.counters[i]); 3768 for (int i = 0; i < FCNT_MAX; i++) 3769 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3770 for (int i = 0; i < SCNT_MAX; i++) 3771 counter_u64_zero(V_pf_status.scounters[i]); 3772 for (int i = 0; i < KLCNT_MAX; i++) 3773 counter_u64_zero(V_pf_status.lcounters[i]); 3774 V_pf_status.since = time_second; 3775 if (*V_pf_status.ifname) 3776 pfi_update_status(V_pf_status.ifname, NULL); 3777 PF_RULES_WUNLOCK(); 3778 break; 3779 } 3780 3781 case DIOCNATLOOK: { 3782 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3783 struct pf_state_key *sk; 3784 struct pf_kstate *state; 3785 struct pf_state_key_cmp key; 3786 int m = 0, direction = pnl->direction; 3787 int sidx, didx; 3788 3789 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3790 sidx = (direction == PF_IN) ? 1 : 0; 3791 didx = (direction == PF_IN) ? 0 : 1; 3792 3793 if (!pnl->proto || 3794 PF_AZERO(&pnl->saddr, pnl->af) || 3795 PF_AZERO(&pnl->daddr, pnl->af) || 3796 ((pnl->proto == IPPROTO_TCP || 3797 pnl->proto == IPPROTO_UDP) && 3798 (!pnl->dport || !pnl->sport))) 3799 error = EINVAL; 3800 else { 3801 bzero(&key, sizeof(key)); 3802 key.af = pnl->af; 3803 key.proto = pnl->proto; 3804 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3805 key.port[sidx] = pnl->sport; 3806 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3807 key.port[didx] = pnl->dport; 3808 3809 state = pf_find_state_all(&key, direction, &m); 3810 if (state == NULL) { 3811 error = ENOENT; 3812 } else { 3813 if (m > 1) { 3814 PF_STATE_UNLOCK(state); 3815 error = E2BIG; /* more than one state */ 3816 } else { 3817 sk = state->key[sidx]; 3818 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3819 pnl->rsport = sk->port[sidx]; 3820 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3821 pnl->rdport = sk->port[didx]; 3822 PF_STATE_UNLOCK(state); 3823 } 3824 } 3825 } 3826 break; 3827 } 3828 3829 case DIOCSETTIMEOUT: { 3830 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3831 int old; 3832 3833 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 3834 pt->seconds < 0) { 3835 error = EINVAL; 3836 break; 3837 } 3838 PF_RULES_WLOCK(); 3839 old = V_pf_default_rule.timeout[pt->timeout]; 3840 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 3841 pt->seconds = 1; 3842 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 3843 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 3844 wakeup(pf_purge_thread); 3845 pt->seconds = old; 3846 PF_RULES_WUNLOCK(); 3847 break; 3848 } 3849 3850 case DIOCGETTIMEOUT: { 3851 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3852 3853 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 3854 error = EINVAL; 3855 break; 3856 } 3857 PF_RULES_RLOCK(); 3858 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 3859 PF_RULES_RUNLOCK(); 3860 break; 3861 } 3862 3863 case DIOCGETLIMIT: { 3864 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3865 3866 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 3867 error = EINVAL; 3868 break; 3869 } 3870 PF_RULES_RLOCK(); 3871 pl->limit = V_pf_limits[pl->index].limit; 3872 PF_RULES_RUNLOCK(); 3873 break; 3874 } 3875 3876 case DIOCSETLIMIT: { 3877 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3878 int old_limit; 3879 3880 PF_RULES_WLOCK(); 3881 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 3882 V_pf_limits[pl->index].zone == NULL) { 3883 PF_RULES_WUNLOCK(); 3884 error = EINVAL; 3885 break; 3886 } 3887 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 3888 old_limit = V_pf_limits[pl->index].limit; 3889 V_pf_limits[pl->index].limit = pl->limit; 3890 pl->limit = old_limit; 3891 PF_RULES_WUNLOCK(); 3892 break; 3893 } 3894 3895 case DIOCSETDEBUG: { 3896 u_int32_t *level = (u_int32_t *)addr; 3897 3898 PF_RULES_WLOCK(); 3899 V_pf_status.debug = *level; 3900 PF_RULES_WUNLOCK(); 3901 break; 3902 } 3903 3904 case DIOCCLRRULECTRS: { 3905 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 3906 struct pf_kruleset *ruleset = &pf_main_ruleset; 3907 struct pf_krule *rule; 3908 3909 PF_RULES_WLOCK(); 3910 TAILQ_FOREACH(rule, 3911 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 3912 pf_counter_u64_zero(&rule->evaluations); 3913 for (int i = 0; i < 2; i++) { 3914 pf_counter_u64_zero(&rule->packets[i]); 3915 pf_counter_u64_zero(&rule->bytes[i]); 3916 } 3917 } 3918 PF_RULES_WUNLOCK(); 3919 break; 3920 } 3921 3922 case DIOCGIFSPEEDV0: 3923 case DIOCGIFSPEEDV1: { 3924 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr; 3925 struct pf_ifspeed_v1 ps; 3926 struct ifnet *ifp; 3927 3928 if (psp->ifname[0] == '\0') { 3929 error = EINVAL; 3930 break; 3931 } 3932 3933 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 3934 if (error != 0) 3935 break; 3936 ifp = ifunit(ps.ifname); 3937 if (ifp != NULL) { 3938 psp->baudrate32 = 3939 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 3940 if (cmd == DIOCGIFSPEEDV1) 3941 psp->baudrate = ifp->if_baudrate; 3942 } else { 3943 error = EINVAL; 3944 } 3945 break; 3946 } 3947 3948 #ifdef ALTQ 3949 case DIOCSTARTALTQ: { 3950 struct pf_altq *altq; 3951 3952 PF_RULES_WLOCK(); 3953 /* enable all altq interfaces on active list */ 3954 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 3955 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 3956 error = pf_enable_altq(altq); 3957 if (error != 0) 3958 break; 3959 } 3960 } 3961 if (error == 0) 3962 V_pf_altq_running = 1; 3963 PF_RULES_WUNLOCK(); 3964 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 3965 break; 3966 } 3967 3968 case DIOCSTOPALTQ: { 3969 struct pf_altq *altq; 3970 3971 PF_RULES_WLOCK(); 3972 /* disable all altq interfaces on active list */ 3973 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 3974 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 3975 error = pf_disable_altq(altq); 3976 if (error != 0) 3977 break; 3978 } 3979 } 3980 if (error == 0) 3981 V_pf_altq_running = 0; 3982 PF_RULES_WUNLOCK(); 3983 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 3984 break; 3985 } 3986 3987 case DIOCADDALTQV0: 3988 case DIOCADDALTQV1: { 3989 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 3990 struct pf_altq *altq, *a; 3991 struct ifnet *ifp; 3992 3993 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 3994 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 3995 if (error) 3996 break; 3997 altq->local_flags = 0; 3998 3999 PF_RULES_WLOCK(); 4000 if (pa->ticket != V_ticket_altqs_inactive) { 4001 PF_RULES_WUNLOCK(); 4002 free(altq, M_PFALTQ); 4003 error = EBUSY; 4004 break; 4005 } 4006 4007 /* 4008 * if this is for a queue, find the discipline and 4009 * copy the necessary fields 4010 */ 4011 if (altq->qname[0] != 0) { 4012 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4013 PF_RULES_WUNLOCK(); 4014 error = EBUSY; 4015 free(altq, M_PFALTQ); 4016 break; 4017 } 4018 altq->altq_disc = NULL; 4019 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4020 if (strncmp(a->ifname, altq->ifname, 4021 IFNAMSIZ) == 0) { 4022 altq->altq_disc = a->altq_disc; 4023 break; 4024 } 4025 } 4026 } 4027 4028 if ((ifp = ifunit(altq->ifname)) == NULL) 4029 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4030 else 4031 error = altq_add(ifp, altq); 4032 4033 if (error) { 4034 PF_RULES_WUNLOCK(); 4035 free(altq, M_PFALTQ); 4036 break; 4037 } 4038 4039 if (altq->qname[0] != 0) 4040 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4041 else 4042 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4043 /* version error check done on import above */ 4044 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4045 PF_RULES_WUNLOCK(); 4046 break; 4047 } 4048 4049 case DIOCGETALTQSV0: 4050 case DIOCGETALTQSV1: { 4051 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4052 struct pf_altq *altq; 4053 4054 PF_RULES_RLOCK(); 4055 pa->nr = 0; 4056 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4057 pa->nr++; 4058 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4059 pa->nr++; 4060 pa->ticket = V_ticket_altqs_active; 4061 PF_RULES_RUNLOCK(); 4062 break; 4063 } 4064 4065 case DIOCGETALTQV0: 4066 case DIOCGETALTQV1: { 4067 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4068 struct pf_altq *altq; 4069 4070 PF_RULES_RLOCK(); 4071 if (pa->ticket != V_ticket_altqs_active) { 4072 PF_RULES_RUNLOCK(); 4073 error = EBUSY; 4074 break; 4075 } 4076 altq = pf_altq_get_nth_active(pa->nr); 4077 if (altq == NULL) { 4078 PF_RULES_RUNLOCK(); 4079 error = EBUSY; 4080 break; 4081 } 4082 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4083 PF_RULES_RUNLOCK(); 4084 break; 4085 } 4086 4087 case DIOCCHANGEALTQV0: 4088 case DIOCCHANGEALTQV1: 4089 /* CHANGEALTQ not supported yet! */ 4090 error = ENODEV; 4091 break; 4092 4093 case DIOCGETQSTATSV0: 4094 case DIOCGETQSTATSV1: { 4095 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4096 struct pf_altq *altq; 4097 int nbytes; 4098 u_int32_t version; 4099 4100 PF_RULES_RLOCK(); 4101 if (pq->ticket != V_ticket_altqs_active) { 4102 PF_RULES_RUNLOCK(); 4103 error = EBUSY; 4104 break; 4105 } 4106 nbytes = pq->nbytes; 4107 altq = pf_altq_get_nth_active(pq->nr); 4108 if (altq == NULL) { 4109 PF_RULES_RUNLOCK(); 4110 error = EBUSY; 4111 break; 4112 } 4113 4114 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4115 PF_RULES_RUNLOCK(); 4116 error = ENXIO; 4117 break; 4118 } 4119 PF_RULES_RUNLOCK(); 4120 if (cmd == DIOCGETQSTATSV0) 4121 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4122 else 4123 version = pq->version; 4124 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4125 if (error == 0) { 4126 pq->scheduler = altq->scheduler; 4127 pq->nbytes = nbytes; 4128 } 4129 break; 4130 } 4131 #endif /* ALTQ */ 4132 4133 case DIOCBEGINADDRS: { 4134 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4135 4136 PF_RULES_WLOCK(); 4137 pf_empty_kpool(&V_pf_pabuf); 4138 pp->ticket = ++V_ticket_pabuf; 4139 PF_RULES_WUNLOCK(); 4140 break; 4141 } 4142 4143 case DIOCADDADDR: { 4144 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4145 struct pf_kpooladdr *pa; 4146 struct pfi_kkif *kif = NULL; 4147 4148 #ifndef INET 4149 if (pp->af == AF_INET) { 4150 error = EAFNOSUPPORT; 4151 break; 4152 } 4153 #endif /* INET */ 4154 #ifndef INET6 4155 if (pp->af == AF_INET6) { 4156 error = EAFNOSUPPORT; 4157 break; 4158 } 4159 #endif /* INET6 */ 4160 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4161 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4162 pp->addr.addr.type != PF_ADDR_TABLE) { 4163 error = EINVAL; 4164 break; 4165 } 4166 if (pp->addr.addr.p.dyn != NULL) { 4167 error = EINVAL; 4168 break; 4169 } 4170 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4171 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4172 if (error != 0) 4173 break; 4174 if (pa->ifname[0]) 4175 kif = pf_kkif_create(M_WAITOK); 4176 PF_RULES_WLOCK(); 4177 if (pp->ticket != V_ticket_pabuf) { 4178 PF_RULES_WUNLOCK(); 4179 if (pa->ifname[0]) 4180 pf_kkif_free(kif); 4181 free(pa, M_PFRULE); 4182 error = EBUSY; 4183 break; 4184 } 4185 if (pa->ifname[0]) { 4186 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4187 kif = NULL; 4188 pfi_kkif_ref(pa->kif); 4189 } else 4190 pa->kif = NULL; 4191 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4192 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4193 if (pa->ifname[0]) 4194 pfi_kkif_unref(pa->kif); 4195 PF_RULES_WUNLOCK(); 4196 free(pa, M_PFRULE); 4197 break; 4198 } 4199 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4200 PF_RULES_WUNLOCK(); 4201 break; 4202 } 4203 4204 case DIOCGETADDRS: { 4205 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4206 struct pf_kpool *pool; 4207 struct pf_kpooladdr *pa; 4208 4209 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4210 pp->nr = 0; 4211 4212 PF_RULES_RLOCK(); 4213 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4214 pp->r_num, 0, 1, 0); 4215 if (pool == NULL) { 4216 PF_RULES_RUNLOCK(); 4217 error = EBUSY; 4218 break; 4219 } 4220 TAILQ_FOREACH(pa, &pool->list, entries) 4221 pp->nr++; 4222 PF_RULES_RUNLOCK(); 4223 break; 4224 } 4225 4226 case DIOCGETADDR: { 4227 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4228 struct pf_kpool *pool; 4229 struct pf_kpooladdr *pa; 4230 u_int32_t nr = 0; 4231 4232 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4233 4234 PF_RULES_RLOCK(); 4235 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4236 pp->r_num, 0, 1, 1); 4237 if (pool == NULL) { 4238 PF_RULES_RUNLOCK(); 4239 error = EBUSY; 4240 break; 4241 } 4242 pa = TAILQ_FIRST(&pool->list); 4243 while ((pa != NULL) && (nr < pp->nr)) { 4244 pa = TAILQ_NEXT(pa, entries); 4245 nr++; 4246 } 4247 if (pa == NULL) { 4248 PF_RULES_RUNLOCK(); 4249 error = EBUSY; 4250 break; 4251 } 4252 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4253 pf_addr_copyout(&pp->addr.addr); 4254 PF_RULES_RUNLOCK(); 4255 break; 4256 } 4257 4258 case DIOCCHANGEADDR: { 4259 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4260 struct pf_kpool *pool; 4261 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4262 struct pf_kruleset *ruleset; 4263 struct pfi_kkif *kif = NULL; 4264 4265 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4266 4267 if (pca->action < PF_CHANGE_ADD_HEAD || 4268 pca->action > PF_CHANGE_REMOVE) { 4269 error = EINVAL; 4270 break; 4271 } 4272 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4273 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4274 pca->addr.addr.type != PF_ADDR_TABLE) { 4275 error = EINVAL; 4276 break; 4277 } 4278 if (pca->addr.addr.p.dyn != NULL) { 4279 error = EINVAL; 4280 break; 4281 } 4282 4283 if (pca->action != PF_CHANGE_REMOVE) { 4284 #ifndef INET 4285 if (pca->af == AF_INET) { 4286 error = EAFNOSUPPORT; 4287 break; 4288 } 4289 #endif /* INET */ 4290 #ifndef INET6 4291 if (pca->af == AF_INET6) { 4292 error = EAFNOSUPPORT; 4293 break; 4294 } 4295 #endif /* INET6 */ 4296 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4297 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4298 if (newpa->ifname[0]) 4299 kif = pf_kkif_create(M_WAITOK); 4300 newpa->kif = NULL; 4301 } 4302 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4303 PF_RULES_WLOCK(); 4304 ruleset = pf_find_kruleset(pca->anchor); 4305 if (ruleset == NULL) 4306 ERROUT(EBUSY); 4307 4308 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4309 pca->r_num, pca->r_last, 1, 1); 4310 if (pool == NULL) 4311 ERROUT(EBUSY); 4312 4313 if (pca->action != PF_CHANGE_REMOVE) { 4314 if (newpa->ifname[0]) { 4315 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4316 pfi_kkif_ref(newpa->kif); 4317 kif = NULL; 4318 } 4319 4320 switch (newpa->addr.type) { 4321 case PF_ADDR_DYNIFTL: 4322 error = pfi_dynaddr_setup(&newpa->addr, 4323 pca->af); 4324 break; 4325 case PF_ADDR_TABLE: 4326 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4327 newpa->addr.v.tblname); 4328 if (newpa->addr.p.tbl == NULL) 4329 error = ENOMEM; 4330 break; 4331 } 4332 if (error) 4333 goto DIOCCHANGEADDR_error; 4334 } 4335 4336 switch (pca->action) { 4337 case PF_CHANGE_ADD_HEAD: 4338 oldpa = TAILQ_FIRST(&pool->list); 4339 break; 4340 case PF_CHANGE_ADD_TAIL: 4341 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4342 break; 4343 default: 4344 oldpa = TAILQ_FIRST(&pool->list); 4345 for (int i = 0; oldpa && i < pca->nr; i++) 4346 oldpa = TAILQ_NEXT(oldpa, entries); 4347 4348 if (oldpa == NULL) 4349 ERROUT(EINVAL); 4350 } 4351 4352 if (pca->action == PF_CHANGE_REMOVE) { 4353 TAILQ_REMOVE(&pool->list, oldpa, entries); 4354 switch (oldpa->addr.type) { 4355 case PF_ADDR_DYNIFTL: 4356 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4357 break; 4358 case PF_ADDR_TABLE: 4359 pfr_detach_table(oldpa->addr.p.tbl); 4360 break; 4361 } 4362 if (oldpa->kif) 4363 pfi_kkif_unref(oldpa->kif); 4364 free(oldpa, M_PFRULE); 4365 } else { 4366 if (oldpa == NULL) 4367 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4368 else if (pca->action == PF_CHANGE_ADD_HEAD || 4369 pca->action == PF_CHANGE_ADD_BEFORE) 4370 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4371 else 4372 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4373 newpa, entries); 4374 } 4375 4376 pool->cur = TAILQ_FIRST(&pool->list); 4377 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4378 PF_RULES_WUNLOCK(); 4379 break; 4380 4381 #undef ERROUT 4382 DIOCCHANGEADDR_error: 4383 if (newpa != NULL) { 4384 if (newpa->kif) 4385 pfi_kkif_unref(newpa->kif); 4386 free(newpa, M_PFRULE); 4387 } 4388 PF_RULES_WUNLOCK(); 4389 pf_kkif_free(kif); 4390 break; 4391 } 4392 4393 case DIOCGETRULESETS: { 4394 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4395 struct pf_kruleset *ruleset; 4396 struct pf_kanchor *anchor; 4397 4398 pr->path[sizeof(pr->path) - 1] = 0; 4399 4400 PF_RULES_RLOCK(); 4401 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4402 PF_RULES_RUNLOCK(); 4403 error = ENOENT; 4404 break; 4405 } 4406 pr->nr = 0; 4407 if (ruleset->anchor == NULL) { 4408 /* XXX kludge for pf_main_ruleset */ 4409 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4410 if (anchor->parent == NULL) 4411 pr->nr++; 4412 } else { 4413 RB_FOREACH(anchor, pf_kanchor_node, 4414 &ruleset->anchor->children) 4415 pr->nr++; 4416 } 4417 PF_RULES_RUNLOCK(); 4418 break; 4419 } 4420 4421 case DIOCGETRULESET: { 4422 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4423 struct pf_kruleset *ruleset; 4424 struct pf_kanchor *anchor; 4425 u_int32_t nr = 0; 4426 4427 pr->path[sizeof(pr->path) - 1] = 0; 4428 4429 PF_RULES_RLOCK(); 4430 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4431 PF_RULES_RUNLOCK(); 4432 error = ENOENT; 4433 break; 4434 } 4435 pr->name[0] = 0; 4436 if (ruleset->anchor == NULL) { 4437 /* XXX kludge for pf_main_ruleset */ 4438 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4439 if (anchor->parent == NULL && nr++ == pr->nr) { 4440 strlcpy(pr->name, anchor->name, 4441 sizeof(pr->name)); 4442 break; 4443 } 4444 } else { 4445 RB_FOREACH(anchor, pf_kanchor_node, 4446 &ruleset->anchor->children) 4447 if (nr++ == pr->nr) { 4448 strlcpy(pr->name, anchor->name, 4449 sizeof(pr->name)); 4450 break; 4451 } 4452 } 4453 if (!pr->name[0]) 4454 error = EBUSY; 4455 PF_RULES_RUNLOCK(); 4456 break; 4457 } 4458 4459 case DIOCRCLRTABLES: { 4460 struct pfioc_table *io = (struct pfioc_table *)addr; 4461 4462 if (io->pfrio_esize != 0) { 4463 error = ENODEV; 4464 break; 4465 } 4466 PF_RULES_WLOCK(); 4467 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4468 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4469 PF_RULES_WUNLOCK(); 4470 break; 4471 } 4472 4473 case DIOCRADDTABLES: { 4474 struct pfioc_table *io = (struct pfioc_table *)addr; 4475 struct pfr_table *pfrts; 4476 size_t totlen; 4477 4478 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4479 error = ENODEV; 4480 break; 4481 } 4482 4483 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4484 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4485 error = ENOMEM; 4486 break; 4487 } 4488 4489 totlen = io->pfrio_size * sizeof(struct pfr_table); 4490 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4491 M_TEMP, M_WAITOK); 4492 error = copyin(io->pfrio_buffer, pfrts, totlen); 4493 if (error) { 4494 free(pfrts, M_TEMP); 4495 break; 4496 } 4497 PF_RULES_WLOCK(); 4498 error = pfr_add_tables(pfrts, io->pfrio_size, 4499 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4500 PF_RULES_WUNLOCK(); 4501 free(pfrts, M_TEMP); 4502 break; 4503 } 4504 4505 case DIOCRDELTABLES: { 4506 struct pfioc_table *io = (struct pfioc_table *)addr; 4507 struct pfr_table *pfrts; 4508 size_t totlen; 4509 4510 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4511 error = ENODEV; 4512 break; 4513 } 4514 4515 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4516 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4517 error = ENOMEM; 4518 break; 4519 } 4520 4521 totlen = io->pfrio_size * sizeof(struct pfr_table); 4522 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4523 M_TEMP, M_WAITOK); 4524 error = copyin(io->pfrio_buffer, pfrts, totlen); 4525 if (error) { 4526 free(pfrts, M_TEMP); 4527 break; 4528 } 4529 PF_RULES_WLOCK(); 4530 error = pfr_del_tables(pfrts, io->pfrio_size, 4531 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4532 PF_RULES_WUNLOCK(); 4533 free(pfrts, M_TEMP); 4534 break; 4535 } 4536 4537 case DIOCRGETTABLES: { 4538 struct pfioc_table *io = (struct pfioc_table *)addr; 4539 struct pfr_table *pfrts; 4540 size_t totlen; 4541 int n; 4542 4543 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4544 error = ENODEV; 4545 break; 4546 } 4547 PF_RULES_RLOCK(); 4548 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4549 if (n < 0) { 4550 PF_RULES_RUNLOCK(); 4551 error = EINVAL; 4552 break; 4553 } 4554 io->pfrio_size = min(io->pfrio_size, n); 4555 4556 totlen = io->pfrio_size * sizeof(struct pfr_table); 4557 4558 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4559 M_TEMP, M_NOWAIT | M_ZERO); 4560 if (pfrts == NULL) { 4561 error = ENOMEM; 4562 PF_RULES_RUNLOCK(); 4563 break; 4564 } 4565 error = pfr_get_tables(&io->pfrio_table, pfrts, 4566 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4567 PF_RULES_RUNLOCK(); 4568 if (error == 0) 4569 error = copyout(pfrts, io->pfrio_buffer, totlen); 4570 free(pfrts, M_TEMP); 4571 break; 4572 } 4573 4574 case DIOCRGETTSTATS: { 4575 struct pfioc_table *io = (struct pfioc_table *)addr; 4576 struct pfr_tstats *pfrtstats; 4577 size_t totlen; 4578 int n; 4579 4580 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4581 error = ENODEV; 4582 break; 4583 } 4584 PF_TABLE_STATS_LOCK(); 4585 PF_RULES_RLOCK(); 4586 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4587 if (n < 0) { 4588 PF_RULES_RUNLOCK(); 4589 PF_TABLE_STATS_UNLOCK(); 4590 error = EINVAL; 4591 break; 4592 } 4593 io->pfrio_size = min(io->pfrio_size, n); 4594 4595 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4596 pfrtstats = mallocarray(io->pfrio_size, 4597 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4598 if (pfrtstats == NULL) { 4599 error = ENOMEM; 4600 PF_RULES_RUNLOCK(); 4601 PF_TABLE_STATS_UNLOCK(); 4602 break; 4603 } 4604 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4605 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4606 PF_RULES_RUNLOCK(); 4607 PF_TABLE_STATS_UNLOCK(); 4608 if (error == 0) 4609 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4610 free(pfrtstats, M_TEMP); 4611 break; 4612 } 4613 4614 case DIOCRCLRTSTATS: { 4615 struct pfioc_table *io = (struct pfioc_table *)addr; 4616 struct pfr_table *pfrts; 4617 size_t totlen; 4618 4619 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4620 error = ENODEV; 4621 break; 4622 } 4623 4624 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4625 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4626 /* We used to count tables and use the minimum required 4627 * size, so we didn't fail on overly large requests. 4628 * Keep doing so. */ 4629 io->pfrio_size = pf_ioctl_maxcount; 4630 break; 4631 } 4632 4633 totlen = io->pfrio_size * sizeof(struct pfr_table); 4634 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4635 M_TEMP, M_WAITOK); 4636 error = copyin(io->pfrio_buffer, pfrts, totlen); 4637 if (error) { 4638 free(pfrts, M_TEMP); 4639 break; 4640 } 4641 4642 PF_TABLE_STATS_LOCK(); 4643 PF_RULES_RLOCK(); 4644 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4645 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4646 PF_RULES_RUNLOCK(); 4647 PF_TABLE_STATS_UNLOCK(); 4648 free(pfrts, M_TEMP); 4649 break; 4650 } 4651 4652 case DIOCRSETTFLAGS: { 4653 struct pfioc_table *io = (struct pfioc_table *)addr; 4654 struct pfr_table *pfrts; 4655 size_t totlen; 4656 int n; 4657 4658 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4659 error = ENODEV; 4660 break; 4661 } 4662 4663 PF_RULES_RLOCK(); 4664 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4665 if (n < 0) { 4666 PF_RULES_RUNLOCK(); 4667 error = EINVAL; 4668 break; 4669 } 4670 4671 io->pfrio_size = min(io->pfrio_size, n); 4672 PF_RULES_RUNLOCK(); 4673 4674 totlen = io->pfrio_size * sizeof(struct pfr_table); 4675 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4676 M_TEMP, M_WAITOK); 4677 error = copyin(io->pfrio_buffer, pfrts, totlen); 4678 if (error) { 4679 free(pfrts, M_TEMP); 4680 break; 4681 } 4682 PF_RULES_WLOCK(); 4683 error = pfr_set_tflags(pfrts, io->pfrio_size, 4684 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4685 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4686 PF_RULES_WUNLOCK(); 4687 free(pfrts, M_TEMP); 4688 break; 4689 } 4690 4691 case DIOCRCLRADDRS: { 4692 struct pfioc_table *io = (struct pfioc_table *)addr; 4693 4694 if (io->pfrio_esize != 0) { 4695 error = ENODEV; 4696 break; 4697 } 4698 PF_RULES_WLOCK(); 4699 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4700 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4701 PF_RULES_WUNLOCK(); 4702 break; 4703 } 4704 4705 case DIOCRADDADDRS: { 4706 struct pfioc_table *io = (struct pfioc_table *)addr; 4707 struct pfr_addr *pfras; 4708 size_t totlen; 4709 4710 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4711 error = ENODEV; 4712 break; 4713 } 4714 if (io->pfrio_size < 0 || 4715 io->pfrio_size > pf_ioctl_maxcount || 4716 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4717 error = EINVAL; 4718 break; 4719 } 4720 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4721 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4722 M_TEMP, M_WAITOK); 4723 error = copyin(io->pfrio_buffer, pfras, totlen); 4724 if (error) { 4725 free(pfras, M_TEMP); 4726 break; 4727 } 4728 PF_RULES_WLOCK(); 4729 error = pfr_add_addrs(&io->pfrio_table, pfras, 4730 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4731 PFR_FLAG_USERIOCTL); 4732 PF_RULES_WUNLOCK(); 4733 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4734 error = copyout(pfras, io->pfrio_buffer, totlen); 4735 free(pfras, M_TEMP); 4736 break; 4737 } 4738 4739 case DIOCRDELADDRS: { 4740 struct pfioc_table *io = (struct pfioc_table *)addr; 4741 struct pfr_addr *pfras; 4742 size_t totlen; 4743 4744 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4745 error = ENODEV; 4746 break; 4747 } 4748 if (io->pfrio_size < 0 || 4749 io->pfrio_size > pf_ioctl_maxcount || 4750 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4751 error = EINVAL; 4752 break; 4753 } 4754 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4755 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4756 M_TEMP, M_WAITOK); 4757 error = copyin(io->pfrio_buffer, pfras, totlen); 4758 if (error) { 4759 free(pfras, M_TEMP); 4760 break; 4761 } 4762 PF_RULES_WLOCK(); 4763 error = pfr_del_addrs(&io->pfrio_table, pfras, 4764 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4765 PFR_FLAG_USERIOCTL); 4766 PF_RULES_WUNLOCK(); 4767 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4768 error = copyout(pfras, io->pfrio_buffer, totlen); 4769 free(pfras, M_TEMP); 4770 break; 4771 } 4772 4773 case DIOCRSETADDRS: { 4774 struct pfioc_table *io = (struct pfioc_table *)addr; 4775 struct pfr_addr *pfras; 4776 size_t totlen, count; 4777 4778 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4779 error = ENODEV; 4780 break; 4781 } 4782 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4783 error = EINVAL; 4784 break; 4785 } 4786 count = max(io->pfrio_size, io->pfrio_size2); 4787 if (count > pf_ioctl_maxcount || 4788 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4789 error = EINVAL; 4790 break; 4791 } 4792 totlen = count * sizeof(struct pfr_addr); 4793 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4794 M_WAITOK); 4795 error = copyin(io->pfrio_buffer, pfras, totlen); 4796 if (error) { 4797 free(pfras, M_TEMP); 4798 break; 4799 } 4800 PF_RULES_WLOCK(); 4801 error = pfr_set_addrs(&io->pfrio_table, pfras, 4802 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4803 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4804 PFR_FLAG_USERIOCTL, 0); 4805 PF_RULES_WUNLOCK(); 4806 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4807 error = copyout(pfras, io->pfrio_buffer, totlen); 4808 free(pfras, M_TEMP); 4809 break; 4810 } 4811 4812 case DIOCRGETADDRS: { 4813 struct pfioc_table *io = (struct pfioc_table *)addr; 4814 struct pfr_addr *pfras; 4815 size_t totlen; 4816 4817 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4818 error = ENODEV; 4819 break; 4820 } 4821 if (io->pfrio_size < 0 || 4822 io->pfrio_size > pf_ioctl_maxcount || 4823 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4824 error = EINVAL; 4825 break; 4826 } 4827 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4828 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4829 M_TEMP, M_WAITOK | M_ZERO); 4830 PF_RULES_RLOCK(); 4831 error = pfr_get_addrs(&io->pfrio_table, pfras, 4832 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4833 PF_RULES_RUNLOCK(); 4834 if (error == 0) 4835 error = copyout(pfras, io->pfrio_buffer, totlen); 4836 free(pfras, M_TEMP); 4837 break; 4838 } 4839 4840 case DIOCRGETASTATS: { 4841 struct pfioc_table *io = (struct pfioc_table *)addr; 4842 struct pfr_astats *pfrastats; 4843 size_t totlen; 4844 4845 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 4846 error = ENODEV; 4847 break; 4848 } 4849 if (io->pfrio_size < 0 || 4850 io->pfrio_size > pf_ioctl_maxcount || 4851 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 4852 error = EINVAL; 4853 break; 4854 } 4855 totlen = io->pfrio_size * sizeof(struct pfr_astats); 4856 pfrastats = mallocarray(io->pfrio_size, 4857 sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 4858 PF_RULES_RLOCK(); 4859 error = pfr_get_astats(&io->pfrio_table, pfrastats, 4860 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4861 PF_RULES_RUNLOCK(); 4862 if (error == 0) 4863 error = copyout(pfrastats, io->pfrio_buffer, totlen); 4864 free(pfrastats, M_TEMP); 4865 break; 4866 } 4867 4868 case DIOCRCLRASTATS: { 4869 struct pfioc_table *io = (struct pfioc_table *)addr; 4870 struct pfr_addr *pfras; 4871 size_t totlen; 4872 4873 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4874 error = ENODEV; 4875 break; 4876 } 4877 if (io->pfrio_size < 0 || 4878 io->pfrio_size > pf_ioctl_maxcount || 4879 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4880 error = EINVAL; 4881 break; 4882 } 4883 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4884 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4885 M_TEMP, M_WAITOK); 4886 error = copyin(io->pfrio_buffer, pfras, totlen); 4887 if (error) { 4888 free(pfras, M_TEMP); 4889 break; 4890 } 4891 PF_RULES_WLOCK(); 4892 error = pfr_clr_astats(&io->pfrio_table, pfras, 4893 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 4894 PFR_FLAG_USERIOCTL); 4895 PF_RULES_WUNLOCK(); 4896 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4897 error = copyout(pfras, io->pfrio_buffer, totlen); 4898 free(pfras, M_TEMP); 4899 break; 4900 } 4901 4902 case DIOCRTSTADDRS: { 4903 struct pfioc_table *io = (struct pfioc_table *)addr; 4904 struct pfr_addr *pfras; 4905 size_t totlen; 4906 4907 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4908 error = ENODEV; 4909 break; 4910 } 4911 if (io->pfrio_size < 0 || 4912 io->pfrio_size > pf_ioctl_maxcount || 4913 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4914 error = EINVAL; 4915 break; 4916 } 4917 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4918 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4919 M_TEMP, M_WAITOK); 4920 error = copyin(io->pfrio_buffer, pfras, totlen); 4921 if (error) { 4922 free(pfras, M_TEMP); 4923 break; 4924 } 4925 PF_RULES_RLOCK(); 4926 error = pfr_tst_addrs(&io->pfrio_table, pfras, 4927 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 4928 PFR_FLAG_USERIOCTL); 4929 PF_RULES_RUNLOCK(); 4930 if (error == 0) 4931 error = copyout(pfras, io->pfrio_buffer, totlen); 4932 free(pfras, M_TEMP); 4933 break; 4934 } 4935 4936 case DIOCRINADEFINE: { 4937 struct pfioc_table *io = (struct pfioc_table *)addr; 4938 struct pfr_addr *pfras; 4939 size_t totlen; 4940 4941 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4942 error = ENODEV; 4943 break; 4944 } 4945 if (io->pfrio_size < 0 || 4946 io->pfrio_size > pf_ioctl_maxcount || 4947 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4948 error = EINVAL; 4949 break; 4950 } 4951 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4952 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4953 M_TEMP, M_WAITOK); 4954 error = copyin(io->pfrio_buffer, pfras, totlen); 4955 if (error) { 4956 free(pfras, M_TEMP); 4957 break; 4958 } 4959 PF_RULES_WLOCK(); 4960 error = pfr_ina_define(&io->pfrio_table, pfras, 4961 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 4962 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4963 PF_RULES_WUNLOCK(); 4964 free(pfras, M_TEMP); 4965 break; 4966 } 4967 4968 case DIOCOSFPADD: { 4969 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 4970 PF_RULES_WLOCK(); 4971 error = pf_osfp_add(io); 4972 PF_RULES_WUNLOCK(); 4973 break; 4974 } 4975 4976 case DIOCOSFPGET: { 4977 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 4978 PF_RULES_RLOCK(); 4979 error = pf_osfp_get(io); 4980 PF_RULES_RUNLOCK(); 4981 break; 4982 } 4983 4984 case DIOCXBEGIN: { 4985 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4986 struct pfioc_trans_e *ioes, *ioe; 4987 size_t totlen; 4988 int i; 4989 4990 if (io->esize != sizeof(*ioe)) { 4991 error = ENODEV; 4992 break; 4993 } 4994 if (io->size < 0 || 4995 io->size > pf_ioctl_maxcount || 4996 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 4997 error = EINVAL; 4998 break; 4999 } 5000 totlen = sizeof(struct pfioc_trans_e) * io->size; 5001 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5002 M_TEMP, M_WAITOK); 5003 error = copyin(io->array, ioes, totlen); 5004 if (error) { 5005 free(ioes, M_TEMP); 5006 break; 5007 } 5008 /* Ensure there's no more ethernet rules to clean up. */ 5009 NET_EPOCH_DRAIN_CALLBACKS(); 5010 PF_RULES_WLOCK(); 5011 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5012 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5013 switch (ioe->rs_num) { 5014 case PF_RULESET_ETH: 5015 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { 5016 PF_RULES_WUNLOCK(); 5017 free(ioes, M_TEMP); 5018 goto fail; 5019 } 5020 break; 5021 #ifdef ALTQ 5022 case PF_RULESET_ALTQ: 5023 if (ioe->anchor[0]) { 5024 PF_RULES_WUNLOCK(); 5025 free(ioes, M_TEMP); 5026 error = EINVAL; 5027 goto fail; 5028 } 5029 if ((error = pf_begin_altq(&ioe->ticket))) { 5030 PF_RULES_WUNLOCK(); 5031 free(ioes, M_TEMP); 5032 goto fail; 5033 } 5034 break; 5035 #endif /* ALTQ */ 5036 case PF_RULESET_TABLE: 5037 { 5038 struct pfr_table table; 5039 5040 bzero(&table, sizeof(table)); 5041 strlcpy(table.pfrt_anchor, ioe->anchor, 5042 sizeof(table.pfrt_anchor)); 5043 if ((error = pfr_ina_begin(&table, 5044 &ioe->ticket, NULL, 0))) { 5045 PF_RULES_WUNLOCK(); 5046 free(ioes, M_TEMP); 5047 goto fail; 5048 } 5049 break; 5050 } 5051 default: 5052 if ((error = pf_begin_rules(&ioe->ticket, 5053 ioe->rs_num, ioe->anchor))) { 5054 PF_RULES_WUNLOCK(); 5055 free(ioes, M_TEMP); 5056 goto fail; 5057 } 5058 break; 5059 } 5060 } 5061 PF_RULES_WUNLOCK(); 5062 error = copyout(ioes, io->array, totlen); 5063 free(ioes, M_TEMP); 5064 break; 5065 } 5066 5067 case DIOCXROLLBACK: { 5068 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5069 struct pfioc_trans_e *ioe, *ioes; 5070 size_t totlen; 5071 int i; 5072 5073 if (io->esize != sizeof(*ioe)) { 5074 error = ENODEV; 5075 break; 5076 } 5077 if (io->size < 0 || 5078 io->size > pf_ioctl_maxcount || 5079 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5080 error = EINVAL; 5081 break; 5082 } 5083 totlen = sizeof(struct pfioc_trans_e) * io->size; 5084 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5085 M_TEMP, M_WAITOK); 5086 error = copyin(io->array, ioes, totlen); 5087 if (error) { 5088 free(ioes, M_TEMP); 5089 break; 5090 } 5091 PF_RULES_WLOCK(); 5092 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5093 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5094 switch (ioe->rs_num) { 5095 case PF_RULESET_ETH: 5096 if ((error = pf_rollback_eth(ioe->ticket, 5097 ioe->anchor))) { 5098 PF_RULES_WUNLOCK(); 5099 free(ioes, M_TEMP); 5100 goto fail; /* really bad */ 5101 } 5102 break; 5103 #ifdef ALTQ 5104 case PF_RULESET_ALTQ: 5105 if (ioe->anchor[0]) { 5106 PF_RULES_WUNLOCK(); 5107 free(ioes, M_TEMP); 5108 error = EINVAL; 5109 goto fail; 5110 } 5111 if ((error = pf_rollback_altq(ioe->ticket))) { 5112 PF_RULES_WUNLOCK(); 5113 free(ioes, M_TEMP); 5114 goto fail; /* really bad */ 5115 } 5116 break; 5117 #endif /* ALTQ */ 5118 case PF_RULESET_TABLE: 5119 { 5120 struct pfr_table table; 5121 5122 bzero(&table, sizeof(table)); 5123 strlcpy(table.pfrt_anchor, ioe->anchor, 5124 sizeof(table.pfrt_anchor)); 5125 if ((error = pfr_ina_rollback(&table, 5126 ioe->ticket, NULL, 0))) { 5127 PF_RULES_WUNLOCK(); 5128 free(ioes, M_TEMP); 5129 goto fail; /* really bad */ 5130 } 5131 break; 5132 } 5133 default: 5134 if ((error = pf_rollback_rules(ioe->ticket, 5135 ioe->rs_num, ioe->anchor))) { 5136 PF_RULES_WUNLOCK(); 5137 free(ioes, M_TEMP); 5138 goto fail; /* really bad */ 5139 } 5140 break; 5141 } 5142 } 5143 PF_RULES_WUNLOCK(); 5144 free(ioes, M_TEMP); 5145 break; 5146 } 5147 5148 case DIOCXCOMMIT: { 5149 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5150 struct pfioc_trans_e *ioe, *ioes; 5151 struct pf_kruleset *rs; 5152 struct pf_keth_ruleset *ers; 5153 size_t totlen; 5154 int i; 5155 5156 if (io->esize != sizeof(*ioe)) { 5157 error = ENODEV; 5158 break; 5159 } 5160 5161 if (io->size < 0 || 5162 io->size > pf_ioctl_maxcount || 5163 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5164 error = EINVAL; 5165 break; 5166 } 5167 5168 totlen = sizeof(struct pfioc_trans_e) * io->size; 5169 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5170 M_TEMP, M_WAITOK); 5171 error = copyin(io->array, ioes, totlen); 5172 if (error) { 5173 free(ioes, M_TEMP); 5174 break; 5175 } 5176 PF_RULES_WLOCK(); 5177 /* First makes sure everything will succeed. */ 5178 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5179 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; 5180 switch (ioe->rs_num) { 5181 case PF_RULESET_ETH: 5182 ers = pf_find_keth_ruleset(ioe->anchor); 5183 if (ers == NULL || ioe->ticket == 0 || 5184 ioe->ticket != ers->inactive.ticket) { 5185 PF_RULES_WUNLOCK(); 5186 free(ioes, M_TEMP); 5187 error = EINVAL; 5188 goto fail; 5189 } 5190 break; 5191 #ifdef ALTQ 5192 case PF_RULESET_ALTQ: 5193 if (ioe->anchor[0]) { 5194 PF_RULES_WUNLOCK(); 5195 free(ioes, M_TEMP); 5196 error = EINVAL; 5197 goto fail; 5198 } 5199 if (!V_altqs_inactive_open || ioe->ticket != 5200 V_ticket_altqs_inactive) { 5201 PF_RULES_WUNLOCK(); 5202 free(ioes, M_TEMP); 5203 error = EBUSY; 5204 goto fail; 5205 } 5206 break; 5207 #endif /* ALTQ */ 5208 case PF_RULESET_TABLE: 5209 rs = pf_find_kruleset(ioe->anchor); 5210 if (rs == NULL || !rs->topen || ioe->ticket != 5211 rs->tticket) { 5212 PF_RULES_WUNLOCK(); 5213 free(ioes, M_TEMP); 5214 error = EBUSY; 5215 goto fail; 5216 } 5217 break; 5218 default: 5219 if (ioe->rs_num < 0 || ioe->rs_num >= 5220 PF_RULESET_MAX) { 5221 PF_RULES_WUNLOCK(); 5222 free(ioes, M_TEMP); 5223 error = EINVAL; 5224 goto fail; 5225 } 5226 rs = pf_find_kruleset(ioe->anchor); 5227 if (rs == NULL || 5228 !rs->rules[ioe->rs_num].inactive.open || 5229 rs->rules[ioe->rs_num].inactive.ticket != 5230 ioe->ticket) { 5231 PF_RULES_WUNLOCK(); 5232 free(ioes, M_TEMP); 5233 error = EBUSY; 5234 goto fail; 5235 } 5236 break; 5237 } 5238 } 5239 /* Now do the commit - no errors should happen here. */ 5240 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5241 switch (ioe->rs_num) { 5242 case PF_RULESET_ETH: 5243 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { 5244 PF_RULES_WUNLOCK(); 5245 free(ioes, M_TEMP); 5246 goto fail; /* really bad */ 5247 } 5248 break; 5249 #ifdef ALTQ 5250 case PF_RULESET_ALTQ: 5251 if ((error = pf_commit_altq(ioe->ticket))) { 5252 PF_RULES_WUNLOCK(); 5253 free(ioes, M_TEMP); 5254 goto fail; /* really bad */ 5255 } 5256 break; 5257 #endif /* ALTQ */ 5258 case PF_RULESET_TABLE: 5259 { 5260 struct pfr_table table; 5261 5262 bzero(&table, sizeof(table)); 5263 (void)strlcpy(table.pfrt_anchor, ioe->anchor, 5264 sizeof(table.pfrt_anchor)); 5265 if ((error = pfr_ina_commit(&table, 5266 ioe->ticket, NULL, NULL, 0))) { 5267 PF_RULES_WUNLOCK(); 5268 free(ioes, M_TEMP); 5269 goto fail; /* really bad */ 5270 } 5271 break; 5272 } 5273 default: 5274 if ((error = pf_commit_rules(ioe->ticket, 5275 ioe->rs_num, ioe->anchor))) { 5276 PF_RULES_WUNLOCK(); 5277 free(ioes, M_TEMP); 5278 goto fail; /* really bad */ 5279 } 5280 break; 5281 } 5282 } 5283 PF_RULES_WUNLOCK(); 5284 5285 /* Only hook into EtherNet taffic if we've got rules for it. */ 5286 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 5287 hook_pf_eth(); 5288 else 5289 dehook_pf_eth(); 5290 5291 free(ioes, M_TEMP); 5292 break; 5293 } 5294 5295 case DIOCGETSRCNODES: { 5296 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 5297 struct pf_srchash *sh; 5298 struct pf_ksrc_node *n; 5299 struct pf_src_node *p, *pstore; 5300 uint32_t i, nr = 0; 5301 5302 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5303 i++, sh++) { 5304 PF_HASHROW_LOCK(sh); 5305 LIST_FOREACH(n, &sh->nodes, entry) 5306 nr++; 5307 PF_HASHROW_UNLOCK(sh); 5308 } 5309 5310 psn->psn_len = min(psn->psn_len, 5311 sizeof(struct pf_src_node) * nr); 5312 5313 if (psn->psn_len == 0) { 5314 psn->psn_len = sizeof(struct pf_src_node) * nr; 5315 break; 5316 } 5317 5318 nr = 0; 5319 5320 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); 5321 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5322 i++, sh++) { 5323 PF_HASHROW_LOCK(sh); 5324 LIST_FOREACH(n, &sh->nodes, entry) { 5325 5326 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 5327 break; 5328 5329 pf_src_node_copy(n, p); 5330 5331 p++; 5332 nr++; 5333 } 5334 PF_HASHROW_UNLOCK(sh); 5335 } 5336 error = copyout(pstore, psn->psn_src_nodes, 5337 sizeof(struct pf_src_node) * nr); 5338 if (error) { 5339 free(pstore, M_TEMP); 5340 break; 5341 } 5342 psn->psn_len = sizeof(struct pf_src_node) * nr; 5343 free(pstore, M_TEMP); 5344 break; 5345 } 5346 5347 case DIOCCLRSRCNODES: { 5348 pf_clear_srcnodes(NULL); 5349 pf_purge_expired_src_nodes(); 5350 break; 5351 } 5352 5353 case DIOCKILLSRCNODES: 5354 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 5355 break; 5356 5357 #ifdef COMPAT_FREEBSD13 5358 case DIOCKEEPCOUNTERS_FREEBSD13: 5359 #endif 5360 case DIOCKEEPCOUNTERS: 5361 error = pf_keepcounters((struct pfioc_nv *)addr); 5362 break; 5363 5364 case DIOCGETSYNCOOKIES: 5365 error = pf_get_syncookies((struct pfioc_nv *)addr); 5366 break; 5367 5368 case DIOCSETSYNCOOKIES: 5369 error = pf_set_syncookies((struct pfioc_nv *)addr); 5370 break; 5371 5372 case DIOCSETHOSTID: { 5373 u_int32_t *hostid = (u_int32_t *)addr; 5374 5375 PF_RULES_WLOCK(); 5376 if (*hostid == 0) 5377 V_pf_status.hostid = arc4random(); 5378 else 5379 V_pf_status.hostid = *hostid; 5380 PF_RULES_WUNLOCK(); 5381 break; 5382 } 5383 5384 case DIOCOSFPFLUSH: 5385 PF_RULES_WLOCK(); 5386 pf_osfp_flush(); 5387 PF_RULES_WUNLOCK(); 5388 break; 5389 5390 case DIOCIGETIFACES: { 5391 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5392 struct pfi_kif *ifstore; 5393 size_t bufsiz; 5394 5395 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5396 error = ENODEV; 5397 break; 5398 } 5399 5400 if (io->pfiio_size < 0 || 5401 io->pfiio_size > pf_ioctl_maxcount || 5402 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5403 error = EINVAL; 5404 break; 5405 } 5406 5407 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5408 5409 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5410 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5411 M_TEMP, M_WAITOK | M_ZERO); 5412 5413 PF_RULES_RLOCK(); 5414 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5415 PF_RULES_RUNLOCK(); 5416 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5417 free(ifstore, M_TEMP); 5418 break; 5419 } 5420 5421 case DIOCSETIFFLAG: { 5422 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5423 5424 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5425 5426 PF_RULES_WLOCK(); 5427 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5428 PF_RULES_WUNLOCK(); 5429 break; 5430 } 5431 5432 case DIOCCLRIFFLAG: { 5433 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5434 5435 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5436 5437 PF_RULES_WLOCK(); 5438 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5439 PF_RULES_WUNLOCK(); 5440 break; 5441 } 5442 5443 case DIOCSETREASS: { 5444 u_int32_t *reass = (u_int32_t *)addr; 5445 5446 V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF); 5447 /* Removal of DF flag without reassembly enabled is not a 5448 * valid combination. Disable reassembly in such case. */ 5449 if (!(V_pf_status.reass & PF_REASS_ENABLED)) 5450 V_pf_status.reass = 0; 5451 break; 5452 } 5453 5454 default: 5455 error = ENODEV; 5456 break; 5457 } 5458 fail: 5459 CURVNET_RESTORE(); 5460 5461 #undef ERROUT_IOCTL 5462 5463 return (error); 5464 } 5465 5466 void 5467 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version) 5468 { 5469 bzero(sp, sizeof(union pfsync_state_union)); 5470 5471 /* copy from state key */ 5472 sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5473 sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5474 sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5475 sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5476 sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5477 sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5478 sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5479 sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5480 sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto; 5481 sp->pfs_1301.af = st->key[PF_SK_WIRE]->af; 5482 5483 /* copy from state */ 5484 strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname)); 5485 bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr)); 5486 sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000)); 5487 sp->pfs_1301.expire = pf_state_expires(st); 5488 if (sp->pfs_1301.expire <= time_uptime) 5489 sp->pfs_1301.expire = htonl(0); 5490 else 5491 sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime); 5492 5493 sp->pfs_1301.direction = st->direction; 5494 sp->pfs_1301.log = st->act.log; 5495 sp->pfs_1301.timeout = st->timeout; 5496 5497 switch (msg_version) { 5498 case PFSYNC_MSG_VERSION_1301: 5499 sp->pfs_1301.state_flags = st->state_flags; 5500 break; 5501 case PFSYNC_MSG_VERSION_1400: 5502 sp->pfs_1400.state_flags = htons(st->state_flags); 5503 sp->pfs_1400.qid = htons(st->act.qid); 5504 sp->pfs_1400.pqid = htons(st->act.pqid); 5505 sp->pfs_1400.dnpipe = htons(st->act.dnpipe); 5506 sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe); 5507 sp->pfs_1400.rtableid = htonl(st->act.rtableid); 5508 sp->pfs_1400.min_ttl = st->act.min_ttl; 5509 sp->pfs_1400.set_tos = st->act.set_tos; 5510 sp->pfs_1400.max_mss = htons(st->act.max_mss); 5511 sp->pfs_1400.set_prio[0] = st->act.set_prio[0]; 5512 sp->pfs_1400.set_prio[1] = st->act.set_prio[1]; 5513 sp->pfs_1400.rt = st->rt; 5514 if (st->rt_kif) 5515 strlcpy(sp->pfs_1400.rt_ifname, 5516 st->rt_kif->pfik_name, 5517 sizeof(sp->pfs_1400.rt_ifname)); 5518 break; 5519 default: 5520 panic("%s: Unsupported pfsync_msg_version %d", 5521 __func__, msg_version); 5522 } 5523 5524 if (st->src_node) 5525 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE; 5526 if (st->nat_src_node) 5527 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5528 5529 sp->pfs_1301.id = st->id; 5530 sp->pfs_1301.creatorid = st->creatorid; 5531 pf_state_peer_hton(&st->src, &sp->pfs_1301.src); 5532 pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst); 5533 5534 if (st->rule.ptr == NULL) 5535 sp->pfs_1301.rule = htonl(-1); 5536 else 5537 sp->pfs_1301.rule = htonl(st->rule.ptr->nr); 5538 if (st->anchor.ptr == NULL) 5539 sp->pfs_1301.anchor = htonl(-1); 5540 else 5541 sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr); 5542 if (st->nat_rule.ptr == NULL) 5543 sp->pfs_1301.nat_rule = htonl(-1); 5544 else 5545 sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr); 5546 5547 pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]); 5548 pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]); 5549 pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]); 5550 pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]); 5551 } 5552 5553 void 5554 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5555 { 5556 bzero(sp, sizeof(*sp)); 5557 5558 sp->version = PF_STATE_VERSION; 5559 5560 /* copy from state key */ 5561 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5562 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5563 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5564 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5565 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5566 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5567 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5568 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5569 sp->proto = st->key[PF_SK_WIRE]->proto; 5570 sp->af = st->key[PF_SK_WIRE]->af; 5571 5572 /* copy from state */ 5573 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5574 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5575 sizeof(sp->orig_ifname)); 5576 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5577 sp->creation = htonl(time_uptime - (st->creation / 1000)); 5578 sp->expire = pf_state_expires(st); 5579 if (sp->expire <= time_uptime) 5580 sp->expire = htonl(0); 5581 else 5582 sp->expire = htonl(sp->expire - time_uptime); 5583 5584 sp->direction = st->direction; 5585 sp->log = st->act.log; 5586 sp->timeout = st->timeout; 5587 /* 8 bits for the old libpfctl, 16 bits for the new libpfctl */ 5588 sp->state_flags_compat = st->state_flags; 5589 sp->state_flags = htons(st->state_flags); 5590 if (st->src_node) 5591 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5592 if (st->nat_src_node) 5593 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5594 5595 sp->id = st->id; 5596 sp->creatorid = st->creatorid; 5597 pf_state_peer_hton(&st->src, &sp->src); 5598 pf_state_peer_hton(&st->dst, &sp->dst); 5599 5600 if (st->rule.ptr == NULL) 5601 sp->rule = htonl(-1); 5602 else 5603 sp->rule = htonl(st->rule.ptr->nr); 5604 if (st->anchor.ptr == NULL) 5605 sp->anchor = htonl(-1); 5606 else 5607 sp->anchor = htonl(st->anchor.ptr->nr); 5608 if (st->nat_rule.ptr == NULL) 5609 sp->nat_rule = htonl(-1); 5610 else 5611 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5612 5613 sp->packets[0] = st->packets[0]; 5614 sp->packets[1] = st->packets[1]; 5615 sp->bytes[0] = st->bytes[0]; 5616 sp->bytes[1] = st->bytes[1]; 5617 5618 sp->qid = htons(st->act.qid); 5619 sp->pqid = htons(st->act.pqid); 5620 sp->dnpipe = htons(st->act.dnpipe); 5621 sp->dnrpipe = htons(st->act.dnrpipe); 5622 sp->rtableid = htonl(st->act.rtableid); 5623 sp->min_ttl = st->act.min_ttl; 5624 sp->set_tos = st->act.set_tos; 5625 sp->max_mss = htons(st->act.max_mss); 5626 sp->rt = st->rt; 5627 if (st->rt_kif) 5628 strlcpy(sp->rt_ifname, st->rt_kif->pfik_name, 5629 sizeof(sp->rt_ifname)); 5630 sp->set_prio[0] = st->act.set_prio[0]; 5631 sp->set_prio[1] = st->act.set_prio[1]; 5632 5633 } 5634 5635 static void 5636 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5637 { 5638 struct pfr_ktable *kt; 5639 5640 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5641 5642 kt = aw->p.tbl; 5643 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5644 kt = kt->pfrkt_root; 5645 aw->p.tbl = NULL; 5646 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 5647 kt->pfrkt_cnt : -1; 5648 } 5649 5650 static int 5651 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5652 size_t number, char **names) 5653 { 5654 nvlist_t *nvc; 5655 5656 nvc = nvlist_create(0); 5657 if (nvc == NULL) 5658 return (ENOMEM); 5659 5660 for (int i = 0; i < number; i++) { 5661 nvlist_append_number_array(nvc, "counters", 5662 counter_u64_fetch(counters[i])); 5663 nvlist_append_string_array(nvc, "names", 5664 names[i]); 5665 nvlist_append_number_array(nvc, "ids", 5666 i); 5667 } 5668 nvlist_add_nvlist(nvl, name, nvc); 5669 nvlist_destroy(nvc); 5670 5671 return (0); 5672 } 5673 5674 static int 5675 pf_getstatus(struct pfioc_nv *nv) 5676 { 5677 nvlist_t *nvl = NULL, *nvc = NULL; 5678 void *nvlpacked = NULL; 5679 int error; 5680 struct pf_status s; 5681 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5682 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5683 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5684 PF_RULES_RLOCK_TRACKER; 5685 5686 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5687 5688 PF_RULES_RLOCK(); 5689 5690 nvl = nvlist_create(0); 5691 if (nvl == NULL) 5692 ERROUT(ENOMEM); 5693 5694 nvlist_add_bool(nvl, "running", V_pf_status.running); 5695 nvlist_add_number(nvl, "since", V_pf_status.since); 5696 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5697 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5698 nvlist_add_number(nvl, "states", V_pf_status.states); 5699 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5700 nvlist_add_number(nvl, "reass", V_pf_status.reass); 5701 nvlist_add_bool(nvl, "syncookies_active", 5702 V_pf_status.syncookies_active); 5703 nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen); 5704 5705 /* counters */ 5706 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 5707 PFRES_MAX, pf_reasons); 5708 if (error != 0) 5709 ERROUT(error); 5710 5711 /* lcounters */ 5712 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5713 KLCNT_MAX, pf_lcounter); 5714 if (error != 0) 5715 ERROUT(error); 5716 5717 /* fcounters */ 5718 nvc = nvlist_create(0); 5719 if (nvc == NULL) 5720 ERROUT(ENOMEM); 5721 5722 for (int i = 0; i < FCNT_MAX; i++) { 5723 nvlist_append_number_array(nvc, "counters", 5724 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5725 nvlist_append_string_array(nvc, "names", 5726 pf_fcounter[i]); 5727 nvlist_append_number_array(nvc, "ids", 5728 i); 5729 } 5730 nvlist_add_nvlist(nvl, "fcounters", nvc); 5731 nvlist_destroy(nvc); 5732 nvc = NULL; 5733 5734 /* scounters */ 5735 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5736 SCNT_MAX, pf_fcounter); 5737 if (error != 0) 5738 ERROUT(error); 5739 5740 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5741 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5742 PF_MD5_DIGEST_LENGTH); 5743 5744 pfi_update_status(V_pf_status.ifname, &s); 5745 5746 /* pcounters / bcounters */ 5747 for (int i = 0; i < 2; i++) { 5748 for (int j = 0; j < 2; j++) { 5749 for (int k = 0; k < 2; k++) { 5750 nvlist_append_number_array(nvl, "pcounters", 5751 s.pcounters[i][j][k]); 5752 } 5753 nvlist_append_number_array(nvl, "bcounters", 5754 s.bcounters[i][j]); 5755 } 5756 } 5757 5758 nvlpacked = nvlist_pack(nvl, &nv->len); 5759 if (nvlpacked == NULL) 5760 ERROUT(ENOMEM); 5761 5762 if (nv->size == 0) 5763 ERROUT(0); 5764 else if (nv->size < nv->len) 5765 ERROUT(ENOSPC); 5766 5767 PF_RULES_RUNLOCK(); 5768 error = copyout(nvlpacked, nv->data, nv->len); 5769 goto done; 5770 5771 #undef ERROUT 5772 errout: 5773 PF_RULES_RUNLOCK(); 5774 done: 5775 free(nvlpacked, M_NVLIST); 5776 nvlist_destroy(nvc); 5777 nvlist_destroy(nvl); 5778 5779 return (error); 5780 } 5781 5782 /* 5783 * XXX - Check for version mismatch!!! 5784 */ 5785 static void 5786 pf_clear_all_states(void) 5787 { 5788 struct epoch_tracker et; 5789 struct pf_kstate *s; 5790 u_int i; 5791 5792 NET_EPOCH_ENTER(et); 5793 for (i = 0; i <= pf_hashmask; i++) { 5794 struct pf_idhash *ih = &V_pf_idhash[i]; 5795 relock: 5796 PF_HASHROW_LOCK(ih); 5797 LIST_FOREACH(s, &ih->states, entry) { 5798 s->timeout = PFTM_PURGE; 5799 /* Don't send out individual delete messages. */ 5800 s->state_flags |= PFSTATE_NOSYNC; 5801 pf_unlink_state(s); 5802 goto relock; 5803 } 5804 PF_HASHROW_UNLOCK(ih); 5805 } 5806 NET_EPOCH_EXIT(et); 5807 } 5808 5809 static int 5810 pf_clear_tables(void) 5811 { 5812 struct pfioc_table io; 5813 int error; 5814 5815 bzero(&io, sizeof(io)); 5816 io.pfrio_flags |= PFR_FLAG_ALLRSETS; 5817 5818 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5819 io.pfrio_flags); 5820 5821 return (error); 5822 } 5823 5824 static void 5825 pf_clear_srcnodes(struct pf_ksrc_node *n) 5826 { 5827 struct pf_kstate *s; 5828 int i; 5829 5830 for (i = 0; i <= pf_hashmask; i++) { 5831 struct pf_idhash *ih = &V_pf_idhash[i]; 5832 5833 PF_HASHROW_LOCK(ih); 5834 LIST_FOREACH(s, &ih->states, entry) { 5835 if (n == NULL || n == s->src_node) 5836 s->src_node = NULL; 5837 if (n == NULL || n == s->nat_src_node) 5838 s->nat_src_node = NULL; 5839 } 5840 PF_HASHROW_UNLOCK(ih); 5841 } 5842 5843 if (n == NULL) { 5844 struct pf_srchash *sh; 5845 5846 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5847 i++, sh++) { 5848 PF_HASHROW_LOCK(sh); 5849 LIST_FOREACH(n, &sh->nodes, entry) { 5850 n->expire = 1; 5851 n->states = 0; 5852 } 5853 PF_HASHROW_UNLOCK(sh); 5854 } 5855 } else { 5856 /* XXX: hash slot should already be locked here. */ 5857 n->expire = 1; 5858 n->states = 0; 5859 } 5860 } 5861 5862 static void 5863 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5864 { 5865 struct pf_ksrc_node_list kill; 5866 5867 LIST_INIT(&kill); 5868 for (int i = 0; i <= pf_srchashmask; i++) { 5869 struct pf_srchash *sh = &V_pf_srchash[i]; 5870 struct pf_ksrc_node *sn, *tmp; 5871 5872 PF_HASHROW_LOCK(sh); 5873 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5874 if (PF_MATCHA(psnk->psnk_src.neg, 5875 &psnk->psnk_src.addr.v.a.addr, 5876 &psnk->psnk_src.addr.v.a.mask, 5877 &sn->addr, sn->af) && 5878 PF_MATCHA(psnk->psnk_dst.neg, 5879 &psnk->psnk_dst.addr.v.a.addr, 5880 &psnk->psnk_dst.addr.v.a.mask, 5881 &sn->raddr, sn->af)) { 5882 pf_unlink_src_node(sn); 5883 LIST_INSERT_HEAD(&kill, sn, entry); 5884 sn->expire = 1; 5885 } 5886 PF_HASHROW_UNLOCK(sh); 5887 } 5888 5889 for (int i = 0; i <= pf_hashmask; i++) { 5890 struct pf_idhash *ih = &V_pf_idhash[i]; 5891 struct pf_kstate *s; 5892 5893 PF_HASHROW_LOCK(ih); 5894 LIST_FOREACH(s, &ih->states, entry) { 5895 if (s->src_node && s->src_node->expire == 1) 5896 s->src_node = NULL; 5897 if (s->nat_src_node && s->nat_src_node->expire == 1) 5898 s->nat_src_node = NULL; 5899 } 5900 PF_HASHROW_UNLOCK(ih); 5901 } 5902 5903 psnk->psnk_killed = pf_free_src_nodes(&kill); 5904 } 5905 5906 static int 5907 pf_keepcounters(struct pfioc_nv *nv) 5908 { 5909 nvlist_t *nvl = NULL; 5910 void *nvlpacked = NULL; 5911 int error = 0; 5912 5913 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5914 5915 if (nv->len > pf_ioctl_maxcount) 5916 ERROUT(ENOMEM); 5917 5918 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5919 if (nvlpacked == NULL) 5920 ERROUT(ENOMEM); 5921 5922 error = copyin(nv->data, nvlpacked, nv->len); 5923 if (error) 5924 ERROUT(error); 5925 5926 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5927 if (nvl == NULL) 5928 ERROUT(EBADMSG); 5929 5930 if (! nvlist_exists_bool(nvl, "keep_counters")) 5931 ERROUT(EBADMSG); 5932 5933 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 5934 5935 on_error: 5936 nvlist_destroy(nvl); 5937 free(nvlpacked, M_NVLIST); 5938 return (error); 5939 } 5940 5941 static unsigned int 5942 pf_clear_states(const struct pf_kstate_kill *kill) 5943 { 5944 struct pf_state_key_cmp match_key; 5945 struct pf_kstate *s; 5946 struct pfi_kkif *kif; 5947 int idx; 5948 unsigned int killed = 0, dir; 5949 5950 NET_EPOCH_ASSERT(); 5951 5952 for (unsigned int i = 0; i <= pf_hashmask; i++) { 5953 struct pf_idhash *ih = &V_pf_idhash[i]; 5954 5955 relock_DIOCCLRSTATES: 5956 PF_HASHROW_LOCK(ih); 5957 LIST_FOREACH(s, &ih->states, entry) { 5958 /* For floating states look at the original kif. */ 5959 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 5960 5961 if (kill->psk_ifname[0] && 5962 strcmp(kill->psk_ifname, 5963 kif->pfik_name)) 5964 continue; 5965 5966 if (kill->psk_kill_match) { 5967 bzero(&match_key, sizeof(match_key)); 5968 5969 if (s->direction == PF_OUT) { 5970 dir = PF_IN; 5971 idx = PF_SK_STACK; 5972 } else { 5973 dir = PF_OUT; 5974 idx = PF_SK_WIRE; 5975 } 5976 5977 match_key.af = s->key[idx]->af; 5978 match_key.proto = s->key[idx]->proto; 5979 PF_ACPY(&match_key.addr[0], 5980 &s->key[idx]->addr[1], match_key.af); 5981 match_key.port[0] = s->key[idx]->port[1]; 5982 PF_ACPY(&match_key.addr[1], 5983 &s->key[idx]->addr[0], match_key.af); 5984 match_key.port[1] = s->key[idx]->port[0]; 5985 } 5986 5987 /* 5988 * Don't send out individual 5989 * delete messages. 5990 */ 5991 s->state_flags |= PFSTATE_NOSYNC; 5992 pf_unlink_state(s); 5993 killed++; 5994 5995 if (kill->psk_kill_match) 5996 killed += pf_kill_matching_state(&match_key, 5997 dir); 5998 5999 goto relock_DIOCCLRSTATES; 6000 } 6001 PF_HASHROW_UNLOCK(ih); 6002 } 6003 6004 if (V_pfsync_clear_states_ptr != NULL) 6005 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6006 6007 return (killed); 6008 } 6009 6010 static void 6011 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6012 { 6013 struct pf_kstate *s; 6014 6015 NET_EPOCH_ASSERT(); 6016 if (kill->psk_pfcmp.id) { 6017 if (kill->psk_pfcmp.creatorid == 0) 6018 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6019 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6020 kill->psk_pfcmp.creatorid))) { 6021 pf_unlink_state(s); 6022 *killed = 1; 6023 } 6024 return; 6025 } 6026 6027 for (unsigned int i = 0; i <= pf_hashmask; i++) 6028 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6029 } 6030 6031 static int 6032 pf_killstates_nv(struct pfioc_nv *nv) 6033 { 6034 struct pf_kstate_kill kill; 6035 struct epoch_tracker et; 6036 nvlist_t *nvl = NULL; 6037 void *nvlpacked = NULL; 6038 int error = 0; 6039 unsigned int killed = 0; 6040 6041 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6042 6043 if (nv->len > pf_ioctl_maxcount) 6044 ERROUT(ENOMEM); 6045 6046 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6047 if (nvlpacked == NULL) 6048 ERROUT(ENOMEM); 6049 6050 error = copyin(nv->data, nvlpacked, nv->len); 6051 if (error) 6052 ERROUT(error); 6053 6054 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6055 if (nvl == NULL) 6056 ERROUT(EBADMSG); 6057 6058 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6059 if (error) 6060 ERROUT(error); 6061 6062 NET_EPOCH_ENTER(et); 6063 pf_killstates(&kill, &killed); 6064 NET_EPOCH_EXIT(et); 6065 6066 free(nvlpacked, M_NVLIST); 6067 nvlpacked = NULL; 6068 nvlist_destroy(nvl); 6069 nvl = nvlist_create(0); 6070 if (nvl == NULL) 6071 ERROUT(ENOMEM); 6072 6073 nvlist_add_number(nvl, "killed", killed); 6074 6075 nvlpacked = nvlist_pack(nvl, &nv->len); 6076 if (nvlpacked == NULL) 6077 ERROUT(ENOMEM); 6078 6079 if (nv->size == 0) 6080 ERROUT(0); 6081 else if (nv->size < nv->len) 6082 ERROUT(ENOSPC); 6083 6084 error = copyout(nvlpacked, nv->data, nv->len); 6085 6086 on_error: 6087 nvlist_destroy(nvl); 6088 free(nvlpacked, M_NVLIST); 6089 return (error); 6090 } 6091 6092 static int 6093 pf_clearstates_nv(struct pfioc_nv *nv) 6094 { 6095 struct pf_kstate_kill kill; 6096 struct epoch_tracker et; 6097 nvlist_t *nvl = NULL; 6098 void *nvlpacked = NULL; 6099 int error = 0; 6100 unsigned int killed; 6101 6102 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6103 6104 if (nv->len > pf_ioctl_maxcount) 6105 ERROUT(ENOMEM); 6106 6107 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6108 if (nvlpacked == NULL) 6109 ERROUT(ENOMEM); 6110 6111 error = copyin(nv->data, nvlpacked, nv->len); 6112 if (error) 6113 ERROUT(error); 6114 6115 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6116 if (nvl == NULL) 6117 ERROUT(EBADMSG); 6118 6119 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6120 if (error) 6121 ERROUT(error); 6122 6123 NET_EPOCH_ENTER(et); 6124 killed = pf_clear_states(&kill); 6125 NET_EPOCH_EXIT(et); 6126 6127 free(nvlpacked, M_NVLIST); 6128 nvlpacked = NULL; 6129 nvlist_destroy(nvl); 6130 nvl = nvlist_create(0); 6131 if (nvl == NULL) 6132 ERROUT(ENOMEM); 6133 6134 nvlist_add_number(nvl, "killed", killed); 6135 6136 nvlpacked = nvlist_pack(nvl, &nv->len); 6137 if (nvlpacked == NULL) 6138 ERROUT(ENOMEM); 6139 6140 if (nv->size == 0) 6141 ERROUT(0); 6142 else if (nv->size < nv->len) 6143 ERROUT(ENOSPC); 6144 6145 error = copyout(nvlpacked, nv->data, nv->len); 6146 6147 #undef ERROUT 6148 on_error: 6149 nvlist_destroy(nvl); 6150 free(nvlpacked, M_NVLIST); 6151 return (error); 6152 } 6153 6154 static int 6155 pf_getstate(struct pfioc_nv *nv) 6156 { 6157 nvlist_t *nvl = NULL, *nvls; 6158 void *nvlpacked = NULL; 6159 struct pf_kstate *s = NULL; 6160 int error = 0; 6161 uint64_t id, creatorid; 6162 6163 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 6164 6165 if (nv->len > pf_ioctl_maxcount) 6166 ERROUT(ENOMEM); 6167 6168 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6169 if (nvlpacked == NULL) 6170 ERROUT(ENOMEM); 6171 6172 error = copyin(nv->data, nvlpacked, nv->len); 6173 if (error) 6174 ERROUT(error); 6175 6176 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6177 if (nvl == NULL) 6178 ERROUT(EBADMSG); 6179 6180 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 6181 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 6182 6183 s = pf_find_state_byid(id, creatorid); 6184 if (s == NULL) 6185 ERROUT(ENOENT); 6186 6187 free(nvlpacked, M_NVLIST); 6188 nvlpacked = NULL; 6189 nvlist_destroy(nvl); 6190 nvl = nvlist_create(0); 6191 if (nvl == NULL) 6192 ERROUT(ENOMEM); 6193 6194 nvls = pf_state_to_nvstate(s); 6195 if (nvls == NULL) 6196 ERROUT(ENOMEM); 6197 6198 nvlist_add_nvlist(nvl, "state", nvls); 6199 nvlist_destroy(nvls); 6200 6201 nvlpacked = nvlist_pack(nvl, &nv->len); 6202 if (nvlpacked == NULL) 6203 ERROUT(ENOMEM); 6204 6205 if (nv->size == 0) 6206 ERROUT(0); 6207 else if (nv->size < nv->len) 6208 ERROUT(ENOSPC); 6209 6210 error = copyout(nvlpacked, nv->data, nv->len); 6211 6212 #undef ERROUT 6213 errout: 6214 if (s != NULL) 6215 PF_STATE_UNLOCK(s); 6216 free(nvlpacked, M_NVLIST); 6217 nvlist_destroy(nvl); 6218 return (error); 6219 } 6220 6221 /* 6222 * XXX - Check for version mismatch!!! 6223 */ 6224 6225 /* 6226 * Duplicate pfctl -Fa operation to get rid of as much as we can. 6227 */ 6228 static int 6229 shutdown_pf(void) 6230 { 6231 int error = 0; 6232 u_int32_t t[5]; 6233 char nn = '\0'; 6234 struct pf_kanchor *anchor; 6235 struct pf_keth_anchor *eth_anchor; 6236 int rs_num; 6237 6238 do { 6239 /* Unlink rules of all user defined anchors */ 6240 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) { 6241 /* Wildcard based anchors may not have a respective 6242 * explicit anchor rule or they may be left empty 6243 * without rules. It leads to anchor.refcnt=0, and the 6244 * rest of the logic does not expect it. */ 6245 if (anchor->refcnt == 0) 6246 anchor->refcnt = 1; 6247 for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) { 6248 if ((error = pf_begin_rules(&t[rs_num], rs_num, 6249 anchor->path)) != 0) { 6250 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: " 6251 "anchor.path=%s rs_num=%d\n", 6252 anchor->path, rs_num)); 6253 goto error; /* XXX: rollback? */ 6254 } 6255 } 6256 for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) { 6257 error = pf_commit_rules(t[rs_num], rs_num, 6258 anchor->path); 6259 MPASS(error == 0); 6260 } 6261 } 6262 6263 /* Unlink rules of all user defined ether anchors */ 6264 RB_FOREACH(eth_anchor, pf_keth_anchor_global, 6265 &V_pf_keth_anchors) { 6266 /* Wildcard based anchors may not have a respective 6267 * explicit anchor rule or they may be left empty 6268 * without rules. It leads to anchor.refcnt=0, and the 6269 * rest of the logic does not expect it. */ 6270 if (eth_anchor->refcnt == 0) 6271 eth_anchor->refcnt = 1; 6272 if ((error = pf_begin_eth(&t[0], eth_anchor->path)) 6273 != 0) { 6274 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth " 6275 "anchor.path=%s\n", eth_anchor->path)); 6276 goto error; 6277 } 6278 error = pf_commit_eth(t[0], eth_anchor->path); 6279 MPASS(error == 0); 6280 } 6281 6282 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6283 != 0) { 6284 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6285 break; 6286 } 6287 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6288 != 0) { 6289 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6290 break; /* XXX: rollback? */ 6291 } 6292 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6293 != 0) { 6294 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6295 break; /* XXX: rollback? */ 6296 } 6297 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6298 != 0) { 6299 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6300 break; /* XXX: rollback? */ 6301 } 6302 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6303 != 0) { 6304 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6305 break; /* XXX: rollback? */ 6306 } 6307 6308 error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 6309 MPASS(error == 0); 6310 error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 6311 MPASS(error == 0); 6312 error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 6313 MPASS(error == 0); 6314 error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 6315 MPASS(error == 0); 6316 error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 6317 MPASS(error == 0); 6318 6319 if ((error = pf_clear_tables()) != 0) 6320 break; 6321 6322 if ((error = pf_begin_eth(&t[0], &nn)) != 0) { 6323 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); 6324 break; 6325 } 6326 error = pf_commit_eth(t[0], &nn); 6327 MPASS(error == 0); 6328 6329 #ifdef ALTQ 6330 if ((error = pf_begin_altq(&t[0])) != 0) { 6331 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 6332 break; 6333 } 6334 pf_commit_altq(t[0]); 6335 #endif 6336 6337 pf_clear_all_states(); 6338 6339 pf_clear_srcnodes(NULL); 6340 6341 /* status does not use malloced mem so no need to cleanup */ 6342 /* fingerprints and interfaces have their own cleanup code */ 6343 } while(0); 6344 6345 error: 6346 return (error); 6347 } 6348 6349 static pfil_return_t 6350 pf_check_return(int chk, struct mbuf **m) 6351 { 6352 6353 switch (chk) { 6354 case PF_PASS: 6355 if (*m == NULL) 6356 return (PFIL_CONSUMED); 6357 else 6358 return (PFIL_PASS); 6359 break; 6360 default: 6361 if (*m != NULL) { 6362 m_freem(*m); 6363 *m = NULL; 6364 } 6365 return (PFIL_DROPPED); 6366 } 6367 } 6368 6369 static pfil_return_t 6370 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6371 void *ruleset __unused, struct inpcb *inp) 6372 { 6373 int chk; 6374 6375 chk = pf_test_eth(PF_IN, flags, ifp, m, inp); 6376 6377 return (pf_check_return(chk, m)); 6378 } 6379 6380 static pfil_return_t 6381 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6382 void *ruleset __unused, struct inpcb *inp) 6383 { 6384 int chk; 6385 6386 chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); 6387 6388 return (pf_check_return(chk, m)); 6389 } 6390 6391 #ifdef INET 6392 static pfil_return_t 6393 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6394 void *ruleset __unused, struct inpcb *inp) 6395 { 6396 int chk; 6397 6398 chk = pf_test(PF_IN, flags, ifp, m, inp, NULL); 6399 6400 return (pf_check_return(chk, m)); 6401 } 6402 6403 static pfil_return_t 6404 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6405 void *ruleset __unused, struct inpcb *inp) 6406 { 6407 int chk; 6408 6409 chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL); 6410 6411 return (pf_check_return(chk, m)); 6412 } 6413 #endif 6414 6415 #ifdef INET6 6416 static pfil_return_t 6417 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, 6418 void *ruleset __unused, struct inpcb *inp) 6419 { 6420 int chk; 6421 6422 /* 6423 * In case of loopback traffic IPv6 uses the real interface in 6424 * order to support scoped addresses. In order to support stateful 6425 * filtering we have change this to lo0 as it is the case in IPv4. 6426 */ 6427 CURVNET_SET(ifp->if_vnet); 6428 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, 6429 m, inp, NULL); 6430 CURVNET_RESTORE(); 6431 6432 return (pf_check_return(chk, m)); 6433 } 6434 6435 static pfil_return_t 6436 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, 6437 void *ruleset __unused, struct inpcb *inp) 6438 { 6439 int chk; 6440 6441 CURVNET_SET(ifp->if_vnet); 6442 chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL); 6443 CURVNET_RESTORE(); 6444 6445 return (pf_check_return(chk, m)); 6446 } 6447 #endif /* INET6 */ 6448 6449 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook); 6450 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook); 6451 #define V_pf_eth_in_hook VNET(pf_eth_in_hook) 6452 #define V_pf_eth_out_hook VNET(pf_eth_out_hook) 6453 6454 #ifdef INET 6455 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook); 6456 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook); 6457 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook) 6458 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook) 6459 #endif 6460 #ifdef INET6 6461 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook); 6462 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook); 6463 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook) 6464 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook) 6465 #endif 6466 6467 static void 6468 hook_pf_eth(void) 6469 { 6470 struct pfil_hook_args pha = { 6471 .pa_version = PFIL_VERSION, 6472 .pa_modname = "pf", 6473 .pa_type = PFIL_TYPE_ETHERNET, 6474 }; 6475 struct pfil_link_args pla = { 6476 .pa_version = PFIL_VERSION, 6477 }; 6478 int ret __diagused; 6479 6480 if (atomic_load_bool(&V_pf_pfil_eth_hooked)) 6481 return; 6482 6483 pha.pa_mbuf_chk = pf_eth_check_in; 6484 pha.pa_flags = PFIL_IN; 6485 pha.pa_rulname = "eth-in"; 6486 V_pf_eth_in_hook = pfil_add_hook(&pha); 6487 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6488 pla.pa_head = V_link_pfil_head; 6489 pla.pa_hook = V_pf_eth_in_hook; 6490 ret = pfil_link(&pla); 6491 MPASS(ret == 0); 6492 pha.pa_mbuf_chk = pf_eth_check_out; 6493 pha.pa_flags = PFIL_OUT; 6494 pha.pa_rulname = "eth-out"; 6495 V_pf_eth_out_hook = pfil_add_hook(&pha); 6496 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6497 pla.pa_head = V_link_pfil_head; 6498 pla.pa_hook = V_pf_eth_out_hook; 6499 ret = pfil_link(&pla); 6500 MPASS(ret == 0); 6501 6502 atomic_store_bool(&V_pf_pfil_eth_hooked, true); 6503 } 6504 6505 static void 6506 hook_pf(void) 6507 { 6508 struct pfil_hook_args pha = { 6509 .pa_version = PFIL_VERSION, 6510 .pa_modname = "pf", 6511 }; 6512 struct pfil_link_args pla = { 6513 .pa_version = PFIL_VERSION, 6514 }; 6515 int ret __diagused; 6516 6517 if (atomic_load_bool(&V_pf_pfil_hooked)) 6518 return; 6519 6520 #ifdef INET 6521 pha.pa_type = PFIL_TYPE_IP4; 6522 pha.pa_mbuf_chk = pf_check_in; 6523 pha.pa_flags = PFIL_IN; 6524 pha.pa_rulname = "default-in"; 6525 V_pf_ip4_in_hook = pfil_add_hook(&pha); 6526 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6527 pla.pa_head = V_inet_pfil_head; 6528 pla.pa_hook = V_pf_ip4_in_hook; 6529 ret = pfil_link(&pla); 6530 MPASS(ret == 0); 6531 pha.pa_mbuf_chk = pf_check_out; 6532 pha.pa_flags = PFIL_OUT; 6533 pha.pa_rulname = "default-out"; 6534 V_pf_ip4_out_hook = pfil_add_hook(&pha); 6535 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6536 pla.pa_head = V_inet_pfil_head; 6537 pla.pa_hook = V_pf_ip4_out_hook; 6538 ret = pfil_link(&pla); 6539 MPASS(ret == 0); 6540 if (V_pf_filter_local) { 6541 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6542 pla.pa_head = V_inet_local_pfil_head; 6543 pla.pa_hook = V_pf_ip4_out_hook; 6544 ret = pfil_link(&pla); 6545 MPASS(ret == 0); 6546 } 6547 #endif 6548 #ifdef INET6 6549 pha.pa_type = PFIL_TYPE_IP6; 6550 pha.pa_mbuf_chk = pf_check6_in; 6551 pha.pa_flags = PFIL_IN; 6552 pha.pa_rulname = "default-in6"; 6553 V_pf_ip6_in_hook = pfil_add_hook(&pha); 6554 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6555 pla.pa_head = V_inet6_pfil_head; 6556 pla.pa_hook = V_pf_ip6_in_hook; 6557 ret = pfil_link(&pla); 6558 MPASS(ret == 0); 6559 pha.pa_mbuf_chk = pf_check6_out; 6560 pha.pa_rulname = "default-out6"; 6561 pha.pa_flags = PFIL_OUT; 6562 V_pf_ip6_out_hook = pfil_add_hook(&pha); 6563 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6564 pla.pa_head = V_inet6_pfil_head; 6565 pla.pa_hook = V_pf_ip6_out_hook; 6566 ret = pfil_link(&pla); 6567 MPASS(ret == 0); 6568 if (V_pf_filter_local) { 6569 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6570 pla.pa_head = V_inet6_local_pfil_head; 6571 pla.pa_hook = V_pf_ip6_out_hook; 6572 ret = pfil_link(&pla); 6573 MPASS(ret == 0); 6574 } 6575 #endif 6576 6577 atomic_store_bool(&V_pf_pfil_hooked, true); 6578 } 6579 6580 static void 6581 dehook_pf_eth(void) 6582 { 6583 6584 if (!atomic_load_bool(&V_pf_pfil_eth_hooked)) 6585 return; 6586 6587 pfil_remove_hook(V_pf_eth_in_hook); 6588 pfil_remove_hook(V_pf_eth_out_hook); 6589 6590 atomic_store_bool(&V_pf_pfil_eth_hooked, false); 6591 } 6592 6593 static void 6594 dehook_pf(void) 6595 { 6596 6597 if (!atomic_load_bool(&V_pf_pfil_hooked)) 6598 return; 6599 6600 #ifdef INET 6601 pfil_remove_hook(V_pf_ip4_in_hook); 6602 pfil_remove_hook(V_pf_ip4_out_hook); 6603 #endif 6604 #ifdef INET6 6605 pfil_remove_hook(V_pf_ip6_in_hook); 6606 pfil_remove_hook(V_pf_ip6_out_hook); 6607 #endif 6608 6609 atomic_store_bool(&V_pf_pfil_hooked, false); 6610 } 6611 6612 static void 6613 pf_load_vnet(void) 6614 { 6615 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname), 6616 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 6617 6618 rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE); 6619 sx_init(&V_pf_ioctl_lock, "pf ioctl"); 6620 6621 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize, 6622 PF_RULE_TAG_HASH_SIZE_DEFAULT); 6623 #ifdef ALTQ 6624 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize, 6625 PF_QUEUE_TAG_HASH_SIZE_DEFAULT); 6626 #endif 6627 6628 V_pf_keth = &V_pf_main_keth_anchor.ruleset; 6629 6630 pfattach_vnet(); 6631 V_pf_vnet_active = 1; 6632 } 6633 6634 static int 6635 pf_load(void) 6636 { 6637 int error; 6638 6639 sx_init(&pf_end_lock, "pf end thread"); 6640 6641 pf_mtag_initialize(); 6642 6643 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME); 6644 if (pf_dev == NULL) 6645 return (ENOMEM); 6646 6647 pf_end_threads = 0; 6648 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge"); 6649 if (error != 0) 6650 return (error); 6651 6652 pfi_initialize(); 6653 6654 return (0); 6655 } 6656 6657 static void 6658 pf_unload_vnet(void) 6659 { 6660 int ret __diagused; 6661 6662 V_pf_vnet_active = 0; 6663 V_pf_status.running = 0; 6664 dehook_pf(); 6665 dehook_pf_eth(); 6666 6667 PF_RULES_WLOCK(); 6668 pf_syncookies_cleanup(); 6669 shutdown_pf(); 6670 PF_RULES_WUNLOCK(); 6671 6672 /* Make sure we've cleaned up ethernet rules before we continue. */ 6673 NET_EPOCH_DRAIN_CALLBACKS(); 6674 6675 ret = swi_remove(V_pf_swi_cookie); 6676 MPASS(ret == 0); 6677 ret = intr_event_destroy(V_pf_swi_ie); 6678 MPASS(ret == 0); 6679 6680 pf_unload_vnet_purge(); 6681 6682 pf_normalize_cleanup(); 6683 PF_RULES_WLOCK(); 6684 pfi_cleanup_vnet(); 6685 PF_RULES_WUNLOCK(); 6686 pfr_cleanup(); 6687 pf_osfp_flush(); 6688 pf_cleanup(); 6689 if (IS_DEFAULT_VNET(curvnet)) 6690 pf_mtag_cleanup(); 6691 6692 pf_cleanup_tagset(&V_pf_tags); 6693 #ifdef ALTQ 6694 pf_cleanup_tagset(&V_pf_qids); 6695 #endif 6696 uma_zdestroy(V_pf_tag_z); 6697 6698 #ifdef PF_WANT_32_TO_64_COUNTER 6699 PF_RULES_WLOCK(); 6700 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); 6701 6702 MPASS(LIST_EMPTY(&V_pf_allkiflist)); 6703 MPASS(V_pf_allkifcount == 0); 6704 6705 LIST_REMOVE(&V_pf_default_rule, allrulelist); 6706 V_pf_allrulecount--; 6707 LIST_REMOVE(V_pf_rulemarker, allrulelist); 6708 6709 /* 6710 * There are known pf rule leaks when running the test suite. 6711 */ 6712 #ifdef notyet 6713 MPASS(LIST_EMPTY(&V_pf_allrulelist)); 6714 MPASS(V_pf_allrulecount == 0); 6715 #endif 6716 6717 PF_RULES_WUNLOCK(); 6718 6719 free(V_pf_kifmarker, PFI_MTYPE); 6720 free(V_pf_rulemarker, M_PFRULE); 6721 #endif 6722 6723 /* Free counters last as we updated them during shutdown. */ 6724 pf_counter_u64_deinit(&V_pf_default_rule.evaluations); 6725 for (int i = 0; i < 2; i++) { 6726 pf_counter_u64_deinit(&V_pf_default_rule.packets[i]); 6727 pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]); 6728 } 6729 counter_u64_free(V_pf_default_rule.states_cur); 6730 counter_u64_free(V_pf_default_rule.states_tot); 6731 counter_u64_free(V_pf_default_rule.src_nodes); 6732 uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp); 6733 6734 for (int i = 0; i < PFRES_MAX; i++) 6735 counter_u64_free(V_pf_status.counters[i]); 6736 for (int i = 0; i < KLCNT_MAX; i++) 6737 counter_u64_free(V_pf_status.lcounters[i]); 6738 for (int i = 0; i < FCNT_MAX; i++) 6739 pf_counter_u64_deinit(&V_pf_status.fcounters[i]); 6740 for (int i = 0; i < SCNT_MAX; i++) 6741 counter_u64_free(V_pf_status.scounters[i]); 6742 6743 rm_destroy(&V_pf_rules_lock); 6744 sx_destroy(&V_pf_ioctl_lock); 6745 } 6746 6747 static void 6748 pf_unload(void) 6749 { 6750 6751 sx_xlock(&pf_end_lock); 6752 pf_end_threads = 1; 6753 while (pf_end_threads < 2) { 6754 wakeup_one(pf_purge_thread); 6755 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0); 6756 } 6757 sx_xunlock(&pf_end_lock); 6758 6759 pf_nl_unregister(); 6760 6761 if (pf_dev != NULL) 6762 destroy_dev(pf_dev); 6763 6764 pfi_cleanup(); 6765 6766 sx_destroy(&pf_end_lock); 6767 } 6768 6769 static void 6770 vnet_pf_init(void *unused __unused) 6771 { 6772 6773 pf_load_vnet(); 6774 } 6775 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6776 vnet_pf_init, NULL); 6777 6778 static void 6779 vnet_pf_uninit(const void *unused __unused) 6780 { 6781 6782 pf_unload_vnet(); 6783 } 6784 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL); 6785 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6786 vnet_pf_uninit, NULL); 6787 6788 static int 6789 pf_modevent(module_t mod, int type, void *data) 6790 { 6791 int error = 0; 6792 6793 switch(type) { 6794 case MOD_LOAD: 6795 error = pf_load(); 6796 pf_nl_register(); 6797 break; 6798 case MOD_UNLOAD: 6799 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after 6800 * the vnet_pf_uninit()s */ 6801 break; 6802 default: 6803 error = EINVAL; 6804 break; 6805 } 6806 6807 return (error); 6808 } 6809 6810 static moduledata_t pf_mod = { 6811 "pf", 6812 pf_modevent, 6813 0 6814 }; 6815 6816 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND); 6817 MODULE_DEPEND(pf, netlink, 1, 1, 1); 6818 MODULE_VERSION(pf, PF_MODVER); 6819