1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002,2003 Henning Brauer 6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * - Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * - Redistributions in binary form must reproduce the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer in the documentation and/or other materials provided 18 * with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * Effort sponsored in part by the Defense Advanced Research Projects 34 * Agency (DARPA) and Air Force Research Laboratory, Air Force 35 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 36 * 37 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $ 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include "opt_inet.h" 44 #include "opt_inet6.h" 45 #include "opt_bpf.h" 46 #include "opt_pf.h" 47 48 #include <sys/param.h> 49 #include <sys/_bitset.h> 50 #include <sys/bitset.h> 51 #include <sys/bus.h> 52 #include <sys/conf.h> 53 #include <sys/endian.h> 54 #include <sys/fcntl.h> 55 #include <sys/filio.h> 56 #include <sys/hash.h> 57 #include <sys/interrupt.h> 58 #include <sys/jail.h> 59 #include <sys/kernel.h> 60 #include <sys/kthread.h> 61 #include <sys/lock.h> 62 #include <sys/mbuf.h> 63 #include <sys/module.h> 64 #include <sys/nv.h> 65 #include <sys/proc.h> 66 #include <sys/sdt.h> 67 #include <sys/smp.h> 68 #include <sys/socket.h> 69 #include <sys/sysctl.h> 70 #include <sys/md5.h> 71 #include <sys/ucred.h> 72 73 #include <net/if.h> 74 #include <net/if_var.h> 75 #include <net/vnet.h> 76 #include <net/route.h> 77 #include <net/pfil.h> 78 #include <net/pfvar.h> 79 #include <net/if_pfsync.h> 80 #include <net/if_pflog.h> 81 82 #include <netinet/in.h> 83 #include <netinet/ip.h> 84 #include <netinet/ip_var.h> 85 #include <netinet6/ip6_var.h> 86 #include <netinet/ip_icmp.h> 87 #include <netpfil/pf/pf_nv.h> 88 89 #ifdef INET6 90 #include <netinet/ip6.h> 91 #endif /* INET6 */ 92 93 #ifdef ALTQ 94 #include <net/altq/altq.h> 95 #endif 96 97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int"); 98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int"); 99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int"); 100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int"); 101 102 static struct pf_kpool *pf_get_kpool(const char *, u_int32_t, u_int8_t, 103 u_int32_t, u_int8_t, u_int8_t, u_int8_t); 104 105 static void pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *); 106 static void pf_empty_kpool(struct pf_kpalist *); 107 static int pfioctl(struct cdev *, u_long, caddr_t, int, 108 struct thread *); 109 static int pf_begin_eth(uint32_t *, const char *); 110 static void pf_rollback_eth_cb(struct epoch_context *); 111 static int pf_rollback_eth(uint32_t, const char *); 112 static int pf_commit_eth(uint32_t, const char *); 113 static void pf_free_eth_rule(struct pf_keth_rule *); 114 #ifdef ALTQ 115 static int pf_begin_altq(u_int32_t *); 116 static int pf_rollback_altq(u_int32_t); 117 static int pf_commit_altq(u_int32_t); 118 static int pf_enable_altq(struct pf_altq *); 119 static int pf_disable_altq(struct pf_altq *); 120 static uint16_t pf_qname2qid(const char *); 121 static void pf_qid_unref(uint16_t); 122 #endif /* ALTQ */ 123 static int pf_begin_rules(u_int32_t *, int, const char *); 124 static int pf_rollback_rules(u_int32_t, int, char *); 125 static int pf_setup_pfsync_matching(struct pf_kruleset *); 126 static void pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *); 127 static void pf_hash_rule(struct pf_krule *); 128 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); 129 static int pf_commit_rules(u_int32_t, int, char *); 130 static int pf_addr_setup(struct pf_kruleset *, 131 struct pf_addr_wrap *, sa_family_t); 132 static void pf_addr_copyout(struct pf_addr_wrap *); 133 static void pf_src_node_copy(const struct pf_ksrc_node *, 134 struct pf_src_node *); 135 #ifdef ALTQ 136 static int pf_export_kaltq(struct pf_altq *, 137 struct pfioc_altq_v1 *, size_t); 138 static int pf_import_kaltq(struct pfioc_altq_v1 *, 139 struct pf_altq *, size_t); 140 #endif /* ALTQ */ 141 142 VNET_DEFINE(struct pf_krule, pf_default_rule); 143 144 static __inline int pf_krule_compare(struct pf_krule *, 145 struct pf_krule *); 146 147 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare); 148 149 #ifdef ALTQ 150 VNET_DEFINE_STATIC(int, pf_altq_running); 151 #define V_pf_altq_running VNET(pf_altq_running) 152 #endif 153 154 #define TAGID_MAX 50000 155 struct pf_tagname { 156 TAILQ_ENTRY(pf_tagname) namehash_entries; 157 TAILQ_ENTRY(pf_tagname) taghash_entries; 158 char name[PF_TAG_NAME_SIZE]; 159 uint16_t tag; 160 int ref; 161 }; 162 163 struct pf_tagset { 164 TAILQ_HEAD(, pf_tagname) *namehash; 165 TAILQ_HEAD(, pf_tagname) *taghash; 166 unsigned int mask; 167 uint32_t seed; 168 BITSET_DEFINE(, TAGID_MAX) avail; 169 }; 170 171 VNET_DEFINE(struct pf_tagset, pf_tags); 172 #define V_pf_tags VNET(pf_tags) 173 static unsigned int pf_rule_tag_hashsize; 174 #define PF_RULE_TAG_HASH_SIZE_DEFAULT 128 175 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN, 176 &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT, 177 "Size of pf(4) rule tag hashtable"); 178 179 #ifdef ALTQ 180 VNET_DEFINE(struct pf_tagset, pf_qids); 181 #define V_pf_qids VNET(pf_qids) 182 static unsigned int pf_queue_tag_hashsize; 183 #define PF_QUEUE_TAG_HASH_SIZE_DEFAULT 128 184 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN, 185 &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT, 186 "Size of pf(4) queue tag hashtable"); 187 #endif 188 VNET_DEFINE(uma_zone_t, pf_tag_z); 189 #define V_pf_tag_z VNET(pf_tag_z) 190 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db"); 191 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules"); 192 193 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) 194 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE 195 #endif 196 197 static void pf_init_tagset(struct pf_tagset *, unsigned int *, 198 unsigned int); 199 static void pf_cleanup_tagset(struct pf_tagset *); 200 static uint16_t tagname2hashindex(const struct pf_tagset *, const char *); 201 static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t); 202 static u_int16_t tagname2tag(struct pf_tagset *, const char *); 203 static u_int16_t pf_tagname2tag(const char *); 204 static void tag_unref(struct pf_tagset *, u_int16_t); 205 206 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x 207 208 struct cdev *pf_dev; 209 210 /* 211 * XXX - These are new and need to be checked when moveing to a new version 212 */ 213 static void pf_clear_all_states(void); 214 static unsigned int pf_clear_states(const struct pf_kstate_kill *); 215 static void pf_killstates(struct pf_kstate_kill *, 216 unsigned int *); 217 static int pf_killstates_row(struct pf_kstate_kill *, 218 struct pf_idhash *); 219 static int pf_killstates_nv(struct pfioc_nv *); 220 static int pf_clearstates_nv(struct pfioc_nv *); 221 static int pf_getstate(struct pfioc_nv *); 222 static int pf_getstatus(struct pfioc_nv *); 223 static int pf_clear_tables(void); 224 static void pf_clear_srcnodes(struct pf_ksrc_node *); 225 static void pf_kill_srcnodes(struct pfioc_src_node_kill *); 226 static int pf_keepcounters(struct pfioc_nv *); 227 static void pf_tbladdr_copyout(struct pf_addr_wrap *); 228 229 /* 230 * Wrapper functions for pfil(9) hooks 231 */ 232 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, 233 int flags, void *ruleset __unused, struct inpcb *inp); 234 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, 235 int flags, void *ruleset __unused, struct inpcb *inp); 236 #ifdef INET 237 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp, 238 int flags, void *ruleset __unused, struct inpcb *inp); 239 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp, 240 int flags, void *ruleset __unused, struct inpcb *inp); 241 #endif 242 #ifdef INET6 243 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp, 244 int flags, void *ruleset __unused, struct inpcb *inp); 245 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp, 246 int flags, void *ruleset __unused, struct inpcb *inp); 247 #endif 248 249 static void hook_pf_eth(void); 250 static void hook_pf(void); 251 static void dehook_pf_eth(void); 252 static void dehook_pf(void); 253 static int shutdown_pf(void); 254 static int pf_load(void); 255 static void pf_unload(void); 256 257 static struct cdevsw pf_cdevsw = { 258 .d_ioctl = pfioctl, 259 .d_name = PF_NAME, 260 .d_version = D_VERSION, 261 }; 262 263 volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked); 264 #define V_pf_pfil_hooked VNET(pf_pfil_hooked) 265 volatile VNET_DEFINE_STATIC(int, pf_pfil_eth_hooked); 266 #define V_pf_pfil_eth_hooked VNET(pf_pfil_eth_hooked) 267 268 /* 269 * We need a flag that is neither hooked nor running to know when 270 * the VNET is "valid". We primarily need this to control (global) 271 * external event, e.g., eventhandlers. 272 */ 273 VNET_DEFINE(int, pf_vnet_active); 274 #define V_pf_vnet_active VNET(pf_vnet_active) 275 276 int pf_end_threads; 277 struct proc *pf_purge_proc; 278 279 struct rmlock pf_rules_lock; 280 struct sx pf_ioctl_lock; 281 struct sx pf_end_lock; 282 283 /* pfsync */ 284 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr); 285 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr); 286 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr); 287 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr); 288 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr); 289 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr); 290 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr; 291 292 /* pflog */ 293 pflog_packet_t *pflog_packet_ptr = NULL; 294 295 /* 296 * Copy a user-provided string, returning an error if truncation would occur. 297 * Avoid scanning past "sz" bytes in the source string since there's no 298 * guarantee that it's nul-terminated. 299 */ 300 static int 301 pf_user_strcpy(char *dst, const char *src, size_t sz) 302 { 303 if (strnlen(src, sz) == sz) 304 return (EINVAL); 305 (void)strlcpy(dst, src, sz); 306 return (0); 307 } 308 309 static void 310 pfattach_vnet(void) 311 { 312 u_int32_t *my_timeout = V_pf_default_rule.timeout; 313 314 pf_initialize(); 315 pfr_initialize(); 316 pfi_initialize_vnet(); 317 pf_normalize_init(); 318 pf_syncookies_init(); 319 320 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 321 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 322 323 RB_INIT(&V_pf_anchors); 324 pf_init_kruleset(&pf_main_ruleset); 325 326 pf_init_keth(V_pf_keth); 327 328 /* default rule should never be garbage collected */ 329 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; 330 #ifdef PF_DEFAULT_TO_DROP 331 V_pf_default_rule.action = PF_DROP; 332 #else 333 V_pf_default_rule.action = PF_PASS; 334 #endif 335 V_pf_default_rule.nr = -1; 336 V_pf_default_rule.rtableid = -1; 337 338 pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK); 339 for (int i = 0; i < 2; i++) { 340 pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK); 341 pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK); 342 } 343 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK); 344 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK); 345 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK); 346 347 V_pf_default_rule.timestamp = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO); 348 349 #ifdef PF_WANT_32_TO_64_COUNTER 350 V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO); 351 V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO); 352 PF_RULES_WLOCK(); 353 LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); 354 LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist); 355 V_pf_allrulecount++; 356 LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); 357 PF_RULES_WUNLOCK(); 358 #endif 359 360 /* initialize default timeouts */ 361 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 362 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 363 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 364 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 365 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 366 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 367 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 368 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 369 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 370 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 371 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 372 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 373 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 374 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 375 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 376 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 377 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 378 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 379 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 380 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 381 382 bzero(&V_pf_status, sizeof(V_pf_status)); 383 V_pf_status.debug = PF_DEBUG_URGENT; 384 385 V_pf_pfil_hooked = 0; 386 V_pf_pfil_eth_hooked = 0; 387 388 /* XXX do our best to avoid a conflict */ 389 V_pf_status.hostid = arc4random(); 390 391 for (int i = 0; i < PFRES_MAX; i++) 392 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK); 393 for (int i = 0; i < KLCNT_MAX; i++) 394 V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK); 395 for (int i = 0; i < FCNT_MAX; i++) 396 pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK); 397 for (int i = 0; i < SCNT_MAX; i++) 398 V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK); 399 400 if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET, 401 INTR_MPSAFE, &V_pf_swi_cookie) != 0) 402 /* XXXGL: leaked all above. */ 403 return; 404 } 405 406 static struct pf_kpool * 407 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action, 408 u_int32_t rule_number, u_int8_t r_last, u_int8_t active, 409 u_int8_t check_ticket) 410 { 411 struct pf_kruleset *ruleset; 412 struct pf_krule *rule; 413 int rs_num; 414 415 ruleset = pf_find_kruleset(anchor); 416 if (ruleset == NULL) 417 return (NULL); 418 rs_num = pf_get_ruleset_number(rule_action); 419 if (rs_num >= PF_RULESET_MAX) 420 return (NULL); 421 if (active) { 422 if (check_ticket && ticket != 423 ruleset->rules[rs_num].active.ticket) 424 return (NULL); 425 if (r_last) 426 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 427 pf_krulequeue); 428 else 429 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 430 } else { 431 if (check_ticket && ticket != 432 ruleset->rules[rs_num].inactive.ticket) 433 return (NULL); 434 if (r_last) 435 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 436 pf_krulequeue); 437 else 438 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); 439 } 440 if (!r_last) { 441 while ((rule != NULL) && (rule->nr != rule_number)) 442 rule = TAILQ_NEXT(rule, entries); 443 } 444 if (rule == NULL) 445 return (NULL); 446 447 return (&rule->rpool); 448 } 449 450 static void 451 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb) 452 { 453 struct pf_kpooladdr *mv_pool_pa; 454 455 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { 456 TAILQ_REMOVE(poola, mv_pool_pa, entries); 457 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); 458 } 459 } 460 461 static void 462 pf_empty_kpool(struct pf_kpalist *poola) 463 { 464 struct pf_kpooladdr *pa; 465 466 while ((pa = TAILQ_FIRST(poola)) != NULL) { 467 switch (pa->addr.type) { 468 case PF_ADDR_DYNIFTL: 469 pfi_dynaddr_remove(pa->addr.p.dyn); 470 break; 471 case PF_ADDR_TABLE: 472 /* XXX: this could be unfinished pooladdr on pabuf */ 473 if (pa->addr.p.tbl != NULL) 474 pfr_detach_table(pa->addr.p.tbl); 475 break; 476 } 477 if (pa->kif) 478 pfi_kkif_unref(pa->kif); 479 TAILQ_REMOVE(poola, pa, entries); 480 free(pa, M_PFRULE); 481 } 482 } 483 484 static void 485 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule) 486 { 487 488 PF_RULES_WASSERT(); 489 PF_UNLNKDRULES_ASSERT(); 490 491 TAILQ_REMOVE(rulequeue, rule, entries); 492 493 rule->rule_ref |= PFRULE_REFS; 494 TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries); 495 } 496 497 static void 498 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule) 499 { 500 501 PF_RULES_WASSERT(); 502 503 PF_UNLNKDRULES_LOCK(); 504 pf_unlink_rule_locked(rulequeue, rule); 505 PF_UNLNKDRULES_UNLOCK(); 506 } 507 508 static void 509 pf_free_eth_rule(struct pf_keth_rule *rule) 510 { 511 PF_RULES_WASSERT(); 512 513 if (rule == NULL) 514 return; 515 516 if (rule->tag) 517 tag_unref(&V_pf_tags, rule->tag); 518 if (rule->match_tag) 519 tag_unref(&V_pf_tags, rule->match_tag); 520 #ifdef ALTQ 521 pf_qid_unref(rule->qid); 522 #endif 523 524 if (rule->kif) 525 pfi_kkif_unref(rule->kif); 526 527 if (rule->ipsrc.addr.type == PF_ADDR_TABLE) 528 pfr_detach_table(rule->ipsrc.addr.p.tbl); 529 if (rule->ipdst.addr.type == PF_ADDR_TABLE) 530 pfr_detach_table(rule->ipdst.addr.p.tbl); 531 532 counter_u64_free(rule->evaluations); 533 for (int i = 0; i < 2; i++) { 534 counter_u64_free(rule->packets[i]); 535 counter_u64_free(rule->bytes[i]); 536 } 537 uma_zfree_pcpu(pcpu_zone_4, rule->timestamp); 538 pf_keth_anchor_remove(rule); 539 540 free(rule, M_PFRULE); 541 } 542 543 void 544 pf_free_rule(struct pf_krule *rule) 545 { 546 547 PF_RULES_WASSERT(); 548 PF_CONFIG_ASSERT(); 549 550 if (rule->tag) 551 tag_unref(&V_pf_tags, rule->tag); 552 if (rule->match_tag) 553 tag_unref(&V_pf_tags, rule->match_tag); 554 #ifdef ALTQ 555 if (rule->pqid != rule->qid) 556 pf_qid_unref(rule->pqid); 557 pf_qid_unref(rule->qid); 558 #endif 559 switch (rule->src.addr.type) { 560 case PF_ADDR_DYNIFTL: 561 pfi_dynaddr_remove(rule->src.addr.p.dyn); 562 break; 563 case PF_ADDR_TABLE: 564 pfr_detach_table(rule->src.addr.p.tbl); 565 break; 566 } 567 switch (rule->dst.addr.type) { 568 case PF_ADDR_DYNIFTL: 569 pfi_dynaddr_remove(rule->dst.addr.p.dyn); 570 break; 571 case PF_ADDR_TABLE: 572 pfr_detach_table(rule->dst.addr.p.tbl); 573 break; 574 } 575 if (rule->overload_tbl) 576 pfr_detach_table(rule->overload_tbl); 577 if (rule->kif) 578 pfi_kkif_unref(rule->kif); 579 pf_kanchor_remove(rule); 580 pf_empty_kpool(&rule->rpool.list); 581 582 pf_krule_free(rule); 583 } 584 585 static void 586 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size, 587 unsigned int default_size) 588 { 589 unsigned int i; 590 unsigned int hashsize; 591 592 if (*tunable_size == 0 || !powerof2(*tunable_size)) 593 *tunable_size = default_size; 594 595 hashsize = *tunable_size; 596 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH, 597 M_WAITOK); 598 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH, 599 M_WAITOK); 600 ts->mask = hashsize - 1; 601 ts->seed = arc4random(); 602 for (i = 0; i < hashsize; i++) { 603 TAILQ_INIT(&ts->namehash[i]); 604 TAILQ_INIT(&ts->taghash[i]); 605 } 606 BIT_FILL(TAGID_MAX, &ts->avail); 607 } 608 609 static void 610 pf_cleanup_tagset(struct pf_tagset *ts) 611 { 612 unsigned int i; 613 unsigned int hashsize; 614 struct pf_tagname *t, *tmp; 615 616 /* 617 * Only need to clean up one of the hashes as each tag is hashed 618 * into each table. 619 */ 620 hashsize = ts->mask + 1; 621 for (i = 0; i < hashsize; i++) 622 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp) 623 uma_zfree(V_pf_tag_z, t); 624 625 free(ts->namehash, M_PFHASH); 626 free(ts->taghash, M_PFHASH); 627 } 628 629 static uint16_t 630 tagname2hashindex(const struct pf_tagset *ts, const char *tagname) 631 { 632 size_t len; 633 634 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1); 635 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask); 636 } 637 638 static uint16_t 639 tag2hashindex(const struct pf_tagset *ts, uint16_t tag) 640 { 641 642 return (tag & ts->mask); 643 } 644 645 static u_int16_t 646 tagname2tag(struct pf_tagset *ts, const char *tagname) 647 { 648 struct pf_tagname *tag; 649 u_int32_t index; 650 u_int16_t new_tagid; 651 652 PF_RULES_WASSERT(); 653 654 index = tagname2hashindex(ts, tagname); 655 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries) 656 if (strcmp(tagname, tag->name) == 0) { 657 tag->ref++; 658 return (tag->tag); 659 } 660 661 /* 662 * new entry 663 * 664 * to avoid fragmentation, we do a linear search from the beginning 665 * and take the first free slot we find. 666 */ 667 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail); 668 /* 669 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX]. 670 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits 671 * set. It may also return a bit number greater than TAGID_MAX due 672 * to rounding of the number of bits in the vector up to a multiple 673 * of the vector word size at declaration/allocation time. 674 */ 675 if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) 676 return (0); 677 678 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */ 679 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); 680 681 /* allocate and fill new struct pf_tagname */ 682 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT); 683 if (tag == NULL) 684 return (0); 685 strlcpy(tag->name, tagname, sizeof(tag->name)); 686 tag->tag = new_tagid; 687 tag->ref = 1; 688 689 /* Insert into namehash */ 690 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); 691 692 /* Insert into taghash */ 693 index = tag2hashindex(ts, new_tagid); 694 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); 695 696 return (tag->tag); 697 } 698 699 static void 700 tag_unref(struct pf_tagset *ts, u_int16_t tag) 701 { 702 struct pf_tagname *t; 703 uint16_t index; 704 705 PF_RULES_WASSERT(); 706 707 index = tag2hashindex(ts, tag); 708 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) 709 if (tag == t->tag) { 710 if (--t->ref == 0) { 711 TAILQ_REMOVE(&ts->taghash[index], t, 712 taghash_entries); 713 index = tagname2hashindex(ts, t->name); 714 TAILQ_REMOVE(&ts->namehash[index], t, 715 namehash_entries); 716 /* Bits are 0-based for BIT_SET() */ 717 BIT_SET(TAGID_MAX, tag - 1, &ts->avail); 718 uma_zfree(V_pf_tag_z, t); 719 } 720 break; 721 } 722 } 723 724 static uint16_t 725 pf_tagname2tag(const char *tagname) 726 { 727 return (tagname2tag(&V_pf_tags, tagname)); 728 } 729 730 static int 731 pf_begin_eth(uint32_t *ticket, const char *anchor) 732 { 733 struct pf_keth_rule *rule, *tmp; 734 struct pf_keth_ruleset *rs; 735 736 PF_RULES_WASSERT(); 737 738 rs = pf_find_or_create_keth_ruleset(anchor); 739 if (rs == NULL) 740 return (EINVAL); 741 742 /* Purge old inactive rules. */ 743 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, 744 tmp) { 745 TAILQ_REMOVE(rs->inactive.rules, rule, 746 entries); 747 pf_free_eth_rule(rule); 748 } 749 750 *ticket = ++rs->inactive.ticket; 751 rs->inactive.open = 1; 752 753 return (0); 754 } 755 756 static void 757 pf_rollback_eth_cb(struct epoch_context *ctx) 758 { 759 struct pf_keth_ruleset *rs; 760 761 rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx); 762 763 CURVNET_SET(rs->vnet); 764 765 PF_RULES_WLOCK(); 766 pf_rollback_eth(rs->inactive.ticket, 767 rs->anchor ? rs->anchor->path : ""); 768 PF_RULES_WUNLOCK(); 769 770 CURVNET_RESTORE(); 771 } 772 773 static int 774 pf_rollback_eth(uint32_t ticket, const char *anchor) 775 { 776 struct pf_keth_rule *rule, *tmp; 777 struct pf_keth_ruleset *rs; 778 779 PF_RULES_WASSERT(); 780 781 rs = pf_find_keth_ruleset(anchor); 782 if (rs == NULL) 783 return (EINVAL); 784 785 if (!rs->inactive.open || 786 ticket != rs->inactive.ticket) 787 return (0); 788 789 /* Purge old inactive rules. */ 790 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, 791 tmp) { 792 TAILQ_REMOVE(rs->inactive.rules, rule, entries); 793 pf_free_eth_rule(rule); 794 } 795 796 rs->inactive.open = 0; 797 798 pf_remove_if_empty_keth_ruleset(rs); 799 800 return (0); 801 } 802 803 #define PF_SET_SKIP_STEPS(i) \ 804 do { \ 805 while (head[i] != cur) { \ 806 head[i]->skip[i].ptr = cur; \ 807 head[i] = TAILQ_NEXT(head[i], entries); \ 808 } \ 809 } while (0) 810 811 static void 812 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules) 813 { 814 struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT]; 815 int i; 816 817 cur = TAILQ_FIRST(rules); 818 prev = cur; 819 for (i = 0; i < PFE_SKIP_COUNT; ++i) 820 head[i] = cur; 821 while (cur != NULL) { 822 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) 823 PF_SET_SKIP_STEPS(PFE_SKIP_IFP); 824 if (cur->direction != prev->direction) 825 PF_SET_SKIP_STEPS(PFE_SKIP_DIR); 826 if (cur->proto != prev->proto) 827 PF_SET_SKIP_STEPS(PFE_SKIP_PROTO); 828 if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0) 829 PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR); 830 if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0) 831 PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR); 832 833 prev = cur; 834 cur = TAILQ_NEXT(cur, entries); 835 } 836 for (i = 0; i < PFE_SKIP_COUNT; ++i) 837 PF_SET_SKIP_STEPS(i); 838 } 839 840 static int 841 pf_commit_eth(uint32_t ticket, const char *anchor) 842 { 843 struct pf_keth_ruleq *rules; 844 struct pf_keth_ruleset *rs; 845 846 rs = pf_find_keth_ruleset(anchor); 847 if (rs == NULL) { 848 return (EINVAL); 849 } 850 851 if (!rs->inactive.open || 852 ticket != rs->inactive.ticket) 853 return (EBUSY); 854 855 PF_RULES_WASSERT(); 856 857 pf_eth_calc_skip_steps(rs->inactive.rules); 858 859 rules = rs->active.rules; 860 ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules); 861 rs->inactive.rules = rules; 862 rs->inactive.ticket = rs->active.ticket; 863 864 /* Clean up inactive rules (i.e. previously active rules), only when 865 * we're sure they're no longer used. */ 866 NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx); 867 868 return (0); 869 } 870 871 #ifdef ALTQ 872 static uint16_t 873 pf_qname2qid(const char *qname) 874 { 875 return (tagname2tag(&V_pf_qids, qname)); 876 } 877 878 static void 879 pf_qid_unref(uint16_t qid) 880 { 881 tag_unref(&V_pf_qids, qid); 882 } 883 884 static int 885 pf_begin_altq(u_int32_t *ticket) 886 { 887 struct pf_altq *altq, *tmp; 888 int error = 0; 889 890 PF_RULES_WASSERT(); 891 892 /* Purge the old altq lists */ 893 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { 894 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 895 /* detach and destroy the discipline */ 896 error = altq_remove(altq); 897 } 898 free(altq, M_PFALTQ); 899 } 900 TAILQ_INIT(V_pf_altq_ifs_inactive); 901 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { 902 pf_qid_unref(altq->qid); 903 free(altq, M_PFALTQ); 904 } 905 TAILQ_INIT(V_pf_altqs_inactive); 906 if (error) 907 return (error); 908 *ticket = ++V_ticket_altqs_inactive; 909 V_altqs_inactive_open = 1; 910 return (0); 911 } 912 913 static int 914 pf_rollback_altq(u_int32_t ticket) 915 { 916 struct pf_altq *altq, *tmp; 917 int error = 0; 918 919 PF_RULES_WASSERT(); 920 921 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) 922 return (0); 923 /* Purge the old altq lists */ 924 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { 925 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 926 /* detach and destroy the discipline */ 927 error = altq_remove(altq); 928 } 929 free(altq, M_PFALTQ); 930 } 931 TAILQ_INIT(V_pf_altq_ifs_inactive); 932 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { 933 pf_qid_unref(altq->qid); 934 free(altq, M_PFALTQ); 935 } 936 TAILQ_INIT(V_pf_altqs_inactive); 937 V_altqs_inactive_open = 0; 938 return (error); 939 } 940 941 static int 942 pf_commit_altq(u_int32_t ticket) 943 { 944 struct pf_altqqueue *old_altqs, *old_altq_ifs; 945 struct pf_altq *altq, *tmp; 946 int err, error = 0; 947 948 PF_RULES_WASSERT(); 949 950 if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive) 951 return (EBUSY); 952 953 /* swap altqs, keep the old. */ 954 old_altqs = V_pf_altqs_active; 955 old_altq_ifs = V_pf_altq_ifs_active; 956 V_pf_altqs_active = V_pf_altqs_inactive; 957 V_pf_altq_ifs_active = V_pf_altq_ifs_inactive; 958 V_pf_altqs_inactive = old_altqs; 959 V_pf_altq_ifs_inactive = old_altq_ifs; 960 V_ticket_altqs_active = V_ticket_altqs_inactive; 961 962 /* Attach new disciplines */ 963 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 964 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 965 /* attach the discipline */ 966 error = altq_pfattach(altq); 967 if (error == 0 && V_pf_altq_running) 968 error = pf_enable_altq(altq); 969 if (error != 0) 970 return (error); 971 } 972 } 973 974 /* Purge the old altq lists */ 975 TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) { 976 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 977 /* detach and destroy the discipline */ 978 if (V_pf_altq_running) 979 error = pf_disable_altq(altq); 980 err = altq_pfdetach(altq); 981 if (err != 0 && error == 0) 982 error = err; 983 err = altq_remove(altq); 984 if (err != 0 && error == 0) 985 error = err; 986 } 987 free(altq, M_PFALTQ); 988 } 989 TAILQ_INIT(V_pf_altq_ifs_inactive); 990 TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) { 991 pf_qid_unref(altq->qid); 992 free(altq, M_PFALTQ); 993 } 994 TAILQ_INIT(V_pf_altqs_inactive); 995 996 V_altqs_inactive_open = 0; 997 return (error); 998 } 999 1000 static int 1001 pf_enable_altq(struct pf_altq *altq) 1002 { 1003 struct ifnet *ifp; 1004 struct tb_profile tb; 1005 int error = 0; 1006 1007 if ((ifp = ifunit(altq->ifname)) == NULL) 1008 return (EINVAL); 1009 1010 if (ifp->if_snd.altq_type != ALTQT_NONE) 1011 error = altq_enable(&ifp->if_snd); 1012 1013 /* set tokenbucket regulator */ 1014 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { 1015 tb.rate = altq->ifbandwidth; 1016 tb.depth = altq->tbrsize; 1017 error = tbr_set(&ifp->if_snd, &tb); 1018 } 1019 1020 return (error); 1021 } 1022 1023 static int 1024 pf_disable_altq(struct pf_altq *altq) 1025 { 1026 struct ifnet *ifp; 1027 struct tb_profile tb; 1028 int error; 1029 1030 if ((ifp = ifunit(altq->ifname)) == NULL) 1031 return (EINVAL); 1032 1033 /* 1034 * when the discipline is no longer referenced, it was overridden 1035 * by a new one. if so, just return. 1036 */ 1037 if (altq->altq_disc != ifp->if_snd.altq_disc) 1038 return (0); 1039 1040 error = altq_disable(&ifp->if_snd); 1041 1042 if (error == 0) { 1043 /* clear tokenbucket regulator */ 1044 tb.rate = 0; 1045 error = tbr_set(&ifp->if_snd, &tb); 1046 } 1047 1048 return (error); 1049 } 1050 1051 static int 1052 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket, 1053 struct pf_altq *altq) 1054 { 1055 struct ifnet *ifp1; 1056 int error = 0; 1057 1058 /* Deactivate the interface in question */ 1059 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; 1060 if ((ifp1 = ifunit(altq->ifname)) == NULL || 1061 (remove && ifp1 == ifp)) { 1062 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 1063 } else { 1064 error = altq_add(ifp1, altq); 1065 1066 if (ticket != V_ticket_altqs_inactive) 1067 error = EBUSY; 1068 1069 if (error) 1070 free(altq, M_PFALTQ); 1071 } 1072 1073 return (error); 1074 } 1075 1076 void 1077 pf_altq_ifnet_event(struct ifnet *ifp, int remove) 1078 { 1079 struct pf_altq *a1, *a2, *a3; 1080 u_int32_t ticket; 1081 int error = 0; 1082 1083 /* 1084 * No need to re-evaluate the configuration for events on interfaces 1085 * that do not support ALTQ, as it's not possible for such 1086 * interfaces to be part of the configuration. 1087 */ 1088 if (!ALTQ_IS_READY(&ifp->if_snd)) 1089 return; 1090 1091 /* Interrupt userland queue modifications */ 1092 if (V_altqs_inactive_open) 1093 pf_rollback_altq(V_ticket_altqs_inactive); 1094 1095 /* Start new altq ruleset */ 1096 if (pf_begin_altq(&ticket)) 1097 return; 1098 1099 /* Copy the current active set */ 1100 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { 1101 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1102 if (a2 == NULL) { 1103 error = ENOMEM; 1104 break; 1105 } 1106 bcopy(a1, a2, sizeof(struct pf_altq)); 1107 1108 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1109 if (error) 1110 break; 1111 1112 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); 1113 } 1114 if (error) 1115 goto out; 1116 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { 1117 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1118 if (a2 == NULL) { 1119 error = ENOMEM; 1120 break; 1121 } 1122 bcopy(a1, a2, sizeof(struct pf_altq)); 1123 1124 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 1125 error = EBUSY; 1126 free(a2, M_PFALTQ); 1127 break; 1128 } 1129 a2->altq_disc = NULL; 1130 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { 1131 if (strncmp(a3->ifname, a2->ifname, 1132 IFNAMSIZ) == 0) { 1133 a2->altq_disc = a3->altq_disc; 1134 break; 1135 } 1136 } 1137 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1138 if (error) 1139 break; 1140 1141 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); 1142 } 1143 1144 out: 1145 if (error != 0) 1146 pf_rollback_altq(ticket); 1147 else 1148 pf_commit_altq(ticket); 1149 } 1150 #endif /* ALTQ */ 1151 1152 static struct pf_krule_global * 1153 pf_rule_tree_alloc(int flags) 1154 { 1155 struct pf_krule_global *tree; 1156 1157 tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags); 1158 if (tree == NULL) 1159 return (NULL); 1160 RB_INIT(tree); 1161 return (tree); 1162 } 1163 1164 static void 1165 pf_rule_tree_free(struct pf_krule_global *tree) 1166 { 1167 1168 free(tree, M_TEMP); 1169 } 1170 1171 static int 1172 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1173 { 1174 struct pf_krule_global *tree; 1175 struct pf_kruleset *rs; 1176 struct pf_krule *rule; 1177 1178 PF_RULES_WASSERT(); 1179 1180 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1181 return (EINVAL); 1182 tree = pf_rule_tree_alloc(M_NOWAIT); 1183 if (tree == NULL) 1184 return (ENOMEM); 1185 rs = pf_find_or_create_kruleset(anchor); 1186 if (rs == NULL) { 1187 free(tree, M_TEMP); 1188 return (EINVAL); 1189 } 1190 pf_rule_tree_free(rs->rules[rs_num].inactive.tree); 1191 rs->rules[rs_num].inactive.tree = tree; 1192 1193 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1194 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1195 rs->rules[rs_num].inactive.rcount--; 1196 } 1197 *ticket = ++rs->rules[rs_num].inactive.ticket; 1198 rs->rules[rs_num].inactive.open = 1; 1199 return (0); 1200 } 1201 1202 static int 1203 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 1204 { 1205 struct pf_kruleset *rs; 1206 struct pf_krule *rule; 1207 1208 PF_RULES_WASSERT(); 1209 1210 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1211 return (EINVAL); 1212 rs = pf_find_kruleset(anchor); 1213 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1214 rs->rules[rs_num].inactive.ticket != ticket) 1215 return (0); 1216 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1217 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1218 rs->rules[rs_num].inactive.rcount--; 1219 } 1220 rs->rules[rs_num].inactive.open = 0; 1221 return (0); 1222 } 1223 1224 #define PF_MD5_UPD(st, elm) \ 1225 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 1226 1227 #define PF_MD5_UPD_STR(st, elm) \ 1228 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 1229 1230 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 1231 (stor) = htonl((st)->elm); \ 1232 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 1233 } while (0) 1234 1235 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 1236 (stor) = htons((st)->elm); \ 1237 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 1238 } while (0) 1239 1240 static void 1241 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 1242 { 1243 PF_MD5_UPD(pfr, addr.type); 1244 switch (pfr->addr.type) { 1245 case PF_ADDR_DYNIFTL: 1246 PF_MD5_UPD(pfr, addr.v.ifname); 1247 PF_MD5_UPD(pfr, addr.iflags); 1248 break; 1249 case PF_ADDR_TABLE: 1250 PF_MD5_UPD(pfr, addr.v.tblname); 1251 break; 1252 case PF_ADDR_ADDRMASK: 1253 /* XXX ignore af? */ 1254 PF_MD5_UPD(pfr, addr.v.a.addr.addr32); 1255 PF_MD5_UPD(pfr, addr.v.a.mask.addr32); 1256 break; 1257 } 1258 1259 PF_MD5_UPD(pfr, port[0]); 1260 PF_MD5_UPD(pfr, port[1]); 1261 PF_MD5_UPD(pfr, neg); 1262 PF_MD5_UPD(pfr, port_op); 1263 } 1264 1265 static void 1266 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule) 1267 { 1268 u_int16_t x; 1269 u_int32_t y; 1270 1271 pf_hash_rule_addr(ctx, &rule->src); 1272 pf_hash_rule_addr(ctx, &rule->dst); 1273 for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++) 1274 PF_MD5_UPD_STR(rule, label[i]); 1275 PF_MD5_UPD_STR(rule, ifname); 1276 PF_MD5_UPD_STR(rule, match_tagname); 1277 PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ 1278 PF_MD5_UPD_HTONL(rule, os_fingerprint, y); 1279 PF_MD5_UPD_HTONL(rule, prob, y); 1280 PF_MD5_UPD_HTONL(rule, uid.uid[0], y); 1281 PF_MD5_UPD_HTONL(rule, uid.uid[1], y); 1282 PF_MD5_UPD(rule, uid.op); 1283 PF_MD5_UPD_HTONL(rule, gid.gid[0], y); 1284 PF_MD5_UPD_HTONL(rule, gid.gid[1], y); 1285 PF_MD5_UPD(rule, gid.op); 1286 PF_MD5_UPD_HTONL(rule, rule_flag, y); 1287 PF_MD5_UPD(rule, action); 1288 PF_MD5_UPD(rule, direction); 1289 PF_MD5_UPD(rule, af); 1290 PF_MD5_UPD(rule, quick); 1291 PF_MD5_UPD(rule, ifnot); 1292 PF_MD5_UPD(rule, match_tag_not); 1293 PF_MD5_UPD(rule, natpass); 1294 PF_MD5_UPD(rule, keep_state); 1295 PF_MD5_UPD(rule, proto); 1296 PF_MD5_UPD(rule, type); 1297 PF_MD5_UPD(rule, code); 1298 PF_MD5_UPD(rule, flags); 1299 PF_MD5_UPD(rule, flagset); 1300 PF_MD5_UPD(rule, allow_opts); 1301 PF_MD5_UPD(rule, rt); 1302 PF_MD5_UPD(rule, tos); 1303 if (rule->anchor != NULL) 1304 PF_MD5_UPD_STR(rule, anchor->path); 1305 } 1306 1307 static void 1308 pf_hash_rule(struct pf_krule *rule) 1309 { 1310 MD5_CTX ctx; 1311 1312 MD5Init(&ctx); 1313 pf_hash_rule_rolling(&ctx, rule); 1314 MD5Final(rule->md5sum, &ctx); 1315 } 1316 1317 static int 1318 pf_krule_compare(struct pf_krule *a, struct pf_krule *b) 1319 { 1320 1321 return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH)); 1322 } 1323 1324 static int 1325 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) 1326 { 1327 struct pf_kruleset *rs; 1328 struct pf_krule *rule, **old_array, *old_rule; 1329 struct pf_krulequeue *old_rules; 1330 struct pf_krule_global *old_tree; 1331 int error; 1332 u_int32_t old_rcount; 1333 1334 PF_RULES_WASSERT(); 1335 1336 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1337 return (EINVAL); 1338 rs = pf_find_kruleset(anchor); 1339 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1340 ticket != rs->rules[rs_num].inactive.ticket) 1341 return (EBUSY); 1342 1343 /* Calculate checksum for the main ruleset */ 1344 if (rs == &pf_main_ruleset) { 1345 error = pf_setup_pfsync_matching(rs); 1346 if (error != 0) 1347 return (error); 1348 } 1349 1350 /* Swap rules, keep the old. */ 1351 old_rules = rs->rules[rs_num].active.ptr; 1352 old_rcount = rs->rules[rs_num].active.rcount; 1353 old_array = rs->rules[rs_num].active.ptr_array; 1354 old_tree = rs->rules[rs_num].active.tree; 1355 1356 rs->rules[rs_num].active.ptr = 1357 rs->rules[rs_num].inactive.ptr; 1358 rs->rules[rs_num].active.ptr_array = 1359 rs->rules[rs_num].inactive.ptr_array; 1360 rs->rules[rs_num].active.tree = 1361 rs->rules[rs_num].inactive.tree; 1362 rs->rules[rs_num].active.rcount = 1363 rs->rules[rs_num].inactive.rcount; 1364 1365 /* Attempt to preserve counter information. */ 1366 if (V_pf_status.keep_counters && old_tree != NULL) { 1367 TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr, 1368 entries) { 1369 old_rule = RB_FIND(pf_krule_global, old_tree, rule); 1370 if (old_rule == NULL) { 1371 continue; 1372 } 1373 pf_counter_u64_critical_enter(); 1374 pf_counter_u64_add_protected(&rule->evaluations, 1375 pf_counter_u64_fetch(&old_rule->evaluations)); 1376 pf_counter_u64_add_protected(&rule->packets[0], 1377 pf_counter_u64_fetch(&old_rule->packets[0])); 1378 pf_counter_u64_add_protected(&rule->packets[1], 1379 pf_counter_u64_fetch(&old_rule->packets[1])); 1380 pf_counter_u64_add_protected(&rule->bytes[0], 1381 pf_counter_u64_fetch(&old_rule->bytes[0])); 1382 pf_counter_u64_add_protected(&rule->bytes[1], 1383 pf_counter_u64_fetch(&old_rule->bytes[1])); 1384 pf_counter_u64_critical_exit(); 1385 } 1386 } 1387 1388 rs->rules[rs_num].inactive.ptr = old_rules; 1389 rs->rules[rs_num].inactive.ptr_array = old_array; 1390 rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */ 1391 rs->rules[rs_num].inactive.rcount = old_rcount; 1392 1393 rs->rules[rs_num].active.ticket = 1394 rs->rules[rs_num].inactive.ticket; 1395 pf_calc_skip_steps(rs->rules[rs_num].active.ptr); 1396 1397 /* Purge the old rule list. */ 1398 PF_UNLNKDRULES_LOCK(); 1399 while ((rule = TAILQ_FIRST(old_rules)) != NULL) 1400 pf_unlink_rule_locked(old_rules, rule); 1401 PF_UNLNKDRULES_UNLOCK(); 1402 if (rs->rules[rs_num].inactive.ptr_array) 1403 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP); 1404 rs->rules[rs_num].inactive.ptr_array = NULL; 1405 rs->rules[rs_num].inactive.rcount = 0; 1406 rs->rules[rs_num].inactive.open = 0; 1407 pf_remove_if_empty_kruleset(rs); 1408 free(old_tree, M_TEMP); 1409 1410 return (0); 1411 } 1412 1413 static int 1414 pf_setup_pfsync_matching(struct pf_kruleset *rs) 1415 { 1416 MD5_CTX ctx; 1417 struct pf_krule *rule; 1418 int rs_cnt; 1419 u_int8_t digest[PF_MD5_DIGEST_LENGTH]; 1420 1421 MD5Init(&ctx); 1422 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { 1423 /* XXX PF_RULESET_SCRUB as well? */ 1424 if (rs_cnt == PF_RULESET_SCRUB) 1425 continue; 1426 1427 if (rs->rules[rs_cnt].inactive.ptr_array) 1428 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); 1429 rs->rules[rs_cnt].inactive.ptr_array = NULL; 1430 1431 if (rs->rules[rs_cnt].inactive.rcount) { 1432 rs->rules[rs_cnt].inactive.ptr_array = 1433 malloc(sizeof(caddr_t) * 1434 rs->rules[rs_cnt].inactive.rcount, 1435 M_TEMP, M_NOWAIT); 1436 1437 if (!rs->rules[rs_cnt].inactive.ptr_array) 1438 return (ENOMEM); 1439 } 1440 1441 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, 1442 entries) { 1443 pf_hash_rule_rolling(&ctx, rule); 1444 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; 1445 } 1446 } 1447 1448 MD5Final(digest, &ctx); 1449 memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum)); 1450 return (0); 1451 } 1452 1453 static int 1454 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr) 1455 { 1456 int error = 0; 1457 1458 switch (addr->type) { 1459 case PF_ADDR_TABLE: 1460 addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname); 1461 if (addr->p.tbl == NULL) 1462 error = ENOMEM; 1463 break; 1464 default: 1465 error = EINVAL; 1466 } 1467 1468 return (error); 1469 } 1470 1471 static int 1472 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr, 1473 sa_family_t af) 1474 { 1475 int error = 0; 1476 1477 switch (addr->type) { 1478 case PF_ADDR_TABLE: 1479 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname); 1480 if (addr->p.tbl == NULL) 1481 error = ENOMEM; 1482 break; 1483 case PF_ADDR_DYNIFTL: 1484 error = pfi_dynaddr_setup(addr, af); 1485 break; 1486 } 1487 1488 return (error); 1489 } 1490 1491 static void 1492 pf_addr_copyout(struct pf_addr_wrap *addr) 1493 { 1494 1495 switch (addr->type) { 1496 case PF_ADDR_DYNIFTL: 1497 pfi_dynaddr_copyout(addr); 1498 break; 1499 case PF_ADDR_TABLE: 1500 pf_tbladdr_copyout(addr); 1501 break; 1502 } 1503 } 1504 1505 static void 1506 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out) 1507 { 1508 int secs = time_uptime, diff; 1509 1510 bzero(out, sizeof(struct pf_src_node)); 1511 1512 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr)); 1513 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr)); 1514 1515 if (in->rule.ptr != NULL) 1516 out->rule.nr = in->rule.ptr->nr; 1517 1518 for (int i = 0; i < 2; i++) { 1519 out->bytes[i] = counter_u64_fetch(in->bytes[i]); 1520 out->packets[i] = counter_u64_fetch(in->packets[i]); 1521 } 1522 1523 out->states = in->states; 1524 out->conn = in->conn; 1525 out->af = in->af; 1526 out->ruletype = in->ruletype; 1527 1528 out->creation = secs - in->creation; 1529 if (out->expire > secs) 1530 out->expire -= secs; 1531 else 1532 out->expire = 0; 1533 1534 /* Adjust the connection rate estimate. */ 1535 diff = secs - in->conn_rate.last; 1536 if (diff >= in->conn_rate.seconds) 1537 out->conn_rate.count = 0; 1538 else 1539 out->conn_rate.count -= 1540 in->conn_rate.count * diff / 1541 in->conn_rate.seconds; 1542 } 1543 1544 #ifdef ALTQ 1545 /* 1546 * Handle export of struct pf_kaltq to user binaries that may be using any 1547 * version of struct pf_altq. 1548 */ 1549 static int 1550 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) 1551 { 1552 u_int32_t version; 1553 1554 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1555 version = 0; 1556 else 1557 version = pa->version; 1558 1559 if (version > PFIOC_ALTQ_VERSION) 1560 return (EINVAL); 1561 1562 #define ASSIGN(x) exported_q->x = q->x 1563 #define COPY(x) \ 1564 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) 1565 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) 1566 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) 1567 1568 switch (version) { 1569 case 0: { 1570 struct pf_altq_v0 *exported_q = 1571 &((struct pfioc_altq_v0 *)pa)->altq; 1572 1573 COPY(ifname); 1574 1575 ASSIGN(scheduler); 1576 ASSIGN(tbrsize); 1577 exported_q->tbrsize = SATU16(q->tbrsize); 1578 exported_q->ifbandwidth = SATU32(q->ifbandwidth); 1579 1580 COPY(qname); 1581 COPY(parent); 1582 ASSIGN(parent_qid); 1583 exported_q->bandwidth = SATU32(q->bandwidth); 1584 ASSIGN(priority); 1585 ASSIGN(local_flags); 1586 1587 ASSIGN(qlimit); 1588 ASSIGN(flags); 1589 1590 if (q->scheduler == ALTQT_HFSC) { 1591 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x 1592 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ 1593 SATU32(q->pq_u.hfsc_opts.x) 1594 1595 ASSIGN_OPT_SATU32(rtsc_m1); 1596 ASSIGN_OPT(rtsc_d); 1597 ASSIGN_OPT_SATU32(rtsc_m2); 1598 1599 ASSIGN_OPT_SATU32(lssc_m1); 1600 ASSIGN_OPT(lssc_d); 1601 ASSIGN_OPT_SATU32(lssc_m2); 1602 1603 ASSIGN_OPT_SATU32(ulsc_m1); 1604 ASSIGN_OPT(ulsc_d); 1605 ASSIGN_OPT_SATU32(ulsc_m2); 1606 1607 ASSIGN_OPT(flags); 1608 1609 #undef ASSIGN_OPT 1610 #undef ASSIGN_OPT_SATU32 1611 } else 1612 COPY(pq_u); 1613 1614 ASSIGN(qid); 1615 break; 1616 } 1617 case 1: { 1618 struct pf_altq_v1 *exported_q = 1619 &((struct pfioc_altq_v1 *)pa)->altq; 1620 1621 COPY(ifname); 1622 1623 ASSIGN(scheduler); 1624 ASSIGN(tbrsize); 1625 ASSIGN(ifbandwidth); 1626 1627 COPY(qname); 1628 COPY(parent); 1629 ASSIGN(parent_qid); 1630 ASSIGN(bandwidth); 1631 ASSIGN(priority); 1632 ASSIGN(local_flags); 1633 1634 ASSIGN(qlimit); 1635 ASSIGN(flags); 1636 COPY(pq_u); 1637 1638 ASSIGN(qid); 1639 break; 1640 } 1641 default: 1642 panic("%s: unhandled struct pfioc_altq version", __func__); 1643 break; 1644 } 1645 1646 #undef ASSIGN 1647 #undef COPY 1648 #undef SATU16 1649 #undef SATU32 1650 1651 return (0); 1652 } 1653 1654 /* 1655 * Handle import to struct pf_kaltq of struct pf_altq from user binaries 1656 * that may be using any version of it. 1657 */ 1658 static int 1659 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1660 { 1661 u_int32_t version; 1662 1663 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1664 version = 0; 1665 else 1666 version = pa->version; 1667 1668 if (version > PFIOC_ALTQ_VERSION) 1669 return (EINVAL); 1670 1671 #define ASSIGN(x) q->x = imported_q->x 1672 #define COPY(x) \ 1673 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1674 1675 switch (version) { 1676 case 0: { 1677 struct pf_altq_v0 *imported_q = 1678 &((struct pfioc_altq_v0 *)pa)->altq; 1679 1680 COPY(ifname); 1681 1682 ASSIGN(scheduler); 1683 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1684 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1685 1686 COPY(qname); 1687 COPY(parent); 1688 ASSIGN(parent_qid); 1689 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1690 ASSIGN(priority); 1691 ASSIGN(local_flags); 1692 1693 ASSIGN(qlimit); 1694 ASSIGN(flags); 1695 1696 if (imported_q->scheduler == ALTQT_HFSC) { 1697 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1698 1699 /* 1700 * The m1 and m2 parameters are being copied from 1701 * 32-bit to 64-bit. 1702 */ 1703 ASSIGN_OPT(rtsc_m1); 1704 ASSIGN_OPT(rtsc_d); 1705 ASSIGN_OPT(rtsc_m2); 1706 1707 ASSIGN_OPT(lssc_m1); 1708 ASSIGN_OPT(lssc_d); 1709 ASSIGN_OPT(lssc_m2); 1710 1711 ASSIGN_OPT(ulsc_m1); 1712 ASSIGN_OPT(ulsc_d); 1713 ASSIGN_OPT(ulsc_m2); 1714 1715 ASSIGN_OPT(flags); 1716 1717 #undef ASSIGN_OPT 1718 } else 1719 COPY(pq_u); 1720 1721 ASSIGN(qid); 1722 break; 1723 } 1724 case 1: { 1725 struct pf_altq_v1 *imported_q = 1726 &((struct pfioc_altq_v1 *)pa)->altq; 1727 1728 COPY(ifname); 1729 1730 ASSIGN(scheduler); 1731 ASSIGN(tbrsize); 1732 ASSIGN(ifbandwidth); 1733 1734 COPY(qname); 1735 COPY(parent); 1736 ASSIGN(parent_qid); 1737 ASSIGN(bandwidth); 1738 ASSIGN(priority); 1739 ASSIGN(local_flags); 1740 1741 ASSIGN(qlimit); 1742 ASSIGN(flags); 1743 COPY(pq_u); 1744 1745 ASSIGN(qid); 1746 break; 1747 } 1748 default: 1749 panic("%s: unhandled struct pfioc_altq version", __func__); 1750 break; 1751 } 1752 1753 #undef ASSIGN 1754 #undef COPY 1755 1756 return (0); 1757 } 1758 1759 static struct pf_altq * 1760 pf_altq_get_nth_active(u_int32_t n) 1761 { 1762 struct pf_altq *altq; 1763 u_int32_t nr; 1764 1765 nr = 0; 1766 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1767 if (nr == n) 1768 return (altq); 1769 nr++; 1770 } 1771 1772 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1773 if (nr == n) 1774 return (altq); 1775 nr++; 1776 } 1777 1778 return (NULL); 1779 } 1780 #endif /* ALTQ */ 1781 1782 struct pf_krule * 1783 pf_krule_alloc(void) 1784 { 1785 struct pf_krule *rule; 1786 1787 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1788 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1789 rule->timestamp = uma_zalloc_pcpu(pcpu_zone_4, M_WAITOK | M_ZERO); 1790 return (rule); 1791 } 1792 1793 void 1794 pf_krule_free(struct pf_krule *rule) 1795 { 1796 #ifdef PF_WANT_32_TO_64_COUNTER 1797 bool wowned; 1798 #endif 1799 1800 if (rule == NULL) 1801 return; 1802 1803 #ifdef PF_WANT_32_TO_64_COUNTER 1804 if (rule->allrulelinked) { 1805 wowned = PF_RULES_WOWNED(); 1806 if (!wowned) 1807 PF_RULES_WLOCK(); 1808 LIST_REMOVE(rule, allrulelist); 1809 V_pf_allrulecount--; 1810 if (!wowned) 1811 PF_RULES_WUNLOCK(); 1812 } 1813 #endif 1814 1815 pf_counter_u64_deinit(&rule->evaluations); 1816 for (int i = 0; i < 2; i++) { 1817 pf_counter_u64_deinit(&rule->packets[i]); 1818 pf_counter_u64_deinit(&rule->bytes[i]); 1819 } 1820 counter_u64_free(rule->states_cur); 1821 counter_u64_free(rule->states_tot); 1822 counter_u64_free(rule->src_nodes); 1823 uma_zfree_pcpu(pcpu_zone_4, rule->timestamp); 1824 1825 mtx_destroy(&rule->rpool.mtx); 1826 free(rule, M_PFRULE); 1827 } 1828 1829 static void 1830 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool, 1831 struct pf_pooladdr *pool) 1832 { 1833 1834 bzero(pool, sizeof(*pool)); 1835 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); 1836 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); 1837 } 1838 1839 static int 1840 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool, 1841 struct pf_kpooladdr *kpool) 1842 { 1843 int ret; 1844 1845 bzero(kpool, sizeof(*kpool)); 1846 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); 1847 ret = pf_user_strcpy(kpool->ifname, pool->ifname, 1848 sizeof(kpool->ifname)); 1849 return (ret); 1850 } 1851 1852 static void 1853 pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool) 1854 { 1855 bzero(pool, sizeof(*pool)); 1856 1857 bcopy(&kpool->key, &pool->key, sizeof(pool->key)); 1858 bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter)); 1859 1860 pool->tblidx = kpool->tblidx; 1861 pool->proxy_port[0] = kpool->proxy_port[0]; 1862 pool->proxy_port[1] = kpool->proxy_port[1]; 1863 pool->opts = kpool->opts; 1864 } 1865 1866 static void 1867 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) 1868 { 1869 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); 1870 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); 1871 1872 bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); 1873 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); 1874 1875 kpool->tblidx = pool->tblidx; 1876 kpool->proxy_port[0] = pool->proxy_port[0]; 1877 kpool->proxy_port[1] = pool->proxy_port[1]; 1878 kpool->opts = pool->opts; 1879 } 1880 1881 static void 1882 pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule) 1883 { 1884 1885 bzero(rule, sizeof(*rule)); 1886 1887 bcopy(&krule->src, &rule->src, sizeof(rule->src)); 1888 bcopy(&krule->dst, &rule->dst, sizeof(rule->dst)); 1889 1890 for (int i = 0; i < PF_SKIP_COUNT; ++i) { 1891 if (rule->skip[i].ptr == NULL) 1892 rule->skip[i].nr = -1; 1893 else 1894 rule->skip[i].nr = krule->skip[i].ptr->nr; 1895 } 1896 1897 strlcpy(rule->label, krule->label[0], sizeof(rule->label)); 1898 strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname)); 1899 strlcpy(rule->qname, krule->qname, sizeof(rule->qname)); 1900 strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname)); 1901 strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname)); 1902 strlcpy(rule->match_tagname, krule->match_tagname, 1903 sizeof(rule->match_tagname)); 1904 strlcpy(rule->overload_tblname, krule->overload_tblname, 1905 sizeof(rule->overload_tblname)); 1906 1907 pf_kpool_to_pool(&krule->rpool, &rule->rpool); 1908 1909 rule->evaluations = pf_counter_u64_fetch(&krule->evaluations); 1910 for (int i = 0; i < 2; i++) { 1911 rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]); 1912 rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]); 1913 } 1914 1915 /* kif, anchor, overload_tbl are not copied over. */ 1916 1917 rule->os_fingerprint = krule->os_fingerprint; 1918 1919 rule->rtableid = krule->rtableid; 1920 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout)); 1921 rule->max_states = krule->max_states; 1922 rule->max_src_nodes = krule->max_src_nodes; 1923 rule->max_src_states = krule->max_src_states; 1924 rule->max_src_conn = krule->max_src_conn; 1925 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit; 1926 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds; 1927 rule->qid = krule->qid; 1928 rule->pqid = krule->pqid; 1929 rule->nr = krule->nr; 1930 rule->prob = krule->prob; 1931 rule->cuid = krule->cuid; 1932 rule->cpid = krule->cpid; 1933 1934 rule->return_icmp = krule->return_icmp; 1935 rule->return_icmp6 = krule->return_icmp6; 1936 rule->max_mss = krule->max_mss; 1937 rule->tag = krule->tag; 1938 rule->match_tag = krule->match_tag; 1939 rule->scrub_flags = krule->scrub_flags; 1940 1941 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid)); 1942 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid)); 1943 1944 rule->rule_flag = krule->rule_flag; 1945 rule->action = krule->action; 1946 rule->direction = krule->direction; 1947 rule->log = krule->log; 1948 rule->logif = krule->logif; 1949 rule->quick = krule->quick; 1950 rule->ifnot = krule->ifnot; 1951 rule->match_tag_not = krule->match_tag_not; 1952 rule->natpass = krule->natpass; 1953 1954 rule->keep_state = krule->keep_state; 1955 rule->af = krule->af; 1956 rule->proto = krule->proto; 1957 rule->type = krule->type; 1958 rule->code = krule->code; 1959 rule->flags = krule->flags; 1960 rule->flagset = krule->flagset; 1961 rule->min_ttl = krule->min_ttl; 1962 rule->allow_opts = krule->allow_opts; 1963 rule->rt = krule->rt; 1964 rule->return_ttl = krule->return_ttl; 1965 rule->tos = krule->tos; 1966 rule->set_tos = krule->set_tos; 1967 rule->anchor_relative = krule->anchor_relative; 1968 rule->anchor_wildcard = krule->anchor_wildcard; 1969 1970 rule->flush = krule->flush; 1971 rule->prio = krule->prio; 1972 rule->set_prio[0] = krule->set_prio[0]; 1973 rule->set_prio[1] = krule->set_prio[1]; 1974 1975 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert)); 1976 1977 rule->u_states_cur = counter_u64_fetch(krule->states_cur); 1978 rule->u_states_tot = counter_u64_fetch(krule->states_tot); 1979 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes); 1980 } 1981 1982 static int 1983 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule) 1984 { 1985 int ret; 1986 1987 #ifndef INET 1988 if (rule->af == AF_INET) { 1989 return (EAFNOSUPPORT); 1990 } 1991 #endif /* INET */ 1992 #ifndef INET6 1993 if (rule->af == AF_INET6) { 1994 return (EAFNOSUPPORT); 1995 } 1996 #endif /* INET6 */ 1997 1998 ret = pf_check_rule_addr(&rule->src); 1999 if (ret != 0) 2000 return (ret); 2001 ret = pf_check_rule_addr(&rule->dst); 2002 if (ret != 0) 2003 return (ret); 2004 2005 bcopy(&rule->src, &krule->src, sizeof(rule->src)); 2006 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); 2007 2008 ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label)); 2009 if (ret != 0) 2010 return (ret); 2011 ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); 2012 if (ret != 0) 2013 return (ret); 2014 ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); 2015 if (ret != 0) 2016 return (ret); 2017 ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); 2018 if (ret != 0) 2019 return (ret); 2020 ret = pf_user_strcpy(krule->tagname, rule->tagname, 2021 sizeof(rule->tagname)); 2022 if (ret != 0) 2023 return (ret); 2024 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 2025 sizeof(rule->match_tagname)); 2026 if (ret != 0) 2027 return (ret); 2028 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 2029 sizeof(rule->overload_tblname)); 2030 if (ret != 0) 2031 return (ret); 2032 2033 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 2034 2035 /* Don't allow userspace to set evaulations, packets or bytes. */ 2036 /* kif, anchor, overload_tbl are not copied over. */ 2037 2038 krule->os_fingerprint = rule->os_fingerprint; 2039 2040 krule->rtableid = rule->rtableid; 2041 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 2042 krule->max_states = rule->max_states; 2043 krule->max_src_nodes = rule->max_src_nodes; 2044 krule->max_src_states = rule->max_src_states; 2045 krule->max_src_conn = rule->max_src_conn; 2046 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 2047 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 2048 krule->qid = rule->qid; 2049 krule->pqid = rule->pqid; 2050 krule->nr = rule->nr; 2051 krule->prob = rule->prob; 2052 krule->cuid = rule->cuid; 2053 krule->cpid = rule->cpid; 2054 2055 krule->return_icmp = rule->return_icmp; 2056 krule->return_icmp6 = rule->return_icmp6; 2057 krule->max_mss = rule->max_mss; 2058 krule->tag = rule->tag; 2059 krule->match_tag = rule->match_tag; 2060 krule->scrub_flags = rule->scrub_flags; 2061 2062 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2063 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2064 2065 krule->rule_flag = rule->rule_flag; 2066 krule->action = rule->action; 2067 krule->direction = rule->direction; 2068 krule->log = rule->log; 2069 krule->logif = rule->logif; 2070 krule->quick = rule->quick; 2071 krule->ifnot = rule->ifnot; 2072 krule->match_tag_not = rule->match_tag_not; 2073 krule->natpass = rule->natpass; 2074 2075 krule->keep_state = rule->keep_state; 2076 krule->af = rule->af; 2077 krule->proto = rule->proto; 2078 krule->type = rule->type; 2079 krule->code = rule->code; 2080 krule->flags = rule->flags; 2081 krule->flagset = rule->flagset; 2082 krule->min_ttl = rule->min_ttl; 2083 krule->allow_opts = rule->allow_opts; 2084 krule->rt = rule->rt; 2085 krule->return_ttl = rule->return_ttl; 2086 krule->tos = rule->tos; 2087 krule->set_tos = rule->set_tos; 2088 2089 krule->flush = rule->flush; 2090 krule->prio = rule->prio; 2091 krule->set_prio[0] = rule->set_prio[0]; 2092 krule->set_prio[1] = rule->set_prio[1]; 2093 2094 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2095 2096 return (0); 2097 } 2098 2099 static int 2100 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk, 2101 struct pf_kstate_kill *kill) 2102 { 2103 int ret; 2104 2105 bzero(kill, sizeof(*kill)); 2106 2107 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp)); 2108 kill->psk_af = psk->psk_af; 2109 kill->psk_proto = psk->psk_proto; 2110 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src)); 2111 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst)); 2112 ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname, 2113 sizeof(kill->psk_ifname)); 2114 if (ret != 0) 2115 return (ret); 2116 ret = pf_user_strcpy(kill->psk_label, psk->psk_label, 2117 sizeof(kill->psk_label)); 2118 if (ret != 0) 2119 return (ret); 2120 2121 return (0); 2122 } 2123 2124 static int 2125 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2126 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 2127 struct thread *td) 2128 { 2129 struct pf_kruleset *ruleset; 2130 struct pf_krule *tail; 2131 struct pf_kpooladdr *pa; 2132 struct pfi_kkif *kif = NULL; 2133 int rs_num; 2134 int error = 0; 2135 2136 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2137 error = EINVAL; 2138 goto errout_unlocked; 2139 } 2140 2141 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2142 2143 if (rule->ifname[0]) 2144 kif = pf_kkif_create(M_WAITOK); 2145 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2146 for (int i = 0; i < 2; i++) { 2147 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2148 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2149 } 2150 rule->states_cur = counter_u64_alloc(M_WAITOK); 2151 rule->states_tot = counter_u64_alloc(M_WAITOK); 2152 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2153 rule->cuid = td->td_ucred->cr_ruid; 2154 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2155 TAILQ_INIT(&rule->rpool.list); 2156 2157 PF_CONFIG_LOCK(); 2158 PF_RULES_WLOCK(); 2159 #ifdef PF_WANT_32_TO_64_COUNTER 2160 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2161 MPASS(!rule->allrulelinked); 2162 rule->allrulelinked = true; 2163 V_pf_allrulecount++; 2164 #endif 2165 ruleset = pf_find_kruleset(anchor); 2166 if (ruleset == NULL) 2167 ERROUT(EINVAL); 2168 rs_num = pf_get_ruleset_number(rule->action); 2169 if (rs_num >= PF_RULESET_MAX) 2170 ERROUT(EINVAL); 2171 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2172 DPFPRINTF(PF_DEBUG_MISC, 2173 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2174 ruleset->rules[rs_num].inactive.ticket)); 2175 ERROUT(EBUSY); 2176 } 2177 if (pool_ticket != V_ticket_pabuf) { 2178 DPFPRINTF(PF_DEBUG_MISC, 2179 ("pool_ticket: %d != %d\n", pool_ticket, 2180 V_ticket_pabuf)); 2181 ERROUT(EBUSY); 2182 } 2183 /* 2184 * XXXMJG hack: there is no mechanism to ensure they started the 2185 * transaction. Ticket checked above may happen to match by accident, 2186 * even if nobody called DIOCXBEGIN, let alone this process. 2187 * Partially work around it by checking if the RB tree got allocated, 2188 * see pf_begin_rules. 2189 */ 2190 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2191 ERROUT(EINVAL); 2192 } 2193 2194 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2195 pf_krulequeue); 2196 if (tail) 2197 rule->nr = tail->nr + 1; 2198 else 2199 rule->nr = 0; 2200 if (rule->ifname[0]) { 2201 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2202 kif = NULL; 2203 pfi_kkif_ref(rule->kif); 2204 } else 2205 rule->kif = NULL; 2206 2207 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2208 error = EBUSY; 2209 2210 #ifdef ALTQ 2211 /* set queue IDs */ 2212 if (rule->qname[0] != 0) { 2213 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2214 error = EBUSY; 2215 else if (rule->pqname[0] != 0) { 2216 if ((rule->pqid = 2217 pf_qname2qid(rule->pqname)) == 0) 2218 error = EBUSY; 2219 } else 2220 rule->pqid = rule->qid; 2221 } 2222 #endif 2223 if (rule->tagname[0]) 2224 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2225 error = EBUSY; 2226 if (rule->match_tagname[0]) 2227 if ((rule->match_tag = 2228 pf_tagname2tag(rule->match_tagname)) == 0) 2229 error = EBUSY; 2230 if (rule->rt && !rule->direction) 2231 error = EINVAL; 2232 if (!rule->log) 2233 rule->logif = 0; 2234 if (rule->logif >= PFLOGIFS_MAX) 2235 error = EINVAL; 2236 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2237 error = ENOMEM; 2238 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2239 error = ENOMEM; 2240 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2241 error = EINVAL; 2242 if (rule->scrub_flags & PFSTATE_SETPRIO && 2243 (rule->set_prio[0] > PF_PRIO_MAX || 2244 rule->set_prio[1] > PF_PRIO_MAX)) 2245 error = EINVAL; 2246 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2247 if (pa->addr.type == PF_ADDR_TABLE) { 2248 pa->addr.p.tbl = pfr_attach_table(ruleset, 2249 pa->addr.v.tblname); 2250 if (pa->addr.p.tbl == NULL) 2251 error = ENOMEM; 2252 } 2253 2254 rule->overload_tbl = NULL; 2255 if (rule->overload_tblname[0]) { 2256 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2257 rule->overload_tblname)) == NULL) 2258 error = EINVAL; 2259 else 2260 rule->overload_tbl->pfrkt_flags |= 2261 PFR_TFLAG_ACTIVE; 2262 } 2263 2264 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2265 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2266 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2267 (rule->rt > PF_NOPFROUTE)) && 2268 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2269 error = EINVAL; 2270 2271 if (error) { 2272 pf_free_rule(rule); 2273 rule = NULL; 2274 ERROUT(error); 2275 } 2276 2277 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2278 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2279 rule, entries); 2280 ruleset->rules[rs_num].inactive.rcount++; 2281 2282 PF_RULES_WUNLOCK(); 2283 pf_hash_rule(rule); 2284 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2285 PF_RULES_WLOCK(); 2286 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2287 ruleset->rules[rs_num].inactive.rcount--; 2288 pf_free_rule(rule); 2289 rule = NULL; 2290 ERROUT(EEXIST); 2291 } 2292 PF_CONFIG_UNLOCK(); 2293 2294 return (0); 2295 2296 #undef ERROUT 2297 errout: 2298 PF_RULES_WUNLOCK(); 2299 PF_CONFIG_UNLOCK(); 2300 errout_unlocked: 2301 pf_kkif_free(kif); 2302 pf_krule_free(rule); 2303 return (error); 2304 } 2305 2306 static bool 2307 pf_label_match(const struct pf_krule *rule, const char *label) 2308 { 2309 int i = 0; 2310 2311 while (*rule->label[i]) { 2312 if (strcmp(rule->label[i], label) == 0) 2313 return (true); 2314 i++; 2315 } 2316 2317 return (false); 2318 } 2319 2320 static unsigned int 2321 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2322 { 2323 struct pf_kstate *s; 2324 int more = 0; 2325 2326 s = pf_find_state_all(key, dir, &more); 2327 if (s == NULL) 2328 return (0); 2329 2330 if (more) { 2331 PF_STATE_UNLOCK(s); 2332 return (0); 2333 } 2334 2335 pf_unlink_state(s); 2336 return (1); 2337 } 2338 2339 static int 2340 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2341 { 2342 struct pf_kstate *s; 2343 struct pf_state_key *sk; 2344 struct pf_addr *srcaddr, *dstaddr; 2345 struct pf_state_key_cmp match_key; 2346 int idx, killed = 0; 2347 unsigned int dir; 2348 u_int16_t srcport, dstport; 2349 struct pfi_kkif *kif; 2350 2351 relock_DIOCKILLSTATES: 2352 PF_HASHROW_LOCK(ih); 2353 LIST_FOREACH(s, &ih->states, entry) { 2354 /* For floating states look at the original kif. */ 2355 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2356 2357 sk = s->key[PF_SK_WIRE]; 2358 if (s->direction == PF_OUT) { 2359 srcaddr = &sk->addr[1]; 2360 dstaddr = &sk->addr[0]; 2361 srcport = sk->port[1]; 2362 dstport = sk->port[0]; 2363 } else { 2364 srcaddr = &sk->addr[0]; 2365 dstaddr = &sk->addr[1]; 2366 srcport = sk->port[0]; 2367 dstport = sk->port[1]; 2368 } 2369 2370 if (psk->psk_af && sk->af != psk->psk_af) 2371 continue; 2372 2373 if (psk->psk_proto && psk->psk_proto != sk->proto) 2374 continue; 2375 2376 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2377 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2378 continue; 2379 2380 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2381 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2382 continue; 2383 2384 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2385 &psk->psk_rt_addr.addr.v.a.addr, 2386 &psk->psk_rt_addr.addr.v.a.mask, 2387 &s->rt_addr, sk->af)) 2388 continue; 2389 2390 if (psk->psk_src.port_op != 0 && 2391 ! pf_match_port(psk->psk_src.port_op, 2392 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2393 continue; 2394 2395 if (psk->psk_dst.port_op != 0 && 2396 ! pf_match_port(psk->psk_dst.port_op, 2397 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2398 continue; 2399 2400 if (psk->psk_label[0] && 2401 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2402 continue; 2403 2404 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2405 kif->pfik_name)) 2406 continue; 2407 2408 if (psk->psk_kill_match) { 2409 /* Create the key to find matching states, with lock 2410 * held. */ 2411 2412 bzero(&match_key, sizeof(match_key)); 2413 2414 if (s->direction == PF_OUT) { 2415 dir = PF_IN; 2416 idx = PF_SK_STACK; 2417 } else { 2418 dir = PF_OUT; 2419 idx = PF_SK_WIRE; 2420 } 2421 2422 match_key.af = s->key[idx]->af; 2423 match_key.proto = s->key[idx]->proto; 2424 PF_ACPY(&match_key.addr[0], 2425 &s->key[idx]->addr[1], match_key.af); 2426 match_key.port[0] = s->key[idx]->port[1]; 2427 PF_ACPY(&match_key.addr[1], 2428 &s->key[idx]->addr[0], match_key.af); 2429 match_key.port[1] = s->key[idx]->port[0]; 2430 } 2431 2432 pf_unlink_state(s); 2433 killed++; 2434 2435 if (psk->psk_kill_match) 2436 killed += pf_kill_matching_state(&match_key, dir); 2437 2438 goto relock_DIOCKILLSTATES; 2439 } 2440 PF_HASHROW_UNLOCK(ih); 2441 2442 return (killed); 2443 } 2444 2445 static int 2446 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2447 { 2448 int error = 0; 2449 PF_RULES_RLOCK_TRACKER; 2450 2451 #define ERROUT_IOCTL(target, x) \ 2452 do { \ 2453 error = (x); \ 2454 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2455 goto target; \ 2456 } while (0) 2457 2458 2459 /* XXX keep in sync with switch() below */ 2460 if (securelevel_gt(td->td_ucred, 2)) 2461 switch (cmd) { 2462 case DIOCGETRULES: 2463 case DIOCGETRULE: 2464 case DIOCGETRULENV: 2465 case DIOCGETADDRS: 2466 case DIOCGETADDR: 2467 case DIOCGETSTATE: 2468 case DIOCGETSTATENV: 2469 case DIOCSETSTATUSIF: 2470 case DIOCGETSTATUS: 2471 case DIOCGETSTATUSNV: 2472 case DIOCCLRSTATUS: 2473 case DIOCNATLOOK: 2474 case DIOCSETDEBUG: 2475 case DIOCGETSTATES: 2476 case DIOCGETSTATESV2: 2477 case DIOCGETTIMEOUT: 2478 case DIOCCLRRULECTRS: 2479 case DIOCGETLIMIT: 2480 case DIOCGETALTQSV0: 2481 case DIOCGETALTQSV1: 2482 case DIOCGETALTQV0: 2483 case DIOCGETALTQV1: 2484 case DIOCGETQSTATSV0: 2485 case DIOCGETQSTATSV1: 2486 case DIOCGETRULESETS: 2487 case DIOCGETRULESET: 2488 case DIOCRGETTABLES: 2489 case DIOCRGETTSTATS: 2490 case DIOCRCLRTSTATS: 2491 case DIOCRCLRADDRS: 2492 case DIOCRADDADDRS: 2493 case DIOCRDELADDRS: 2494 case DIOCRSETADDRS: 2495 case DIOCRGETADDRS: 2496 case DIOCRGETASTATS: 2497 case DIOCRCLRASTATS: 2498 case DIOCRTSTADDRS: 2499 case DIOCOSFPGET: 2500 case DIOCGETSRCNODES: 2501 case DIOCCLRSRCNODES: 2502 case DIOCGETSYNCOOKIES: 2503 case DIOCIGETIFACES: 2504 case DIOCGIFSPEEDV0: 2505 case DIOCGIFSPEEDV1: 2506 case DIOCSETIFFLAG: 2507 case DIOCCLRIFFLAG: 2508 case DIOCGETETHRULES: 2509 case DIOCGETETHRULE: 2510 case DIOCGETETHRULESETS: 2511 case DIOCGETETHRULESET: 2512 break; 2513 case DIOCRCLRTABLES: 2514 case DIOCRADDTABLES: 2515 case DIOCRDELTABLES: 2516 case DIOCRSETTFLAGS: 2517 if (((struct pfioc_table *)addr)->pfrio_flags & 2518 PFR_FLAG_DUMMY) 2519 break; /* dummy operation ok */ 2520 return (EPERM); 2521 default: 2522 return (EPERM); 2523 } 2524 2525 if (!(flags & FWRITE)) 2526 switch (cmd) { 2527 case DIOCGETRULES: 2528 case DIOCGETADDRS: 2529 case DIOCGETADDR: 2530 case DIOCGETSTATE: 2531 case DIOCGETSTATENV: 2532 case DIOCGETSTATUS: 2533 case DIOCGETSTATUSNV: 2534 case DIOCGETSTATES: 2535 case DIOCGETSTATESV2: 2536 case DIOCGETTIMEOUT: 2537 case DIOCGETLIMIT: 2538 case DIOCGETALTQSV0: 2539 case DIOCGETALTQSV1: 2540 case DIOCGETALTQV0: 2541 case DIOCGETALTQV1: 2542 case DIOCGETQSTATSV0: 2543 case DIOCGETQSTATSV1: 2544 case DIOCGETRULESETS: 2545 case DIOCGETRULESET: 2546 case DIOCNATLOOK: 2547 case DIOCRGETTABLES: 2548 case DIOCRGETTSTATS: 2549 case DIOCRGETADDRS: 2550 case DIOCRGETASTATS: 2551 case DIOCRTSTADDRS: 2552 case DIOCOSFPGET: 2553 case DIOCGETSRCNODES: 2554 case DIOCGETSYNCOOKIES: 2555 case DIOCIGETIFACES: 2556 case DIOCGIFSPEEDV1: 2557 case DIOCGIFSPEEDV0: 2558 case DIOCGETRULENV: 2559 case DIOCGETETHRULES: 2560 case DIOCGETETHRULE: 2561 case DIOCGETETHRULESETS: 2562 case DIOCGETETHRULESET: 2563 break; 2564 case DIOCRCLRTABLES: 2565 case DIOCRADDTABLES: 2566 case DIOCRDELTABLES: 2567 case DIOCRCLRTSTATS: 2568 case DIOCRCLRADDRS: 2569 case DIOCRADDADDRS: 2570 case DIOCRDELADDRS: 2571 case DIOCRSETADDRS: 2572 case DIOCRSETTFLAGS: 2573 if (((struct pfioc_table *)addr)->pfrio_flags & 2574 PFR_FLAG_DUMMY) { 2575 flags |= FWRITE; /* need write lock for dummy */ 2576 break; /* dummy operation ok */ 2577 } 2578 return (EACCES); 2579 case DIOCGETRULE: 2580 if (((struct pfioc_rule *)addr)->action == 2581 PF_GET_CLR_CNTR) 2582 return (EACCES); 2583 break; 2584 default: 2585 return (EACCES); 2586 } 2587 2588 CURVNET_SET(TD_TO_VNET(td)); 2589 2590 switch (cmd) { 2591 case DIOCSTART: 2592 sx_xlock(&pf_ioctl_lock); 2593 if (V_pf_status.running) 2594 error = EEXIST; 2595 else { 2596 int cpu; 2597 2598 hook_pf(); 2599 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2600 hook_pf_eth(); 2601 V_pf_status.running = 1; 2602 V_pf_status.since = time_second; 2603 2604 CPU_FOREACH(cpu) 2605 V_pf_stateid[cpu] = time_second; 2606 2607 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2608 } 2609 break; 2610 2611 case DIOCSTOP: 2612 sx_xlock(&pf_ioctl_lock); 2613 if (!V_pf_status.running) 2614 error = ENOENT; 2615 else { 2616 V_pf_status.running = 0; 2617 dehook_pf(); 2618 dehook_pf_eth(); 2619 V_pf_status.since = time_second; 2620 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2621 } 2622 break; 2623 2624 case DIOCGETETHRULES: { 2625 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2626 nvlist_t *nvl; 2627 void *packed; 2628 struct pf_keth_rule *tail; 2629 struct pf_keth_ruleset *rs; 2630 u_int32_t ticket, nr; 2631 const char *anchor = ""; 2632 2633 nvl = NULL; 2634 packed = NULL; 2635 2636 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2637 2638 if (nv->len > pf_ioctl_maxcount) 2639 ERROUT(ENOMEM); 2640 2641 /* Copy the request in */ 2642 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2643 if (packed == NULL) 2644 ERROUT(ENOMEM); 2645 2646 error = copyin(nv->data, packed, nv->len); 2647 if (error) 2648 ERROUT(error); 2649 2650 nvl = nvlist_unpack(packed, nv->len, 0); 2651 if (nvl == NULL) 2652 ERROUT(EBADMSG); 2653 2654 if (! nvlist_exists_string(nvl, "anchor")) 2655 ERROUT(EBADMSG); 2656 2657 anchor = nvlist_get_string(nvl, "anchor"); 2658 2659 rs = pf_find_keth_ruleset(anchor); 2660 2661 nvlist_destroy(nvl); 2662 nvl = NULL; 2663 free(packed, M_NVLIST); 2664 packed = NULL; 2665 2666 if (rs == NULL) 2667 ERROUT(ENOENT); 2668 2669 /* Reply */ 2670 nvl = nvlist_create(0); 2671 if (nvl == NULL) 2672 ERROUT(ENOMEM); 2673 2674 PF_RULES_RLOCK(); 2675 2676 ticket = rs->active.ticket; 2677 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2678 if (tail) 2679 nr = tail->nr + 1; 2680 else 2681 nr = 0; 2682 2683 PF_RULES_RUNLOCK(); 2684 2685 nvlist_add_number(nvl, "ticket", ticket); 2686 nvlist_add_number(nvl, "nr", nr); 2687 2688 packed = nvlist_pack(nvl, &nv->len); 2689 if (packed == NULL) 2690 ERROUT(ENOMEM); 2691 2692 if (nv->size == 0) 2693 ERROUT(0); 2694 else if (nv->size < nv->len) 2695 ERROUT(ENOSPC); 2696 2697 error = copyout(packed, nv->data, nv->len); 2698 2699 #undef ERROUT 2700 DIOCGETETHRULES_error: 2701 free(packed, M_NVLIST); 2702 nvlist_destroy(nvl); 2703 break; 2704 } 2705 2706 case DIOCGETETHRULE: { 2707 struct epoch_tracker et; 2708 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2709 nvlist_t *nvl = NULL; 2710 void *nvlpacked = NULL; 2711 struct pf_keth_rule *rule = NULL; 2712 struct pf_keth_ruleset *rs; 2713 u_int32_t ticket, nr; 2714 bool clear = false; 2715 const char *anchor; 2716 2717 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2718 2719 if (nv->len > pf_ioctl_maxcount) 2720 ERROUT(ENOMEM); 2721 2722 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2723 if (nvlpacked == NULL) 2724 ERROUT(ENOMEM); 2725 2726 error = copyin(nv->data, nvlpacked, nv->len); 2727 if (error) 2728 ERROUT(error); 2729 2730 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2731 if (nvl == NULL) 2732 ERROUT(EBADMSG); 2733 if (! nvlist_exists_number(nvl, "ticket")) 2734 ERROUT(EBADMSG); 2735 ticket = nvlist_get_number(nvl, "ticket"); 2736 if (! nvlist_exists_string(nvl, "anchor")) 2737 ERROUT(EBADMSG); 2738 anchor = nvlist_get_string(nvl, "anchor"); 2739 2740 if (nvlist_exists_bool(nvl, "clear")) 2741 clear = nvlist_get_bool(nvl, "clear"); 2742 2743 if (clear && !(flags & FWRITE)) 2744 ERROUT(EACCES); 2745 2746 if (! nvlist_exists_number(nvl, "nr")) 2747 ERROUT(EBADMSG); 2748 nr = nvlist_get_number(nvl, "nr"); 2749 2750 PF_RULES_RLOCK(); 2751 rs = pf_find_keth_ruleset(anchor); 2752 if (rs == NULL) { 2753 PF_RULES_RUNLOCK(); 2754 ERROUT(ENOENT); 2755 } 2756 if (ticket != rs->active.ticket) { 2757 PF_RULES_RUNLOCK(); 2758 ERROUT(EBUSY); 2759 } 2760 2761 nvlist_destroy(nvl); 2762 nvl = NULL; 2763 free(nvlpacked, M_NVLIST); 2764 nvlpacked = NULL; 2765 2766 rule = TAILQ_FIRST(rs->active.rules); 2767 while ((rule != NULL) && (rule->nr != nr)) 2768 rule = TAILQ_NEXT(rule, entries); 2769 if (rule == NULL) { 2770 PF_RULES_RUNLOCK(); 2771 ERROUT(ENOENT); 2772 } 2773 /* Make sure rule can't go away. */ 2774 NET_EPOCH_ENTER(et); 2775 PF_RULES_RUNLOCK(); 2776 nvl = pf_keth_rule_to_nveth_rule(rule); 2777 if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) 2778 ERROUT(EBUSY); 2779 NET_EPOCH_EXIT(et); 2780 if (nvl == NULL) 2781 ERROUT(ENOMEM); 2782 2783 nvlpacked = nvlist_pack(nvl, &nv->len); 2784 if (nvlpacked == NULL) 2785 ERROUT(ENOMEM); 2786 2787 if (nv->size == 0) 2788 ERROUT(0); 2789 else if (nv->size < nv->len) 2790 ERROUT(ENOSPC); 2791 2792 error = copyout(nvlpacked, nv->data, nv->len); 2793 if (error == 0 && clear) { 2794 counter_u64_zero(rule->evaluations); 2795 for (int i = 0; i < 2; i++) { 2796 counter_u64_zero(rule->packets[i]); 2797 counter_u64_zero(rule->bytes[i]); 2798 } 2799 } 2800 2801 #undef ERROUT 2802 DIOCGETETHRULE_error: 2803 free(nvlpacked, M_NVLIST); 2804 nvlist_destroy(nvl); 2805 break; 2806 } 2807 2808 case DIOCADDETHRULE: { 2809 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2810 nvlist_t *nvl = NULL; 2811 void *nvlpacked = NULL; 2812 struct pf_keth_rule *rule = NULL, *tail = NULL; 2813 struct pf_keth_ruleset *ruleset = NULL; 2814 struct pfi_kkif *kif = NULL; 2815 const char *anchor = "", *anchor_call = ""; 2816 2817 #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) 2818 2819 if (nv->len > pf_ioctl_maxcount) 2820 ERROUT(ENOMEM); 2821 2822 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2823 if (nvlpacked == NULL) 2824 ERROUT(ENOMEM); 2825 2826 error = copyin(nv->data, nvlpacked, nv->len); 2827 if (error) 2828 ERROUT(error); 2829 2830 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2831 if (nvl == NULL) 2832 ERROUT(EBADMSG); 2833 2834 if (! nvlist_exists_number(nvl, "ticket")) 2835 ERROUT(EBADMSG); 2836 2837 if (nvlist_exists_string(nvl, "anchor")) 2838 anchor = nvlist_get_string(nvl, "anchor"); 2839 if (nvlist_exists_string(nvl, "anchor_call")) 2840 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2841 2842 ruleset = pf_find_keth_ruleset(anchor); 2843 if (ruleset == NULL) 2844 ERROUT(EINVAL); 2845 2846 if (nvlist_get_number(nvl, "ticket") != 2847 ruleset->inactive.ticket) { 2848 DPFPRINTF(PF_DEBUG_MISC, 2849 ("ticket: %d != %d\n", 2850 (u_int32_t)nvlist_get_number(nvl, "ticket"), 2851 ruleset->inactive.ticket)); 2852 ERROUT(EBUSY); 2853 } 2854 2855 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); 2856 if (rule == NULL) 2857 ERROUT(ENOMEM); 2858 rule->timestamp = NULL; 2859 2860 error = pf_nveth_rule_to_keth_rule(nvl, rule); 2861 if (error != 0) 2862 ERROUT(error); 2863 2864 if (rule->ifname[0]) 2865 kif = pf_kkif_create(M_WAITOK); 2866 rule->evaluations = counter_u64_alloc(M_WAITOK); 2867 for (int i = 0; i < 2; i++) { 2868 rule->packets[i] = counter_u64_alloc(M_WAITOK); 2869 rule->bytes[i] = counter_u64_alloc(M_WAITOK); 2870 } 2871 rule->timestamp = uma_zalloc_pcpu(pcpu_zone_4, 2872 M_WAITOK | M_ZERO); 2873 2874 PF_RULES_WLOCK(); 2875 2876 if (rule->ifname[0]) { 2877 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2878 pfi_kkif_ref(rule->kif); 2879 } else 2880 rule->kif = NULL; 2881 2882 #ifdef ALTQ 2883 /* set queue IDs */ 2884 if (rule->qname[0] != 0) { 2885 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2886 error = EBUSY; 2887 else 2888 rule->qid = rule->qid; 2889 } 2890 #endif 2891 if (rule->tagname[0]) 2892 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2893 error = EBUSY; 2894 if (rule->match_tagname[0]) 2895 if ((rule->match_tag = pf_tagname2tag( 2896 rule->match_tagname)) == 0) 2897 error = EBUSY; 2898 2899 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2900 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2901 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2902 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2903 2904 if (error) { 2905 pf_free_eth_rule(rule); 2906 PF_RULES_WUNLOCK(); 2907 ERROUT(error); 2908 } 2909 2910 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2911 pf_free_eth_rule(rule); 2912 PF_RULES_WUNLOCK(); 2913 ERROUT(EINVAL); 2914 } 2915 2916 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2917 if (tail) 2918 rule->nr = tail->nr + 1; 2919 else 2920 rule->nr = 0; 2921 2922 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2923 2924 PF_RULES_WUNLOCK(); 2925 2926 #undef ERROUT 2927 DIOCADDETHRULE_error: 2928 nvlist_destroy(nvl); 2929 free(nvlpacked, M_NVLIST); 2930 break; 2931 } 2932 2933 case DIOCGETETHRULESETS: { 2934 struct epoch_tracker et; 2935 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2936 nvlist_t *nvl = NULL; 2937 void *nvlpacked = NULL; 2938 struct pf_keth_ruleset *ruleset; 2939 struct pf_keth_anchor *anchor; 2940 int nr = 0; 2941 2942 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2943 2944 if (nv->len > pf_ioctl_maxcount) 2945 ERROUT(ENOMEM); 2946 2947 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2948 if (nvlpacked == NULL) 2949 ERROUT(ENOMEM); 2950 2951 error = copyin(nv->data, nvlpacked, nv->len); 2952 if (error) 2953 ERROUT(error); 2954 2955 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2956 if (nvl == NULL) 2957 ERROUT(EBADMSG); 2958 if (! nvlist_exists_string(nvl, "path")) 2959 ERROUT(EBADMSG); 2960 2961 NET_EPOCH_ENTER(et); 2962 2963 if ((ruleset = pf_find_keth_ruleset( 2964 nvlist_get_string(nvl, "path"))) == NULL) { 2965 NET_EPOCH_EXIT(et); 2966 ERROUT(ENOENT); 2967 } 2968 2969 if (ruleset->anchor == NULL) { 2970 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2971 if (anchor->parent == NULL) 2972 nr++; 2973 } else { 2974 RB_FOREACH(anchor, pf_keth_anchor_node, 2975 &ruleset->anchor->children) 2976 nr++; 2977 } 2978 2979 NET_EPOCH_EXIT(et); 2980 2981 nvlist_destroy(nvl); 2982 nvl = NULL; 2983 free(nvlpacked, M_NVLIST); 2984 nvlpacked = NULL; 2985 2986 nvl = nvlist_create(0); 2987 if (nvl == NULL) 2988 ERROUT(ENOMEM); 2989 2990 nvlist_add_number(nvl, "nr", nr); 2991 2992 nvlpacked = nvlist_pack(nvl, &nv->len); 2993 if (nvlpacked == NULL) 2994 ERROUT(ENOMEM); 2995 2996 if (nv->size == 0) 2997 ERROUT(0); 2998 else if (nv->size < nv->len) 2999 ERROUT(ENOSPC); 3000 3001 error = copyout(nvlpacked, nv->data, nv->len); 3002 3003 #undef ERROUT 3004 DIOCGETETHRULESETS_error: 3005 free(nvlpacked, M_NVLIST); 3006 nvlist_destroy(nvl); 3007 break; 3008 } 3009 3010 case DIOCGETETHRULESET: { 3011 struct epoch_tracker et; 3012 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3013 nvlist_t *nvl = NULL; 3014 void *nvlpacked = NULL; 3015 struct pf_keth_ruleset *ruleset; 3016 struct pf_keth_anchor *anchor; 3017 int nr = 0, req_nr = 0; 3018 bool found = false; 3019 3020 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3021 3022 if (nv->len > pf_ioctl_maxcount) 3023 ERROUT(ENOMEM); 3024 3025 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3026 if (nvlpacked == NULL) 3027 ERROUT(ENOMEM); 3028 3029 error = copyin(nv->data, nvlpacked, nv->len); 3030 if (error) 3031 ERROUT(error); 3032 3033 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3034 if (nvl == NULL) 3035 ERROUT(EBADMSG); 3036 if (! nvlist_exists_string(nvl, "path")) 3037 ERROUT(EBADMSG); 3038 if (! nvlist_exists_number(nvl, "nr")) 3039 ERROUT(EBADMSG); 3040 3041 req_nr = nvlist_get_number(nvl, "nr"); 3042 3043 NET_EPOCH_ENTER(et); 3044 3045 if ((ruleset = pf_find_keth_ruleset( 3046 nvlist_get_string(nvl, "path"))) == NULL) { 3047 NET_EPOCH_EXIT(et); 3048 ERROUT(ENOENT); 3049 } 3050 3051 nvlist_destroy(nvl); 3052 nvl = NULL; 3053 free(nvlpacked, M_NVLIST); 3054 nvlpacked = NULL; 3055 3056 nvl = nvlist_create(0); 3057 if (nvl == NULL) { 3058 NET_EPOCH_EXIT(et); 3059 ERROUT(ENOMEM); 3060 } 3061 3062 if (ruleset->anchor == NULL) { 3063 RB_FOREACH(anchor, pf_keth_anchor_global, 3064 &V_pf_keth_anchors) { 3065 if (anchor->parent == NULL && nr++ == req_nr) { 3066 found = true; 3067 break; 3068 } 3069 } 3070 } else { 3071 RB_FOREACH(anchor, pf_keth_anchor_node, 3072 &ruleset->anchor->children) { 3073 if (nr++ == req_nr) { 3074 found = true; 3075 break; 3076 } 3077 } 3078 } 3079 3080 NET_EPOCH_EXIT(et); 3081 if (found) { 3082 nvlist_add_number(nvl, "nr", nr); 3083 nvlist_add_string(nvl, "name", anchor->name); 3084 if (ruleset->anchor) 3085 nvlist_add_string(nvl, "path", 3086 ruleset->anchor->path); 3087 else 3088 nvlist_add_string(nvl, "path", ""); 3089 } else { 3090 ERROUT(EBUSY); 3091 } 3092 3093 nvlpacked = nvlist_pack(nvl, &nv->len); 3094 if (nvlpacked == NULL) 3095 ERROUT(ENOMEM); 3096 3097 if (nv->size == 0) 3098 ERROUT(0); 3099 else if (nv->size < nv->len) 3100 ERROUT(ENOSPC); 3101 3102 error = copyout(nvlpacked, nv->data, nv->len); 3103 3104 #undef ERROUT 3105 DIOCGETETHRULESET_error: 3106 free(nvlpacked, M_NVLIST); 3107 nvlist_destroy(nvl); 3108 break; 3109 } 3110 3111 case DIOCADDRULENV: { 3112 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3113 nvlist_t *nvl = NULL; 3114 void *nvlpacked = NULL; 3115 struct pf_krule *rule = NULL; 3116 const char *anchor = "", *anchor_call = ""; 3117 uint32_t ticket = 0, pool_ticket = 0; 3118 3119 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3120 3121 if (nv->len > pf_ioctl_maxcount) 3122 ERROUT(ENOMEM); 3123 3124 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3125 error = copyin(nv->data, nvlpacked, nv->len); 3126 if (error) 3127 ERROUT(error); 3128 3129 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3130 if (nvl == NULL) 3131 ERROUT(EBADMSG); 3132 3133 if (! nvlist_exists_number(nvl, "ticket")) 3134 ERROUT(EINVAL); 3135 ticket = nvlist_get_number(nvl, "ticket"); 3136 3137 if (! nvlist_exists_number(nvl, "pool_ticket")) 3138 ERROUT(EINVAL); 3139 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3140 3141 if (! nvlist_exists_nvlist(nvl, "rule")) 3142 ERROUT(EINVAL); 3143 3144 rule = pf_krule_alloc(); 3145 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3146 rule); 3147 if (error) 3148 ERROUT(error); 3149 3150 if (nvlist_exists_string(nvl, "anchor")) 3151 anchor = nvlist_get_string(nvl, "anchor"); 3152 if (nvlist_exists_string(nvl, "anchor_call")) 3153 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3154 3155 if ((error = nvlist_error(nvl))) 3156 ERROUT(error); 3157 3158 /* Frees rule on error */ 3159 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3160 anchor_call, td); 3161 3162 nvlist_destroy(nvl); 3163 free(nvlpacked, M_NVLIST); 3164 break; 3165 #undef ERROUT 3166 DIOCADDRULENV_error: 3167 pf_krule_free(rule); 3168 nvlist_destroy(nvl); 3169 free(nvlpacked, M_NVLIST); 3170 3171 break; 3172 } 3173 case DIOCADDRULE: { 3174 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3175 struct pf_krule *rule; 3176 3177 rule = pf_krule_alloc(); 3178 error = pf_rule_to_krule(&pr->rule, rule); 3179 if (error != 0) { 3180 pf_krule_free(rule); 3181 break; 3182 } 3183 3184 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3185 3186 /* Frees rule on error */ 3187 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3188 pr->anchor, pr->anchor_call, td); 3189 break; 3190 } 3191 3192 case DIOCGETRULES: { 3193 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3194 struct pf_kruleset *ruleset; 3195 struct pf_krule *tail; 3196 int rs_num; 3197 3198 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3199 3200 PF_RULES_WLOCK(); 3201 ruleset = pf_find_kruleset(pr->anchor); 3202 if (ruleset == NULL) { 3203 PF_RULES_WUNLOCK(); 3204 error = EINVAL; 3205 break; 3206 } 3207 rs_num = pf_get_ruleset_number(pr->rule.action); 3208 if (rs_num >= PF_RULESET_MAX) { 3209 PF_RULES_WUNLOCK(); 3210 error = EINVAL; 3211 break; 3212 } 3213 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3214 pf_krulequeue); 3215 if (tail) 3216 pr->nr = tail->nr + 1; 3217 else 3218 pr->nr = 0; 3219 pr->ticket = ruleset->rules[rs_num].active.ticket; 3220 PF_RULES_WUNLOCK(); 3221 break; 3222 } 3223 3224 case DIOCGETRULE: { 3225 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3226 struct pf_kruleset *ruleset; 3227 struct pf_krule *rule; 3228 int rs_num; 3229 3230 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3231 3232 PF_RULES_WLOCK(); 3233 ruleset = pf_find_kruleset(pr->anchor); 3234 if (ruleset == NULL) { 3235 PF_RULES_WUNLOCK(); 3236 error = EINVAL; 3237 break; 3238 } 3239 rs_num = pf_get_ruleset_number(pr->rule.action); 3240 if (rs_num >= PF_RULESET_MAX) { 3241 PF_RULES_WUNLOCK(); 3242 error = EINVAL; 3243 break; 3244 } 3245 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 3246 PF_RULES_WUNLOCK(); 3247 error = EBUSY; 3248 break; 3249 } 3250 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3251 while ((rule != NULL) && (rule->nr != pr->nr)) 3252 rule = TAILQ_NEXT(rule, entries); 3253 if (rule == NULL) { 3254 PF_RULES_WUNLOCK(); 3255 error = EBUSY; 3256 break; 3257 } 3258 3259 pf_krule_to_rule(rule, &pr->rule); 3260 3261 if (pf_kanchor_copyout(ruleset, rule, pr)) { 3262 PF_RULES_WUNLOCK(); 3263 error = EBUSY; 3264 break; 3265 } 3266 pf_addr_copyout(&pr->rule.src.addr); 3267 pf_addr_copyout(&pr->rule.dst.addr); 3268 3269 if (pr->action == PF_GET_CLR_CNTR) { 3270 pf_counter_u64_zero(&rule->evaluations); 3271 for (int i = 0; i < 2; i++) { 3272 pf_counter_u64_zero(&rule->packets[i]); 3273 pf_counter_u64_zero(&rule->bytes[i]); 3274 } 3275 counter_u64_zero(rule->states_tot); 3276 } 3277 PF_RULES_WUNLOCK(); 3278 break; 3279 } 3280 3281 case DIOCGETRULENV: { 3282 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3283 nvlist_t *nvrule = NULL; 3284 nvlist_t *nvl = NULL; 3285 struct pf_kruleset *ruleset; 3286 struct pf_krule *rule; 3287 void *nvlpacked = NULL; 3288 int rs_num, nr; 3289 bool clear_counter = false; 3290 3291 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3292 3293 if (nv->len > pf_ioctl_maxcount) 3294 ERROUT(ENOMEM); 3295 3296 /* Copy the request in */ 3297 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3298 if (nvlpacked == NULL) 3299 ERROUT(ENOMEM); 3300 3301 error = copyin(nv->data, nvlpacked, nv->len); 3302 if (error) 3303 ERROUT(error); 3304 3305 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3306 if (nvl == NULL) 3307 ERROUT(EBADMSG); 3308 3309 if (! nvlist_exists_string(nvl, "anchor")) 3310 ERROUT(EBADMSG); 3311 if (! nvlist_exists_number(nvl, "ruleset")) 3312 ERROUT(EBADMSG); 3313 if (! nvlist_exists_number(nvl, "ticket")) 3314 ERROUT(EBADMSG); 3315 if (! nvlist_exists_number(nvl, "nr")) 3316 ERROUT(EBADMSG); 3317 3318 if (nvlist_exists_bool(nvl, "clear_counter")) 3319 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3320 3321 if (clear_counter && !(flags & FWRITE)) 3322 ERROUT(EACCES); 3323 3324 nr = nvlist_get_number(nvl, "nr"); 3325 3326 PF_RULES_WLOCK(); 3327 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3328 if (ruleset == NULL) { 3329 PF_RULES_WUNLOCK(); 3330 ERROUT(ENOENT); 3331 } 3332 3333 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3334 if (rs_num >= PF_RULESET_MAX) { 3335 PF_RULES_WUNLOCK(); 3336 ERROUT(EINVAL); 3337 } 3338 3339 if (nvlist_get_number(nvl, "ticket") != 3340 ruleset->rules[rs_num].active.ticket) { 3341 PF_RULES_WUNLOCK(); 3342 ERROUT(EBUSY); 3343 } 3344 3345 if ((error = nvlist_error(nvl))) { 3346 PF_RULES_WUNLOCK(); 3347 ERROUT(error); 3348 } 3349 3350 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3351 while ((rule != NULL) && (rule->nr != nr)) 3352 rule = TAILQ_NEXT(rule, entries); 3353 if (rule == NULL) { 3354 PF_RULES_WUNLOCK(); 3355 ERROUT(EBUSY); 3356 } 3357 3358 nvrule = pf_krule_to_nvrule(rule); 3359 3360 nvlist_destroy(nvl); 3361 nvl = nvlist_create(0); 3362 if (nvl == NULL) { 3363 PF_RULES_WUNLOCK(); 3364 ERROUT(ENOMEM); 3365 } 3366 nvlist_add_number(nvl, "nr", nr); 3367 nvlist_add_nvlist(nvl, "rule", nvrule); 3368 nvlist_destroy(nvrule); 3369 nvrule = NULL; 3370 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3371 PF_RULES_WUNLOCK(); 3372 ERROUT(EBUSY); 3373 } 3374 3375 free(nvlpacked, M_NVLIST); 3376 nvlpacked = nvlist_pack(nvl, &nv->len); 3377 if (nvlpacked == NULL) { 3378 PF_RULES_WUNLOCK(); 3379 ERROUT(ENOMEM); 3380 } 3381 3382 if (nv->size == 0) { 3383 PF_RULES_WUNLOCK(); 3384 ERROUT(0); 3385 } 3386 else if (nv->size < nv->len) { 3387 PF_RULES_WUNLOCK(); 3388 ERROUT(ENOSPC); 3389 } 3390 3391 if (clear_counter) { 3392 pf_counter_u64_zero(&rule->evaluations); 3393 for (int i = 0; i < 2; i++) { 3394 pf_counter_u64_zero(&rule->packets[i]); 3395 pf_counter_u64_zero(&rule->bytes[i]); 3396 } 3397 counter_u64_zero(rule->states_tot); 3398 } 3399 PF_RULES_WUNLOCK(); 3400 3401 error = copyout(nvlpacked, nv->data, nv->len); 3402 3403 #undef ERROUT 3404 DIOCGETRULENV_error: 3405 free(nvlpacked, M_NVLIST); 3406 nvlist_destroy(nvrule); 3407 nvlist_destroy(nvl); 3408 3409 break; 3410 } 3411 3412 case DIOCCHANGERULE: { 3413 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3414 struct pf_kruleset *ruleset; 3415 struct pf_krule *oldrule = NULL, *newrule = NULL; 3416 struct pfi_kkif *kif = NULL; 3417 struct pf_kpooladdr *pa; 3418 u_int32_t nr = 0; 3419 int rs_num; 3420 3421 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3422 3423 if (pcr->action < PF_CHANGE_ADD_HEAD || 3424 pcr->action > PF_CHANGE_GET_TICKET) { 3425 error = EINVAL; 3426 break; 3427 } 3428 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3429 error = EINVAL; 3430 break; 3431 } 3432 3433 if (pcr->action != PF_CHANGE_REMOVE) { 3434 newrule = pf_krule_alloc(); 3435 error = pf_rule_to_krule(&pcr->rule, newrule); 3436 if (error != 0) { 3437 pf_krule_free(newrule); 3438 break; 3439 } 3440 3441 if (newrule->ifname[0]) 3442 kif = pf_kkif_create(M_WAITOK); 3443 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3444 for (int i = 0; i < 2; i++) { 3445 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3446 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3447 } 3448 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3449 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3450 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3451 newrule->cuid = td->td_ucred->cr_ruid; 3452 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3453 TAILQ_INIT(&newrule->rpool.list); 3454 } 3455 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3456 3457 PF_CONFIG_LOCK(); 3458 PF_RULES_WLOCK(); 3459 #ifdef PF_WANT_32_TO_64_COUNTER 3460 if (newrule != NULL) { 3461 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3462 newrule->allrulelinked = true; 3463 V_pf_allrulecount++; 3464 } 3465 #endif 3466 3467 if (!(pcr->action == PF_CHANGE_REMOVE || 3468 pcr->action == PF_CHANGE_GET_TICKET) && 3469 pcr->pool_ticket != V_ticket_pabuf) 3470 ERROUT(EBUSY); 3471 3472 ruleset = pf_find_kruleset(pcr->anchor); 3473 if (ruleset == NULL) 3474 ERROUT(EINVAL); 3475 3476 rs_num = pf_get_ruleset_number(pcr->rule.action); 3477 if (rs_num >= PF_RULESET_MAX) 3478 ERROUT(EINVAL); 3479 3480 /* 3481 * XXXMJG: there is no guarantee that the ruleset was 3482 * created by the usual route of calling DIOCXBEGIN. 3483 * As a result it is possible the rule tree will not 3484 * be allocated yet. Hack around it by doing it here. 3485 * Note it is fine to let the tree persist in case of 3486 * error as it will be freed down the road on future 3487 * updates (if need be). 3488 */ 3489 if (ruleset->rules[rs_num].active.tree == NULL) { 3490 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3491 if (ruleset->rules[rs_num].active.tree == NULL) { 3492 ERROUT(ENOMEM); 3493 } 3494 } 3495 3496 if (pcr->action == PF_CHANGE_GET_TICKET) { 3497 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3498 ERROUT(0); 3499 } else if (pcr->ticket != 3500 ruleset->rules[rs_num].active.ticket) 3501 ERROUT(EINVAL); 3502 3503 if (pcr->action != PF_CHANGE_REMOVE) { 3504 if (newrule->ifname[0]) { 3505 newrule->kif = pfi_kkif_attach(kif, 3506 newrule->ifname); 3507 kif = NULL; 3508 pfi_kkif_ref(newrule->kif); 3509 } else 3510 newrule->kif = NULL; 3511 3512 if (newrule->rtableid > 0 && 3513 newrule->rtableid >= rt_numfibs) 3514 error = EBUSY; 3515 3516 #ifdef ALTQ 3517 /* set queue IDs */ 3518 if (newrule->qname[0] != 0) { 3519 if ((newrule->qid = 3520 pf_qname2qid(newrule->qname)) == 0) 3521 error = EBUSY; 3522 else if (newrule->pqname[0] != 0) { 3523 if ((newrule->pqid = 3524 pf_qname2qid(newrule->pqname)) == 0) 3525 error = EBUSY; 3526 } else 3527 newrule->pqid = newrule->qid; 3528 } 3529 #endif /* ALTQ */ 3530 if (newrule->tagname[0]) 3531 if ((newrule->tag = 3532 pf_tagname2tag(newrule->tagname)) == 0) 3533 error = EBUSY; 3534 if (newrule->match_tagname[0]) 3535 if ((newrule->match_tag = pf_tagname2tag( 3536 newrule->match_tagname)) == 0) 3537 error = EBUSY; 3538 if (newrule->rt && !newrule->direction) 3539 error = EINVAL; 3540 if (!newrule->log) 3541 newrule->logif = 0; 3542 if (newrule->logif >= PFLOGIFS_MAX) 3543 error = EINVAL; 3544 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3545 error = ENOMEM; 3546 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3547 error = ENOMEM; 3548 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3549 error = EINVAL; 3550 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3551 if (pa->addr.type == PF_ADDR_TABLE) { 3552 pa->addr.p.tbl = 3553 pfr_attach_table(ruleset, 3554 pa->addr.v.tblname); 3555 if (pa->addr.p.tbl == NULL) 3556 error = ENOMEM; 3557 } 3558 3559 newrule->overload_tbl = NULL; 3560 if (newrule->overload_tblname[0]) { 3561 if ((newrule->overload_tbl = pfr_attach_table( 3562 ruleset, newrule->overload_tblname)) == 3563 NULL) 3564 error = EINVAL; 3565 else 3566 newrule->overload_tbl->pfrkt_flags |= 3567 PFR_TFLAG_ACTIVE; 3568 } 3569 3570 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3571 if (((((newrule->action == PF_NAT) || 3572 (newrule->action == PF_RDR) || 3573 (newrule->action == PF_BINAT) || 3574 (newrule->rt > PF_NOPFROUTE)) && 3575 !newrule->anchor)) && 3576 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3577 error = EINVAL; 3578 3579 if (error) { 3580 pf_free_rule(newrule); 3581 PF_RULES_WUNLOCK(); 3582 PF_CONFIG_UNLOCK(); 3583 break; 3584 } 3585 3586 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3587 } 3588 pf_empty_kpool(&V_pf_pabuf); 3589 3590 if (pcr->action == PF_CHANGE_ADD_HEAD) 3591 oldrule = TAILQ_FIRST( 3592 ruleset->rules[rs_num].active.ptr); 3593 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3594 oldrule = TAILQ_LAST( 3595 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3596 else { 3597 oldrule = TAILQ_FIRST( 3598 ruleset->rules[rs_num].active.ptr); 3599 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3600 oldrule = TAILQ_NEXT(oldrule, entries); 3601 if (oldrule == NULL) { 3602 if (newrule != NULL) 3603 pf_free_rule(newrule); 3604 PF_RULES_WUNLOCK(); 3605 PF_CONFIG_UNLOCK(); 3606 error = EINVAL; 3607 break; 3608 } 3609 } 3610 3611 if (pcr->action == PF_CHANGE_REMOVE) { 3612 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3613 oldrule); 3614 RB_REMOVE(pf_krule_global, 3615 ruleset->rules[rs_num].active.tree, oldrule); 3616 ruleset->rules[rs_num].active.rcount--; 3617 } else { 3618 pf_hash_rule(newrule); 3619 if (RB_INSERT(pf_krule_global, 3620 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3621 pf_free_rule(newrule); 3622 PF_RULES_WUNLOCK(); 3623 PF_CONFIG_UNLOCK(); 3624 error = EEXIST; 3625 break; 3626 } 3627 3628 if (oldrule == NULL) 3629 TAILQ_INSERT_TAIL( 3630 ruleset->rules[rs_num].active.ptr, 3631 newrule, entries); 3632 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3633 pcr->action == PF_CHANGE_ADD_BEFORE) 3634 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3635 else 3636 TAILQ_INSERT_AFTER( 3637 ruleset->rules[rs_num].active.ptr, 3638 oldrule, newrule, entries); 3639 ruleset->rules[rs_num].active.rcount++; 3640 } 3641 3642 nr = 0; 3643 TAILQ_FOREACH(oldrule, 3644 ruleset->rules[rs_num].active.ptr, entries) 3645 oldrule->nr = nr++; 3646 3647 ruleset->rules[rs_num].active.ticket++; 3648 3649 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3650 pf_remove_if_empty_kruleset(ruleset); 3651 3652 PF_RULES_WUNLOCK(); 3653 PF_CONFIG_UNLOCK(); 3654 break; 3655 3656 #undef ERROUT 3657 DIOCCHANGERULE_error: 3658 PF_RULES_WUNLOCK(); 3659 PF_CONFIG_UNLOCK(); 3660 pf_krule_free(newrule); 3661 pf_kkif_free(kif); 3662 break; 3663 } 3664 3665 case DIOCCLRSTATES: { 3666 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3667 struct pf_kstate_kill kill; 3668 3669 error = pf_state_kill_to_kstate_kill(psk, &kill); 3670 if (error) 3671 break; 3672 3673 psk->psk_killed = pf_clear_states(&kill); 3674 break; 3675 } 3676 3677 case DIOCCLRSTATESNV: { 3678 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3679 break; 3680 } 3681 3682 case DIOCKILLSTATES: { 3683 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3684 struct pf_kstate_kill kill; 3685 3686 error = pf_state_kill_to_kstate_kill(psk, &kill); 3687 if (error) 3688 break; 3689 3690 psk->psk_killed = 0; 3691 pf_killstates(&kill, &psk->psk_killed); 3692 break; 3693 } 3694 3695 case DIOCKILLSTATESNV: { 3696 error = pf_killstates_nv((struct pfioc_nv *)addr); 3697 break; 3698 } 3699 3700 case DIOCADDSTATE: { 3701 struct pfioc_state *ps = (struct pfioc_state *)addr; 3702 struct pfsync_state *sp = &ps->state; 3703 3704 if (sp->timeout >= PFTM_MAX) { 3705 error = EINVAL; 3706 break; 3707 } 3708 if (V_pfsync_state_import_ptr != NULL) { 3709 PF_RULES_RLOCK(); 3710 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 3711 PF_RULES_RUNLOCK(); 3712 } else 3713 error = EOPNOTSUPP; 3714 break; 3715 } 3716 3717 case DIOCGETSTATE: { 3718 struct pfioc_state *ps = (struct pfioc_state *)addr; 3719 struct pf_kstate *s; 3720 3721 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3722 if (s == NULL) { 3723 error = ENOENT; 3724 break; 3725 } 3726 3727 pfsync_state_export(&ps->state, s); 3728 PF_STATE_UNLOCK(s); 3729 break; 3730 } 3731 3732 case DIOCGETSTATENV: { 3733 error = pf_getstate((struct pfioc_nv *)addr); 3734 break; 3735 } 3736 3737 case DIOCGETSTATES: { 3738 struct pfioc_states *ps = (struct pfioc_states *)addr; 3739 struct pf_kstate *s; 3740 struct pfsync_state *pstore, *p; 3741 int i, nr; 3742 size_t slice_count = 16, count; 3743 void *out; 3744 3745 if (ps->ps_len <= 0) { 3746 nr = uma_zone_get_cur(V_pf_state_z); 3747 ps->ps_len = sizeof(struct pfsync_state) * nr; 3748 break; 3749 } 3750 3751 out = ps->ps_states; 3752 pstore = mallocarray(slice_count, 3753 sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); 3754 nr = 0; 3755 3756 for (i = 0; i <= pf_hashmask; i++) { 3757 struct pf_idhash *ih = &V_pf_idhash[i]; 3758 3759 DIOCGETSTATES_retry: 3760 p = pstore; 3761 3762 if (LIST_EMPTY(&ih->states)) 3763 continue; 3764 3765 PF_HASHROW_LOCK(ih); 3766 count = 0; 3767 LIST_FOREACH(s, &ih->states, entry) { 3768 if (s->timeout == PFTM_UNLINKED) 3769 continue; 3770 count++; 3771 } 3772 3773 if (count > slice_count) { 3774 PF_HASHROW_UNLOCK(ih); 3775 free(pstore, M_TEMP); 3776 slice_count = count * 2; 3777 pstore = mallocarray(slice_count, 3778 sizeof(struct pfsync_state), M_TEMP, 3779 M_WAITOK | M_ZERO); 3780 goto DIOCGETSTATES_retry; 3781 } 3782 3783 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3784 PF_HASHROW_UNLOCK(ih); 3785 goto DIOCGETSTATES_full; 3786 } 3787 3788 LIST_FOREACH(s, &ih->states, entry) { 3789 if (s->timeout == PFTM_UNLINKED) 3790 continue; 3791 3792 pfsync_state_export(p, s); 3793 p++; 3794 nr++; 3795 } 3796 PF_HASHROW_UNLOCK(ih); 3797 error = copyout(pstore, out, 3798 sizeof(struct pfsync_state) * count); 3799 if (error) 3800 break; 3801 out = ps->ps_states + nr; 3802 } 3803 DIOCGETSTATES_full: 3804 ps->ps_len = sizeof(struct pfsync_state) * nr; 3805 free(pstore, M_TEMP); 3806 3807 break; 3808 } 3809 3810 case DIOCGETSTATESV2: { 3811 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3812 struct pf_kstate *s; 3813 struct pf_state_export *pstore, *p; 3814 int i, nr; 3815 size_t slice_count = 16, count; 3816 void *out; 3817 3818 if (ps->ps_req_version > PF_STATE_VERSION) { 3819 error = ENOTSUP; 3820 break; 3821 } 3822 3823 if (ps->ps_len <= 0) { 3824 nr = uma_zone_get_cur(V_pf_state_z); 3825 ps->ps_len = sizeof(struct pf_state_export) * nr; 3826 break; 3827 } 3828 3829 out = ps->ps_states; 3830 pstore = mallocarray(slice_count, 3831 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3832 nr = 0; 3833 3834 for (i = 0; i <= pf_hashmask; i++) { 3835 struct pf_idhash *ih = &V_pf_idhash[i]; 3836 3837 DIOCGETSTATESV2_retry: 3838 p = pstore; 3839 3840 if (LIST_EMPTY(&ih->states)) 3841 continue; 3842 3843 PF_HASHROW_LOCK(ih); 3844 count = 0; 3845 LIST_FOREACH(s, &ih->states, entry) { 3846 if (s->timeout == PFTM_UNLINKED) 3847 continue; 3848 count++; 3849 } 3850 3851 if (count > slice_count) { 3852 PF_HASHROW_UNLOCK(ih); 3853 free(pstore, M_TEMP); 3854 slice_count = count * 2; 3855 pstore = mallocarray(slice_count, 3856 sizeof(struct pf_state_export), M_TEMP, 3857 M_WAITOK | M_ZERO); 3858 goto DIOCGETSTATESV2_retry; 3859 } 3860 3861 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3862 PF_HASHROW_UNLOCK(ih); 3863 goto DIOCGETSTATESV2_full; 3864 } 3865 3866 LIST_FOREACH(s, &ih->states, entry) { 3867 if (s->timeout == PFTM_UNLINKED) 3868 continue; 3869 3870 pf_state_export(p, s); 3871 p++; 3872 nr++; 3873 } 3874 PF_HASHROW_UNLOCK(ih); 3875 error = copyout(pstore, out, 3876 sizeof(struct pf_state_export) * count); 3877 if (error) 3878 break; 3879 out = ps->ps_states + nr; 3880 } 3881 DIOCGETSTATESV2_full: 3882 ps->ps_len = nr * sizeof(struct pf_state_export); 3883 free(pstore, M_TEMP); 3884 3885 break; 3886 } 3887 3888 case DIOCGETSTATUS: { 3889 struct pf_status *s = (struct pf_status *)addr; 3890 3891 PF_RULES_RLOCK(); 3892 s->running = V_pf_status.running; 3893 s->since = V_pf_status.since; 3894 s->debug = V_pf_status.debug; 3895 s->hostid = V_pf_status.hostid; 3896 s->states = V_pf_status.states; 3897 s->src_nodes = V_pf_status.src_nodes; 3898 3899 for (int i = 0; i < PFRES_MAX; i++) 3900 s->counters[i] = 3901 counter_u64_fetch(V_pf_status.counters[i]); 3902 for (int i = 0; i < LCNT_MAX; i++) 3903 s->lcounters[i] = 3904 counter_u64_fetch(V_pf_status.lcounters[i]); 3905 for (int i = 0; i < FCNT_MAX; i++) 3906 s->fcounters[i] = 3907 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3908 for (int i = 0; i < SCNT_MAX; i++) 3909 s->scounters[i] = 3910 counter_u64_fetch(V_pf_status.scounters[i]); 3911 3912 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3913 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3914 PF_MD5_DIGEST_LENGTH); 3915 3916 pfi_update_status(s->ifname, s); 3917 PF_RULES_RUNLOCK(); 3918 break; 3919 } 3920 3921 case DIOCGETSTATUSNV: { 3922 error = pf_getstatus((struct pfioc_nv *)addr); 3923 break; 3924 } 3925 3926 case DIOCSETSTATUSIF: { 3927 struct pfioc_if *pi = (struct pfioc_if *)addr; 3928 3929 if (pi->ifname[0] == 0) { 3930 bzero(V_pf_status.ifname, IFNAMSIZ); 3931 break; 3932 } 3933 PF_RULES_WLOCK(); 3934 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3935 PF_RULES_WUNLOCK(); 3936 break; 3937 } 3938 3939 case DIOCCLRSTATUS: { 3940 PF_RULES_WLOCK(); 3941 for (int i = 0; i < PFRES_MAX; i++) 3942 counter_u64_zero(V_pf_status.counters[i]); 3943 for (int i = 0; i < FCNT_MAX; i++) 3944 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3945 for (int i = 0; i < SCNT_MAX; i++) 3946 counter_u64_zero(V_pf_status.scounters[i]); 3947 for (int i = 0; i < KLCNT_MAX; i++) 3948 counter_u64_zero(V_pf_status.lcounters[i]); 3949 V_pf_status.since = time_second; 3950 if (*V_pf_status.ifname) 3951 pfi_update_status(V_pf_status.ifname, NULL); 3952 PF_RULES_WUNLOCK(); 3953 break; 3954 } 3955 3956 case DIOCNATLOOK: { 3957 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3958 struct pf_state_key *sk; 3959 struct pf_kstate *state; 3960 struct pf_state_key_cmp key; 3961 int m = 0, direction = pnl->direction; 3962 int sidx, didx; 3963 3964 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3965 sidx = (direction == PF_IN) ? 1 : 0; 3966 didx = (direction == PF_IN) ? 0 : 1; 3967 3968 if (!pnl->proto || 3969 PF_AZERO(&pnl->saddr, pnl->af) || 3970 PF_AZERO(&pnl->daddr, pnl->af) || 3971 ((pnl->proto == IPPROTO_TCP || 3972 pnl->proto == IPPROTO_UDP) && 3973 (!pnl->dport || !pnl->sport))) 3974 error = EINVAL; 3975 else { 3976 bzero(&key, sizeof(key)); 3977 key.af = pnl->af; 3978 key.proto = pnl->proto; 3979 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3980 key.port[sidx] = pnl->sport; 3981 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3982 key.port[didx] = pnl->dport; 3983 3984 state = pf_find_state_all(&key, direction, &m); 3985 if (state == NULL) { 3986 error = ENOENT; 3987 } else { 3988 if (m > 1) { 3989 PF_STATE_UNLOCK(state); 3990 error = E2BIG; /* more than one state */ 3991 } else { 3992 sk = state->key[sidx]; 3993 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3994 pnl->rsport = sk->port[sidx]; 3995 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3996 pnl->rdport = sk->port[didx]; 3997 PF_STATE_UNLOCK(state); 3998 } 3999 } 4000 } 4001 break; 4002 } 4003 4004 case DIOCSETTIMEOUT: { 4005 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4006 int old; 4007 4008 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 4009 pt->seconds < 0) { 4010 error = EINVAL; 4011 break; 4012 } 4013 PF_RULES_WLOCK(); 4014 old = V_pf_default_rule.timeout[pt->timeout]; 4015 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 4016 pt->seconds = 1; 4017 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 4018 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 4019 wakeup(pf_purge_thread); 4020 pt->seconds = old; 4021 PF_RULES_WUNLOCK(); 4022 break; 4023 } 4024 4025 case DIOCGETTIMEOUT: { 4026 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4027 4028 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 4029 error = EINVAL; 4030 break; 4031 } 4032 PF_RULES_RLOCK(); 4033 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 4034 PF_RULES_RUNLOCK(); 4035 break; 4036 } 4037 4038 case DIOCGETLIMIT: { 4039 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4040 4041 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 4042 error = EINVAL; 4043 break; 4044 } 4045 PF_RULES_RLOCK(); 4046 pl->limit = V_pf_limits[pl->index].limit; 4047 PF_RULES_RUNLOCK(); 4048 break; 4049 } 4050 4051 case DIOCSETLIMIT: { 4052 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4053 int old_limit; 4054 4055 PF_RULES_WLOCK(); 4056 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 4057 V_pf_limits[pl->index].zone == NULL) { 4058 PF_RULES_WUNLOCK(); 4059 error = EINVAL; 4060 break; 4061 } 4062 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 4063 old_limit = V_pf_limits[pl->index].limit; 4064 V_pf_limits[pl->index].limit = pl->limit; 4065 pl->limit = old_limit; 4066 PF_RULES_WUNLOCK(); 4067 break; 4068 } 4069 4070 case DIOCSETDEBUG: { 4071 u_int32_t *level = (u_int32_t *)addr; 4072 4073 PF_RULES_WLOCK(); 4074 V_pf_status.debug = *level; 4075 PF_RULES_WUNLOCK(); 4076 break; 4077 } 4078 4079 case DIOCCLRRULECTRS: { 4080 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4081 struct pf_kruleset *ruleset = &pf_main_ruleset; 4082 struct pf_krule *rule; 4083 4084 PF_RULES_WLOCK(); 4085 TAILQ_FOREACH(rule, 4086 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4087 pf_counter_u64_zero(&rule->evaluations); 4088 for (int i = 0; i < 2; i++) { 4089 pf_counter_u64_zero(&rule->packets[i]); 4090 pf_counter_u64_zero(&rule->bytes[i]); 4091 } 4092 } 4093 PF_RULES_WUNLOCK(); 4094 break; 4095 } 4096 4097 case DIOCGIFSPEEDV0: 4098 case DIOCGIFSPEEDV1: { 4099 struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr; 4100 struct pf_ifspeed_v1 ps; 4101 struct ifnet *ifp; 4102 4103 if (psp->ifname[0] == '\0') { 4104 error = EINVAL; 4105 break; 4106 } 4107 4108 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4109 if (error != 0) 4110 break; 4111 ifp = ifunit(ps.ifname); 4112 if (ifp != NULL) { 4113 psp->baudrate32 = 4114 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4115 if (cmd == DIOCGIFSPEEDV1) 4116 psp->baudrate = ifp->if_baudrate; 4117 } else { 4118 error = EINVAL; 4119 } 4120 break; 4121 } 4122 4123 #ifdef ALTQ 4124 case DIOCSTARTALTQ: { 4125 struct pf_altq *altq; 4126 4127 PF_RULES_WLOCK(); 4128 /* enable all altq interfaces on active list */ 4129 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4130 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4131 error = pf_enable_altq(altq); 4132 if (error != 0) 4133 break; 4134 } 4135 } 4136 if (error == 0) 4137 V_pf_altq_running = 1; 4138 PF_RULES_WUNLOCK(); 4139 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4140 break; 4141 } 4142 4143 case DIOCSTOPALTQ: { 4144 struct pf_altq *altq; 4145 4146 PF_RULES_WLOCK(); 4147 /* disable all altq interfaces on active list */ 4148 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4149 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4150 error = pf_disable_altq(altq); 4151 if (error != 0) 4152 break; 4153 } 4154 } 4155 if (error == 0) 4156 V_pf_altq_running = 0; 4157 PF_RULES_WUNLOCK(); 4158 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4159 break; 4160 } 4161 4162 case DIOCADDALTQV0: 4163 case DIOCADDALTQV1: { 4164 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4165 struct pf_altq *altq, *a; 4166 struct ifnet *ifp; 4167 4168 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4169 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4170 if (error) 4171 break; 4172 altq->local_flags = 0; 4173 4174 PF_RULES_WLOCK(); 4175 if (pa->ticket != V_ticket_altqs_inactive) { 4176 PF_RULES_WUNLOCK(); 4177 free(altq, M_PFALTQ); 4178 error = EBUSY; 4179 break; 4180 } 4181 4182 /* 4183 * if this is for a queue, find the discipline and 4184 * copy the necessary fields 4185 */ 4186 if (altq->qname[0] != 0) { 4187 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4188 PF_RULES_WUNLOCK(); 4189 error = EBUSY; 4190 free(altq, M_PFALTQ); 4191 break; 4192 } 4193 altq->altq_disc = NULL; 4194 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4195 if (strncmp(a->ifname, altq->ifname, 4196 IFNAMSIZ) == 0) { 4197 altq->altq_disc = a->altq_disc; 4198 break; 4199 } 4200 } 4201 } 4202 4203 if ((ifp = ifunit(altq->ifname)) == NULL) 4204 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4205 else 4206 error = altq_add(ifp, altq); 4207 4208 if (error) { 4209 PF_RULES_WUNLOCK(); 4210 free(altq, M_PFALTQ); 4211 break; 4212 } 4213 4214 if (altq->qname[0] != 0) 4215 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4216 else 4217 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4218 /* version error check done on import above */ 4219 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4220 PF_RULES_WUNLOCK(); 4221 break; 4222 } 4223 4224 case DIOCGETALTQSV0: 4225 case DIOCGETALTQSV1: { 4226 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4227 struct pf_altq *altq; 4228 4229 PF_RULES_RLOCK(); 4230 pa->nr = 0; 4231 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4232 pa->nr++; 4233 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4234 pa->nr++; 4235 pa->ticket = V_ticket_altqs_active; 4236 PF_RULES_RUNLOCK(); 4237 break; 4238 } 4239 4240 case DIOCGETALTQV0: 4241 case DIOCGETALTQV1: { 4242 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4243 struct pf_altq *altq; 4244 4245 PF_RULES_RLOCK(); 4246 if (pa->ticket != V_ticket_altqs_active) { 4247 PF_RULES_RUNLOCK(); 4248 error = EBUSY; 4249 break; 4250 } 4251 altq = pf_altq_get_nth_active(pa->nr); 4252 if (altq == NULL) { 4253 PF_RULES_RUNLOCK(); 4254 error = EBUSY; 4255 break; 4256 } 4257 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4258 PF_RULES_RUNLOCK(); 4259 break; 4260 } 4261 4262 case DIOCCHANGEALTQV0: 4263 case DIOCCHANGEALTQV1: 4264 /* CHANGEALTQ not supported yet! */ 4265 error = ENODEV; 4266 break; 4267 4268 case DIOCGETQSTATSV0: 4269 case DIOCGETQSTATSV1: { 4270 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4271 struct pf_altq *altq; 4272 int nbytes; 4273 u_int32_t version; 4274 4275 PF_RULES_RLOCK(); 4276 if (pq->ticket != V_ticket_altqs_active) { 4277 PF_RULES_RUNLOCK(); 4278 error = EBUSY; 4279 break; 4280 } 4281 nbytes = pq->nbytes; 4282 altq = pf_altq_get_nth_active(pq->nr); 4283 if (altq == NULL) { 4284 PF_RULES_RUNLOCK(); 4285 error = EBUSY; 4286 break; 4287 } 4288 4289 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4290 PF_RULES_RUNLOCK(); 4291 error = ENXIO; 4292 break; 4293 } 4294 PF_RULES_RUNLOCK(); 4295 if (cmd == DIOCGETQSTATSV0) 4296 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4297 else 4298 version = pq->version; 4299 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4300 if (error == 0) { 4301 pq->scheduler = altq->scheduler; 4302 pq->nbytes = nbytes; 4303 } 4304 break; 4305 } 4306 #endif /* ALTQ */ 4307 4308 case DIOCBEGINADDRS: { 4309 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4310 4311 PF_RULES_WLOCK(); 4312 pf_empty_kpool(&V_pf_pabuf); 4313 pp->ticket = ++V_ticket_pabuf; 4314 PF_RULES_WUNLOCK(); 4315 break; 4316 } 4317 4318 case DIOCADDADDR: { 4319 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4320 struct pf_kpooladdr *pa; 4321 struct pfi_kkif *kif = NULL; 4322 4323 #ifndef INET 4324 if (pp->af == AF_INET) { 4325 error = EAFNOSUPPORT; 4326 break; 4327 } 4328 #endif /* INET */ 4329 #ifndef INET6 4330 if (pp->af == AF_INET6) { 4331 error = EAFNOSUPPORT; 4332 break; 4333 } 4334 #endif /* INET6 */ 4335 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4336 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4337 pp->addr.addr.type != PF_ADDR_TABLE) { 4338 error = EINVAL; 4339 break; 4340 } 4341 if (pp->addr.addr.p.dyn != NULL) { 4342 error = EINVAL; 4343 break; 4344 } 4345 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4346 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4347 if (error != 0) 4348 break; 4349 if (pa->ifname[0]) 4350 kif = pf_kkif_create(M_WAITOK); 4351 PF_RULES_WLOCK(); 4352 if (pp->ticket != V_ticket_pabuf) { 4353 PF_RULES_WUNLOCK(); 4354 if (pa->ifname[0]) 4355 pf_kkif_free(kif); 4356 free(pa, M_PFRULE); 4357 error = EBUSY; 4358 break; 4359 } 4360 if (pa->ifname[0]) { 4361 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4362 kif = NULL; 4363 pfi_kkif_ref(pa->kif); 4364 } else 4365 pa->kif = NULL; 4366 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4367 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4368 if (pa->ifname[0]) 4369 pfi_kkif_unref(pa->kif); 4370 PF_RULES_WUNLOCK(); 4371 free(pa, M_PFRULE); 4372 break; 4373 } 4374 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4375 PF_RULES_WUNLOCK(); 4376 break; 4377 } 4378 4379 case DIOCGETADDRS: { 4380 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4381 struct pf_kpool *pool; 4382 struct pf_kpooladdr *pa; 4383 4384 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4385 pp->nr = 0; 4386 4387 PF_RULES_RLOCK(); 4388 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4389 pp->r_num, 0, 1, 0); 4390 if (pool == NULL) { 4391 PF_RULES_RUNLOCK(); 4392 error = EBUSY; 4393 break; 4394 } 4395 TAILQ_FOREACH(pa, &pool->list, entries) 4396 pp->nr++; 4397 PF_RULES_RUNLOCK(); 4398 break; 4399 } 4400 4401 case DIOCGETADDR: { 4402 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4403 struct pf_kpool *pool; 4404 struct pf_kpooladdr *pa; 4405 u_int32_t nr = 0; 4406 4407 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4408 4409 PF_RULES_RLOCK(); 4410 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4411 pp->r_num, 0, 1, 1); 4412 if (pool == NULL) { 4413 PF_RULES_RUNLOCK(); 4414 error = EBUSY; 4415 break; 4416 } 4417 pa = TAILQ_FIRST(&pool->list); 4418 while ((pa != NULL) && (nr < pp->nr)) { 4419 pa = TAILQ_NEXT(pa, entries); 4420 nr++; 4421 } 4422 if (pa == NULL) { 4423 PF_RULES_RUNLOCK(); 4424 error = EBUSY; 4425 break; 4426 } 4427 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4428 pf_addr_copyout(&pp->addr.addr); 4429 PF_RULES_RUNLOCK(); 4430 break; 4431 } 4432 4433 case DIOCCHANGEADDR: { 4434 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4435 struct pf_kpool *pool; 4436 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4437 struct pf_kruleset *ruleset; 4438 struct pfi_kkif *kif = NULL; 4439 4440 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4441 4442 if (pca->action < PF_CHANGE_ADD_HEAD || 4443 pca->action > PF_CHANGE_REMOVE) { 4444 error = EINVAL; 4445 break; 4446 } 4447 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4448 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4449 pca->addr.addr.type != PF_ADDR_TABLE) { 4450 error = EINVAL; 4451 break; 4452 } 4453 if (pca->addr.addr.p.dyn != NULL) { 4454 error = EINVAL; 4455 break; 4456 } 4457 4458 if (pca->action != PF_CHANGE_REMOVE) { 4459 #ifndef INET 4460 if (pca->af == AF_INET) { 4461 error = EAFNOSUPPORT; 4462 break; 4463 } 4464 #endif /* INET */ 4465 #ifndef INET6 4466 if (pca->af == AF_INET6) { 4467 error = EAFNOSUPPORT; 4468 break; 4469 } 4470 #endif /* INET6 */ 4471 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4472 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4473 if (newpa->ifname[0]) 4474 kif = pf_kkif_create(M_WAITOK); 4475 newpa->kif = NULL; 4476 } 4477 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4478 PF_RULES_WLOCK(); 4479 ruleset = pf_find_kruleset(pca->anchor); 4480 if (ruleset == NULL) 4481 ERROUT(EBUSY); 4482 4483 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4484 pca->r_num, pca->r_last, 1, 1); 4485 if (pool == NULL) 4486 ERROUT(EBUSY); 4487 4488 if (pca->action != PF_CHANGE_REMOVE) { 4489 if (newpa->ifname[0]) { 4490 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4491 pfi_kkif_ref(newpa->kif); 4492 kif = NULL; 4493 } 4494 4495 switch (newpa->addr.type) { 4496 case PF_ADDR_DYNIFTL: 4497 error = pfi_dynaddr_setup(&newpa->addr, 4498 pca->af); 4499 break; 4500 case PF_ADDR_TABLE: 4501 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4502 newpa->addr.v.tblname); 4503 if (newpa->addr.p.tbl == NULL) 4504 error = ENOMEM; 4505 break; 4506 } 4507 if (error) 4508 goto DIOCCHANGEADDR_error; 4509 } 4510 4511 switch (pca->action) { 4512 case PF_CHANGE_ADD_HEAD: 4513 oldpa = TAILQ_FIRST(&pool->list); 4514 break; 4515 case PF_CHANGE_ADD_TAIL: 4516 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4517 break; 4518 default: 4519 oldpa = TAILQ_FIRST(&pool->list); 4520 for (int i = 0; oldpa && i < pca->nr; i++) 4521 oldpa = TAILQ_NEXT(oldpa, entries); 4522 4523 if (oldpa == NULL) 4524 ERROUT(EINVAL); 4525 } 4526 4527 if (pca->action == PF_CHANGE_REMOVE) { 4528 TAILQ_REMOVE(&pool->list, oldpa, entries); 4529 switch (oldpa->addr.type) { 4530 case PF_ADDR_DYNIFTL: 4531 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4532 break; 4533 case PF_ADDR_TABLE: 4534 pfr_detach_table(oldpa->addr.p.tbl); 4535 break; 4536 } 4537 if (oldpa->kif) 4538 pfi_kkif_unref(oldpa->kif); 4539 free(oldpa, M_PFRULE); 4540 } else { 4541 if (oldpa == NULL) 4542 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4543 else if (pca->action == PF_CHANGE_ADD_HEAD || 4544 pca->action == PF_CHANGE_ADD_BEFORE) 4545 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4546 else 4547 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4548 newpa, entries); 4549 } 4550 4551 pool->cur = TAILQ_FIRST(&pool->list); 4552 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4553 PF_RULES_WUNLOCK(); 4554 break; 4555 4556 #undef ERROUT 4557 DIOCCHANGEADDR_error: 4558 if (newpa != NULL) { 4559 if (newpa->kif) 4560 pfi_kkif_unref(newpa->kif); 4561 free(newpa, M_PFRULE); 4562 } 4563 PF_RULES_WUNLOCK(); 4564 pf_kkif_free(kif); 4565 break; 4566 } 4567 4568 case DIOCGETRULESETS: { 4569 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4570 struct pf_kruleset *ruleset; 4571 struct pf_kanchor *anchor; 4572 4573 pr->path[sizeof(pr->path) - 1] = 0; 4574 4575 PF_RULES_RLOCK(); 4576 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4577 PF_RULES_RUNLOCK(); 4578 error = ENOENT; 4579 break; 4580 } 4581 pr->nr = 0; 4582 if (ruleset->anchor == NULL) { 4583 /* XXX kludge for pf_main_ruleset */ 4584 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4585 if (anchor->parent == NULL) 4586 pr->nr++; 4587 } else { 4588 RB_FOREACH(anchor, pf_kanchor_node, 4589 &ruleset->anchor->children) 4590 pr->nr++; 4591 } 4592 PF_RULES_RUNLOCK(); 4593 break; 4594 } 4595 4596 case DIOCGETRULESET: { 4597 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4598 struct pf_kruleset *ruleset; 4599 struct pf_kanchor *anchor; 4600 u_int32_t nr = 0; 4601 4602 pr->path[sizeof(pr->path) - 1] = 0; 4603 4604 PF_RULES_RLOCK(); 4605 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4606 PF_RULES_RUNLOCK(); 4607 error = ENOENT; 4608 break; 4609 } 4610 pr->name[0] = 0; 4611 if (ruleset->anchor == NULL) { 4612 /* XXX kludge for pf_main_ruleset */ 4613 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4614 if (anchor->parent == NULL && nr++ == pr->nr) { 4615 strlcpy(pr->name, anchor->name, 4616 sizeof(pr->name)); 4617 break; 4618 } 4619 } else { 4620 RB_FOREACH(anchor, pf_kanchor_node, 4621 &ruleset->anchor->children) 4622 if (nr++ == pr->nr) { 4623 strlcpy(pr->name, anchor->name, 4624 sizeof(pr->name)); 4625 break; 4626 } 4627 } 4628 if (!pr->name[0]) 4629 error = EBUSY; 4630 PF_RULES_RUNLOCK(); 4631 break; 4632 } 4633 4634 case DIOCRCLRTABLES: { 4635 struct pfioc_table *io = (struct pfioc_table *)addr; 4636 4637 if (io->pfrio_esize != 0) { 4638 error = ENODEV; 4639 break; 4640 } 4641 PF_RULES_WLOCK(); 4642 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4643 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4644 PF_RULES_WUNLOCK(); 4645 break; 4646 } 4647 4648 case DIOCRADDTABLES: { 4649 struct pfioc_table *io = (struct pfioc_table *)addr; 4650 struct pfr_table *pfrts; 4651 size_t totlen; 4652 4653 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4654 error = ENODEV; 4655 break; 4656 } 4657 4658 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4659 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4660 error = ENOMEM; 4661 break; 4662 } 4663 4664 totlen = io->pfrio_size * sizeof(struct pfr_table); 4665 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4666 M_TEMP, M_WAITOK); 4667 error = copyin(io->pfrio_buffer, pfrts, totlen); 4668 if (error) { 4669 free(pfrts, M_TEMP); 4670 break; 4671 } 4672 PF_RULES_WLOCK(); 4673 error = pfr_add_tables(pfrts, io->pfrio_size, 4674 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4675 PF_RULES_WUNLOCK(); 4676 free(pfrts, M_TEMP); 4677 break; 4678 } 4679 4680 case DIOCRDELTABLES: { 4681 struct pfioc_table *io = (struct pfioc_table *)addr; 4682 struct pfr_table *pfrts; 4683 size_t totlen; 4684 4685 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4686 error = ENODEV; 4687 break; 4688 } 4689 4690 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4691 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4692 error = ENOMEM; 4693 break; 4694 } 4695 4696 totlen = io->pfrio_size * sizeof(struct pfr_table); 4697 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4698 M_TEMP, M_WAITOK); 4699 error = copyin(io->pfrio_buffer, pfrts, totlen); 4700 if (error) { 4701 free(pfrts, M_TEMP); 4702 break; 4703 } 4704 PF_RULES_WLOCK(); 4705 error = pfr_del_tables(pfrts, io->pfrio_size, 4706 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4707 PF_RULES_WUNLOCK(); 4708 free(pfrts, M_TEMP); 4709 break; 4710 } 4711 4712 case DIOCRGETTABLES: { 4713 struct pfioc_table *io = (struct pfioc_table *)addr; 4714 struct pfr_table *pfrts; 4715 size_t totlen; 4716 int n; 4717 4718 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4719 error = ENODEV; 4720 break; 4721 } 4722 PF_RULES_RLOCK(); 4723 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4724 if (n < 0) { 4725 PF_RULES_RUNLOCK(); 4726 error = EINVAL; 4727 break; 4728 } 4729 io->pfrio_size = min(io->pfrio_size, n); 4730 4731 totlen = io->pfrio_size * sizeof(struct pfr_table); 4732 4733 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4734 M_TEMP, M_NOWAIT | M_ZERO); 4735 if (pfrts == NULL) { 4736 error = ENOMEM; 4737 PF_RULES_RUNLOCK(); 4738 break; 4739 } 4740 error = pfr_get_tables(&io->pfrio_table, pfrts, 4741 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4742 PF_RULES_RUNLOCK(); 4743 if (error == 0) 4744 error = copyout(pfrts, io->pfrio_buffer, totlen); 4745 free(pfrts, M_TEMP); 4746 break; 4747 } 4748 4749 case DIOCRGETTSTATS: { 4750 struct pfioc_table *io = (struct pfioc_table *)addr; 4751 struct pfr_tstats *pfrtstats; 4752 size_t totlen; 4753 int n; 4754 4755 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4756 error = ENODEV; 4757 break; 4758 } 4759 PF_TABLE_STATS_LOCK(); 4760 PF_RULES_RLOCK(); 4761 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4762 if (n < 0) { 4763 PF_RULES_RUNLOCK(); 4764 PF_TABLE_STATS_UNLOCK(); 4765 error = EINVAL; 4766 break; 4767 } 4768 io->pfrio_size = min(io->pfrio_size, n); 4769 4770 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4771 pfrtstats = mallocarray(io->pfrio_size, 4772 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4773 if (pfrtstats == NULL) { 4774 error = ENOMEM; 4775 PF_RULES_RUNLOCK(); 4776 PF_TABLE_STATS_UNLOCK(); 4777 break; 4778 } 4779 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4780 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4781 PF_RULES_RUNLOCK(); 4782 PF_TABLE_STATS_UNLOCK(); 4783 if (error == 0) 4784 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4785 free(pfrtstats, M_TEMP); 4786 break; 4787 } 4788 4789 case DIOCRCLRTSTATS: { 4790 struct pfioc_table *io = (struct pfioc_table *)addr; 4791 struct pfr_table *pfrts; 4792 size_t totlen; 4793 4794 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4795 error = ENODEV; 4796 break; 4797 } 4798 4799 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4800 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4801 /* We used to count tables and use the minimum required 4802 * size, so we didn't fail on overly large requests. 4803 * Keep doing so. */ 4804 io->pfrio_size = pf_ioctl_maxcount; 4805 break; 4806 } 4807 4808 totlen = io->pfrio_size * sizeof(struct pfr_table); 4809 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4810 M_TEMP, M_WAITOK); 4811 error = copyin(io->pfrio_buffer, pfrts, totlen); 4812 if (error) { 4813 free(pfrts, M_TEMP); 4814 break; 4815 } 4816 4817 PF_TABLE_STATS_LOCK(); 4818 PF_RULES_RLOCK(); 4819 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4820 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4821 PF_RULES_RUNLOCK(); 4822 PF_TABLE_STATS_UNLOCK(); 4823 free(pfrts, M_TEMP); 4824 break; 4825 } 4826 4827 case DIOCRSETTFLAGS: { 4828 struct pfioc_table *io = (struct pfioc_table *)addr; 4829 struct pfr_table *pfrts; 4830 size_t totlen; 4831 int n; 4832 4833 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4834 error = ENODEV; 4835 break; 4836 } 4837 4838 PF_RULES_RLOCK(); 4839 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4840 if (n < 0) { 4841 PF_RULES_RUNLOCK(); 4842 error = EINVAL; 4843 break; 4844 } 4845 4846 io->pfrio_size = min(io->pfrio_size, n); 4847 PF_RULES_RUNLOCK(); 4848 4849 totlen = io->pfrio_size * sizeof(struct pfr_table); 4850 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4851 M_TEMP, M_WAITOK); 4852 error = copyin(io->pfrio_buffer, pfrts, totlen); 4853 if (error) { 4854 free(pfrts, M_TEMP); 4855 break; 4856 } 4857 PF_RULES_WLOCK(); 4858 error = pfr_set_tflags(pfrts, io->pfrio_size, 4859 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4860 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4861 PF_RULES_WUNLOCK(); 4862 free(pfrts, M_TEMP); 4863 break; 4864 } 4865 4866 case DIOCRCLRADDRS: { 4867 struct pfioc_table *io = (struct pfioc_table *)addr; 4868 4869 if (io->pfrio_esize != 0) { 4870 error = ENODEV; 4871 break; 4872 } 4873 PF_RULES_WLOCK(); 4874 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4875 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4876 PF_RULES_WUNLOCK(); 4877 break; 4878 } 4879 4880 case DIOCRADDADDRS: { 4881 struct pfioc_table *io = (struct pfioc_table *)addr; 4882 struct pfr_addr *pfras; 4883 size_t totlen; 4884 4885 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4886 error = ENODEV; 4887 break; 4888 } 4889 if (io->pfrio_size < 0 || 4890 io->pfrio_size > pf_ioctl_maxcount || 4891 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4892 error = EINVAL; 4893 break; 4894 } 4895 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4896 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4897 M_TEMP, M_WAITOK); 4898 error = copyin(io->pfrio_buffer, pfras, totlen); 4899 if (error) { 4900 free(pfras, M_TEMP); 4901 break; 4902 } 4903 PF_RULES_WLOCK(); 4904 error = pfr_add_addrs(&io->pfrio_table, pfras, 4905 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4906 PFR_FLAG_USERIOCTL); 4907 PF_RULES_WUNLOCK(); 4908 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4909 error = copyout(pfras, io->pfrio_buffer, totlen); 4910 free(pfras, M_TEMP); 4911 break; 4912 } 4913 4914 case DIOCRDELADDRS: { 4915 struct pfioc_table *io = (struct pfioc_table *)addr; 4916 struct pfr_addr *pfras; 4917 size_t totlen; 4918 4919 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4920 error = ENODEV; 4921 break; 4922 } 4923 if (io->pfrio_size < 0 || 4924 io->pfrio_size > pf_ioctl_maxcount || 4925 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4926 error = EINVAL; 4927 break; 4928 } 4929 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4930 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4931 M_TEMP, M_WAITOK); 4932 error = copyin(io->pfrio_buffer, pfras, totlen); 4933 if (error) { 4934 free(pfras, M_TEMP); 4935 break; 4936 } 4937 PF_RULES_WLOCK(); 4938 error = pfr_del_addrs(&io->pfrio_table, pfras, 4939 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4940 PFR_FLAG_USERIOCTL); 4941 PF_RULES_WUNLOCK(); 4942 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4943 error = copyout(pfras, io->pfrio_buffer, totlen); 4944 free(pfras, M_TEMP); 4945 break; 4946 } 4947 4948 case DIOCRSETADDRS: { 4949 struct pfioc_table *io = (struct pfioc_table *)addr; 4950 struct pfr_addr *pfras; 4951 size_t totlen, count; 4952 4953 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4954 error = ENODEV; 4955 break; 4956 } 4957 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4958 error = EINVAL; 4959 break; 4960 } 4961 count = max(io->pfrio_size, io->pfrio_size2); 4962 if (count > pf_ioctl_maxcount || 4963 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4964 error = EINVAL; 4965 break; 4966 } 4967 totlen = count * sizeof(struct pfr_addr); 4968 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4969 M_WAITOK); 4970 error = copyin(io->pfrio_buffer, pfras, totlen); 4971 if (error) { 4972 free(pfras, M_TEMP); 4973 break; 4974 } 4975 PF_RULES_WLOCK(); 4976 error = pfr_set_addrs(&io->pfrio_table, pfras, 4977 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4978 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4979 PFR_FLAG_USERIOCTL, 0); 4980 PF_RULES_WUNLOCK(); 4981 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4982 error = copyout(pfras, io->pfrio_buffer, totlen); 4983 free(pfras, M_TEMP); 4984 break; 4985 } 4986 4987 case DIOCRGETADDRS: { 4988 struct pfioc_table *io = (struct pfioc_table *)addr; 4989 struct pfr_addr *pfras; 4990 size_t totlen; 4991 4992 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4993 error = ENODEV; 4994 break; 4995 } 4996 if (io->pfrio_size < 0 || 4997 io->pfrio_size > pf_ioctl_maxcount || 4998 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4999 error = EINVAL; 5000 break; 5001 } 5002 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5003 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5004 M_TEMP, M_WAITOK | M_ZERO); 5005 PF_RULES_RLOCK(); 5006 error = pfr_get_addrs(&io->pfrio_table, pfras, 5007 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5008 PF_RULES_RUNLOCK(); 5009 if (error == 0) 5010 error = copyout(pfras, io->pfrio_buffer, totlen); 5011 free(pfras, M_TEMP); 5012 break; 5013 } 5014 5015 case DIOCRGETASTATS: { 5016 struct pfioc_table *io = (struct pfioc_table *)addr; 5017 struct pfr_astats *pfrastats; 5018 size_t totlen; 5019 5020 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 5021 error = ENODEV; 5022 break; 5023 } 5024 if (io->pfrio_size < 0 || 5025 io->pfrio_size > pf_ioctl_maxcount || 5026 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 5027 error = EINVAL; 5028 break; 5029 } 5030 totlen = io->pfrio_size * sizeof(struct pfr_astats); 5031 pfrastats = mallocarray(io->pfrio_size, 5032 sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 5033 PF_RULES_RLOCK(); 5034 error = pfr_get_astats(&io->pfrio_table, pfrastats, 5035 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5036 PF_RULES_RUNLOCK(); 5037 if (error == 0) 5038 error = copyout(pfrastats, io->pfrio_buffer, totlen); 5039 free(pfrastats, M_TEMP); 5040 break; 5041 } 5042 5043 case DIOCRCLRASTATS: { 5044 struct pfioc_table *io = (struct pfioc_table *)addr; 5045 struct pfr_addr *pfras; 5046 size_t totlen; 5047 5048 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5049 error = ENODEV; 5050 break; 5051 } 5052 if (io->pfrio_size < 0 || 5053 io->pfrio_size > pf_ioctl_maxcount || 5054 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5055 error = EINVAL; 5056 break; 5057 } 5058 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5059 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5060 M_TEMP, M_WAITOK); 5061 error = copyin(io->pfrio_buffer, pfras, totlen); 5062 if (error) { 5063 free(pfras, M_TEMP); 5064 break; 5065 } 5066 PF_RULES_WLOCK(); 5067 error = pfr_clr_astats(&io->pfrio_table, pfras, 5068 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 5069 PFR_FLAG_USERIOCTL); 5070 PF_RULES_WUNLOCK(); 5071 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 5072 error = copyout(pfras, io->pfrio_buffer, totlen); 5073 free(pfras, M_TEMP); 5074 break; 5075 } 5076 5077 case DIOCRTSTADDRS: { 5078 struct pfioc_table *io = (struct pfioc_table *)addr; 5079 struct pfr_addr *pfras; 5080 size_t totlen; 5081 5082 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5083 error = ENODEV; 5084 break; 5085 } 5086 if (io->pfrio_size < 0 || 5087 io->pfrio_size > pf_ioctl_maxcount || 5088 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5089 error = EINVAL; 5090 break; 5091 } 5092 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5093 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5094 M_TEMP, M_WAITOK); 5095 error = copyin(io->pfrio_buffer, pfras, totlen); 5096 if (error) { 5097 free(pfras, M_TEMP); 5098 break; 5099 } 5100 PF_RULES_RLOCK(); 5101 error = pfr_tst_addrs(&io->pfrio_table, pfras, 5102 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 5103 PFR_FLAG_USERIOCTL); 5104 PF_RULES_RUNLOCK(); 5105 if (error == 0) 5106 error = copyout(pfras, io->pfrio_buffer, totlen); 5107 free(pfras, M_TEMP); 5108 break; 5109 } 5110 5111 case DIOCRINADEFINE: { 5112 struct pfioc_table *io = (struct pfioc_table *)addr; 5113 struct pfr_addr *pfras; 5114 size_t totlen; 5115 5116 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5117 error = ENODEV; 5118 break; 5119 } 5120 if (io->pfrio_size < 0 || 5121 io->pfrio_size > pf_ioctl_maxcount || 5122 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5123 error = EINVAL; 5124 break; 5125 } 5126 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5127 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5128 M_TEMP, M_WAITOK); 5129 error = copyin(io->pfrio_buffer, pfras, totlen); 5130 if (error) { 5131 free(pfras, M_TEMP); 5132 break; 5133 } 5134 PF_RULES_WLOCK(); 5135 error = pfr_ina_define(&io->pfrio_table, pfras, 5136 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 5137 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5138 PF_RULES_WUNLOCK(); 5139 free(pfras, M_TEMP); 5140 break; 5141 } 5142 5143 case DIOCOSFPADD: { 5144 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5145 PF_RULES_WLOCK(); 5146 error = pf_osfp_add(io); 5147 PF_RULES_WUNLOCK(); 5148 break; 5149 } 5150 5151 case DIOCOSFPGET: { 5152 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5153 PF_RULES_RLOCK(); 5154 error = pf_osfp_get(io); 5155 PF_RULES_RUNLOCK(); 5156 break; 5157 } 5158 5159 case DIOCXBEGIN: { 5160 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5161 struct pfioc_trans_e *ioes, *ioe; 5162 size_t totlen; 5163 int i; 5164 5165 if (io->esize != sizeof(*ioe)) { 5166 error = ENODEV; 5167 break; 5168 } 5169 if (io->size < 0 || 5170 io->size > pf_ioctl_maxcount || 5171 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5172 error = EINVAL; 5173 break; 5174 } 5175 totlen = sizeof(struct pfioc_trans_e) * io->size; 5176 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5177 M_TEMP, M_WAITOK); 5178 error = copyin(io->array, ioes, totlen); 5179 if (error) { 5180 free(ioes, M_TEMP); 5181 break; 5182 } 5183 /* Ensure there's no more ethernet rules to clean up. */ 5184 NET_EPOCH_DRAIN_CALLBACKS(); 5185 PF_RULES_WLOCK(); 5186 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5187 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5188 switch (ioe->rs_num) { 5189 case PF_RULESET_ETH: 5190 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { 5191 PF_RULES_WUNLOCK(); 5192 free(ioes, M_TEMP); 5193 goto fail; 5194 } 5195 break; 5196 #ifdef ALTQ 5197 case PF_RULESET_ALTQ: 5198 if (ioe->anchor[0]) { 5199 PF_RULES_WUNLOCK(); 5200 free(ioes, M_TEMP); 5201 error = EINVAL; 5202 goto fail; 5203 } 5204 if ((error = pf_begin_altq(&ioe->ticket))) { 5205 PF_RULES_WUNLOCK(); 5206 free(ioes, M_TEMP); 5207 goto fail; 5208 } 5209 break; 5210 #endif /* ALTQ */ 5211 case PF_RULESET_TABLE: 5212 { 5213 struct pfr_table table; 5214 5215 bzero(&table, sizeof(table)); 5216 strlcpy(table.pfrt_anchor, ioe->anchor, 5217 sizeof(table.pfrt_anchor)); 5218 if ((error = pfr_ina_begin(&table, 5219 &ioe->ticket, NULL, 0))) { 5220 PF_RULES_WUNLOCK(); 5221 free(ioes, M_TEMP); 5222 goto fail; 5223 } 5224 break; 5225 } 5226 default: 5227 if ((error = pf_begin_rules(&ioe->ticket, 5228 ioe->rs_num, ioe->anchor))) { 5229 PF_RULES_WUNLOCK(); 5230 free(ioes, M_TEMP); 5231 goto fail; 5232 } 5233 break; 5234 } 5235 } 5236 PF_RULES_WUNLOCK(); 5237 error = copyout(ioes, io->array, totlen); 5238 free(ioes, M_TEMP); 5239 break; 5240 } 5241 5242 case DIOCXROLLBACK: { 5243 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5244 struct pfioc_trans_e *ioe, *ioes; 5245 size_t totlen; 5246 int i; 5247 5248 if (io->esize != sizeof(*ioe)) { 5249 error = ENODEV; 5250 break; 5251 } 5252 if (io->size < 0 || 5253 io->size > pf_ioctl_maxcount || 5254 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5255 error = EINVAL; 5256 break; 5257 } 5258 totlen = sizeof(struct pfioc_trans_e) * io->size; 5259 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5260 M_TEMP, M_WAITOK); 5261 error = copyin(io->array, ioes, totlen); 5262 if (error) { 5263 free(ioes, M_TEMP); 5264 break; 5265 } 5266 PF_RULES_WLOCK(); 5267 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5268 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5269 switch (ioe->rs_num) { 5270 case PF_RULESET_ETH: 5271 if ((error = pf_rollback_eth(ioe->ticket, 5272 ioe->anchor))) { 5273 PF_RULES_WUNLOCK(); 5274 free(ioes, M_TEMP); 5275 goto fail; /* really bad */ 5276 } 5277 break; 5278 #ifdef ALTQ 5279 case PF_RULESET_ALTQ: 5280 if (ioe->anchor[0]) { 5281 PF_RULES_WUNLOCK(); 5282 free(ioes, M_TEMP); 5283 error = EINVAL; 5284 goto fail; 5285 } 5286 if ((error = pf_rollback_altq(ioe->ticket))) { 5287 PF_RULES_WUNLOCK(); 5288 free(ioes, M_TEMP); 5289 goto fail; /* really bad */ 5290 } 5291 break; 5292 #endif /* ALTQ */ 5293 case PF_RULESET_TABLE: 5294 { 5295 struct pfr_table table; 5296 5297 bzero(&table, sizeof(table)); 5298 strlcpy(table.pfrt_anchor, ioe->anchor, 5299 sizeof(table.pfrt_anchor)); 5300 if ((error = pfr_ina_rollback(&table, 5301 ioe->ticket, NULL, 0))) { 5302 PF_RULES_WUNLOCK(); 5303 free(ioes, M_TEMP); 5304 goto fail; /* really bad */ 5305 } 5306 break; 5307 } 5308 default: 5309 if ((error = pf_rollback_rules(ioe->ticket, 5310 ioe->rs_num, ioe->anchor))) { 5311 PF_RULES_WUNLOCK(); 5312 free(ioes, M_TEMP); 5313 goto fail; /* really bad */ 5314 } 5315 break; 5316 } 5317 } 5318 PF_RULES_WUNLOCK(); 5319 free(ioes, M_TEMP); 5320 break; 5321 } 5322 5323 case DIOCXCOMMIT: { 5324 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5325 struct pfioc_trans_e *ioe, *ioes; 5326 struct pf_kruleset *rs; 5327 struct pf_keth_ruleset *ers; 5328 size_t totlen; 5329 int i; 5330 5331 if (io->esize != sizeof(*ioe)) { 5332 error = ENODEV; 5333 break; 5334 } 5335 5336 if (io->size < 0 || 5337 io->size > pf_ioctl_maxcount || 5338 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5339 error = EINVAL; 5340 break; 5341 } 5342 5343 totlen = sizeof(struct pfioc_trans_e) * io->size; 5344 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5345 M_TEMP, M_WAITOK); 5346 error = copyin(io->array, ioes, totlen); 5347 if (error) { 5348 free(ioes, M_TEMP); 5349 break; 5350 } 5351 PF_RULES_WLOCK(); 5352 /* First makes sure everything will succeed. */ 5353 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5354 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; 5355 switch (ioe->rs_num) { 5356 case PF_RULESET_ETH: 5357 ers = pf_find_keth_ruleset(ioe->anchor); 5358 if (ers == NULL || ioe->ticket == 0 || 5359 ioe->ticket != ers->inactive.ticket) { 5360 PF_RULES_WUNLOCK(); 5361 free(ioes, M_TEMP); 5362 error = EINVAL; 5363 goto fail; 5364 } 5365 break; 5366 #ifdef ALTQ 5367 case PF_RULESET_ALTQ: 5368 if (ioe->anchor[0]) { 5369 PF_RULES_WUNLOCK(); 5370 free(ioes, M_TEMP); 5371 error = EINVAL; 5372 goto fail; 5373 } 5374 if (!V_altqs_inactive_open || ioe->ticket != 5375 V_ticket_altqs_inactive) { 5376 PF_RULES_WUNLOCK(); 5377 free(ioes, M_TEMP); 5378 error = EBUSY; 5379 goto fail; 5380 } 5381 break; 5382 #endif /* ALTQ */ 5383 case PF_RULESET_TABLE: 5384 rs = pf_find_kruleset(ioe->anchor); 5385 if (rs == NULL || !rs->topen || ioe->ticket != 5386 rs->tticket) { 5387 PF_RULES_WUNLOCK(); 5388 free(ioes, M_TEMP); 5389 error = EBUSY; 5390 goto fail; 5391 } 5392 break; 5393 default: 5394 if (ioe->rs_num < 0 || ioe->rs_num >= 5395 PF_RULESET_MAX) { 5396 PF_RULES_WUNLOCK(); 5397 free(ioes, M_TEMP); 5398 error = EINVAL; 5399 goto fail; 5400 } 5401 rs = pf_find_kruleset(ioe->anchor); 5402 if (rs == NULL || 5403 !rs->rules[ioe->rs_num].inactive.open || 5404 rs->rules[ioe->rs_num].inactive.ticket != 5405 ioe->ticket) { 5406 PF_RULES_WUNLOCK(); 5407 free(ioes, M_TEMP); 5408 error = EBUSY; 5409 goto fail; 5410 } 5411 break; 5412 } 5413 } 5414 /* Now do the commit - no errors should happen here. */ 5415 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5416 switch (ioe->rs_num) { 5417 case PF_RULESET_ETH: 5418 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { 5419 PF_RULES_WUNLOCK(); 5420 free(ioes, M_TEMP); 5421 goto fail; /* really bad */ 5422 } 5423 break; 5424 #ifdef ALTQ 5425 case PF_RULESET_ALTQ: 5426 if ((error = pf_commit_altq(ioe->ticket))) { 5427 PF_RULES_WUNLOCK(); 5428 free(ioes, M_TEMP); 5429 goto fail; /* really bad */ 5430 } 5431 break; 5432 #endif /* ALTQ */ 5433 case PF_RULESET_TABLE: 5434 { 5435 struct pfr_table table; 5436 5437 bzero(&table, sizeof(table)); 5438 (void)strlcpy(table.pfrt_anchor, ioe->anchor, 5439 sizeof(table.pfrt_anchor)); 5440 if ((error = pfr_ina_commit(&table, 5441 ioe->ticket, NULL, NULL, 0))) { 5442 PF_RULES_WUNLOCK(); 5443 free(ioes, M_TEMP); 5444 goto fail; /* really bad */ 5445 } 5446 break; 5447 } 5448 default: 5449 if ((error = pf_commit_rules(ioe->ticket, 5450 ioe->rs_num, ioe->anchor))) { 5451 PF_RULES_WUNLOCK(); 5452 free(ioes, M_TEMP); 5453 goto fail; /* really bad */ 5454 } 5455 break; 5456 } 5457 } 5458 PF_RULES_WUNLOCK(); 5459 5460 /* Only hook into EtherNet taffic if we've got rules for it. */ 5461 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 5462 hook_pf_eth(); 5463 else 5464 dehook_pf_eth(); 5465 5466 free(ioes, M_TEMP); 5467 break; 5468 } 5469 5470 case DIOCGETSRCNODES: { 5471 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 5472 struct pf_srchash *sh; 5473 struct pf_ksrc_node *n; 5474 struct pf_src_node *p, *pstore; 5475 uint32_t i, nr = 0; 5476 5477 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5478 i++, sh++) { 5479 PF_HASHROW_LOCK(sh); 5480 LIST_FOREACH(n, &sh->nodes, entry) 5481 nr++; 5482 PF_HASHROW_UNLOCK(sh); 5483 } 5484 5485 psn->psn_len = min(psn->psn_len, 5486 sizeof(struct pf_src_node) * nr); 5487 5488 if (psn->psn_len == 0) { 5489 psn->psn_len = sizeof(struct pf_src_node) * nr; 5490 break; 5491 } 5492 5493 nr = 0; 5494 5495 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); 5496 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5497 i++, sh++) { 5498 PF_HASHROW_LOCK(sh); 5499 LIST_FOREACH(n, &sh->nodes, entry) { 5500 5501 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 5502 break; 5503 5504 pf_src_node_copy(n, p); 5505 5506 p++; 5507 nr++; 5508 } 5509 PF_HASHROW_UNLOCK(sh); 5510 } 5511 error = copyout(pstore, psn->psn_src_nodes, 5512 sizeof(struct pf_src_node) * nr); 5513 if (error) { 5514 free(pstore, M_TEMP); 5515 break; 5516 } 5517 psn->psn_len = sizeof(struct pf_src_node) * nr; 5518 free(pstore, M_TEMP); 5519 break; 5520 } 5521 5522 case DIOCCLRSRCNODES: { 5523 pf_clear_srcnodes(NULL); 5524 pf_purge_expired_src_nodes(); 5525 break; 5526 } 5527 5528 case DIOCKILLSRCNODES: 5529 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 5530 break; 5531 5532 #ifdef COMPAT_FREEBSD13 5533 case DIOCKEEPCOUNTERS_FREEBSD13: 5534 #endif 5535 case DIOCKEEPCOUNTERS: 5536 error = pf_keepcounters((struct pfioc_nv *)addr); 5537 break; 5538 5539 case DIOCGETSYNCOOKIES: 5540 error = pf_get_syncookies((struct pfioc_nv *)addr); 5541 break; 5542 5543 case DIOCSETSYNCOOKIES: 5544 error = pf_set_syncookies((struct pfioc_nv *)addr); 5545 break; 5546 5547 case DIOCSETHOSTID: { 5548 u_int32_t *hostid = (u_int32_t *)addr; 5549 5550 PF_RULES_WLOCK(); 5551 if (*hostid == 0) 5552 V_pf_status.hostid = arc4random(); 5553 else 5554 V_pf_status.hostid = *hostid; 5555 PF_RULES_WUNLOCK(); 5556 break; 5557 } 5558 5559 case DIOCOSFPFLUSH: 5560 PF_RULES_WLOCK(); 5561 pf_osfp_flush(); 5562 PF_RULES_WUNLOCK(); 5563 break; 5564 5565 case DIOCIGETIFACES: { 5566 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5567 struct pfi_kif *ifstore; 5568 size_t bufsiz; 5569 5570 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5571 error = ENODEV; 5572 break; 5573 } 5574 5575 if (io->pfiio_size < 0 || 5576 io->pfiio_size > pf_ioctl_maxcount || 5577 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5578 error = EINVAL; 5579 break; 5580 } 5581 5582 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5583 5584 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5585 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5586 M_TEMP, M_WAITOK | M_ZERO); 5587 5588 PF_RULES_RLOCK(); 5589 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5590 PF_RULES_RUNLOCK(); 5591 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5592 free(ifstore, M_TEMP); 5593 break; 5594 } 5595 5596 case DIOCSETIFFLAG: { 5597 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5598 5599 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5600 5601 PF_RULES_WLOCK(); 5602 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5603 PF_RULES_WUNLOCK(); 5604 break; 5605 } 5606 5607 case DIOCCLRIFFLAG: { 5608 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5609 5610 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5611 5612 PF_RULES_WLOCK(); 5613 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5614 PF_RULES_WUNLOCK(); 5615 break; 5616 } 5617 5618 default: 5619 error = ENODEV; 5620 break; 5621 } 5622 fail: 5623 if (sx_xlocked(&pf_ioctl_lock)) 5624 sx_xunlock(&pf_ioctl_lock); 5625 CURVNET_RESTORE(); 5626 5627 #undef ERROUT_IOCTL 5628 5629 return (error); 5630 } 5631 5632 void 5633 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st) 5634 { 5635 bzero(sp, sizeof(struct pfsync_state)); 5636 5637 /* copy from state key */ 5638 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5639 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5640 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5641 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5642 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5643 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5644 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5645 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5646 sp->proto = st->key[PF_SK_WIRE]->proto; 5647 sp->af = st->key[PF_SK_WIRE]->af; 5648 5649 /* copy from state */ 5650 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5651 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5652 sp->creation = htonl(time_uptime - st->creation); 5653 sp->expire = pf_state_expires(st); 5654 if (sp->expire <= time_uptime) 5655 sp->expire = htonl(0); 5656 else 5657 sp->expire = htonl(sp->expire - time_uptime); 5658 5659 sp->direction = st->direction; 5660 sp->log = st->log; 5661 sp->timeout = st->timeout; 5662 sp->state_flags = st->state_flags; 5663 if (st->src_node) 5664 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5665 if (st->nat_src_node) 5666 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5667 5668 sp->id = st->id; 5669 sp->creatorid = st->creatorid; 5670 pf_state_peer_hton(&st->src, &sp->src); 5671 pf_state_peer_hton(&st->dst, &sp->dst); 5672 5673 if (st->rule.ptr == NULL) 5674 sp->rule = htonl(-1); 5675 else 5676 sp->rule = htonl(st->rule.ptr->nr); 5677 if (st->anchor.ptr == NULL) 5678 sp->anchor = htonl(-1); 5679 else 5680 sp->anchor = htonl(st->anchor.ptr->nr); 5681 if (st->nat_rule.ptr == NULL) 5682 sp->nat_rule = htonl(-1); 5683 else 5684 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5685 5686 pf_state_counter_hton(st->packets[0], sp->packets[0]); 5687 pf_state_counter_hton(st->packets[1], sp->packets[1]); 5688 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 5689 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 5690 } 5691 5692 void 5693 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5694 { 5695 bzero(sp, sizeof(*sp)); 5696 5697 sp->version = PF_STATE_VERSION; 5698 5699 /* copy from state key */ 5700 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5701 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5702 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5703 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5704 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5705 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5706 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5707 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5708 sp->proto = st->key[PF_SK_WIRE]->proto; 5709 sp->af = st->key[PF_SK_WIRE]->af; 5710 5711 /* copy from state */ 5712 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5713 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5714 sizeof(sp->orig_ifname)); 5715 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5716 sp->creation = htonl(time_uptime - st->creation); 5717 sp->expire = pf_state_expires(st); 5718 if (sp->expire <= time_uptime) 5719 sp->expire = htonl(0); 5720 else 5721 sp->expire = htonl(sp->expire - time_uptime); 5722 5723 sp->direction = st->direction; 5724 sp->log = st->log; 5725 sp->timeout = st->timeout; 5726 sp->state_flags = st->state_flags; 5727 if (st->src_node) 5728 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5729 if (st->nat_src_node) 5730 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5731 5732 sp->id = st->id; 5733 sp->creatorid = st->creatorid; 5734 pf_state_peer_hton(&st->src, &sp->src); 5735 pf_state_peer_hton(&st->dst, &sp->dst); 5736 5737 if (st->rule.ptr == NULL) 5738 sp->rule = htonl(-1); 5739 else 5740 sp->rule = htonl(st->rule.ptr->nr); 5741 if (st->anchor.ptr == NULL) 5742 sp->anchor = htonl(-1); 5743 else 5744 sp->anchor = htonl(st->anchor.ptr->nr); 5745 if (st->nat_rule.ptr == NULL) 5746 sp->nat_rule = htonl(-1); 5747 else 5748 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5749 5750 sp->packets[0] = st->packets[0]; 5751 sp->packets[1] = st->packets[1]; 5752 sp->bytes[0] = st->bytes[0]; 5753 sp->bytes[1] = st->bytes[1]; 5754 } 5755 5756 static void 5757 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5758 { 5759 struct pfr_ktable *kt; 5760 5761 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5762 5763 kt = aw->p.tbl; 5764 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5765 kt = kt->pfrkt_root; 5766 aw->p.tbl = NULL; 5767 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 5768 kt->pfrkt_cnt : -1; 5769 } 5770 5771 static int 5772 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5773 size_t number, char **names) 5774 { 5775 nvlist_t *nvc; 5776 5777 nvc = nvlist_create(0); 5778 if (nvc == NULL) 5779 return (ENOMEM); 5780 5781 for (int i = 0; i < number; i++) { 5782 nvlist_append_number_array(nvc, "counters", 5783 counter_u64_fetch(counters[i])); 5784 nvlist_append_string_array(nvc, "names", 5785 names[i]); 5786 nvlist_append_number_array(nvc, "ids", 5787 i); 5788 } 5789 nvlist_add_nvlist(nvl, name, nvc); 5790 nvlist_destroy(nvc); 5791 5792 return (0); 5793 } 5794 5795 static int 5796 pf_getstatus(struct pfioc_nv *nv) 5797 { 5798 nvlist_t *nvl = NULL, *nvc = NULL; 5799 void *nvlpacked = NULL; 5800 int error; 5801 struct pf_status s; 5802 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5803 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5804 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5805 PF_RULES_RLOCK_TRACKER; 5806 5807 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5808 5809 PF_RULES_RLOCK(); 5810 5811 nvl = nvlist_create(0); 5812 if (nvl == NULL) 5813 ERROUT(ENOMEM); 5814 5815 nvlist_add_bool(nvl, "running", V_pf_status.running); 5816 nvlist_add_number(nvl, "since", V_pf_status.since); 5817 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5818 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5819 nvlist_add_number(nvl, "states", V_pf_status.states); 5820 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5821 5822 /* counters */ 5823 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 5824 PFRES_MAX, pf_reasons); 5825 if (error != 0) 5826 ERROUT(error); 5827 5828 /* lcounters */ 5829 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5830 KLCNT_MAX, pf_lcounter); 5831 if (error != 0) 5832 ERROUT(error); 5833 5834 /* fcounters */ 5835 nvc = nvlist_create(0); 5836 if (nvc == NULL) 5837 ERROUT(ENOMEM); 5838 5839 for (int i = 0; i < FCNT_MAX; i++) { 5840 nvlist_append_number_array(nvc, "counters", 5841 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5842 nvlist_append_string_array(nvc, "names", 5843 pf_fcounter[i]); 5844 nvlist_append_number_array(nvc, "ids", 5845 i); 5846 } 5847 nvlist_add_nvlist(nvl, "fcounters", nvc); 5848 nvlist_destroy(nvc); 5849 nvc = NULL; 5850 5851 /* scounters */ 5852 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5853 SCNT_MAX, pf_fcounter); 5854 if (error != 0) 5855 ERROUT(error); 5856 5857 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5858 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5859 PF_MD5_DIGEST_LENGTH); 5860 5861 pfi_update_status(V_pf_status.ifname, &s); 5862 5863 /* pcounters / bcounters */ 5864 for (int i = 0; i < 2; i++) { 5865 for (int j = 0; j < 2; j++) { 5866 for (int k = 0; k < 2; k++) { 5867 nvlist_append_number_array(nvl, "pcounters", 5868 s.pcounters[i][j][k]); 5869 } 5870 nvlist_append_number_array(nvl, "bcounters", 5871 s.bcounters[i][j]); 5872 } 5873 } 5874 5875 nvlpacked = nvlist_pack(nvl, &nv->len); 5876 if (nvlpacked == NULL) 5877 ERROUT(ENOMEM); 5878 5879 if (nv->size == 0) 5880 ERROUT(0); 5881 else if (nv->size < nv->len) 5882 ERROUT(ENOSPC); 5883 5884 PF_RULES_RUNLOCK(); 5885 error = copyout(nvlpacked, nv->data, nv->len); 5886 goto done; 5887 5888 #undef ERROUT 5889 errout: 5890 PF_RULES_RUNLOCK(); 5891 done: 5892 free(nvlpacked, M_NVLIST); 5893 nvlist_destroy(nvc); 5894 nvlist_destroy(nvl); 5895 5896 return (error); 5897 } 5898 5899 /* 5900 * XXX - Check for version missmatch!!! 5901 */ 5902 static void 5903 pf_clear_all_states(void) 5904 { 5905 struct pf_kstate *s; 5906 u_int i; 5907 5908 for (i = 0; i <= pf_hashmask; i++) { 5909 struct pf_idhash *ih = &V_pf_idhash[i]; 5910 relock: 5911 PF_HASHROW_LOCK(ih); 5912 LIST_FOREACH(s, &ih->states, entry) { 5913 s->timeout = PFTM_PURGE; 5914 /* Don't send out individual delete messages. */ 5915 s->state_flags |= PFSTATE_NOSYNC; 5916 pf_unlink_state(s); 5917 goto relock; 5918 } 5919 PF_HASHROW_UNLOCK(ih); 5920 } 5921 } 5922 5923 static int 5924 pf_clear_tables(void) 5925 { 5926 struct pfioc_table io; 5927 int error; 5928 5929 bzero(&io, sizeof(io)); 5930 5931 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5932 io.pfrio_flags); 5933 5934 return (error); 5935 } 5936 5937 static void 5938 pf_clear_srcnodes(struct pf_ksrc_node *n) 5939 { 5940 struct pf_kstate *s; 5941 int i; 5942 5943 for (i = 0; i <= pf_hashmask; i++) { 5944 struct pf_idhash *ih = &V_pf_idhash[i]; 5945 5946 PF_HASHROW_LOCK(ih); 5947 LIST_FOREACH(s, &ih->states, entry) { 5948 if (n == NULL || n == s->src_node) 5949 s->src_node = NULL; 5950 if (n == NULL || n == s->nat_src_node) 5951 s->nat_src_node = NULL; 5952 } 5953 PF_HASHROW_UNLOCK(ih); 5954 } 5955 5956 if (n == NULL) { 5957 struct pf_srchash *sh; 5958 5959 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5960 i++, sh++) { 5961 PF_HASHROW_LOCK(sh); 5962 LIST_FOREACH(n, &sh->nodes, entry) { 5963 n->expire = 1; 5964 n->states = 0; 5965 } 5966 PF_HASHROW_UNLOCK(sh); 5967 } 5968 } else { 5969 /* XXX: hash slot should already be locked here. */ 5970 n->expire = 1; 5971 n->states = 0; 5972 } 5973 } 5974 5975 static void 5976 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5977 { 5978 struct pf_ksrc_node_list kill; 5979 5980 LIST_INIT(&kill); 5981 for (int i = 0; i <= pf_srchashmask; i++) { 5982 struct pf_srchash *sh = &V_pf_srchash[i]; 5983 struct pf_ksrc_node *sn, *tmp; 5984 5985 PF_HASHROW_LOCK(sh); 5986 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5987 if (PF_MATCHA(psnk->psnk_src.neg, 5988 &psnk->psnk_src.addr.v.a.addr, 5989 &psnk->psnk_src.addr.v.a.mask, 5990 &sn->addr, sn->af) && 5991 PF_MATCHA(psnk->psnk_dst.neg, 5992 &psnk->psnk_dst.addr.v.a.addr, 5993 &psnk->psnk_dst.addr.v.a.mask, 5994 &sn->raddr, sn->af)) { 5995 pf_unlink_src_node(sn); 5996 LIST_INSERT_HEAD(&kill, sn, entry); 5997 sn->expire = 1; 5998 } 5999 PF_HASHROW_UNLOCK(sh); 6000 } 6001 6002 for (int i = 0; i <= pf_hashmask; i++) { 6003 struct pf_idhash *ih = &V_pf_idhash[i]; 6004 struct pf_kstate *s; 6005 6006 PF_HASHROW_LOCK(ih); 6007 LIST_FOREACH(s, &ih->states, entry) { 6008 if (s->src_node && s->src_node->expire == 1) 6009 s->src_node = NULL; 6010 if (s->nat_src_node && s->nat_src_node->expire == 1) 6011 s->nat_src_node = NULL; 6012 } 6013 PF_HASHROW_UNLOCK(ih); 6014 } 6015 6016 psnk->psnk_killed = pf_free_src_nodes(&kill); 6017 } 6018 6019 static int 6020 pf_keepcounters(struct pfioc_nv *nv) 6021 { 6022 nvlist_t *nvl = NULL; 6023 void *nvlpacked = NULL; 6024 int error = 0; 6025 6026 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6027 6028 if (nv->len > pf_ioctl_maxcount) 6029 ERROUT(ENOMEM); 6030 6031 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6032 if (nvlpacked == NULL) 6033 ERROUT(ENOMEM); 6034 6035 error = copyin(nv->data, nvlpacked, nv->len); 6036 if (error) 6037 ERROUT(error); 6038 6039 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6040 if (nvl == NULL) 6041 ERROUT(EBADMSG); 6042 6043 if (! nvlist_exists_bool(nvl, "keep_counters")) 6044 ERROUT(EBADMSG); 6045 6046 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 6047 6048 on_error: 6049 nvlist_destroy(nvl); 6050 free(nvlpacked, M_NVLIST); 6051 return (error); 6052 } 6053 6054 static unsigned int 6055 pf_clear_states(const struct pf_kstate_kill *kill) 6056 { 6057 struct pf_state_key_cmp match_key; 6058 struct pf_kstate *s; 6059 struct pfi_kkif *kif; 6060 int idx; 6061 unsigned int killed = 0, dir; 6062 6063 for (unsigned int i = 0; i <= pf_hashmask; i++) { 6064 struct pf_idhash *ih = &V_pf_idhash[i]; 6065 6066 relock_DIOCCLRSTATES: 6067 PF_HASHROW_LOCK(ih); 6068 LIST_FOREACH(s, &ih->states, entry) { 6069 /* For floating states look at the original kif. */ 6070 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 6071 6072 if (kill->psk_ifname[0] && 6073 strcmp(kill->psk_ifname, 6074 kif->pfik_name)) 6075 continue; 6076 6077 if (kill->psk_kill_match) { 6078 bzero(&match_key, sizeof(match_key)); 6079 6080 if (s->direction == PF_OUT) { 6081 dir = PF_IN; 6082 idx = PF_SK_STACK; 6083 } else { 6084 dir = PF_OUT; 6085 idx = PF_SK_WIRE; 6086 } 6087 6088 match_key.af = s->key[idx]->af; 6089 match_key.proto = s->key[idx]->proto; 6090 PF_ACPY(&match_key.addr[0], 6091 &s->key[idx]->addr[1], match_key.af); 6092 match_key.port[0] = s->key[idx]->port[1]; 6093 PF_ACPY(&match_key.addr[1], 6094 &s->key[idx]->addr[0], match_key.af); 6095 match_key.port[1] = s->key[idx]->port[0]; 6096 } 6097 6098 /* 6099 * Don't send out individual 6100 * delete messages. 6101 */ 6102 s->state_flags |= PFSTATE_NOSYNC; 6103 pf_unlink_state(s); 6104 killed++; 6105 6106 if (kill->psk_kill_match) 6107 killed += pf_kill_matching_state(&match_key, 6108 dir); 6109 6110 goto relock_DIOCCLRSTATES; 6111 } 6112 PF_HASHROW_UNLOCK(ih); 6113 } 6114 6115 if (V_pfsync_clear_states_ptr != NULL) 6116 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6117 6118 return (killed); 6119 } 6120 6121 static void 6122 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6123 { 6124 struct pf_kstate *s; 6125 6126 if (kill->psk_pfcmp.id) { 6127 if (kill->psk_pfcmp.creatorid == 0) 6128 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6129 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6130 kill->psk_pfcmp.creatorid))) { 6131 pf_unlink_state(s); 6132 *killed = 1; 6133 } 6134 return; 6135 } 6136 6137 for (unsigned int i = 0; i <= pf_hashmask; i++) 6138 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6139 6140 return; 6141 } 6142 6143 static int 6144 pf_killstates_nv(struct pfioc_nv *nv) 6145 { 6146 struct pf_kstate_kill kill; 6147 nvlist_t *nvl = NULL; 6148 void *nvlpacked = NULL; 6149 int error = 0; 6150 unsigned int killed = 0; 6151 6152 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6153 6154 if (nv->len > pf_ioctl_maxcount) 6155 ERROUT(ENOMEM); 6156 6157 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6158 if (nvlpacked == NULL) 6159 ERROUT(ENOMEM); 6160 6161 error = copyin(nv->data, nvlpacked, nv->len); 6162 if (error) 6163 ERROUT(error); 6164 6165 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6166 if (nvl == NULL) 6167 ERROUT(EBADMSG); 6168 6169 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6170 if (error) 6171 ERROUT(error); 6172 6173 pf_killstates(&kill, &killed); 6174 6175 free(nvlpacked, M_NVLIST); 6176 nvlpacked = NULL; 6177 nvlist_destroy(nvl); 6178 nvl = nvlist_create(0); 6179 if (nvl == NULL) 6180 ERROUT(ENOMEM); 6181 6182 nvlist_add_number(nvl, "killed", killed); 6183 6184 nvlpacked = nvlist_pack(nvl, &nv->len); 6185 if (nvlpacked == NULL) 6186 ERROUT(ENOMEM); 6187 6188 if (nv->size == 0) 6189 ERROUT(0); 6190 else if (nv->size < nv->len) 6191 ERROUT(ENOSPC); 6192 6193 error = copyout(nvlpacked, nv->data, nv->len); 6194 6195 on_error: 6196 nvlist_destroy(nvl); 6197 free(nvlpacked, M_NVLIST); 6198 return (error); 6199 } 6200 6201 static int 6202 pf_clearstates_nv(struct pfioc_nv *nv) 6203 { 6204 struct pf_kstate_kill kill; 6205 nvlist_t *nvl = NULL; 6206 void *nvlpacked = NULL; 6207 int error = 0; 6208 unsigned int killed; 6209 6210 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6211 6212 if (nv->len > pf_ioctl_maxcount) 6213 ERROUT(ENOMEM); 6214 6215 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6216 if (nvlpacked == NULL) 6217 ERROUT(ENOMEM); 6218 6219 error = copyin(nv->data, nvlpacked, nv->len); 6220 if (error) 6221 ERROUT(error); 6222 6223 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6224 if (nvl == NULL) 6225 ERROUT(EBADMSG); 6226 6227 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6228 if (error) 6229 ERROUT(error); 6230 6231 killed = pf_clear_states(&kill); 6232 6233 free(nvlpacked, M_NVLIST); 6234 nvlpacked = NULL; 6235 nvlist_destroy(nvl); 6236 nvl = nvlist_create(0); 6237 if (nvl == NULL) 6238 ERROUT(ENOMEM); 6239 6240 nvlist_add_number(nvl, "killed", killed); 6241 6242 nvlpacked = nvlist_pack(nvl, &nv->len); 6243 if (nvlpacked == NULL) 6244 ERROUT(ENOMEM); 6245 6246 if (nv->size == 0) 6247 ERROUT(0); 6248 else if (nv->size < nv->len) 6249 ERROUT(ENOSPC); 6250 6251 error = copyout(nvlpacked, nv->data, nv->len); 6252 6253 #undef ERROUT 6254 on_error: 6255 nvlist_destroy(nvl); 6256 free(nvlpacked, M_NVLIST); 6257 return (error); 6258 } 6259 6260 static int 6261 pf_getstate(struct pfioc_nv *nv) 6262 { 6263 nvlist_t *nvl = NULL, *nvls; 6264 void *nvlpacked = NULL; 6265 struct pf_kstate *s = NULL; 6266 int error = 0; 6267 uint64_t id, creatorid; 6268 6269 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 6270 6271 if (nv->len > pf_ioctl_maxcount) 6272 ERROUT(ENOMEM); 6273 6274 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6275 if (nvlpacked == NULL) 6276 ERROUT(ENOMEM); 6277 6278 error = copyin(nv->data, nvlpacked, nv->len); 6279 if (error) 6280 ERROUT(error); 6281 6282 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6283 if (nvl == NULL) 6284 ERROUT(EBADMSG); 6285 6286 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 6287 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 6288 6289 s = pf_find_state_byid(id, creatorid); 6290 if (s == NULL) 6291 ERROUT(ENOENT); 6292 6293 free(nvlpacked, M_NVLIST); 6294 nvlpacked = NULL; 6295 nvlist_destroy(nvl); 6296 nvl = nvlist_create(0); 6297 if (nvl == NULL) 6298 ERROUT(ENOMEM); 6299 6300 nvls = pf_state_to_nvstate(s); 6301 if (nvls == NULL) 6302 ERROUT(ENOMEM); 6303 6304 nvlist_add_nvlist(nvl, "state", nvls); 6305 nvlist_destroy(nvls); 6306 6307 nvlpacked = nvlist_pack(nvl, &nv->len); 6308 if (nvlpacked == NULL) 6309 ERROUT(ENOMEM); 6310 6311 if (nv->size == 0) 6312 ERROUT(0); 6313 else if (nv->size < nv->len) 6314 ERROUT(ENOSPC); 6315 6316 error = copyout(nvlpacked, nv->data, nv->len); 6317 6318 #undef ERROUT 6319 errout: 6320 if (s != NULL) 6321 PF_STATE_UNLOCK(s); 6322 free(nvlpacked, M_NVLIST); 6323 nvlist_destroy(nvl); 6324 return (error); 6325 } 6326 6327 /* 6328 * XXX - Check for version missmatch!!! 6329 */ 6330 6331 /* 6332 * Duplicate pfctl -Fa operation to get rid of as much as we can. 6333 */ 6334 static int 6335 shutdown_pf(void) 6336 { 6337 int error = 0; 6338 u_int32_t t[5]; 6339 char nn = '\0'; 6340 6341 do { 6342 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6343 != 0) { 6344 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6345 break; 6346 } 6347 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6348 != 0) { 6349 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6350 break; /* XXX: rollback? */ 6351 } 6352 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6353 != 0) { 6354 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6355 break; /* XXX: rollback? */ 6356 } 6357 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6358 != 0) { 6359 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6360 break; /* XXX: rollback? */ 6361 } 6362 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6363 != 0) { 6364 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6365 break; /* XXX: rollback? */ 6366 } 6367 6368 /* XXX: these should always succeed here */ 6369 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 6370 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 6371 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 6372 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 6373 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 6374 6375 if ((error = pf_clear_tables()) != 0) 6376 break; 6377 6378 if ((error = pf_begin_eth(&t[0], &nn)) != 0) { 6379 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); 6380 break; 6381 } 6382 pf_commit_eth(t[0], &nn); 6383 6384 #ifdef ALTQ 6385 if ((error = pf_begin_altq(&t[0])) != 0) { 6386 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 6387 break; 6388 } 6389 pf_commit_altq(t[0]); 6390 #endif 6391 6392 pf_clear_all_states(); 6393 6394 pf_clear_srcnodes(NULL); 6395 6396 /* status does not use malloced mem so no need to cleanup */ 6397 /* fingerprints and interfaces have their own cleanup code */ 6398 } while(0); 6399 6400 return (error); 6401 } 6402 6403 static pfil_return_t 6404 pf_check_return(int chk, struct mbuf **m) 6405 { 6406 6407 switch (chk) { 6408 case PF_PASS: 6409 if (*m == NULL) 6410 return (PFIL_CONSUMED); 6411 else 6412 return (PFIL_PASS); 6413 break; 6414 default: 6415 if (*m != NULL) { 6416 m_freem(*m); 6417 *m = NULL; 6418 } 6419 return (PFIL_DROPPED); 6420 } 6421 } 6422 6423 static pfil_return_t 6424 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6425 void *ruleset __unused, struct inpcb *inp) 6426 { 6427 int chk; 6428 6429 chk = pf_test_eth(PF_IN, flags, ifp, m, inp); 6430 6431 return (pf_check_return(chk, m)); 6432 } 6433 6434 static pfil_return_t 6435 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6436 void *ruleset __unused, struct inpcb *inp) 6437 { 6438 int chk; 6439 6440 chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); 6441 6442 return (pf_check_return(chk, m)); 6443 } 6444 6445 #ifdef INET 6446 static pfil_return_t 6447 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6448 void *ruleset __unused, struct inpcb *inp) 6449 { 6450 int chk; 6451 6452 chk = pf_test(PF_IN, flags, ifp, m, inp); 6453 6454 return (pf_check_return(chk, m)); 6455 } 6456 6457 static pfil_return_t 6458 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6459 void *ruleset __unused, struct inpcb *inp) 6460 { 6461 int chk; 6462 6463 chk = pf_test(PF_OUT, flags, ifp, m, inp); 6464 6465 return (pf_check_return(chk, m)); 6466 } 6467 #endif 6468 6469 #ifdef INET6 6470 static pfil_return_t 6471 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, 6472 void *ruleset __unused, struct inpcb *inp) 6473 { 6474 int chk; 6475 6476 /* 6477 * In case of loopback traffic IPv6 uses the real interface in 6478 * order to support scoped addresses. In order to support stateful 6479 * filtering we have change this to lo0 as it is the case in IPv4. 6480 */ 6481 CURVNET_SET(ifp->if_vnet); 6482 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp); 6483 CURVNET_RESTORE(); 6484 6485 return (pf_check_return(chk, m)); 6486 } 6487 6488 static pfil_return_t 6489 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, 6490 void *ruleset __unused, struct inpcb *inp) 6491 { 6492 int chk; 6493 6494 CURVNET_SET(ifp->if_vnet); 6495 chk = pf_test6(PF_OUT, flags, ifp, m, inp); 6496 CURVNET_RESTORE(); 6497 6498 return (pf_check_return(chk, m)); 6499 } 6500 #endif /* INET6 */ 6501 6502 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook); 6503 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook); 6504 #define V_pf_eth_in_hook VNET(pf_eth_in_hook) 6505 #define V_pf_eth_out_hook VNET(pf_eth_out_hook) 6506 6507 #ifdef INET 6508 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook); 6509 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook); 6510 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook) 6511 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook) 6512 #endif 6513 #ifdef INET6 6514 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook); 6515 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook); 6516 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook) 6517 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook) 6518 #endif 6519 6520 static void 6521 hook_pf_eth(void) 6522 { 6523 struct pfil_hook_args pha; 6524 struct pfil_link_args pla; 6525 int ret __diagused; 6526 6527 if (V_pf_pfil_eth_hooked) 6528 return; 6529 6530 pha.pa_version = PFIL_VERSION; 6531 pha.pa_modname = "pf"; 6532 pha.pa_ruleset = NULL; 6533 6534 pla.pa_version = PFIL_VERSION; 6535 6536 pha.pa_type = PFIL_TYPE_ETHERNET; 6537 pha.pa_func = pf_eth_check_in; 6538 pha.pa_flags = PFIL_IN; 6539 pha.pa_rulname = "eth-in"; 6540 V_pf_eth_in_hook = pfil_add_hook(&pha); 6541 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6542 pla.pa_head = V_link_pfil_head; 6543 pla.pa_hook = V_pf_eth_in_hook; 6544 ret = pfil_link(&pla); 6545 MPASS(ret == 0); 6546 pha.pa_func = pf_eth_check_out; 6547 pha.pa_flags = PFIL_OUT; 6548 pha.pa_rulname = "eth-out"; 6549 V_pf_eth_out_hook = pfil_add_hook(&pha); 6550 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6551 pla.pa_head = V_link_pfil_head; 6552 pla.pa_hook = V_pf_eth_out_hook; 6553 ret = pfil_link(&pla); 6554 MPASS(ret == 0); 6555 6556 V_pf_pfil_eth_hooked = 1; 6557 } 6558 6559 static void 6560 hook_pf(void) 6561 { 6562 struct pfil_hook_args pha; 6563 struct pfil_link_args pla; 6564 int ret __diagused; 6565 6566 if (V_pf_pfil_hooked) 6567 return; 6568 6569 pha.pa_version = PFIL_VERSION; 6570 pha.pa_modname = "pf"; 6571 pha.pa_ruleset = NULL; 6572 6573 pla.pa_version = PFIL_VERSION; 6574 6575 #ifdef INET 6576 pha.pa_type = PFIL_TYPE_IP4; 6577 pha.pa_func = pf_check_in; 6578 pha.pa_flags = PFIL_IN; 6579 pha.pa_rulname = "default-in"; 6580 V_pf_ip4_in_hook = pfil_add_hook(&pha); 6581 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6582 pla.pa_head = V_inet_pfil_head; 6583 pla.pa_hook = V_pf_ip4_in_hook; 6584 ret = pfil_link(&pla); 6585 MPASS(ret == 0); 6586 pha.pa_func = pf_check_out; 6587 pha.pa_flags = PFIL_OUT; 6588 pha.pa_rulname = "default-out"; 6589 V_pf_ip4_out_hook = pfil_add_hook(&pha); 6590 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6591 pla.pa_head = V_inet_pfil_head; 6592 pla.pa_hook = V_pf_ip4_out_hook; 6593 ret = pfil_link(&pla); 6594 MPASS(ret == 0); 6595 #endif 6596 #ifdef INET6 6597 pha.pa_type = PFIL_TYPE_IP6; 6598 pha.pa_func = pf_check6_in; 6599 pha.pa_flags = PFIL_IN; 6600 pha.pa_rulname = "default-in6"; 6601 V_pf_ip6_in_hook = pfil_add_hook(&pha); 6602 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6603 pla.pa_head = V_inet6_pfil_head; 6604 pla.pa_hook = V_pf_ip6_in_hook; 6605 ret = pfil_link(&pla); 6606 MPASS(ret == 0); 6607 pha.pa_func = pf_check6_out; 6608 pha.pa_rulname = "default-out6"; 6609 pha.pa_flags = PFIL_OUT; 6610 V_pf_ip6_out_hook = pfil_add_hook(&pha); 6611 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6612 pla.pa_head = V_inet6_pfil_head; 6613 pla.pa_hook = V_pf_ip6_out_hook; 6614 ret = pfil_link(&pla); 6615 MPASS(ret == 0); 6616 #endif 6617 6618 V_pf_pfil_hooked = 1; 6619 } 6620 6621 static void 6622 dehook_pf_eth(void) 6623 { 6624 6625 if (V_pf_pfil_eth_hooked == 0) 6626 return; 6627 6628 pfil_remove_hook(V_pf_eth_in_hook); 6629 pfil_remove_hook(V_pf_eth_out_hook); 6630 6631 V_pf_pfil_eth_hooked = 0; 6632 } 6633 6634 static void 6635 dehook_pf(void) 6636 { 6637 6638 if (V_pf_pfil_hooked == 0) 6639 return; 6640 6641 #ifdef INET 6642 pfil_remove_hook(V_pf_ip4_in_hook); 6643 pfil_remove_hook(V_pf_ip4_out_hook); 6644 #endif 6645 #ifdef INET6 6646 pfil_remove_hook(V_pf_ip6_in_hook); 6647 pfil_remove_hook(V_pf_ip6_out_hook); 6648 #endif 6649 6650 V_pf_pfil_hooked = 0; 6651 } 6652 6653 static void 6654 pf_load_vnet(void) 6655 { 6656 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname), 6657 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 6658 6659 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize, 6660 PF_RULE_TAG_HASH_SIZE_DEFAULT); 6661 #ifdef ALTQ 6662 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize, 6663 PF_QUEUE_TAG_HASH_SIZE_DEFAULT); 6664 #endif 6665 6666 V_pf_keth = &V_pf_main_keth_anchor.ruleset; 6667 6668 pfattach_vnet(); 6669 V_pf_vnet_active = 1; 6670 } 6671 6672 static int 6673 pf_load(void) 6674 { 6675 int error; 6676 6677 rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE); 6678 sx_init(&pf_ioctl_lock, "pf ioctl"); 6679 sx_init(&pf_end_lock, "pf end thread"); 6680 6681 pf_mtag_initialize(); 6682 6683 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME); 6684 if (pf_dev == NULL) 6685 return (ENOMEM); 6686 6687 pf_end_threads = 0; 6688 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge"); 6689 if (error != 0) 6690 return (error); 6691 6692 pfi_initialize(); 6693 6694 return (0); 6695 } 6696 6697 static void 6698 pf_unload_vnet(void) 6699 { 6700 int ret __diagused; 6701 6702 V_pf_vnet_active = 0; 6703 V_pf_status.running = 0; 6704 dehook_pf(); 6705 dehook_pf_eth(); 6706 6707 PF_RULES_WLOCK(); 6708 pf_syncookies_cleanup(); 6709 shutdown_pf(); 6710 PF_RULES_WUNLOCK(); 6711 6712 /* Make sure we've cleaned up ethernet rules before we continue. */ 6713 NET_EPOCH_DRAIN_CALLBACKS(); 6714 6715 ret = swi_remove(V_pf_swi_cookie); 6716 MPASS(ret == 0); 6717 ret = intr_event_destroy(V_pf_swi_ie); 6718 MPASS(ret == 0); 6719 6720 pf_unload_vnet_purge(); 6721 6722 pf_normalize_cleanup(); 6723 PF_RULES_WLOCK(); 6724 pfi_cleanup_vnet(); 6725 PF_RULES_WUNLOCK(); 6726 pfr_cleanup(); 6727 pf_osfp_flush(); 6728 pf_cleanup(); 6729 if (IS_DEFAULT_VNET(curvnet)) 6730 pf_mtag_cleanup(); 6731 6732 pf_cleanup_tagset(&V_pf_tags); 6733 #ifdef ALTQ 6734 pf_cleanup_tagset(&V_pf_qids); 6735 #endif 6736 uma_zdestroy(V_pf_tag_z); 6737 6738 #ifdef PF_WANT_32_TO_64_COUNTER 6739 PF_RULES_WLOCK(); 6740 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); 6741 6742 MPASS(LIST_EMPTY(&V_pf_allkiflist)); 6743 MPASS(V_pf_allkifcount == 0); 6744 6745 LIST_REMOVE(&V_pf_default_rule, allrulelist); 6746 V_pf_allrulecount--; 6747 LIST_REMOVE(V_pf_rulemarker, allrulelist); 6748 6749 /* 6750 * There are known pf rule leaks when running the test suite. 6751 */ 6752 #ifdef notyet 6753 MPASS(LIST_EMPTY(&V_pf_allrulelist)); 6754 MPASS(V_pf_allrulecount == 0); 6755 #endif 6756 6757 PF_RULES_WUNLOCK(); 6758 6759 free(V_pf_kifmarker, PFI_MTYPE); 6760 free(V_pf_rulemarker, M_PFRULE); 6761 #endif 6762 6763 /* Free counters last as we updated them during shutdown. */ 6764 pf_counter_u64_deinit(&V_pf_default_rule.evaluations); 6765 for (int i = 0; i < 2; i++) { 6766 pf_counter_u64_deinit(&V_pf_default_rule.packets[i]); 6767 pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]); 6768 } 6769 counter_u64_free(V_pf_default_rule.states_cur); 6770 counter_u64_free(V_pf_default_rule.states_tot); 6771 counter_u64_free(V_pf_default_rule.src_nodes); 6772 uma_zfree_pcpu(pcpu_zone_4, V_pf_default_rule.timestamp); 6773 6774 for (int i = 0; i < PFRES_MAX; i++) 6775 counter_u64_free(V_pf_status.counters[i]); 6776 for (int i = 0; i < KLCNT_MAX; i++) 6777 counter_u64_free(V_pf_status.lcounters[i]); 6778 for (int i = 0; i < FCNT_MAX; i++) 6779 pf_counter_u64_deinit(&V_pf_status.fcounters[i]); 6780 for (int i = 0; i < SCNT_MAX; i++) 6781 counter_u64_free(V_pf_status.scounters[i]); 6782 } 6783 6784 static void 6785 pf_unload(void) 6786 { 6787 6788 sx_xlock(&pf_end_lock); 6789 pf_end_threads = 1; 6790 while (pf_end_threads < 2) { 6791 wakeup_one(pf_purge_thread); 6792 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0); 6793 } 6794 sx_xunlock(&pf_end_lock); 6795 6796 if (pf_dev != NULL) 6797 destroy_dev(pf_dev); 6798 6799 pfi_cleanup(); 6800 6801 rm_destroy(&pf_rules_lock); 6802 sx_destroy(&pf_ioctl_lock); 6803 sx_destroy(&pf_end_lock); 6804 } 6805 6806 static void 6807 vnet_pf_init(void *unused __unused) 6808 { 6809 6810 pf_load_vnet(); 6811 } 6812 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6813 vnet_pf_init, NULL); 6814 6815 static void 6816 vnet_pf_uninit(const void *unused __unused) 6817 { 6818 6819 pf_unload_vnet(); 6820 } 6821 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL); 6822 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6823 vnet_pf_uninit, NULL); 6824 6825 static int 6826 pf_modevent(module_t mod, int type, void *data) 6827 { 6828 int error = 0; 6829 6830 switch(type) { 6831 case MOD_LOAD: 6832 error = pf_load(); 6833 break; 6834 case MOD_UNLOAD: 6835 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after 6836 * the vnet_pf_uninit()s */ 6837 break; 6838 default: 6839 error = EINVAL; 6840 break; 6841 } 6842 6843 return (error); 6844 } 6845 6846 static moduledata_t pf_mod = { 6847 "pf", 6848 pf_modevent, 6849 0 6850 }; 6851 6852 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND); 6853 MODULE_VERSION(pf, PF_MODVER); 6854