/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);
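
/*
 * Note: pf_default_rule is the per-VNET catch-all rule that applies when no
 * configured rule matches a packet; pfattach_vnet() below initializes it and
 * pins it so that it is never garbage collected.
 */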

static __inline int	 pf_krule_compare(struct pf_krule *,
			    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		 pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z	 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
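
/*
 * Naming note: pf_check_in()/pf_check_out() are the IPv4 hooks,
 * pf_check6_*() the IPv6 ones and pf_eth_check_*() the Ethernet-level
 * ones; as the comment above says, they are thin wrappers adapting the
 * pfil(9) calling convention for pf.
 */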
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
volatile VNET_DEFINE_STATIC(int, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock		pf_rules_lock;
struct sx		pf_ioctl_lock;
struct sx		pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
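 * For example, with sz == 4 a source of "abcd" leaves no room for the
 * terminator: strnlen(src, 4) == 4, so the copy is rejected with EINVAL
 * rather than silently truncated by strlcpy().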
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;
	V_pf_pfil_eth_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();
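
	/*
	 * Allocate the per-VNET status counters.  counter_u64 and
	 * pf_counter_u64 are per-CPU counter(9) objects, so the
	 * packet-processing hot paths can bump them without cache-line
	 * contention.
	 */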
	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.
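	 * (For example, if tags 1 and 2 are already in use, bits 0 and 1 of
	 * "avail" are clear, the first set bit is bit 2, BIT_FFS() returns 3
	 * and tag 3 is handed out.)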
	 * It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (rs->inactive.open)
		/* We may be waiting for NET_EPOCH_CALL(pf_rollback_eth_cb) to
		 * finish. */
		return (EBUSY);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used.
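	 * NET_EPOCH_CALL() runs pf_rollback_eth_cb() only after every thread
	 * currently inside a net epoch read section has left it, so readers
	 * still traversing the just swapped-out list remain safe.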
	 */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
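	/*
	 * The active/inactive swap below happens with the rules write lock
	 * held (asserted above), so no reader can observe a half-swapped
	 * queue configuration.
	 */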
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * When the discipline is no longer referenced, it has been overridden
	 * by a new one.  If so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq		*a1, *a2, *a3;
	u_int32_t		 ticket;
	int			 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
1087 */ 1088 if (!ALTQ_IS_READY(&ifp->if_snd)) 1089 return; 1090 1091 /* Interrupt userland queue modifications */ 1092 if (V_altqs_inactive_open) 1093 pf_rollback_altq(V_ticket_altqs_inactive); 1094 1095 /* Start new altq ruleset */ 1096 if (pf_begin_altq(&ticket)) 1097 return; 1098 1099 /* Copy the current active set */ 1100 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { 1101 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1102 if (a2 == NULL) { 1103 error = ENOMEM; 1104 break; 1105 } 1106 bcopy(a1, a2, sizeof(struct pf_altq)); 1107 1108 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1109 if (error) 1110 break; 1111 1112 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); 1113 } 1114 if (error) 1115 goto out; 1116 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { 1117 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1118 if (a2 == NULL) { 1119 error = ENOMEM; 1120 break; 1121 } 1122 bcopy(a1, a2, sizeof(struct pf_altq)); 1123 1124 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 1125 error = EBUSY; 1126 free(a2, M_PFALTQ); 1127 break; 1128 } 1129 a2->altq_disc = NULL; 1130 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { 1131 if (strncmp(a3->ifname, a2->ifname, 1132 IFNAMSIZ) == 0) { 1133 a2->altq_disc = a3->altq_disc; 1134 break; 1135 } 1136 } 1137 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1138 if (error) 1139 break; 1140 1141 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); 1142 } 1143 1144 out: 1145 if (error != 0) 1146 pf_rollback_altq(ticket); 1147 else 1148 pf_commit_altq(ticket); 1149 } 1150 #endif /* ALTQ */ 1151 1152 static int 1153 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1154 { 1155 struct pf_krule_global *tree; 1156 struct pf_kruleset *rs; 1157 struct pf_krule *rule; 1158 1159 PF_RULES_WASSERT(); 1160 1161 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1162 return (EINVAL); 1163 tree = malloc(sizeof(struct pf_krule_global), M_TEMP, M_NOWAIT); 1164 if (tree == NULL) 1165 return (ENOMEM); 1166 RB_INIT(tree); 1167 rs = pf_find_or_create_kruleset(anchor); 1168 if (rs == NULL) { 1169 free(tree, M_TEMP); 1170 return (EINVAL); 1171 } 1172 free(rs->rules[rs_num].inactive.tree, M_TEMP); 1173 rs->rules[rs_num].inactive.tree = tree; 1174 1175 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1176 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1177 rs->rules[rs_num].inactive.rcount--; 1178 } 1179 *ticket = ++rs->rules[rs_num].inactive.ticket; 1180 rs->rules[rs_num].inactive.open = 1; 1181 return (0); 1182 } 1183 1184 static int 1185 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 1186 { 1187 struct pf_kruleset *rs; 1188 struct pf_krule *rule; 1189 1190 PF_RULES_WASSERT(); 1191 1192 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1193 return (EINVAL); 1194 rs = pf_find_kruleset(anchor); 1195 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1196 rs->rules[rs_num].inactive.ticket != ticket) 1197 return (0); 1198 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1199 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1200 rs->rules[rs_num].inactive.rcount--; 1201 } 1202 rs->rules[rs_num].inactive.open = 0; 1203 return (0); 1204 } 1205 1206 #define PF_MD5_UPD(st, elm) \ 1207 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 1208 1209 #define PF_MD5_UPD_STR(st, elm) \ 1210 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 1211 1212 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 1213 (stor) = htonl((st)->elm); \ 1214 MD5Update(ctx, (u_int8_t *) &(stor), 
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global	*old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
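	/*
	 * As with the altq and Ethernet rulesets above, this is a
	 * ticket-guarded commit: the inactive queue built up by a transaction
	 * becomes active atomically under the writer lock, and the previous
	 * rules are handed to V_pf_unlinked_rules (via
	 * pf_unlink_rule_locked() below) for deferred cleanup.
	 */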
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	out->expire = in->expire;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	out->conn_rate = in->conn_rate;
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
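 * The layout is selected from ioc_size: struct pfioc_altq_v0 predates the
 * version field, so its size alone identifies version 0, while later
 * layouts carry an explicit version number.  Kernel values wider than the
 * old 16- and 32-bit fields are saturated (SATU16/SATU32 below) rather
 * than silently truncated.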
1530 */ 1531 static int 1532 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) 1533 { 1534 u_int32_t version; 1535 1536 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1537 version = 0; 1538 else 1539 version = pa->version; 1540 1541 if (version > PFIOC_ALTQ_VERSION) 1542 return (EINVAL); 1543 1544 #define ASSIGN(x) exported_q->x = q->x 1545 #define COPY(x) \ 1546 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) 1547 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) 1548 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) 1549 1550 switch (version) { 1551 case 0: { 1552 struct pf_altq_v0 *exported_q = 1553 &((struct pfioc_altq_v0 *)pa)->altq; 1554 1555 COPY(ifname); 1556 1557 ASSIGN(scheduler); 1558 ASSIGN(tbrsize); 1559 exported_q->tbrsize = SATU16(q->tbrsize); 1560 exported_q->ifbandwidth = SATU32(q->ifbandwidth); 1561 1562 COPY(qname); 1563 COPY(parent); 1564 ASSIGN(parent_qid); 1565 exported_q->bandwidth = SATU32(q->bandwidth); 1566 ASSIGN(priority); 1567 ASSIGN(local_flags); 1568 1569 ASSIGN(qlimit); 1570 ASSIGN(flags); 1571 1572 if (q->scheduler == ALTQT_HFSC) { 1573 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x 1574 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ 1575 SATU32(q->pq_u.hfsc_opts.x) 1576 1577 ASSIGN_OPT_SATU32(rtsc_m1); 1578 ASSIGN_OPT(rtsc_d); 1579 ASSIGN_OPT_SATU32(rtsc_m2); 1580 1581 ASSIGN_OPT_SATU32(lssc_m1); 1582 ASSIGN_OPT(lssc_d); 1583 ASSIGN_OPT_SATU32(lssc_m2); 1584 1585 ASSIGN_OPT_SATU32(ulsc_m1); 1586 ASSIGN_OPT(ulsc_d); 1587 ASSIGN_OPT_SATU32(ulsc_m2); 1588 1589 ASSIGN_OPT(flags); 1590 1591 #undef ASSIGN_OPT 1592 #undef ASSIGN_OPT_SATU32 1593 } else 1594 COPY(pq_u); 1595 1596 ASSIGN(qid); 1597 break; 1598 } 1599 case 1: { 1600 struct pf_altq_v1 *exported_q = 1601 &((struct pfioc_altq_v1 *)pa)->altq; 1602 1603 COPY(ifname); 1604 1605 ASSIGN(scheduler); 1606 ASSIGN(tbrsize); 1607 ASSIGN(ifbandwidth); 1608 1609 COPY(qname); 1610 COPY(parent); 1611 ASSIGN(parent_qid); 1612 ASSIGN(bandwidth); 1613 ASSIGN(priority); 1614 ASSIGN(local_flags); 1615 1616 ASSIGN(qlimit); 1617 ASSIGN(flags); 1618 COPY(pq_u); 1619 1620 ASSIGN(qid); 1621 break; 1622 } 1623 default: 1624 panic("%s: unhandled struct pfioc_altq version", __func__); 1625 break; 1626 } 1627 1628 #undef ASSIGN 1629 #undef COPY 1630 #undef SATU16 1631 #undef SATU32 1632 1633 return (0); 1634 } 1635 1636 /* 1637 * Handle import to struct pf_kaltq of struct pf_altq from user binaries 1638 * that may be using any version of it. 
1639 */ 1640 static int 1641 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1642 { 1643 u_int32_t version; 1644 1645 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1646 version = 0; 1647 else 1648 version = pa->version; 1649 1650 if (version > PFIOC_ALTQ_VERSION) 1651 return (EINVAL); 1652 1653 #define ASSIGN(x) q->x = imported_q->x 1654 #define COPY(x) \ 1655 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1656 1657 switch (version) { 1658 case 0: { 1659 struct pf_altq_v0 *imported_q = 1660 &((struct pfioc_altq_v0 *)pa)->altq; 1661 1662 COPY(ifname); 1663 1664 ASSIGN(scheduler); 1665 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1666 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1667 1668 COPY(qname); 1669 COPY(parent); 1670 ASSIGN(parent_qid); 1671 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1672 ASSIGN(priority); 1673 ASSIGN(local_flags); 1674 1675 ASSIGN(qlimit); 1676 ASSIGN(flags); 1677 1678 if (imported_q->scheduler == ALTQT_HFSC) { 1679 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1680 1681 /* 1682 * The m1 and m2 parameters are being copied from 1683 * 32-bit to 64-bit. 1684 */ 1685 ASSIGN_OPT(rtsc_m1); 1686 ASSIGN_OPT(rtsc_d); 1687 ASSIGN_OPT(rtsc_m2); 1688 1689 ASSIGN_OPT(lssc_m1); 1690 ASSIGN_OPT(lssc_d); 1691 ASSIGN_OPT(lssc_m2); 1692 1693 ASSIGN_OPT(ulsc_m1); 1694 ASSIGN_OPT(ulsc_d); 1695 ASSIGN_OPT(ulsc_m2); 1696 1697 ASSIGN_OPT(flags); 1698 1699 #undef ASSIGN_OPT 1700 } else 1701 COPY(pq_u); 1702 1703 ASSIGN(qid); 1704 break; 1705 } 1706 case 1: { 1707 struct pf_altq_v1 *imported_q = 1708 &((struct pfioc_altq_v1 *)pa)->altq; 1709 1710 COPY(ifname); 1711 1712 ASSIGN(scheduler); 1713 ASSIGN(tbrsize); 1714 ASSIGN(ifbandwidth); 1715 1716 COPY(qname); 1717 COPY(parent); 1718 ASSIGN(parent_qid); 1719 ASSIGN(bandwidth); 1720 ASSIGN(priority); 1721 ASSIGN(local_flags); 1722 1723 ASSIGN(qlimit); 1724 ASSIGN(flags); 1725 COPY(pq_u); 1726 1727 ASSIGN(qid); 1728 break; 1729 } 1730 default: 1731 panic("%s: unhandled struct pfioc_altq version", __func__); 1732 break; 1733 } 1734 1735 #undef ASSIGN 1736 #undef COPY 1737 1738 return (0); 1739 } 1740 1741 static struct pf_altq * 1742 pf_altq_get_nth_active(u_int32_t n) 1743 { 1744 struct pf_altq *altq; 1745 u_int32_t nr; 1746 1747 nr = 0; 1748 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1749 if (nr == n) 1750 return (altq); 1751 nr++; 1752 } 1753 1754 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1755 if (nr == n) 1756 return (altq); 1757 nr++; 1758 } 1759 1760 return (NULL); 1761 } 1762 #endif /* ALTQ */ 1763 1764 struct pf_krule * 1765 pf_krule_alloc(void) 1766 { 1767 struct pf_krule *rule; 1768 1769 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1770 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1771 return (rule); 1772 } 1773 1774 void 1775 pf_krule_free(struct pf_krule *rule) 1776 { 1777 #ifdef PF_WANT_32_TO_64_COUNTER 1778 bool wowned; 1779 #endif 1780 1781 if (rule == NULL) 1782 return; 1783 1784 #ifdef PF_WANT_32_TO_64_COUNTER 1785 if (rule->allrulelinked) { 1786 wowned = PF_RULES_WOWNED(); 1787 if (!wowned) 1788 PF_RULES_WLOCK(); 1789 LIST_REMOVE(rule, allrulelist); 1790 V_pf_allrulecount--; 1791 if (!wowned) 1792 PF_RULES_WUNLOCK(); 1793 } 1794 #endif 1795 1796 pf_counter_u64_deinit(&rule->evaluations); 1797 for (int i = 0; i < 2; i++) { 1798 pf_counter_u64_deinit(&rule->packets[i]); 1799 pf_counter_u64_deinit(&rule->bytes[i]); 1800 } 1801 counter_u64_free(rule->states_cur); 1802 
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */
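	/*
	 * (Those are kernel-internal pointers; user space instead sees the
	 * corresponding names, e.g. ifname and overload_tblname above.)
	 */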
*/ 1896 1897 rule->os_fingerprint = krule->os_fingerprint; 1898 1899 rule->rtableid = krule->rtableid; 1900 bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout)); 1901 rule->max_states = krule->max_states; 1902 rule->max_src_nodes = krule->max_src_nodes; 1903 rule->max_src_states = krule->max_src_states; 1904 rule->max_src_conn = krule->max_src_conn; 1905 rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit; 1906 rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds; 1907 rule->qid = krule->qid; 1908 rule->pqid = krule->pqid; 1909 rule->nr = krule->nr; 1910 rule->prob = krule->prob; 1911 rule->cuid = krule->cuid; 1912 rule->cpid = krule->cpid; 1913 1914 rule->return_icmp = krule->return_icmp; 1915 rule->return_icmp6 = krule->return_icmp6; 1916 rule->max_mss = krule->max_mss; 1917 rule->tag = krule->tag; 1918 rule->match_tag = krule->match_tag; 1919 rule->scrub_flags = krule->scrub_flags; 1920 1921 bcopy(&krule->uid, &rule->uid, sizeof(krule->uid)); 1922 bcopy(&krule->gid, &rule->gid, sizeof(krule->gid)); 1923 1924 rule->rule_flag = krule->rule_flag; 1925 rule->action = krule->action; 1926 rule->direction = krule->direction; 1927 rule->log = krule->log; 1928 rule->logif = krule->logif; 1929 rule->quick = krule->quick; 1930 rule->ifnot = krule->ifnot; 1931 rule->match_tag_not = krule->match_tag_not; 1932 rule->natpass = krule->natpass; 1933 1934 rule->keep_state = krule->keep_state; 1935 rule->af = krule->af; 1936 rule->proto = krule->proto; 1937 rule->type = krule->type; 1938 rule->code = krule->code; 1939 rule->flags = krule->flags; 1940 rule->flagset = krule->flagset; 1941 rule->min_ttl = krule->min_ttl; 1942 rule->allow_opts = krule->allow_opts; 1943 rule->rt = krule->rt; 1944 rule->return_ttl = krule->return_ttl; 1945 rule->tos = krule->tos; 1946 rule->set_tos = krule->set_tos; 1947 rule->anchor_relative = krule->anchor_relative; 1948 rule->anchor_wildcard = krule->anchor_wildcard; 1949 1950 rule->flush = krule->flush; 1951 rule->prio = krule->prio; 1952 rule->set_prio[0] = krule->set_prio[0]; 1953 rule->set_prio[1] = krule->set_prio[1]; 1954 1955 bcopy(&krule->divert, &rule->divert, sizeof(krule->divert)); 1956 1957 rule->u_states_cur = counter_u64_fetch(krule->states_cur); 1958 rule->u_states_tot = counter_u64_fetch(krule->states_tot); 1959 rule->u_src_nodes = counter_u64_fetch(krule->src_nodes); 1960 } 1961 1962 static int 1963 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule) 1964 { 1965 int ret; 1966 1967 #ifndef INET 1968 if (rule->af == AF_INET) { 1969 return (EAFNOSUPPORT); 1970 } 1971 #endif /* INET */ 1972 #ifndef INET6 1973 if (rule->af == AF_INET6) { 1974 return (EAFNOSUPPORT); 1975 } 1976 #endif /* INET6 */ 1977 1978 ret = pf_check_rule_addr(&rule->src); 1979 if (ret != 0) 1980 return (ret); 1981 ret = pf_check_rule_addr(&rule->dst); 1982 if (ret != 0) 1983 return (ret); 1984 1985 bcopy(&rule->src, &krule->src, sizeof(rule->src)); 1986 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); 1987 1988 ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label)); 1989 if (ret != 0) 1990 return (ret); 1991 ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); 1992 if (ret != 0) 1993 return (ret); 1994 ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); 1995 if (ret != 0) 1996 return (ret); 1997 ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); 1998 if (ret != 0) 1999 return (ret); 2000 ret = pf_user_strcpy(krule->tagname, rule->tagname, 2001 
sizeof(rule->tagname)); 2002 if (ret != 0) 2003 return (ret); 2004 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 2005 sizeof(rule->match_tagname)); 2006 if (ret != 0) 2007 return (ret); 2008 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 2009 sizeof(rule->overload_tblname)); 2010 if (ret != 0) 2011 return (ret); 2012 2013 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 2014 2015 /* Don't allow userspace to set evaluations, packets or bytes. */ 2016 /* kif, anchor, overload_tbl are not copied over. */ 2017 2018 krule->os_fingerprint = rule->os_fingerprint; 2019 2020 krule->rtableid = rule->rtableid; 2021 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 2022 krule->max_states = rule->max_states; 2023 krule->max_src_nodes = rule->max_src_nodes; 2024 krule->max_src_states = rule->max_src_states; 2025 krule->max_src_conn = rule->max_src_conn; 2026 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 2027 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 2028 krule->qid = rule->qid; 2029 krule->pqid = rule->pqid; 2030 krule->nr = rule->nr; 2031 krule->prob = rule->prob; 2032 krule->cuid = rule->cuid; 2033 krule->cpid = rule->cpid; 2034 2035 krule->return_icmp = rule->return_icmp; 2036 krule->return_icmp6 = rule->return_icmp6; 2037 krule->max_mss = rule->max_mss; 2038 krule->tag = rule->tag; 2039 krule->match_tag = rule->match_tag; 2040 krule->scrub_flags = rule->scrub_flags; 2041 2042 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2043 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2044 2045 krule->rule_flag = rule->rule_flag; 2046 krule->action = rule->action; 2047 krule->direction = rule->direction; 2048 krule->log = rule->log; 2049 krule->logif = rule->logif; 2050 krule->quick = rule->quick; 2051 krule->ifnot = rule->ifnot; 2052 krule->match_tag_not = rule->match_tag_not; 2053 krule->natpass = rule->natpass; 2054 2055 krule->keep_state = rule->keep_state; 2056 krule->af = rule->af; 2057 krule->proto = rule->proto; 2058 krule->type = rule->type; 2059 krule->code = rule->code; 2060 krule->flags = rule->flags; 2061 krule->flagset = rule->flagset; 2062 krule->min_ttl = rule->min_ttl; 2063 krule->allow_opts = rule->allow_opts; 2064 krule->rt = rule->rt; 2065 krule->return_ttl = rule->return_ttl; 2066 krule->tos = rule->tos; 2067 krule->set_tos = rule->set_tos; 2068 2069 krule->flush = rule->flush; 2070 krule->prio = rule->prio; 2071 krule->set_prio[0] = rule->set_prio[0]; 2072 krule->set_prio[1] = rule->set_prio[1]; 2073 2074 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2075 2076 return (0); 2077 } 2078 2079 static int 2080 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk, 2081 struct pf_kstate_kill *kill) 2082 { 2083 int ret; 2084 2085 bzero(kill, sizeof(*kill)); 2086 2087 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp)); 2088 kill->psk_af = psk->psk_af; 2089 kill->psk_proto = psk->psk_proto; 2090 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src)); 2091 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst)); 2092 ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname, 2093 sizeof(kill->psk_ifname)); 2094 if (ret != 0) 2095 return (ret); 2096 ret = pf_user_strcpy(kill->psk_label, psk->psk_label, 2097 sizeof(kill->psk_label)); 2098 if (ret != 0) 2099 return (ret); 2100 2101 return (0); 2102 } 2103 2104 static int 2105 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2106 uint32_t pool_ticket, const char *anchor, const char *anchor_call, 
2107 struct thread *td) 2108 { 2109 struct pf_kruleset *ruleset; 2110 struct pf_krule *tail; 2111 struct pf_kpooladdr *pa; 2112 struct pfi_kkif *kif = NULL; 2113 int rs_num; 2114 int error = 0; 2115 2116 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2117 error = EINVAL; 2118 goto errout_unlocked; 2119 } 2120 2121 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2122 2123 if (rule->ifname[0]) 2124 kif = pf_kkif_create(M_WAITOK); 2125 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2126 for (int i = 0; i < 2; i++) { 2127 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2128 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2129 } 2130 rule->states_cur = counter_u64_alloc(M_WAITOK); 2131 rule->states_tot = counter_u64_alloc(M_WAITOK); 2132 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2133 rule->cuid = td->td_ucred->cr_ruid; 2134 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2135 TAILQ_INIT(&rule->rpool.list); 2136 2137 PF_CONFIG_LOCK(); 2138 PF_RULES_WLOCK(); 2139 #ifdef PF_WANT_32_TO_64_COUNTER 2140 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2141 MPASS(!rule->allrulelinked); 2142 rule->allrulelinked = true; 2143 V_pf_allrulecount++; 2144 #endif 2145 ruleset = pf_find_kruleset(anchor); 2146 if (ruleset == NULL) 2147 ERROUT(EINVAL); 2148 rs_num = pf_get_ruleset_number(rule->action); 2149 if (rs_num >= PF_RULESET_MAX) 2150 ERROUT(EINVAL); 2151 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2152 DPFPRINTF(PF_DEBUG_MISC, 2153 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2154 ruleset->rules[rs_num].inactive.ticket)); 2155 ERROUT(EBUSY); 2156 } 2157 if (pool_ticket != V_ticket_pabuf) { 2158 DPFPRINTF(PF_DEBUG_MISC, 2159 ("pool_ticket: %d != %d\n", pool_ticket, 2160 V_ticket_pabuf)); 2161 ERROUT(EBUSY); 2162 } 2163 /* 2164 * XXXMJG hack: there is no mechanism to ensure they started the 2165 * transaction. Ticket checked above may happen to match by accident, 2166 * even if nobody called DIOCXBEGIN, let alone this process. 2167 * Partially work around it by checking if the RB tree got allocated, 2168 * see pf_begin_rules. 
2169 */ 2170 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2171 ERROUT(EINVAL); 2172 } 2173 2174 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2175 pf_krulequeue); 2176 if (tail) 2177 rule->nr = tail->nr + 1; 2178 else 2179 rule->nr = 0; 2180 if (rule->ifname[0]) { 2181 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2182 kif = NULL; 2183 pfi_kkif_ref(rule->kif); 2184 } else 2185 rule->kif = NULL; 2186 2187 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2188 error = EBUSY; 2189 2190 #ifdef ALTQ 2191 /* set queue IDs */ 2192 if (rule->qname[0] != 0) { 2193 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2194 error = EBUSY; 2195 else if (rule->pqname[0] != 0) { 2196 if ((rule->pqid = 2197 pf_qname2qid(rule->pqname)) == 0) 2198 error = EBUSY; 2199 } else 2200 rule->pqid = rule->qid; 2201 } 2202 #endif 2203 if (rule->tagname[0]) 2204 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2205 error = EBUSY; 2206 if (rule->match_tagname[0]) 2207 if ((rule->match_tag = 2208 pf_tagname2tag(rule->match_tagname)) == 0) 2209 error = EBUSY; 2210 if (rule->rt && !rule->direction) 2211 error = EINVAL; 2212 if (!rule->log) 2213 rule->logif = 0; 2214 if (rule->logif >= PFLOGIFS_MAX) 2215 error = EINVAL; 2216 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2217 error = ENOMEM; 2218 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2219 error = ENOMEM; 2220 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2221 error = EINVAL; 2222 if (rule->scrub_flags & PFSTATE_SETPRIO && 2223 (rule->set_prio[0] > PF_PRIO_MAX || 2224 rule->set_prio[1] > PF_PRIO_MAX)) 2225 error = EINVAL; 2226 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2227 if (pa->addr.type == PF_ADDR_TABLE) { 2228 pa->addr.p.tbl = pfr_attach_table(ruleset, 2229 pa->addr.v.tblname); 2230 if (pa->addr.p.tbl == NULL) 2231 error = ENOMEM; 2232 } 2233 2234 rule->overload_tbl = NULL; 2235 if (rule->overload_tblname[0]) { 2236 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2237 rule->overload_tblname)) == NULL) 2238 error = EINVAL; 2239 else 2240 rule->overload_tbl->pfrkt_flags |= 2241 PFR_TFLAG_ACTIVE; 2242 } 2243 2244 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2245 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2246 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2247 (rule->rt > PF_NOPFROUTE)) && 2248 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2249 error = EINVAL; 2250 2251 if (error) { 2252 pf_free_rule(rule); 2253 rule = NULL; 2254 ERROUT(error); 2255 } 2256 2257 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2258 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2259 rule, entries); 2260 ruleset->rules[rs_num].inactive.rcount++; 2261 2262 PF_RULES_WUNLOCK(); 2263 pf_hash_rule(rule); 2264 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2265 PF_RULES_WLOCK(); 2266 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2267 ruleset->rules[rs_num].inactive.rcount--; 2268 pf_free_rule(rule); 2269 rule = NULL; 2270 ERROUT(EEXIST); 2271 } 2272 PF_CONFIG_UNLOCK(); 2273 2274 return (0); 2275 2276 #undef ERROUT 2277 errout: 2278 PF_RULES_WUNLOCK(); 2279 PF_CONFIG_UNLOCK(); 2280 errout_unlocked: 2281 pf_kkif_free(kif); 2282 pf_krule_free(rule); 2283 return (error); 2284 } 2285 2286 static bool 2287 pf_label_match(const struct pf_krule *rule, const char *label) 2288 { 2289 int i = 0; 2290 2291 while (*rule->label[i]) { 2292 if (strcmp(rule->label[i], label) == 0) 2293 return (true); 2294 i++; 2295 } 2296 2297 return (false); 2298 } 2299 2300 
static unsigned int 2301 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2302 { 2303 struct pf_kstate *s; 2304 int more = 0; 2305 2306 s = pf_find_state_all(key, dir, &more); 2307 if (s == NULL) 2308 return (0); 2309 2310 if (more) { 2311 PF_STATE_UNLOCK(s); 2312 return (0); 2313 } 2314 2315 pf_unlink_state(s); 2316 return (1); 2317 } 2318 2319 static int 2320 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2321 { 2322 struct pf_kstate *s; 2323 struct pf_state_key *sk; 2324 struct pf_addr *srcaddr, *dstaddr; 2325 struct pf_state_key_cmp match_key; 2326 int idx, killed = 0; 2327 unsigned int dir; 2328 u_int16_t srcport, dstport; 2329 struct pfi_kkif *kif; 2330 2331 relock_DIOCKILLSTATES: 2332 PF_HASHROW_LOCK(ih); 2333 LIST_FOREACH(s, &ih->states, entry) { 2334 /* For floating states look at the original kif. */ 2335 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2336 2337 sk = s->key[PF_SK_WIRE]; 2338 if (s->direction == PF_OUT) { 2339 srcaddr = &sk->addr[1]; 2340 dstaddr = &sk->addr[0]; 2341 srcport = sk->port[1]; 2342 dstport = sk->port[0]; 2343 } else { 2344 srcaddr = &sk->addr[0]; 2345 dstaddr = &sk->addr[1]; 2346 srcport = sk->port[0]; 2347 dstport = sk->port[1]; 2348 } 2349 2350 if (psk->psk_af && sk->af != psk->psk_af) 2351 continue; 2352 2353 if (psk->psk_proto && psk->psk_proto != sk->proto) 2354 continue; 2355 2356 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2357 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2358 continue; 2359 2360 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2361 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2362 continue; 2363 2364 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2365 &psk->psk_rt_addr.addr.v.a.addr, 2366 &psk->psk_rt_addr.addr.v.a.mask, 2367 &s->rt_addr, sk->af)) 2368 continue; 2369 2370 if (psk->psk_src.port_op != 0 && 2371 ! pf_match_port(psk->psk_src.port_op, 2372 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2373 continue; 2374 2375 if (psk->psk_dst.port_op != 0 && 2376 ! pf_match_port(psk->psk_dst.port_op, 2377 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2378 continue; 2379 2380 if (psk->psk_label[0] && 2381 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2382 continue; 2383 2384 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2385 kif->pfik_name)) 2386 continue; 2387 2388 if (psk->psk_kill_match) { 2389 /* Create the key to find matching states, with lock 2390 * held. 
*/ 2391 2392 bzero(&match_key, sizeof(match_key)); 2393 2394 if (s->direction == PF_OUT) { 2395 dir = PF_IN; 2396 idx = PF_SK_STACK; 2397 } else { 2398 dir = PF_OUT; 2399 idx = PF_SK_WIRE; 2400 } 2401 2402 match_key.af = s->key[idx]->af; 2403 match_key.proto = s->key[idx]->proto; 2404 PF_ACPY(&match_key.addr[0], 2405 &s->key[idx]->addr[1], match_key.af); 2406 match_key.port[0] = s->key[idx]->port[1]; 2407 PF_ACPY(&match_key.addr[1], 2408 &s->key[idx]->addr[0], match_key.af); 2409 match_key.port[1] = s->key[idx]->port[0]; 2410 } 2411 2412 pf_unlink_state(s); 2413 killed++; 2414 2415 if (psk->psk_kill_match) 2416 killed += pf_kill_matching_state(&match_key, dir); 2417 2418 goto relock_DIOCKILLSTATES; 2419 } 2420 PF_HASHROW_UNLOCK(ih); 2421 2422 return (killed); 2423 } 2424 2425 static int 2426 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2427 { 2428 int error = 0; 2429 PF_RULES_RLOCK_TRACKER; 2430 2431 #define ERROUT_IOCTL(target, x) \ 2432 do { \ 2433 error = (x); \ 2434 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2435 goto target; \ 2436 } while (0) 2437 2438 2439 /* XXX keep in sync with switch() below */ 2440 if (securelevel_gt(td->td_ucred, 2)) 2441 switch (cmd) { 2442 case DIOCGETRULES: 2443 case DIOCGETRULE: 2444 case DIOCGETRULENV: 2445 case DIOCGETADDRS: 2446 case DIOCGETADDR: 2447 case DIOCGETSTATE: 2448 case DIOCGETSTATENV: 2449 case DIOCSETSTATUSIF: 2450 case DIOCGETSTATUS: 2451 case DIOCGETSTATUSNV: 2452 case DIOCCLRSTATUS: 2453 case DIOCNATLOOK: 2454 case DIOCSETDEBUG: 2455 case DIOCGETSTATES: 2456 case DIOCGETSTATESV2: 2457 case DIOCGETTIMEOUT: 2458 case DIOCCLRRULECTRS: 2459 case DIOCGETLIMIT: 2460 case DIOCGETALTQSV0: 2461 case DIOCGETALTQSV1: 2462 case DIOCGETALTQV0: 2463 case DIOCGETALTQV1: 2464 case DIOCGETQSTATSV0: 2465 case DIOCGETQSTATSV1: 2466 case DIOCGETRULESETS: 2467 case DIOCGETRULESET: 2468 case DIOCRGETTABLES: 2469 case DIOCRGETTSTATS: 2470 case DIOCRCLRTSTATS: 2471 case DIOCRCLRADDRS: 2472 case DIOCRADDADDRS: 2473 case DIOCRDELADDRS: 2474 case DIOCRSETADDRS: 2475 case DIOCRGETADDRS: 2476 case DIOCRGETASTATS: 2477 case DIOCRCLRASTATS: 2478 case DIOCRTSTADDRS: 2479 case DIOCOSFPGET: 2480 case DIOCGETSRCNODES: 2481 case DIOCCLRSRCNODES: 2482 case DIOCGETSYNCOOKIES: 2483 case DIOCIGETIFACES: 2484 case DIOCGIFSPEEDV0: 2485 case DIOCGIFSPEEDV1: 2486 case DIOCSETIFFLAG: 2487 case DIOCCLRIFFLAG: 2488 case DIOCGETETHRULES: 2489 case DIOCGETETHRULE: 2490 case DIOCGETETHRULESETS: 2491 case DIOCGETETHRULESET: 2492 break; 2493 case DIOCRCLRTABLES: 2494 case DIOCRADDTABLES: 2495 case DIOCRDELTABLES: 2496 case DIOCRSETTFLAGS: 2497 if (((struct pfioc_table *)addr)->pfrio_flags & 2498 PFR_FLAG_DUMMY) 2499 break; /* dummy operation ok */ 2500 return (EPERM); 2501 default: 2502 return (EPERM); 2503 } 2504 2505 if (!(flags & FWRITE)) 2506 switch (cmd) { 2507 case DIOCGETRULES: 2508 case DIOCGETADDRS: 2509 case DIOCGETADDR: 2510 case DIOCGETSTATE: 2511 case DIOCGETSTATENV: 2512 case DIOCGETSTATUS: 2513 case DIOCGETSTATUSNV: 2514 case DIOCGETSTATES: 2515 case DIOCGETSTATESV2: 2516 case DIOCGETTIMEOUT: 2517 case DIOCGETLIMIT: 2518 case DIOCGETALTQSV0: 2519 case DIOCGETALTQSV1: 2520 case DIOCGETALTQV0: 2521 case DIOCGETALTQV1: 2522 case DIOCGETQSTATSV0: 2523 case DIOCGETQSTATSV1: 2524 case DIOCGETRULESETS: 2525 case DIOCGETRULESET: 2526 case DIOCNATLOOK: 2527 case DIOCRGETTABLES: 2528 case DIOCRGETTSTATS: 2529 case DIOCRGETADDRS: 2530 case DIOCRGETASTATS: 2531 case DIOCRTSTADDRS: 2532 case DIOCOSFPGET: 2533 case DIOCGETSRCNODES: 
2534 case DIOCGETSYNCOOKIES: 2535 case DIOCIGETIFACES: 2536 case DIOCGIFSPEEDV1: 2537 case DIOCGIFSPEEDV0: 2538 case DIOCGETRULENV: 2539 case DIOCGETETHRULES: 2540 case DIOCGETETHRULE: 2541 case DIOCGETETHRULESETS: 2542 case DIOCGETETHRULESET: 2543 break; 2544 case DIOCRCLRTABLES: 2545 case DIOCRADDTABLES: 2546 case DIOCRDELTABLES: 2547 case DIOCRCLRTSTATS: 2548 case DIOCRCLRADDRS: 2549 case DIOCRADDADDRS: 2550 case DIOCRDELADDRS: 2551 case DIOCRSETADDRS: 2552 case DIOCRSETTFLAGS: 2553 if (((struct pfioc_table *)addr)->pfrio_flags & 2554 PFR_FLAG_DUMMY) { 2555 flags |= FWRITE; /* need write lock for dummy */ 2556 break; /* dummy operation ok */ 2557 } 2558 return (EACCES); 2559 case DIOCGETRULE: 2560 if (((struct pfioc_rule *)addr)->action == 2561 PF_GET_CLR_CNTR) 2562 return (EACCES); 2563 break; 2564 default: 2565 return (EACCES); 2566 } 2567 2568 CURVNET_SET(TD_TO_VNET(td)); 2569 2570 switch (cmd) { 2571 case DIOCSTART: 2572 sx_xlock(&pf_ioctl_lock); 2573 if (V_pf_status.running) 2574 error = EEXIST; 2575 else { 2576 int cpu; 2577 2578 hook_pf(); 2579 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2580 hook_pf_eth(); 2581 V_pf_status.running = 1; 2582 V_pf_status.since = time_second; 2583 2584 CPU_FOREACH(cpu) 2585 V_pf_stateid[cpu] = time_second; 2586 2587 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2588 } 2589 break; 2590 2591 case DIOCSTOP: 2592 sx_xlock(&pf_ioctl_lock); 2593 if (!V_pf_status.running) 2594 error = ENOENT; 2595 else { 2596 V_pf_status.running = 0; 2597 dehook_pf(); 2598 dehook_pf_eth(); 2599 V_pf_status.since = time_second; 2600 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2601 } 2602 break; 2603 2604 case DIOCGETETHRULES: { 2605 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2606 nvlist_t *nvl; 2607 void *packed; 2608 struct pf_keth_rule *tail; 2609 struct pf_keth_ruleset *rs; 2610 u_int32_t ticket, nr; 2611 const char *anchor = ""; 2612 2613 nvl = NULL; 2614 packed = NULL; 2615 2616 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2617 2618 if (nv->len > pf_ioctl_maxcount) 2619 ERROUT(ENOMEM); 2620 2621 /* Copy the request in */ 2622 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2623 if (packed == NULL) 2624 ERROUT(ENOMEM); 2625 2626 error = copyin(nv->data, packed, nv->len); 2627 if (error) 2628 ERROUT(error); 2629 2630 nvl = nvlist_unpack(packed, nv->len, 0); 2631 if (nvl == NULL) 2632 ERROUT(EBADMSG); 2633 2634 if (! 
nvlist_exists_string(nvl, "anchor")) 2635 ERROUT(EBADMSG); 2636 2637 anchor = nvlist_get_string(nvl, "anchor"); 2638 2639 rs = pf_find_keth_ruleset(anchor); 2640 2641 nvlist_destroy(nvl); 2642 nvl = NULL; 2643 free(packed, M_NVLIST); 2644 packed = NULL; 2645 2646 if (rs == NULL) 2647 ERROUT(ENOENT); 2648 2649 /* Reply */ 2650 nvl = nvlist_create(0); 2651 if (nvl == NULL) 2652 ERROUT(ENOMEM); 2653 2654 PF_RULES_RLOCK(); 2655 2656 ticket = rs->active.ticket; 2657 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2658 if (tail) 2659 nr = tail->nr + 1; 2660 else 2661 nr = 0; 2662 2663 PF_RULES_RUNLOCK(); 2664 2665 nvlist_add_number(nvl, "ticket", ticket); 2666 nvlist_add_number(nvl, "nr", nr); 2667 2668 packed = nvlist_pack(nvl, &nv->len); 2669 if (packed == NULL) 2670 ERROUT(ENOMEM); 2671 2672 if (nv->size == 0) 2673 ERROUT(0); 2674 else if (nv->size < nv->len) 2675 ERROUT(ENOSPC); 2676 2677 error = copyout(packed, nv->data, nv->len); 2678 2679 #undef ERROUT 2680 DIOCGETETHRULES_error: 2681 free(packed, M_NVLIST); 2682 nvlist_destroy(nvl); 2683 break; 2684 } 2685 2686 case DIOCGETETHRULE: { 2687 struct epoch_tracker et; 2688 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2689 nvlist_t *nvl = NULL; 2690 void *nvlpacked = NULL; 2691 struct pf_keth_rule *rule = NULL; 2692 struct pf_keth_ruleset *rs; 2693 u_int32_t ticket, nr; 2694 bool clear = false; 2695 const char *anchor; 2696 2697 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2698 2699 if (nv->len > pf_ioctl_maxcount) 2700 ERROUT(ENOMEM); 2701 2702 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 2703 if (nvlpacked == NULL) 2704 ERROUT(ENOMEM); 2705 2706 error = copyin(nv->data, nvlpacked, nv->len); 2707 if (error) 2708 ERROUT(error); 2709 2710 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2711 if (nvl == NULL) 2712 ERROUT(EBADMSG); 2713 if (! nvlist_exists_number(nvl, "ticket")) 2714 ERROUT(EBADMSG); 2715 ticket = nvlist_get_number(nvl, "ticket"); 2716 if (! nvlist_exists_string(nvl, "anchor")) 2717 ERROUT(EBADMSG); 2718 anchor = nvlist_get_string(nvl, "anchor"); 2719 2720 if (nvlist_exists_bool(nvl, "clear")) 2721 clear = nvlist_get_bool(nvl, "clear"); 2722 2723 if (clear && !(flags & FWRITE)) 2724 ERROUT(EACCES); 2725 2726 if (! nvlist_exists_number(nvl, "nr")) 2727 ERROUT(EBADMSG); 2728 nr = nvlist_get_number(nvl, "nr"); 2729 2730 PF_RULES_RLOCK(); 2731 rs = pf_find_keth_ruleset(anchor); 2732 if (rs == NULL) { 2733 PF_RULES_RUNLOCK(); 2734 ERROUT(ENOENT); 2735 } 2736 if (ticket != rs->active.ticket) { 2737 PF_RULES_RUNLOCK(); 2738 ERROUT(EBUSY); 2739 } 2740 2741 nvlist_destroy(nvl); 2742 nvl = NULL; 2743 free(nvlpacked, M_TEMP); 2744 nvlpacked = NULL; 2745 2746 rule = TAILQ_FIRST(rs->active.rules); 2747 while ((rule != NULL) && (rule->nr != nr)) 2748 rule = TAILQ_NEXT(rule, entries); 2749 if (rule == NULL) { 2750 PF_RULES_RUNLOCK(); 2751 ERROUT(ENOENT); 2752 } 2753 /* Make sure rule can't go away. 
*/ 2754 NET_EPOCH_ENTER(et); 2755 PF_RULES_RUNLOCK(); 2756 nvl = pf_keth_rule_to_nveth_rule(rule); 2757 if (nvl == NULL) { NET_EPOCH_EXIT(et); 2758 ERROUT(ENOMEM); } 2759 if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) { NET_EPOCH_EXIT(et); 2760 ERROUT(EBUSY); } 2761 NET_EPOCH_EXIT(et); 2762 2763 nvlpacked = nvlist_pack(nvl, &nv->len); 2764 if (nvlpacked == NULL) 2765 ERROUT(ENOMEM); 2766 2767 if (nv->size == 0) 2768 ERROUT(0); 2769 else if (nv->size < nv->len) 2770 ERROUT(ENOSPC); 2771 2772 error = copyout(nvlpacked, nv->data, nv->len); 2773 if (error == 0 && clear) { 2774 counter_u64_zero(rule->evaluations); 2775 for (int i = 0; i < 2; i++) { 2776 counter_u64_zero(rule->packets[i]); 2777 counter_u64_zero(rule->bytes[i]); 2778 } 2779 } 2780 2781 #undef ERROUT 2782 DIOCGETETHRULE_error: 2783 free(nvlpacked, M_TEMP); 2784 nvlist_destroy(nvl); 2785 break; 2786 } 2787 2788 case DIOCADDETHRULE: { 2789 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2790 nvlist_t *nvl = NULL; 2791 void *nvlpacked = NULL; 2792 struct pf_keth_rule *rule = NULL, *tail = NULL; 2793 struct pf_keth_ruleset *ruleset = NULL; 2794 struct pfi_kkif *kif = NULL; 2795 const char *anchor = "", *anchor_call = ""; 2796 2797 #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) 2798 if (nv->len > pf_ioctl_maxcount) ERROUT(ENOMEM); 2799 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 2800 if (nvlpacked == NULL) 2801 ERROUT(ENOMEM); 2802 2803 error = copyin(nv->data, nvlpacked, nv->len); 2804 if (error) 2805 ERROUT(error); 2806 2807 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2808 if (nvl == NULL) 2809 ERROUT(EBADMSG); 2810 2811 if (! nvlist_exists_number(nvl, "ticket")) 2812 ERROUT(EBADMSG); 2813 2814 if (nvlist_exists_string(nvl, "anchor")) 2815 anchor = nvlist_get_string(nvl, "anchor"); 2816 if (nvlist_exists_string(nvl, "anchor_call")) 2817 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2818 2819 ruleset = pf_find_keth_ruleset(anchor); 2820 if (ruleset == NULL) 2821 ERROUT(EINVAL); 2822 2823 if (nvlist_get_number(nvl, "ticket") != 2824 ruleset->inactive.ticket) { 2825 DPFPRINTF(PF_DEBUG_MISC, 2826 ("ticket: %d != %d\n", 2827 (u_int32_t)nvlist_get_number(nvl, "ticket"), 2828 ruleset->inactive.ticket)); 2829 ERROUT(EBUSY); 2830 } 2831 2832 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); 2833 if (rule == NULL) 2834 ERROUT(ENOMEM); 2835 2836 error = pf_nveth_rule_to_keth_rule(nvl, rule); 2837 if (error != 0) 2838 ERROUT(error); 2839 2840 if (rule->ifname[0]) 2841 kif = pf_kkif_create(M_WAITOK); 2842 rule->evaluations = counter_u64_alloc(M_WAITOK); 2843 for (int i = 0; i < 2; i++) { 2844 rule->packets[i] = counter_u64_alloc(M_WAITOK); 2845 rule->bytes[i] = counter_u64_alloc(M_WAITOK); 2846 } 2847 2848 PF_RULES_WLOCK(); 2849 2850 if (rule->ifname[0]) { 2851 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2852 pfi_kkif_ref(rule->kif); 2853 } else 2854 rule->kif = NULL; 2855 2856 #ifdef ALTQ 2857 /* set queue IDs */ 2858 if (rule->qname[0] != 0) { 2859 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2860 error = EBUSY; 2863 } 2864 #endif 2865 if (rule->tagname[0]) 2866 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2867 error = EBUSY; 2868 2869 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2870 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2871 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2872 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2873 2874 if (error) { 2875 pf_free_eth_rule(rule); 2876 PF_RULES_WUNLOCK(); 2877 ERROUT(error); 2878 } 2879 2880 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2881 pf_free_eth_rule(rule); 
2882 PF_RULES_WUNLOCK(); 2883 ERROUT(EINVAL); 2884 } 2885 2886 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2887 if (tail) 2888 rule->nr = tail->nr + 1; 2889 else 2890 rule->nr = 0; 2891 2892 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2893 2894 PF_RULES_WUNLOCK(); 2895 2896 #undef ERROUT 2897 DIOCADDETHRULE_error: 2898 nvlist_destroy(nvl); 2899 free(nvlpacked, M_TEMP); 2900 break; 2901 } 2902 2903 case DIOCGETETHRULESETS: { 2904 struct epoch_tracker et; 2905 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2906 nvlist_t *nvl = NULL; 2907 void *nvlpacked = NULL; 2908 struct pf_keth_ruleset *ruleset; 2909 struct pf_keth_anchor *anchor; 2910 int nr = 0; 2911 2912 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2913 2914 if (nv->len > pf_ioctl_maxcount) 2915 ERROUT(ENOMEM); 2916 2917 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2918 if (nvlpacked == NULL) 2919 ERROUT(ENOMEM); 2920 2921 error = copyin(nv->data, nvlpacked, nv->len); 2922 if (error) 2923 ERROUT(error); 2924 2925 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2926 if (nvl == NULL) 2927 ERROUT(EBADMSG); 2928 if (! nvlist_exists_string(nvl, "path")) 2929 ERROUT(EBADMSG); 2930 2931 NET_EPOCH_ENTER(et); 2932 2933 if ((ruleset = pf_find_keth_ruleset( 2934 nvlist_get_string(nvl, "path"))) == NULL) { 2935 NET_EPOCH_EXIT(et); 2936 ERROUT(ENOENT); 2937 } 2938 2939 if (ruleset->anchor == NULL) { 2940 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2941 if (anchor->parent == NULL) 2942 nr++; 2943 } else { 2944 RB_FOREACH(anchor, pf_keth_anchor_node, 2945 &ruleset->anchor->children) 2946 nr++; 2947 } 2948 2949 NET_EPOCH_EXIT(et); 2950 2951 nvlist_destroy(nvl); 2952 nvl = NULL; 2953 free(nvlpacked, M_NVLIST); 2954 nvlpacked = NULL; 2955 2956 nvl = nvlist_create(0); 2957 if (nvl == NULL) 2958 ERROUT(ENOMEM); 2959 2960 nvlist_add_number(nvl, "nr", nr); 2961 2962 nvlpacked = nvlist_pack(nvl, &nv->len); 2963 if (nvlpacked == NULL) 2964 ERROUT(ENOMEM); 2965 2966 if (nv->size == 0) 2967 ERROUT(0); 2968 else if (nv->size < nv->len) 2969 ERROUT(ENOSPC); 2970 2971 error = copyout(nvlpacked, nv->data, nv->len); 2972 2973 #undef ERROUT 2974 DIOCGETETHRULESETS_error: 2975 free(nvlpacked, M_NVLIST); 2976 nvlist_destroy(nvl); 2977 break; 2978 } 2979 2980 case DIOCGETETHRULESET: { 2981 struct epoch_tracker et; 2982 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2983 nvlist_t *nvl = NULL; 2984 void *nvlpacked = NULL; 2985 struct pf_keth_ruleset *ruleset; 2986 struct pf_keth_anchor *anchor; 2987 int nr = 0, req_nr = 0; 2988 bool found = false; 2989 2990 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 2991 2992 if (nv->len > pf_ioctl_maxcount) 2993 ERROUT(ENOMEM); 2994 2995 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2996 if (nvlpacked == NULL) 2997 ERROUT(ENOMEM); 2998 2999 error = copyin(nv->data, nvlpacked, nv->len); 3000 if (error) 3001 ERROUT(error); 3002 3003 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3004 if (nvl == NULL) 3005 ERROUT(EBADMSG); 3006 if (! nvlist_exists_string(nvl, "path")) 3007 ERROUT(EBADMSG); 3008 if (! 
nvlist_exists_number(nvl, "nr")) 3009 ERROUT(EBADMSG); 3010 3011 req_nr = nvlist_get_number(nvl, "nr"); 3012 3013 NET_EPOCH_ENTER(et); 3014 3015 if ((ruleset = pf_find_keth_ruleset( 3016 nvlist_get_string(nvl, "path"))) == NULL) { 3017 NET_EPOCH_EXIT(et); 3018 ERROUT(ENOENT); 3019 } 3020 3021 nvlist_destroy(nvl); 3022 nvl = NULL; 3023 free(nvlpacked, M_NVLIST); 3024 nvlpacked = NULL; 3025 3026 nvl = nvlist_create(0); 3027 if (nvl == NULL) { 3028 NET_EPOCH_EXIT(et); 3029 ERROUT(ENOMEM); 3030 } 3031 3032 if (ruleset->anchor == NULL) { 3033 RB_FOREACH(anchor, pf_keth_anchor_global, 3034 &V_pf_keth_anchors) { 3035 if (anchor->parent == NULL && nr++ == req_nr) { 3036 found = true; 3037 break; 3038 } 3039 } 3040 } else { 3041 RB_FOREACH(anchor, pf_keth_anchor_node, 3042 &ruleset->anchor->children) { 3043 if (nr++ == req_nr) { 3044 found = true; 3045 break; 3046 } 3047 } 3048 } 3049 3050 NET_EPOCH_EXIT(et); 3051 if (found) { 3052 nvlist_add_number(nvl, "nr", nr); 3053 nvlist_add_string(nvl, "name", anchor->name); 3054 if (ruleset->anchor) 3055 nvlist_add_string(nvl, "path", 3056 ruleset->anchor->path); 3057 else 3058 nvlist_add_string(nvl, "path", ""); 3059 } else { 3060 ERROUT(EBUSY); 3061 } 3062 3063 nvlpacked = nvlist_pack(nvl, &nv->len); 3064 if (nvlpacked == NULL) 3065 ERROUT(ENOMEM); 3066 3067 if (nv->size == 0) 3068 ERROUT(0); 3069 else if (nv->size < nv->len) 3070 ERROUT(ENOSPC); 3071 3072 error = copyout(nvlpacked, nv->data, nv->len); 3073 3074 #undef ERROUT 3075 DIOCGETETHRULESET_error: 3076 free(nvlpacked, M_NVLIST); 3077 nvlist_destroy(nvl); 3078 break; 3079 } 3080 3081 case DIOCADDRULENV: { 3082 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3083 nvlist_t *nvl = NULL; 3084 void *nvlpacked = NULL; 3085 struct pf_krule *rule = NULL; 3086 const char *anchor = "", *anchor_call = ""; 3087 uint32_t ticket = 0, pool_ticket = 0; 3088 3089 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3090 3091 if (nv->len > pf_ioctl_maxcount) 3092 ERROUT(ENOMEM); 3093 3094 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 3095 error = copyin(nv->data, nvlpacked, nv->len); 3096 if (error) 3097 ERROUT(error); 3098 3099 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3100 if (nvl == NULL) 3101 ERROUT(EBADMSG); 3102 3103 if (! nvlist_exists_number(nvl, "ticket")) 3104 ERROUT(EINVAL); 3105 ticket = nvlist_get_number(nvl, "ticket"); 3106 3107 if (! nvlist_exists_number(nvl, "pool_ticket")) 3108 ERROUT(EINVAL); 3109 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3110 3111 if (! 
nvlist_exists_nvlist(nvl, "rule")) 3112 ERROUT(EINVAL); 3113 3114 rule = pf_krule_alloc(); 3115 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3116 rule); 3117 if (error) 3118 ERROUT(error); 3119 3120 if (nvlist_exists_string(nvl, "anchor")) 3121 anchor = nvlist_get_string(nvl, "anchor"); 3122 if (nvlist_exists_string(nvl, "anchor_call")) 3123 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3124 3125 if ((error = nvlist_error(nvl))) 3126 ERROUT(error); 3127 3128 /* Frees rule on error */ 3129 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3130 anchor_call, td); 3131 3132 nvlist_destroy(nvl); 3133 free(nvlpacked, M_TEMP); 3134 break; 3135 #undef ERROUT 3136 DIOCADDRULENV_error: 3137 pf_krule_free(rule); 3138 nvlist_destroy(nvl); 3139 free(nvlpacked, M_TEMP); 3140 3141 break; 3142 } 3143 case DIOCADDRULE: { 3144 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3145 struct pf_krule *rule; 3146 3147 rule = pf_krule_alloc(); 3148 error = pf_rule_to_krule(&pr->rule, rule); 3149 if (error != 0) { 3150 pf_krule_free(rule); 3151 break; 3152 } 3153 3154 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3155 3156 /* Frees rule on error */ 3157 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3158 pr->anchor, pr->anchor_call, td); 3159 break; 3160 } 3161 3162 case DIOCGETRULES: { 3163 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3164 struct pf_kruleset *ruleset; 3165 struct pf_krule *tail; 3166 int rs_num; 3167 3168 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3169 3170 PF_RULES_WLOCK(); 3171 ruleset = pf_find_kruleset(pr->anchor); 3172 if (ruleset == NULL) { 3173 PF_RULES_WUNLOCK(); 3174 error = EINVAL; 3175 break; 3176 } 3177 rs_num = pf_get_ruleset_number(pr->rule.action); 3178 if (rs_num >= PF_RULESET_MAX) { 3179 PF_RULES_WUNLOCK(); 3180 error = EINVAL; 3181 break; 3182 } 3183 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3184 pf_krulequeue); 3185 if (tail) 3186 pr->nr = tail->nr + 1; 3187 else 3188 pr->nr = 0; 3189 pr->ticket = ruleset->rules[rs_num].active.ticket; 3190 PF_RULES_WUNLOCK(); 3191 break; 3192 } 3193 3194 case DIOCGETRULE: { 3195 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3196 struct pf_kruleset *ruleset; 3197 struct pf_krule *rule; 3198 int rs_num; 3199 3200 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3201 3202 PF_RULES_WLOCK(); 3203 ruleset = pf_find_kruleset(pr->anchor); 3204 if (ruleset == NULL) { 3205 PF_RULES_WUNLOCK(); 3206 error = EINVAL; 3207 break; 3208 } 3209 rs_num = pf_get_ruleset_number(pr->rule.action); 3210 if (rs_num >= PF_RULESET_MAX) { 3211 PF_RULES_WUNLOCK(); 3212 error = EINVAL; 3213 break; 3214 } 3215 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 3216 PF_RULES_WUNLOCK(); 3217 error = EBUSY; 3218 break; 3219 } 3220 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3221 while ((rule != NULL) && (rule->nr != pr->nr)) 3222 rule = TAILQ_NEXT(rule, entries); 3223 if (rule == NULL) { 3224 PF_RULES_WUNLOCK(); 3225 error = EBUSY; 3226 break; 3227 } 3228 3229 pf_krule_to_rule(rule, &pr->rule); 3230 3231 if (pf_kanchor_copyout(ruleset, rule, pr)) { 3232 PF_RULES_WUNLOCK(); 3233 error = EBUSY; 3234 break; 3235 } 3236 pf_addr_copyout(&pr->rule.src.addr); 3237 pf_addr_copyout(&pr->rule.dst.addr); 3238 3239 if (pr->action == PF_GET_CLR_CNTR) { 3240 pf_counter_u64_zero(&rule->evaluations); 3241 for (int i = 0; i < 2; i++) { 3242 pf_counter_u64_zero(&rule->packets[i]); 3243 pf_counter_u64_zero(&rule->bytes[i]); 3244 } 3245 counter_u64_zero(rule->states_tot); 3246 } 3247 PF_RULES_WUNLOCK(); 3248 break; 3249 } 3250 3251 
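/*
 * Nvlist variant of DIOCGETRULE. The packed request must supply "anchor",
 * "ruleset", "ticket" and "nr"; the optional boolean "clear_counter" zeroes
 * the rule's counters and is only honoured on a descriptor opened for
 * writing. The reply echoes "nr" and packs the rule itself under "rule".
 */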
case DIOCGETRULENV: { 3252 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3253 nvlist_t *nvrule = NULL; 3254 nvlist_t *nvl = NULL; 3255 struct pf_kruleset *ruleset; 3256 struct pf_krule *rule; 3257 void *nvlpacked = NULL; 3258 int rs_num, nr; 3259 bool clear_counter = false; 3260 3261 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3262 3263 if (nv->len > pf_ioctl_maxcount) 3264 ERROUT(ENOMEM); 3265 3266 /* Copy the request in */ 3267 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3268 if (nvlpacked == NULL) 3269 ERROUT(ENOMEM); 3270 3271 error = copyin(nv->data, nvlpacked, nv->len); 3272 if (error) 3273 ERROUT(error); 3274 3275 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3276 if (nvl == NULL) 3277 ERROUT(EBADMSG); 3278 3279 if (! nvlist_exists_string(nvl, "anchor")) 3280 ERROUT(EBADMSG); 3281 if (! nvlist_exists_number(nvl, "ruleset")) 3282 ERROUT(EBADMSG); 3283 if (! nvlist_exists_number(nvl, "ticket")) 3284 ERROUT(EBADMSG); 3285 if (! nvlist_exists_number(nvl, "nr")) 3286 ERROUT(EBADMSG); 3287 3288 if (nvlist_exists_bool(nvl, "clear_counter")) 3289 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3290 3291 if (clear_counter && !(flags & FWRITE)) 3292 ERROUT(EACCES); 3293 3294 nr = nvlist_get_number(nvl, "nr"); 3295 3296 PF_RULES_WLOCK(); 3297 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3298 if (ruleset == NULL) { 3299 PF_RULES_WUNLOCK(); 3300 ERROUT(ENOENT); 3301 } 3302 3303 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3304 if (rs_num >= PF_RULESET_MAX) { 3305 PF_RULES_WUNLOCK(); 3306 ERROUT(EINVAL); 3307 } 3308 3309 if (nvlist_get_number(nvl, "ticket") != 3310 ruleset->rules[rs_num].active.ticket) { 3311 PF_RULES_WUNLOCK(); 3312 ERROUT(EBUSY); 3313 } 3314 3315 if ((error = nvlist_error(nvl))) { 3316 PF_RULES_WUNLOCK(); 3317 ERROUT(error); 3318 } 3319 3320 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3321 while ((rule != NULL) && (rule->nr != nr)) 3322 rule = TAILQ_NEXT(rule, entries); 3323 if (rule == NULL) { 3324 PF_RULES_WUNLOCK(); 3325 ERROUT(EBUSY); 3326 } 3327 3328 nvrule = pf_krule_to_nvrule(rule); 3329 3330 nvlist_destroy(nvl); 3331 nvl = nvlist_create(0); 3332 if (nvl == NULL) { 3333 PF_RULES_WUNLOCK(); 3334 ERROUT(ENOMEM); 3335 } 3336 nvlist_add_number(nvl, "nr", nr); 3337 nvlist_add_nvlist(nvl, "rule", nvrule); 3338 nvlist_destroy(nvrule); 3339 nvrule = NULL; 3340 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3341 PF_RULES_WUNLOCK(); 3342 ERROUT(EBUSY); 3343 } 3344 3345 free(nvlpacked, M_NVLIST); 3346 nvlpacked = nvlist_pack(nvl, &nv->len); 3347 if (nvlpacked == NULL) { 3348 PF_RULES_WUNLOCK(); 3349 ERROUT(ENOMEM); 3350 } 3351 3352 if (nv->size == 0) { 3353 PF_RULES_WUNLOCK(); 3354 ERROUT(0); 3355 } 3356 else if (nv->size < nv->len) { 3357 PF_RULES_WUNLOCK(); 3358 ERROUT(ENOSPC); 3359 } 3360 3361 if (clear_counter) { 3362 pf_counter_u64_zero(&rule->evaluations); 3363 for (int i = 0; i < 2; i++) { 3364 pf_counter_u64_zero(&rule->packets[i]); 3365 pf_counter_u64_zero(&rule->bytes[i]); 3366 } 3367 counter_u64_zero(rule->states_tot); 3368 } 3369 PF_RULES_WUNLOCK(); 3370 3371 error = copyout(nvlpacked, nv->data, nv->len); 3372 3373 #undef ERROUT 3374 DIOCGETRULENV_error: 3375 free(nvlpacked, M_NVLIST); 3376 nvlist_destroy(nvrule); 3377 nvlist_destroy(nvl); 3378 3379 break; 3380 } 3381 3382 case DIOCCHANGERULE: { 3383 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3384 struct pf_kruleset *ruleset; 3385 struct pf_krule *oldrule = NULL, *newrule = NULL; 3386 struct pfi_kkif *kif = NULL; 3387 struct pf_kpooladdr 
*pa; 3388 u_int32_t nr = 0; 3389 int rs_num; 3390 3391 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3392 3393 if (pcr->action < PF_CHANGE_ADD_HEAD || 3394 pcr->action > PF_CHANGE_GET_TICKET) { 3395 error = EINVAL; 3396 break; 3397 } 3398 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3399 error = EINVAL; 3400 break; 3401 } 3402 3403 if (pcr->action != PF_CHANGE_REMOVE) { 3404 newrule = pf_krule_alloc(); 3405 error = pf_rule_to_krule(&pcr->rule, newrule); 3406 if (error != 0) { 3407 free(newrule, M_PFRULE); 3408 break; 3409 } 3410 3411 if (newrule->ifname[0]) 3412 kif = pf_kkif_create(M_WAITOK); 3413 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3414 for (int i = 0; i < 2; i++) { 3415 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3416 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3417 } 3418 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3419 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3420 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3421 newrule->cuid = td->td_ucred->cr_ruid; 3422 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3423 TAILQ_INIT(&newrule->rpool.list); 3424 } 3425 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3426 3427 PF_RULES_WLOCK(); 3428 #ifdef PF_WANT_32_TO_64_COUNTER 3429 if (newrule != NULL) { 3430 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3431 newrule->allrulelinked = true; 3432 V_pf_allrulecount++; 3433 } 3434 #endif 3435 3436 if (!(pcr->action == PF_CHANGE_REMOVE || 3437 pcr->action == PF_CHANGE_GET_TICKET) && 3438 pcr->pool_ticket != V_ticket_pabuf) 3439 ERROUT(EBUSY); 3440 3441 ruleset = pf_find_kruleset(pcr->anchor); 3442 if (ruleset == NULL) 3443 ERROUT(EINVAL); 3444 3445 rs_num = pf_get_ruleset_number(pcr->rule.action); 3446 if (rs_num >= PF_RULESET_MAX) 3447 ERROUT(EINVAL); 3448 3449 if (pcr->action == PF_CHANGE_GET_TICKET) { 3450 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3451 ERROUT(0); 3452 } else if (pcr->ticket != 3453 ruleset->rules[rs_num].active.ticket) 3454 ERROUT(EINVAL); 3455 3456 if (pcr->action != PF_CHANGE_REMOVE) { 3457 if (newrule->ifname[0]) { 3458 newrule->kif = pfi_kkif_attach(kif, 3459 newrule->ifname); 3460 kif = NULL; 3461 pfi_kkif_ref(newrule->kif); 3462 } else 3463 newrule->kif = NULL; 3464 3465 if (newrule->rtableid > 0 && 3466 newrule->rtableid >= rt_numfibs) 3467 error = EBUSY; 3468 3469 #ifdef ALTQ 3470 /* set queue IDs */ 3471 if (newrule->qname[0] != 0) { 3472 if ((newrule->qid = 3473 pf_qname2qid(newrule->qname)) == 0) 3474 error = EBUSY; 3475 else if (newrule->pqname[0] != 0) { 3476 if ((newrule->pqid = 3477 pf_qname2qid(newrule->pqname)) == 0) 3478 error = EBUSY; 3479 } else 3480 newrule->pqid = newrule->qid; 3481 } 3482 #endif /* ALTQ */ 3483 if (newrule->tagname[0]) 3484 if ((newrule->tag = 3485 pf_tagname2tag(newrule->tagname)) == 0) 3486 error = EBUSY; 3487 if (newrule->match_tagname[0]) 3488 if ((newrule->match_tag = pf_tagname2tag( 3489 newrule->match_tagname)) == 0) 3490 error = EBUSY; 3491 if (newrule->rt && !newrule->direction) 3492 error = EINVAL; 3493 if (!newrule->log) 3494 newrule->logif = 0; 3495 if (newrule->logif >= PFLOGIFS_MAX) 3496 error = EINVAL; 3497 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3498 error = ENOMEM; 3499 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3500 error = ENOMEM; 3501 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3502 error = EINVAL; 3503 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3504 if (pa->addr.type == PF_ADDR_TABLE) { 3505 pa->addr.p.tbl = 3506 
pfr_attach_table(ruleset, 3507 pa->addr.v.tblname); 3508 if (pa->addr.p.tbl == NULL) 3509 error = ENOMEM; 3510 } 3511 3512 newrule->overload_tbl = NULL; 3513 if (newrule->overload_tblname[0]) { 3514 if ((newrule->overload_tbl = pfr_attach_table( 3515 ruleset, newrule->overload_tblname)) == 3516 NULL) 3517 error = EINVAL; 3518 else 3519 newrule->overload_tbl->pfrkt_flags |= 3520 PFR_TFLAG_ACTIVE; 3521 } 3522 3523 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3524 if (((((newrule->action == PF_NAT) || 3525 (newrule->action == PF_RDR) || 3526 (newrule->action == PF_BINAT) || 3527 (newrule->rt > PF_NOPFROUTE)) && 3528 !newrule->anchor)) && 3529 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3530 error = EINVAL; 3531 3532 if (error) { 3533 pf_free_rule(newrule); 3534 PF_RULES_WUNLOCK(); 3535 break; 3536 } 3537 3538 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3539 } 3540 pf_empty_kpool(&V_pf_pabuf); 3541 3542 if (pcr->action == PF_CHANGE_ADD_HEAD) 3543 oldrule = TAILQ_FIRST( 3544 ruleset->rules[rs_num].active.ptr); 3545 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3546 oldrule = TAILQ_LAST( 3547 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3548 else { 3549 oldrule = TAILQ_FIRST( 3550 ruleset->rules[rs_num].active.ptr); 3551 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3552 oldrule = TAILQ_NEXT(oldrule, entries); 3553 if (oldrule == NULL) { 3554 if (newrule != NULL) 3555 pf_free_rule(newrule); 3556 PF_RULES_WUNLOCK(); 3557 error = EINVAL; 3558 break; 3559 } 3560 } 3561 3562 if (pcr->action == PF_CHANGE_REMOVE) { 3563 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3564 oldrule); 3565 ruleset->rules[rs_num].active.rcount--; 3566 } else { 3567 if (oldrule == NULL) 3568 TAILQ_INSERT_TAIL( 3569 ruleset->rules[rs_num].active.ptr, 3570 newrule, entries); 3571 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3572 pcr->action == PF_CHANGE_ADD_BEFORE) 3573 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3574 else 3575 TAILQ_INSERT_AFTER( 3576 ruleset->rules[rs_num].active.ptr, 3577 oldrule, newrule, entries); 3578 ruleset->rules[rs_num].active.rcount++; 3579 } 3580 3581 nr = 0; 3582 TAILQ_FOREACH(oldrule, 3583 ruleset->rules[rs_num].active.ptr, entries) 3584 oldrule->nr = nr++; 3585 3586 ruleset->rules[rs_num].active.ticket++; 3587 3588 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3589 pf_remove_if_empty_kruleset(ruleset); 3590 3591 PF_RULES_WUNLOCK(); 3592 break; 3593 3594 #undef ERROUT 3595 DIOCCHANGERULE_error: 3596 PF_RULES_WUNLOCK(); 3597 pf_krule_free(newrule); 3598 pf_kkif_free(kif); 3599 break; 3600 } 3601 3602 case DIOCCLRSTATES: { 3603 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3604 struct pf_kstate_kill kill; 3605 3606 error = pf_state_kill_to_kstate_kill(psk, &kill); 3607 if (error) 3608 break; 3609 3610 psk->psk_killed = pf_clear_states(&kill); 3611 break; 3612 } 3613 3614 case DIOCCLRSTATESNV: { 3615 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3616 break; 3617 } 3618 3619 case DIOCKILLSTATES: { 3620 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3621 struct pf_kstate_kill kill; 3622 3623 error = pf_state_kill_to_kstate_kill(psk, &kill); 3624 if (error) 3625 break; 3626 3627 psk->psk_killed = 0; 3628 pf_killstates(&kill, &psk->psk_killed); 3629 break; 3630 } 3631 3632 case DIOCKILLSTATESNV: { 3633 error = pf_killstates_nv((struct pfioc_nv *)addr); 3634 break; 3635 } 3636 3637 case DIOCADDSTATE: { 3638 struct pfioc_state *ps = (struct pfioc_state *)addr; 3639 struct pfsync_state *sp = &ps->state; 3640 3641 if 
(sp->timeout >= PFTM_MAX) { 3642 error = EINVAL; 3643 break; 3644 } 3645 if (V_pfsync_state_import_ptr != NULL) { 3646 PF_RULES_RLOCK(); 3647 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 3648 PF_RULES_RUNLOCK(); 3649 } else 3650 error = EOPNOTSUPP; 3651 break; 3652 } 3653 3654 case DIOCGETSTATE: { 3655 struct pfioc_state *ps = (struct pfioc_state *)addr; 3656 struct pf_kstate *s; 3657 3658 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3659 if (s == NULL) { 3660 error = ENOENT; 3661 break; 3662 } 3663 3664 pfsync_state_export(&ps->state, s); 3665 PF_STATE_UNLOCK(s); 3666 break; 3667 } 3668 3669 case DIOCGETSTATENV: { 3670 error = pf_getstate((struct pfioc_nv *)addr); 3671 break; 3672 } 3673 3674 case DIOCGETSTATES: { 3675 struct pfioc_states *ps = (struct pfioc_states *)addr; 3676 struct pf_kstate *s; 3677 struct pfsync_state *pstore, *p; 3678 int i, nr; 3679 size_t slice_count = 16, count; 3680 void *out; 3681 3682 if (ps->ps_len <= 0) { 3683 nr = uma_zone_get_cur(V_pf_state_z); 3684 ps->ps_len = sizeof(struct pfsync_state) * nr; 3685 break; 3686 } 3687 3688 out = ps->ps_states; 3689 pstore = mallocarray(slice_count, 3690 sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); 3691 nr = 0; 3692 3693 for (i = 0; i <= pf_hashmask; i++) { 3694 struct pf_idhash *ih = &V_pf_idhash[i]; 3695 3696 DIOCGETSTATES_retry: 3697 p = pstore; 3698 3699 if (LIST_EMPTY(&ih->states)) 3700 continue; 3701 3702 PF_HASHROW_LOCK(ih); 3703 count = 0; 3704 LIST_FOREACH(s, &ih->states, entry) { 3705 if (s->timeout == PFTM_UNLINKED) 3706 continue; 3707 count++; 3708 } 3709 3710 if (count > slice_count) { 3711 PF_HASHROW_UNLOCK(ih); 3712 free(pstore, M_TEMP); 3713 slice_count = count * 2; 3714 pstore = mallocarray(slice_count, 3715 sizeof(struct pfsync_state), M_TEMP, 3716 M_WAITOK | M_ZERO); 3717 goto DIOCGETSTATES_retry; 3718 } 3719 3720 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3721 PF_HASHROW_UNLOCK(ih); 3722 goto DIOCGETSTATES_full; 3723 } 3724 3725 LIST_FOREACH(s, &ih->states, entry) { 3726 if (s->timeout == PFTM_UNLINKED) 3727 continue; 3728 3729 pfsync_state_export(p, s); 3730 p++; 3731 nr++; 3732 } 3733 PF_HASHROW_UNLOCK(ih); 3734 error = copyout(pstore, out, 3735 sizeof(struct pfsync_state) * count); 3736 if (error) 3737 break; 3738 out = ps->ps_states + nr; 3739 } 3740 DIOCGETSTATES_full: 3741 ps->ps_len = sizeof(struct pfsync_state) * nr; 3742 free(pstore, M_TEMP); 3743 3744 break; 3745 } 3746 3747 case DIOCGETSTATESV2: { 3748 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3749 struct pf_kstate *s; 3750 struct pf_state_export *pstore, *p; 3751 int i, nr; 3752 size_t slice_count = 16, count; 3753 void *out; 3754 3755 if (ps->ps_req_version > PF_STATE_VERSION) { 3756 error = ENOTSUP; 3757 break; 3758 } 3759 3760 if (ps->ps_len <= 0) { 3761 nr = uma_zone_get_cur(V_pf_state_z); 3762 ps->ps_len = sizeof(struct pf_state_export) * nr; 3763 break; 3764 } 3765 3766 out = ps->ps_states; 3767 pstore = mallocarray(slice_count, 3768 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3769 nr = 0; 3770 3771 for (i = 0; i <= pf_hashmask; i++) { 3772 struct pf_idhash *ih = &V_pf_idhash[i]; 3773 3774 DIOCGETSTATESV2_retry: 3775 p = pstore; 3776 3777 if (LIST_EMPTY(&ih->states)) 3778 continue; 3779 3780 PF_HASHROW_LOCK(ih); 3781 count = 0; 3782 LIST_FOREACH(s, &ih->states, entry) { 3783 if (s->timeout == PFTM_UNLINKED) 3784 continue; 3785 count++; 3786 } 3787 3788 if (count > slice_count) { 3789 PF_HASHROW_UNLOCK(ih); 3790 free(pstore, M_TEMP); 3791 slice_count = count * 2; 
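/* Reallocate at twice the observed row size and rescan this row. */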
3792 pstore = mallocarray(slice_count, 3793 sizeof(struct pf_state_export), M_TEMP, 3794 M_WAITOK | M_ZERO); 3795 goto DIOCGETSTATESV2_retry; 3796 } 3797 3798 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3799 PF_HASHROW_UNLOCK(ih); 3800 goto DIOCGETSTATESV2_full; 3801 } 3802 3803 LIST_FOREACH(s, &ih->states, entry) { 3804 if (s->timeout == PFTM_UNLINKED) 3805 continue; 3806 3807 pf_state_export(p, s); 3808 p++; 3809 nr++; 3810 } 3811 PF_HASHROW_UNLOCK(ih); 3812 error = copyout(pstore, out, 3813 sizeof(struct pf_state_export) * count); 3814 if (error) 3815 break; 3816 out = ps->ps_states + nr; 3817 } 3818 DIOCGETSTATESV2_full: 3819 ps->ps_len = nr * sizeof(struct pf_state_export); 3820 free(pstore, M_TEMP); 3821 3822 break; 3823 } 3824 3825 case DIOCGETSTATUS: { 3826 struct pf_status *s = (struct pf_status *)addr; 3827 3828 PF_RULES_RLOCK(); 3829 s->running = V_pf_status.running; 3830 s->since = V_pf_status.since; 3831 s->debug = V_pf_status.debug; 3832 s->hostid = V_pf_status.hostid; 3833 s->states = V_pf_status.states; 3834 s->src_nodes = V_pf_status.src_nodes; 3835 3836 for (int i = 0; i < PFRES_MAX; i++) 3837 s->counters[i] = 3838 counter_u64_fetch(V_pf_status.counters[i]); 3839 for (int i = 0; i < LCNT_MAX; i++) 3840 s->lcounters[i] = 3841 counter_u64_fetch(V_pf_status.lcounters[i]); 3842 for (int i = 0; i < FCNT_MAX; i++) 3843 s->fcounters[i] = 3844 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3845 for (int i = 0; i < SCNT_MAX; i++) 3846 s->scounters[i] = 3847 counter_u64_fetch(V_pf_status.scounters[i]); 3848 3849 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3850 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3851 PF_MD5_DIGEST_LENGTH); 3852 3853 pfi_update_status(s->ifname, s); 3854 PF_RULES_RUNLOCK(); 3855 break; 3856 } 3857 3858 case DIOCGETSTATUSNV: { 3859 error = pf_getstatus((struct pfioc_nv *)addr); 3860 break; 3861 } 3862 3863 case DIOCSETSTATUSIF: { 3864 struct pfioc_if *pi = (struct pfioc_if *)addr; 3865 3866 if (pi->ifname[0] == 0) { 3867 bzero(V_pf_status.ifname, IFNAMSIZ); 3868 break; 3869 } 3870 PF_RULES_WLOCK(); 3871 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3872 PF_RULES_WUNLOCK(); 3873 break; 3874 } 3875 3876 case DIOCCLRSTATUS: { 3877 PF_RULES_WLOCK(); 3878 for (int i = 0; i < PFRES_MAX; i++) 3879 counter_u64_zero(V_pf_status.counters[i]); 3880 for (int i = 0; i < FCNT_MAX; i++) 3881 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3882 for (int i = 0; i < SCNT_MAX; i++) 3883 counter_u64_zero(V_pf_status.scounters[i]); 3884 for (int i = 0; i < KLCNT_MAX; i++) 3885 counter_u64_zero(V_pf_status.lcounters[i]); 3886 V_pf_status.since = time_second; 3887 if (*V_pf_status.ifname) 3888 pfi_update_status(V_pf_status.ifname, NULL); 3889 PF_RULES_WUNLOCK(); 3890 break; 3891 } 3892 3893 case DIOCNATLOOK: { 3894 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3895 struct pf_state_key *sk; 3896 struct pf_kstate *state; 3897 struct pf_state_key_cmp key; 3898 int m = 0, direction = pnl->direction; 3899 int sidx, didx; 3900 3901 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3902 sidx = (direction == PF_IN) ? 1 : 0; 3903 didx = (direction == PF_IN) ? 
0 : 1; 3904 3905 if (!pnl->proto || 3906 PF_AZERO(&pnl->saddr, pnl->af) || 3907 PF_AZERO(&pnl->daddr, pnl->af) || 3908 ((pnl->proto == IPPROTO_TCP || 3909 pnl->proto == IPPROTO_UDP) && 3910 (!pnl->dport || !pnl->sport))) 3911 error = EINVAL; 3912 else { 3913 bzero(&key, sizeof(key)); 3914 key.af = pnl->af; 3915 key.proto = pnl->proto; 3916 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3917 key.port[sidx] = pnl->sport; 3918 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3919 key.port[didx] = pnl->dport; 3920 3921 state = pf_find_state_all(&key, direction, &m); 3922 if (state == NULL) { 3923 error = ENOENT; 3924 } else { 3925 if (m > 1) { 3926 PF_STATE_UNLOCK(state); 3927 error = E2BIG; /* more than one state */ 3928 } else { 3929 sk = state->key[sidx]; 3930 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3931 pnl->rsport = sk->port[sidx]; 3932 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3933 pnl->rdport = sk->port[didx]; 3934 PF_STATE_UNLOCK(state); 3935 } 3936 } 3937 } 3938 break; 3939 } 3940 3941 case DIOCSETTIMEOUT: { 3942 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3943 int old; 3944 3945 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 3946 pt->seconds < 0) { 3947 error = EINVAL; 3948 break; 3949 } 3950 PF_RULES_WLOCK(); 3951 old = V_pf_default_rule.timeout[pt->timeout]; 3952 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 3953 pt->seconds = 1; 3954 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 3955 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 3956 wakeup(pf_purge_thread); 3957 pt->seconds = old; 3958 PF_RULES_WUNLOCK(); 3959 break; 3960 } 3961 3962 case DIOCGETTIMEOUT: { 3963 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3964 3965 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 3966 error = EINVAL; 3967 break; 3968 } 3969 PF_RULES_RLOCK(); 3970 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 3971 PF_RULES_RUNLOCK(); 3972 break; 3973 } 3974 3975 case DIOCGETLIMIT: { 3976 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3977 3978 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 3979 error = EINVAL; 3980 break; 3981 } 3982 PF_RULES_RLOCK(); 3983 pl->limit = V_pf_limits[pl->index].limit; 3984 PF_RULES_RUNLOCK(); 3985 break; 3986 } 3987 3988 case DIOCSETLIMIT: { 3989 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3990 int old_limit; 3991 3992 PF_RULES_WLOCK(); 3993 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 3994 V_pf_limits[pl->index].zone == NULL) { 3995 PF_RULES_WUNLOCK(); 3996 error = EINVAL; 3997 break; 3998 } 3999 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 4000 old_limit = V_pf_limits[pl->index].limit; 4001 V_pf_limits[pl->index].limit = pl->limit; 4002 pl->limit = old_limit; 4003 PF_RULES_WUNLOCK(); 4004 break; 4005 } 4006 4007 case DIOCSETDEBUG: { 4008 u_int32_t *level = (u_int32_t *)addr; 4009 4010 PF_RULES_WLOCK(); 4011 V_pf_status.debug = *level; 4012 PF_RULES_WUNLOCK(); 4013 break; 4014 } 4015 4016 case DIOCCLRRULECTRS: { 4017 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4018 struct pf_kruleset *ruleset = &pf_main_ruleset; 4019 struct pf_krule *rule; 4020 4021 PF_RULES_WLOCK(); 4022 TAILQ_FOREACH(rule, 4023 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4024 pf_counter_u64_zero(&rule->evaluations); 4025 for (int i = 0; i < 2; i++) { 4026 pf_counter_u64_zero(&rule->packets[i]); 4027 pf_counter_u64_zero(&rule->bytes[i]); 4028 } 4029 } 4030 PF_RULES_WUNLOCK(); 4031 break; 4032 } 4033 4034 case DIOCGIFSPEEDV0: 4035 case DIOCGIFSPEEDV1: { 4036 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 4037 struct pf_ifspeed_v1 ps; 4038 struct ifnet *ifp; 4039 4040 if (psp->ifname[0] == '\0') { 4041 error = EINVAL; 4042 break; 4043 } 4044 4045 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4046 if (error != 0) 4047 break; 4048 ifp = ifunit(ps.ifname); 4049 if (ifp != NULL) { 4050 psp->baudrate32 = 4051 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4052 if (cmd == DIOCGIFSPEEDV1) 4053 psp->baudrate = ifp->if_baudrate; 4054 } else { 4055 error = EINVAL; 4056 } 4057 break; 4058 } 4059 4060 #ifdef ALTQ 4061 case DIOCSTARTALTQ: { 4062 struct pf_altq *altq; 4063 4064 PF_RULES_WLOCK(); 4065 /* enable all altq interfaces on active list */ 4066 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4067 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4068 error = pf_enable_altq(altq); 4069 if (error != 0) 4070 break; 4071 } 4072 } 4073 if (error == 0) 4074 V_pf_altq_running = 1; 4075 PF_RULES_WUNLOCK(); 4076 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4077 break; 4078 } 4079 4080 case DIOCSTOPALTQ: { 4081 struct pf_altq *altq; 4082 4083 PF_RULES_WLOCK(); 4084 /* disable all altq interfaces on active list */ 4085 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4086 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4087 error = pf_disable_altq(altq); 4088 if (error != 0) 4089 break; 4090 } 4091 } 4092 if (error == 0) 4093 V_pf_altq_running = 0; 4094 PF_RULES_WUNLOCK(); 4095 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4096 break; 4097 } 4098 4099 case DIOCADDALTQV0: 4100 case DIOCADDALTQV1: { 4101 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4102 struct pf_altq *altq, *a; 4103 struct ifnet *ifp; 4104 4105 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4106 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4107 if (error) 4108 break; 4109 altq->local_flags = 0; 4110 4111 PF_RULES_WLOCK(); 4112 if (pa->ticket != V_ticket_altqs_inactive) { 4113 PF_RULES_WUNLOCK(); 4114 free(altq, M_PFALTQ); 4115 error = EBUSY; 4116 break; 4117 } 4118 4119 /* 4120 * if this is for a queue, find the discipline and 4121 * copy the necessary fields 4122 */ 4123 if (altq->qname[0] != 0) { 4124 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4125 PF_RULES_WUNLOCK(); 4126 error = EBUSY; 4127 free(altq, M_PFALTQ); 4128 break; 4129 } 4130 altq->altq_disc = NULL; 4131 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4132 if (strncmp(a->ifname, altq->ifname, 4133 IFNAMSIZ) == 0) { 4134 altq->altq_disc = a->altq_disc; 4135 break; 4136 } 4137 } 4138 } 4139 4140 if ((ifp = ifunit(altq->ifname)) == NULL) 4141 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4142 else 4143 error = altq_add(ifp, altq); 4144 4145 if (error) { 4146 PF_RULES_WUNLOCK(); 4147 free(altq, M_PFALTQ); 4148 break; 4149 } 4150 4151 if (altq->qname[0] != 0) 4152 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4153 else 4154 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4155 /* version error check done on import above */ 4156 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4157 PF_RULES_WUNLOCK(); 4158 break; 4159 } 4160 4161 case DIOCGETALTQSV0: 4162 case DIOCGETALTQSV1: { 4163 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4164 struct pf_altq *altq; 4165 4166 PF_RULES_RLOCK(); 4167 pa->nr = 0; 4168 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4169 pa->nr++; 4170 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4171 pa->nr++; 4172 pa->ticket = V_ticket_altqs_active; 4173 PF_RULES_RUNLOCK(); 4174 break; 4175 } 4176 4177 case DIOCGETALTQV0: 4178 
case DIOCGETALTQV1: { 4179 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4180 struct pf_altq *altq; 4181 4182 PF_RULES_RLOCK(); 4183 if (pa->ticket != V_ticket_altqs_active) { 4184 PF_RULES_RUNLOCK(); 4185 error = EBUSY; 4186 break; 4187 } 4188 altq = pf_altq_get_nth_active(pa->nr); 4189 if (altq == NULL) { 4190 PF_RULES_RUNLOCK(); 4191 error = EBUSY; 4192 break; 4193 } 4194 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4195 PF_RULES_RUNLOCK(); 4196 break; 4197 } 4198 4199 case DIOCCHANGEALTQV0: 4200 case DIOCCHANGEALTQV1: 4201 /* CHANGEALTQ not supported yet! */ 4202 error = ENODEV; 4203 break; 4204 4205 case DIOCGETQSTATSV0: 4206 case DIOCGETQSTATSV1: { 4207 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4208 struct pf_altq *altq; 4209 int nbytes; 4210 u_int32_t version; 4211 4212 PF_RULES_RLOCK(); 4213 if (pq->ticket != V_ticket_altqs_active) { 4214 PF_RULES_RUNLOCK(); 4215 error = EBUSY; 4216 break; 4217 } 4218 nbytes = pq->nbytes; 4219 altq = pf_altq_get_nth_active(pq->nr); 4220 if (altq == NULL) { 4221 PF_RULES_RUNLOCK(); 4222 error = EBUSY; 4223 break; 4224 } 4225 4226 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4227 PF_RULES_RUNLOCK(); 4228 error = ENXIO; 4229 break; 4230 } 4231 PF_RULES_RUNLOCK(); 4232 if (cmd == DIOCGETQSTATSV0) 4233 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4234 else 4235 version = pq->version; 4236 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4237 if (error == 0) { 4238 pq->scheduler = altq->scheduler; 4239 pq->nbytes = nbytes; 4240 } 4241 break; 4242 } 4243 #endif /* ALTQ */ 4244 4245 case DIOCBEGINADDRS: { 4246 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4247 4248 PF_RULES_WLOCK(); 4249 pf_empty_kpool(&V_pf_pabuf); 4250 pp->ticket = ++V_ticket_pabuf; 4251 PF_RULES_WUNLOCK(); 4252 break; 4253 } 4254 4255 case DIOCADDADDR: { 4256 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4257 struct pf_kpooladdr *pa; 4258 struct pfi_kkif *kif = NULL; 4259 4260 #ifndef INET 4261 if (pp->af == AF_INET) { 4262 error = EAFNOSUPPORT; 4263 break; 4264 } 4265 #endif /* INET */ 4266 #ifndef INET6 4267 if (pp->af == AF_INET6) { 4268 error = EAFNOSUPPORT; 4269 break; 4270 } 4271 #endif /* INET6 */ 4272 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4273 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4274 pp->addr.addr.type != PF_ADDR_TABLE) { 4275 error = EINVAL; 4276 break; 4277 } 4278 if (pp->addr.addr.p.dyn != NULL) { 4279 error = EINVAL; 4280 break; 4281 } 4282 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4283 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4284 if (error != 0) 4285 break; 4286 if (pa->ifname[0]) 4287 kif = pf_kkif_create(M_WAITOK); 4288 PF_RULES_WLOCK(); 4289 if (pp->ticket != V_ticket_pabuf) { 4290 PF_RULES_WUNLOCK(); 4291 if (pa->ifname[0]) 4292 pf_kkif_free(kif); 4293 free(pa, M_PFRULE); 4294 error = EBUSY; 4295 break; 4296 } 4297 if (pa->ifname[0]) { 4298 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4299 kif = NULL; 4300 pfi_kkif_ref(pa->kif); 4301 } else 4302 pa->kif = NULL; 4303 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4304 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4305 if (pa->ifname[0]) 4306 pfi_kkif_unref(pa->kif); 4307 PF_RULES_WUNLOCK(); 4308 free(pa, M_PFRULE); 4309 break; 4310 } 4311 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4312 PF_RULES_WUNLOCK(); 4313 break; 4314 } 4315 4316 case DIOCGETADDRS: { 4317 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4318 struct pf_kpool *pool; 4319 struct pf_kpooladdr *pa; 4320 4321 
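		/*
		 * A hedged sketch of the expected userspace pattern
		 * (mirroring how pfctl appears to drive these ioctls, not a
		 * documented contract): issue DIOCGETADDRS once to learn the
		 * pool size in pp->nr, then call DIOCGETADDR with
		 * pp->nr = 0..nr-1 under the same ticket to fetch each
		 * individual pool address.
		 */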
pp->anchor[sizeof(pp->anchor) - 1] = 0; 4322 pp->nr = 0; 4323 4324 PF_RULES_RLOCK(); 4325 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4326 pp->r_num, 0, 1, 0); 4327 if (pool == NULL) { 4328 PF_RULES_RUNLOCK(); 4329 error = EBUSY; 4330 break; 4331 } 4332 TAILQ_FOREACH(pa, &pool->list, entries) 4333 pp->nr++; 4334 PF_RULES_RUNLOCK(); 4335 break; 4336 } 4337 4338 case DIOCGETADDR: { 4339 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4340 struct pf_kpool *pool; 4341 struct pf_kpooladdr *pa; 4342 u_int32_t nr = 0; 4343 4344 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4345 4346 PF_RULES_RLOCK(); 4347 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4348 pp->r_num, 0, 1, 1); 4349 if (pool == NULL) { 4350 PF_RULES_RUNLOCK(); 4351 error = EBUSY; 4352 break; 4353 } 4354 pa = TAILQ_FIRST(&pool->list); 4355 while ((pa != NULL) && (nr < pp->nr)) { 4356 pa = TAILQ_NEXT(pa, entries); 4357 nr++; 4358 } 4359 if (pa == NULL) { 4360 PF_RULES_RUNLOCK(); 4361 error = EBUSY; 4362 break; 4363 } 4364 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4365 pf_addr_copyout(&pp->addr.addr); 4366 PF_RULES_RUNLOCK(); 4367 break; 4368 } 4369 4370 case DIOCCHANGEADDR: { 4371 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4372 struct pf_kpool *pool; 4373 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4374 struct pf_kruleset *ruleset; 4375 struct pfi_kkif *kif = NULL; 4376 4377 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4378 4379 if (pca->action < PF_CHANGE_ADD_HEAD || 4380 pca->action > PF_CHANGE_REMOVE) { 4381 error = EINVAL; 4382 break; 4383 } 4384 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4385 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4386 pca->addr.addr.type != PF_ADDR_TABLE) { 4387 error = EINVAL; 4388 break; 4389 } 4390 if (pca->addr.addr.p.dyn != NULL) { 4391 error = EINVAL; 4392 break; 4393 } 4394 4395 if (pca->action != PF_CHANGE_REMOVE) { 4396 #ifndef INET 4397 if (pca->af == AF_INET) { 4398 error = EAFNOSUPPORT; 4399 break; 4400 } 4401 #endif /* INET */ 4402 #ifndef INET6 4403 if (pca->af == AF_INET6) { 4404 error = EAFNOSUPPORT; 4405 break; 4406 } 4407 #endif /* INET6 */ 4408 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4409 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4410 if (newpa->ifname[0]) 4411 kif = pf_kkif_create(M_WAITOK); 4412 newpa->kif = NULL; 4413 } 4414 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4415 PF_RULES_WLOCK(); 4416 ruleset = pf_find_kruleset(pca->anchor); 4417 if (ruleset == NULL) 4418 ERROUT(EBUSY); 4419 4420 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4421 pca->r_num, pca->r_last, 1, 1); 4422 if (pool == NULL) 4423 ERROUT(EBUSY); 4424 4425 if (pca->action != PF_CHANGE_REMOVE) { 4426 if (newpa->ifname[0]) { 4427 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4428 pfi_kkif_ref(newpa->kif); 4429 kif = NULL; 4430 } 4431 4432 switch (newpa->addr.type) { 4433 case PF_ADDR_DYNIFTL: 4434 error = pfi_dynaddr_setup(&newpa->addr, 4435 pca->af); 4436 break; 4437 case PF_ADDR_TABLE: 4438 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4439 newpa->addr.v.tblname); 4440 if (newpa->addr.p.tbl == NULL) 4441 error = ENOMEM; 4442 break; 4443 } 4444 if (error) 4445 goto DIOCCHANGEADDR_error; 4446 } 4447 4448 switch (pca->action) { 4449 case PF_CHANGE_ADD_HEAD: 4450 oldpa = TAILQ_FIRST(&pool->list); 4451 break; 4452 case PF_CHANGE_ADD_TAIL: 4453 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4454 break; 4455 default: 4456 oldpa = TAILQ_FIRST(&pool->list); 4457 for (int i = 0; oldpa && i < pca->nr; i++) 4458 oldpa = 
TAILQ_NEXT(oldpa, entries); 4459 4460 if (oldpa == NULL) 4461 ERROUT(EINVAL); 4462 } 4463 4464 if (pca->action == PF_CHANGE_REMOVE) { 4465 TAILQ_REMOVE(&pool->list, oldpa, entries); 4466 switch (oldpa->addr.type) { 4467 case PF_ADDR_DYNIFTL: 4468 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4469 break; 4470 case PF_ADDR_TABLE: 4471 pfr_detach_table(oldpa->addr.p.tbl); 4472 break; 4473 } 4474 if (oldpa->kif) 4475 pfi_kkif_unref(oldpa->kif); 4476 free(oldpa, M_PFRULE); 4477 } else { 4478 if (oldpa == NULL) 4479 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4480 else if (pca->action == PF_CHANGE_ADD_HEAD || 4481 pca->action == PF_CHANGE_ADD_BEFORE) 4482 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4483 else 4484 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4485 newpa, entries); 4486 } 4487 4488 pool->cur = TAILQ_FIRST(&pool->list); 4489 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4490 PF_RULES_WUNLOCK(); 4491 break; 4492 4493 #undef ERROUT 4494 DIOCCHANGEADDR_error: 4495 if (newpa != NULL) { 4496 if (newpa->kif) 4497 pfi_kkif_unref(newpa->kif); 4498 free(newpa, M_PFRULE); 4499 } 4500 PF_RULES_WUNLOCK(); 4501 pf_kkif_free(kif); 4502 break; 4503 } 4504 4505 case DIOCGETRULESETS: { 4506 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4507 struct pf_kruleset *ruleset; 4508 struct pf_kanchor *anchor; 4509 4510 pr->path[sizeof(pr->path) - 1] = 0; 4511 4512 PF_RULES_RLOCK(); 4513 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4514 PF_RULES_RUNLOCK(); 4515 error = ENOENT; 4516 break; 4517 } 4518 pr->nr = 0; 4519 if (ruleset->anchor == NULL) { 4520 /* XXX kludge for pf_main_ruleset */ 4521 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4522 if (anchor->parent == NULL) 4523 pr->nr++; 4524 } else { 4525 RB_FOREACH(anchor, pf_kanchor_node, 4526 &ruleset->anchor->children) 4527 pr->nr++; 4528 } 4529 PF_RULES_RUNLOCK(); 4530 break; 4531 } 4532 4533 case DIOCGETRULESET: { 4534 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4535 struct pf_kruleset *ruleset; 4536 struct pf_kanchor *anchor; 4537 u_int32_t nr = 0; 4538 4539 pr->path[sizeof(pr->path) - 1] = 0; 4540 4541 PF_RULES_RLOCK(); 4542 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4543 PF_RULES_RUNLOCK(); 4544 error = ENOENT; 4545 break; 4546 } 4547 pr->name[0] = 0; 4548 if (ruleset->anchor == NULL) { 4549 /* XXX kludge for pf_main_ruleset */ 4550 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4551 if (anchor->parent == NULL && nr++ == pr->nr) { 4552 strlcpy(pr->name, anchor->name, 4553 sizeof(pr->name)); 4554 break; 4555 } 4556 } else { 4557 RB_FOREACH(anchor, pf_kanchor_node, 4558 &ruleset->anchor->children) 4559 if (nr++ == pr->nr) { 4560 strlcpy(pr->name, anchor->name, 4561 sizeof(pr->name)); 4562 break; 4563 } 4564 } 4565 if (!pr->name[0]) 4566 error = EBUSY; 4567 PF_RULES_RUNLOCK(); 4568 break; 4569 } 4570 4571 case DIOCRCLRTABLES: { 4572 struct pfioc_table *io = (struct pfioc_table *)addr; 4573 4574 if (io->pfrio_esize != 0) { 4575 error = ENODEV; 4576 break; 4577 } 4578 PF_RULES_WLOCK(); 4579 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4580 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4581 PF_RULES_WUNLOCK(); 4582 break; 4583 } 4584 4585 case DIOCRADDTABLES: { 4586 struct pfioc_table *io = (struct pfioc_table *)addr; 4587 struct pfr_table *pfrts; 4588 size_t totlen; 4589 4590 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4591 error = ENODEV; 4592 break; 4593 } 4594 4595 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4596 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct 
pfr_table))) { 4597 error = ENOMEM; 4598 break; 4599 } 4600 4601 totlen = io->pfrio_size * sizeof(struct pfr_table); 4602 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4603 M_TEMP, M_WAITOK); 4604 error = copyin(io->pfrio_buffer, pfrts, totlen); 4605 if (error) { 4606 free(pfrts, M_TEMP); 4607 break; 4608 } 4609 PF_RULES_WLOCK(); 4610 error = pfr_add_tables(pfrts, io->pfrio_size, 4611 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4612 PF_RULES_WUNLOCK(); 4613 free(pfrts, M_TEMP); 4614 break; 4615 } 4616 4617 case DIOCRDELTABLES: { 4618 struct pfioc_table *io = (struct pfioc_table *)addr; 4619 struct pfr_table *pfrts; 4620 size_t totlen; 4621 4622 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4623 error = ENODEV; 4624 break; 4625 } 4626 4627 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4628 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4629 error = ENOMEM; 4630 break; 4631 } 4632 4633 totlen = io->pfrio_size * sizeof(struct pfr_table); 4634 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4635 M_TEMP, M_WAITOK); 4636 error = copyin(io->pfrio_buffer, pfrts, totlen); 4637 if (error) { 4638 free(pfrts, M_TEMP); 4639 break; 4640 } 4641 PF_RULES_WLOCK(); 4642 error = pfr_del_tables(pfrts, io->pfrio_size, 4643 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4644 PF_RULES_WUNLOCK(); 4645 free(pfrts, M_TEMP); 4646 break; 4647 } 4648 4649 case DIOCRGETTABLES: { 4650 struct pfioc_table *io = (struct pfioc_table *)addr; 4651 struct pfr_table *pfrts; 4652 size_t totlen; 4653 int n; 4654 4655 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4656 error = ENODEV; 4657 break; 4658 } 4659 PF_RULES_RLOCK(); 4660 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4661 if (n < 0) { 4662 PF_RULES_RUNLOCK(); 4663 error = EINVAL; 4664 break; 4665 } 4666 io->pfrio_size = min(io->pfrio_size, n); 4667 4668 totlen = io->pfrio_size * sizeof(struct pfr_table); 4669 4670 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4671 M_TEMP, M_NOWAIT | M_ZERO); 4672 if (pfrts == NULL) { 4673 error = ENOMEM; 4674 PF_RULES_RUNLOCK(); 4675 break; 4676 } 4677 error = pfr_get_tables(&io->pfrio_table, pfrts, 4678 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4679 PF_RULES_RUNLOCK(); 4680 if (error == 0) 4681 error = copyout(pfrts, io->pfrio_buffer, totlen); 4682 free(pfrts, M_TEMP); 4683 break; 4684 } 4685 4686 case DIOCRGETTSTATS: { 4687 struct pfioc_table *io = (struct pfioc_table *)addr; 4688 struct pfr_tstats *pfrtstats; 4689 size_t totlen; 4690 int n; 4691 4692 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4693 error = ENODEV; 4694 break; 4695 } 4696 PF_TABLE_STATS_LOCK(); 4697 PF_RULES_RLOCK(); 4698 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4699 if (n < 0) { 4700 PF_RULES_RUNLOCK(); 4701 PF_TABLE_STATS_UNLOCK(); 4702 error = EINVAL; 4703 break; 4704 } 4705 io->pfrio_size = min(io->pfrio_size, n); 4706 4707 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4708 pfrtstats = mallocarray(io->pfrio_size, 4709 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4710 if (pfrtstats == NULL) { 4711 error = ENOMEM; 4712 PF_RULES_RUNLOCK(); 4713 PF_TABLE_STATS_UNLOCK(); 4714 break; 4715 } 4716 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4717 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4718 PF_RULES_RUNLOCK(); 4719 PF_TABLE_STATS_UNLOCK(); 4720 if (error == 0) 4721 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4722 free(pfrtstats, M_TEMP); 4723 break; 4724 } 4725 4726 case 
DIOCRCLRTSTATS: { 4727 struct pfioc_table *io = (struct pfioc_table *)addr; 4728 struct pfr_table *pfrts; 4729 size_t totlen; 4730 4731 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4732 error = ENODEV; 4733 break; 4734 } 4735 4736 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4737 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4738 /* We used to count tables and use the minimum required 4739 * size, so we didn't fail on overly large requests. 4740 * Keep doing so. */ 4741 io->pfrio_size = pf_ioctl_maxcount; 4742 break; 4743 } 4744 4745 totlen = io->pfrio_size * sizeof(struct pfr_table); 4746 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4747 M_TEMP, M_WAITOK); 4748 error = copyin(io->pfrio_buffer, pfrts, totlen); 4749 if (error) { 4750 free(pfrts, M_TEMP); 4751 break; 4752 } 4753 4754 PF_TABLE_STATS_LOCK(); 4755 PF_RULES_RLOCK(); 4756 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4757 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4758 PF_RULES_RUNLOCK(); 4759 PF_TABLE_STATS_UNLOCK(); 4760 free(pfrts, M_TEMP); 4761 break; 4762 } 4763 4764 case DIOCRSETTFLAGS: { 4765 struct pfioc_table *io = (struct pfioc_table *)addr; 4766 struct pfr_table *pfrts; 4767 size_t totlen; 4768 int n; 4769 4770 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4771 error = ENODEV; 4772 break; 4773 } 4774 4775 PF_RULES_RLOCK(); 4776 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4777 if (n < 0) { 4778 PF_RULES_RUNLOCK(); 4779 error = EINVAL; 4780 break; 4781 } 4782 4783 io->pfrio_size = min(io->pfrio_size, n); 4784 PF_RULES_RUNLOCK(); 4785 4786 totlen = io->pfrio_size * sizeof(struct pfr_table); 4787 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4788 M_TEMP, M_WAITOK); 4789 error = copyin(io->pfrio_buffer, pfrts, totlen); 4790 if (error) { 4791 free(pfrts, M_TEMP); 4792 break; 4793 } 4794 PF_RULES_WLOCK(); 4795 error = pfr_set_tflags(pfrts, io->pfrio_size, 4796 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4797 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4798 PF_RULES_WUNLOCK(); 4799 free(pfrts, M_TEMP); 4800 break; 4801 } 4802 4803 case DIOCRCLRADDRS: { 4804 struct pfioc_table *io = (struct pfioc_table *)addr; 4805 4806 if (io->pfrio_esize != 0) { 4807 error = ENODEV; 4808 break; 4809 } 4810 PF_RULES_WLOCK(); 4811 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4812 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4813 PF_RULES_WUNLOCK(); 4814 break; 4815 } 4816 4817 case DIOCRADDADDRS: { 4818 struct pfioc_table *io = (struct pfioc_table *)addr; 4819 struct pfr_addr *pfras; 4820 size_t totlen; 4821 4822 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4823 error = ENODEV; 4824 break; 4825 } 4826 if (io->pfrio_size < 0 || 4827 io->pfrio_size > pf_ioctl_maxcount || 4828 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4829 error = EINVAL; 4830 break; 4831 } 4832 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4833 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4834 M_TEMP, M_WAITOK); 4835 error = copyin(io->pfrio_buffer, pfras, totlen); 4836 if (error) { 4837 free(pfras, M_TEMP); 4838 break; 4839 } 4840 PF_RULES_WLOCK(); 4841 error = pfr_add_addrs(&io->pfrio_table, pfras, 4842 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4843 PFR_FLAG_USERIOCTL); 4844 PF_RULES_WUNLOCK(); 4845 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4846 error = copyout(pfras, io->pfrio_buffer, totlen); 4847 free(pfras, M_TEMP); 4848 break; 4849 } 4850 4851 case DIOCRDELADDRS: { 4852 struct 
pfioc_table *io = (struct pfioc_table *)addr; 4853 struct pfr_addr *pfras; 4854 size_t totlen; 4855 4856 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4857 error = ENODEV; 4858 break; 4859 } 4860 if (io->pfrio_size < 0 || 4861 io->pfrio_size > pf_ioctl_maxcount || 4862 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4863 error = EINVAL; 4864 break; 4865 } 4866 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4867 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4868 M_TEMP, M_WAITOK); 4869 error = copyin(io->pfrio_buffer, pfras, totlen); 4870 if (error) { 4871 free(pfras, M_TEMP); 4872 break; 4873 } 4874 PF_RULES_WLOCK(); 4875 error = pfr_del_addrs(&io->pfrio_table, pfras, 4876 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4877 PFR_FLAG_USERIOCTL); 4878 PF_RULES_WUNLOCK(); 4879 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4880 error = copyout(pfras, io->pfrio_buffer, totlen); 4881 free(pfras, M_TEMP); 4882 break; 4883 } 4884 4885 case DIOCRSETADDRS: { 4886 struct pfioc_table *io = (struct pfioc_table *)addr; 4887 struct pfr_addr *pfras; 4888 size_t totlen, count; 4889 4890 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4891 error = ENODEV; 4892 break; 4893 } 4894 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4895 error = EINVAL; 4896 break; 4897 } 4898 count = max(io->pfrio_size, io->pfrio_size2); 4899 if (count > pf_ioctl_maxcount || 4900 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4901 error = EINVAL; 4902 break; 4903 } 4904 totlen = count * sizeof(struct pfr_addr); 4905 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4906 M_WAITOK); 4907 error = copyin(io->pfrio_buffer, pfras, totlen); 4908 if (error) { 4909 free(pfras, M_TEMP); 4910 break; 4911 } 4912 PF_RULES_WLOCK(); 4913 error = pfr_set_addrs(&io->pfrio_table, pfras, 4914 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4915 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4916 PFR_FLAG_USERIOCTL, 0); 4917 PF_RULES_WUNLOCK(); 4918 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4919 error = copyout(pfras, io->pfrio_buffer, totlen); 4920 free(pfras, M_TEMP); 4921 break; 4922 } 4923 4924 case DIOCRGETADDRS: { 4925 struct pfioc_table *io = (struct pfioc_table *)addr; 4926 struct pfr_addr *pfras; 4927 size_t totlen; 4928 4929 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4930 error = ENODEV; 4931 break; 4932 } 4933 if (io->pfrio_size < 0 || 4934 io->pfrio_size > pf_ioctl_maxcount || 4935 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4936 error = EINVAL; 4937 break; 4938 } 4939 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4940 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4941 M_TEMP, M_WAITOK | M_ZERO); 4942 PF_RULES_RLOCK(); 4943 error = pfr_get_addrs(&io->pfrio_table, pfras, 4944 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4945 PF_RULES_RUNLOCK(); 4946 if (error == 0) 4947 error = copyout(pfras, io->pfrio_buffer, totlen); 4948 free(pfras, M_TEMP); 4949 break; 4950 } 4951 4952 case DIOCRGETASTATS: { 4953 struct pfioc_table *io = (struct pfioc_table *)addr; 4954 struct pfr_astats *pfrastats; 4955 size_t totlen; 4956 4957 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 4958 error = ENODEV; 4959 break; 4960 } 4961 if (io->pfrio_size < 0 || 4962 io->pfrio_size > pf_ioctl_maxcount || 4963 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 4964 error = EINVAL; 4965 break; 4966 } 4967 totlen = io->pfrio_size * sizeof(struct pfr_astats); 4968 pfrastats = mallocarray(io->pfrio_size, 4969 
sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 4970 PF_RULES_RLOCK(); 4971 error = pfr_get_astats(&io->pfrio_table, pfrastats, 4972 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4973 PF_RULES_RUNLOCK(); 4974 if (error == 0) 4975 error = copyout(pfrastats, io->pfrio_buffer, totlen); 4976 free(pfrastats, M_TEMP); 4977 break; 4978 } 4979 4980 case DIOCRCLRASTATS: { 4981 struct pfioc_table *io = (struct pfioc_table *)addr; 4982 struct pfr_addr *pfras; 4983 size_t totlen; 4984 4985 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4986 error = ENODEV; 4987 break; 4988 } 4989 if (io->pfrio_size < 0 || 4990 io->pfrio_size > pf_ioctl_maxcount || 4991 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4992 error = EINVAL; 4993 break; 4994 } 4995 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4996 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4997 M_TEMP, M_WAITOK); 4998 error = copyin(io->pfrio_buffer, pfras, totlen); 4999 if (error) { 5000 free(pfras, M_TEMP); 5001 break; 5002 } 5003 PF_RULES_WLOCK(); 5004 error = pfr_clr_astats(&io->pfrio_table, pfras, 5005 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 5006 PFR_FLAG_USERIOCTL); 5007 PF_RULES_WUNLOCK(); 5008 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 5009 error = copyout(pfras, io->pfrio_buffer, totlen); 5010 free(pfras, M_TEMP); 5011 break; 5012 } 5013 5014 case DIOCRTSTADDRS: { 5015 struct pfioc_table *io = (struct pfioc_table *)addr; 5016 struct pfr_addr *pfras; 5017 size_t totlen; 5018 5019 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5020 error = ENODEV; 5021 break; 5022 } 5023 if (io->pfrio_size < 0 || 5024 io->pfrio_size > pf_ioctl_maxcount || 5025 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5026 error = EINVAL; 5027 break; 5028 } 5029 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5030 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5031 M_TEMP, M_WAITOK); 5032 error = copyin(io->pfrio_buffer, pfras, totlen); 5033 if (error) { 5034 free(pfras, M_TEMP); 5035 break; 5036 } 5037 PF_RULES_RLOCK(); 5038 error = pfr_tst_addrs(&io->pfrio_table, pfras, 5039 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 5040 PFR_FLAG_USERIOCTL); 5041 PF_RULES_RUNLOCK(); 5042 if (error == 0) 5043 error = copyout(pfras, io->pfrio_buffer, totlen); 5044 free(pfras, M_TEMP); 5045 break; 5046 } 5047 5048 case DIOCRINADEFINE: { 5049 struct pfioc_table *io = (struct pfioc_table *)addr; 5050 struct pfr_addr *pfras; 5051 size_t totlen; 5052 5053 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5054 error = ENODEV; 5055 break; 5056 } 5057 if (io->pfrio_size < 0 || 5058 io->pfrio_size > pf_ioctl_maxcount || 5059 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5060 error = EINVAL; 5061 break; 5062 } 5063 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5064 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5065 M_TEMP, M_WAITOK); 5066 error = copyin(io->pfrio_buffer, pfras, totlen); 5067 if (error) { 5068 free(pfras, M_TEMP); 5069 break; 5070 } 5071 PF_RULES_WLOCK(); 5072 error = pfr_ina_define(&io->pfrio_table, pfras, 5073 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 5074 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5075 PF_RULES_WUNLOCK(); 5076 free(pfras, M_TEMP); 5077 break; 5078 } 5079 5080 case DIOCOSFPADD: { 5081 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5082 PF_RULES_WLOCK(); 5083 error = pf_osfp_add(io); 5084 PF_RULES_WUNLOCK(); 5085 break; 5086 } 5087 5088 case DIOCOSFPGET: { 5089 struct pf_osfp_ioctl *io 
= (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioes, *ioe;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		/* Ensure there are no more Ethernet rules to clean up. */
		epoch_drain_callbacks(net_epoch_preempt);
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_rollback_eth(ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_rollback(&table,
				    ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe, *ioes;
		struct pf_kruleset *rs;
		struct pf_keth_ruleset *ers;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}

		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}

		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		/* First make sure everything will succeed. */
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				ers = pf_find_keth_ruleset(ioe->anchor);
				if (ers == NULL || ioe->ticket == 0 ||
				    ioe->ticket != ers->inactive.ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!V_altqs_inactive_open || ioe->ticket !=
				    V_ticket_altqs_inactive) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_kruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Now do the commit - no errors should happen here. */
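		/*
		 * Every ticket was validated by the pass above while the
		 * rules write lock was held throughout, so the operations
		 * below are not expected to fail; if one does, the rulesets
		 * may be left half-committed, which is why the error paths
		 * are marked "really bad".
		 */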
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();

		/* Only hook into Ethernet traffic if we've got rules for it. */
		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		else
			dehook_pf_eth();

		free(ioes, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash *sh;
		struct pf_ksrc_node *n;
		struct pf_src_node *p, *pstore;
		uint32_t i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {

				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

#ifdef COMPAT_FREEBSD13
	case DIOCKEEPCOUNTERS_FREEBSD13:
#endif
	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCGETSYNCOOKIES:
		error = pf_get_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETSYNCOOKIES:
		error = pf_set_syncookies((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
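		/*
		 * The flush runs with the writer lock held, so a concurrent
		 * DIOCOSFPGET (which only takes the reader lock) should
		 * never observe a partially emptied fingerprint list.
		 */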
PF_RULES_WUNLOCK(); 5500 break; 5501 5502 case DIOCIGETIFACES: { 5503 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5504 struct pfi_kif *ifstore; 5505 size_t bufsiz; 5506 5507 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5508 error = ENODEV; 5509 break; 5510 } 5511 5512 if (io->pfiio_size < 0 || 5513 io->pfiio_size > pf_ioctl_maxcount || 5514 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5515 error = EINVAL; 5516 break; 5517 } 5518 5519 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5520 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5521 M_TEMP, M_WAITOK | M_ZERO); 5522 5523 PF_RULES_RLOCK(); 5524 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5525 PF_RULES_RUNLOCK(); 5526 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5527 free(ifstore, M_TEMP); 5528 break; 5529 } 5530 5531 case DIOCSETIFFLAG: { 5532 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5533 5534 PF_RULES_WLOCK(); 5535 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5536 PF_RULES_WUNLOCK(); 5537 break; 5538 } 5539 5540 case DIOCCLRIFFLAG: { 5541 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5542 5543 PF_RULES_WLOCK(); 5544 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5545 PF_RULES_WUNLOCK(); 5546 break; 5547 } 5548 5549 default: 5550 error = ENODEV; 5551 break; 5552 } 5553 fail: 5554 if (sx_xlocked(&pf_ioctl_lock)) 5555 sx_xunlock(&pf_ioctl_lock); 5556 CURVNET_RESTORE(); 5557 5558 #undef ERROUT_IOCTL 5559 5560 return (error); 5561 } 5562 5563 void 5564 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st) 5565 { 5566 bzero(sp, sizeof(struct pfsync_state)); 5567 5568 /* copy from state key */ 5569 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5570 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5571 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5572 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5573 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5574 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5575 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5576 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5577 sp->proto = st->key[PF_SK_WIRE]->proto; 5578 sp->af = st->key[PF_SK_WIRE]->af; 5579 5580 /* copy from state */ 5581 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5582 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5583 sp->creation = htonl(time_uptime - st->creation); 5584 sp->expire = pf_state_expires(st); 5585 if (sp->expire <= time_uptime) 5586 sp->expire = htonl(0); 5587 else 5588 sp->expire = htonl(sp->expire - time_uptime); 5589 5590 sp->direction = st->direction; 5591 sp->log = st->log; 5592 sp->timeout = st->timeout; 5593 sp->state_flags = st->state_flags; 5594 if (st->src_node) 5595 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5596 if (st->nat_src_node) 5597 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5598 5599 sp->id = st->id; 5600 sp->creatorid = st->creatorid; 5601 pf_state_peer_hton(&st->src, &sp->src); 5602 pf_state_peer_hton(&st->dst, &sp->dst); 5603 5604 if (st->rule.ptr == NULL) 5605 sp->rule = htonl(-1); 5606 else 5607 sp->rule = htonl(st->rule.ptr->nr); 5608 if (st->anchor.ptr == NULL) 5609 sp->anchor = htonl(-1); 5610 else 5611 sp->anchor = htonl(st->anchor.ptr->nr); 5612 if (st->nat_rule.ptr == NULL) 5613 sp->nat_rule = htonl(-1); 5614 else 5615 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5616 5617 pf_state_counter_hton(st->packets[0], sp->packets[0]); 5618 
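	/*
	 * The 64-bit packet and byte counters are exported in network byte
	 * order, presumably so that pfsync peers of either endianness
	 * decode them consistently.
	 */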
pf_state_counter_hton(st->packets[1], sp->packets[1]); 5619 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 5620 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 5621 } 5622 5623 void 5624 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5625 { 5626 bzero(sp, sizeof(*sp)); 5627 5628 sp->version = PF_STATE_VERSION; 5629 5630 /* copy from state key */ 5631 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5632 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5633 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5634 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5635 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5636 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5637 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5638 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5639 sp->proto = st->key[PF_SK_WIRE]->proto; 5640 sp->af = st->key[PF_SK_WIRE]->af; 5641 5642 /* copy from state */ 5643 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5644 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5645 sizeof(sp->orig_ifname)); 5646 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5647 sp->creation = htonl(time_uptime - st->creation); 5648 sp->expire = pf_state_expires(st); 5649 if (sp->expire <= time_uptime) 5650 sp->expire = htonl(0); 5651 else 5652 sp->expire = htonl(sp->expire - time_uptime); 5653 5654 sp->direction = st->direction; 5655 sp->log = st->log; 5656 sp->timeout = st->timeout; 5657 sp->state_flags = st->state_flags; 5658 if (st->src_node) 5659 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5660 if (st->nat_src_node) 5661 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5662 5663 sp->id = st->id; 5664 sp->creatorid = st->creatorid; 5665 pf_state_peer_hton(&st->src, &sp->src); 5666 pf_state_peer_hton(&st->dst, &sp->dst); 5667 5668 if (st->rule.ptr == NULL) 5669 sp->rule = htonl(-1); 5670 else 5671 sp->rule = htonl(st->rule.ptr->nr); 5672 if (st->anchor.ptr == NULL) 5673 sp->anchor = htonl(-1); 5674 else 5675 sp->anchor = htonl(st->anchor.ptr->nr); 5676 if (st->nat_rule.ptr == NULL) 5677 sp->nat_rule = htonl(-1); 5678 else 5679 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5680 5681 sp->packets[0] = st->packets[0]; 5682 sp->packets[1] = st->packets[1]; 5683 sp->bytes[0] = st->bytes[0]; 5684 sp->bytes[1] = st->bytes[1]; 5685 } 5686 5687 static void 5688 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5689 { 5690 struct pfr_ktable *kt; 5691 5692 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5693 5694 kt = aw->p.tbl; 5695 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5696 kt = kt->pfrkt_root; 5697 aw->p.tbl = NULL; 5698 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 
	    kt->pfrkt_cnt : -1;
}

static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t *nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names",
		    names[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}

static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvc = NULL;
	void *nvlpacked = NULL;
	int error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	PF_RULES_RLOCK_TRACKER;

#define ERROUT(x) ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", V_pf_status.since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/* fcounters */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}

/*
 * XXX - Check for version mismatch!!!
5832 */ 5833 static void 5834 pf_clear_all_states(void) 5835 { 5836 struct pf_kstate *s; 5837 u_int i; 5838 5839 for (i = 0; i <= pf_hashmask; i++) { 5840 struct pf_idhash *ih = &V_pf_idhash[i]; 5841 relock: 5842 PF_HASHROW_LOCK(ih); 5843 LIST_FOREACH(s, &ih->states, entry) { 5844 s->timeout = PFTM_PURGE; 5845 /* Don't send out individual delete messages. */ 5846 s->state_flags |= PFSTATE_NOSYNC; 5847 pf_unlink_state(s); 5848 goto relock; 5849 } 5850 PF_HASHROW_UNLOCK(ih); 5851 } 5852 } 5853 5854 static int 5855 pf_clear_tables(void) 5856 { 5857 struct pfioc_table io; 5858 int error; 5859 5860 bzero(&io, sizeof(io)); 5861 5862 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5863 io.pfrio_flags); 5864 5865 return (error); 5866 } 5867 5868 static void 5869 pf_clear_srcnodes(struct pf_ksrc_node *n) 5870 { 5871 struct pf_kstate *s; 5872 int i; 5873 5874 for (i = 0; i <= pf_hashmask; i++) { 5875 struct pf_idhash *ih = &V_pf_idhash[i]; 5876 5877 PF_HASHROW_LOCK(ih); 5878 LIST_FOREACH(s, &ih->states, entry) { 5879 if (n == NULL || n == s->src_node) 5880 s->src_node = NULL; 5881 if (n == NULL || n == s->nat_src_node) 5882 s->nat_src_node = NULL; 5883 } 5884 PF_HASHROW_UNLOCK(ih); 5885 } 5886 5887 if (n == NULL) { 5888 struct pf_srchash *sh; 5889 5890 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5891 i++, sh++) { 5892 PF_HASHROW_LOCK(sh); 5893 LIST_FOREACH(n, &sh->nodes, entry) { 5894 n->expire = 1; 5895 n->states = 0; 5896 } 5897 PF_HASHROW_UNLOCK(sh); 5898 } 5899 } else { 5900 /* XXX: hash slot should already be locked here. */ 5901 n->expire = 1; 5902 n->states = 0; 5903 } 5904 } 5905 5906 static void 5907 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5908 { 5909 struct pf_ksrc_node_list kill; 5910 5911 LIST_INIT(&kill); 5912 for (int i = 0; i <= pf_srchashmask; i++) { 5913 struct pf_srchash *sh = &V_pf_srchash[i]; 5914 struct pf_ksrc_node *sn, *tmp; 5915 5916 PF_HASHROW_LOCK(sh); 5917 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5918 if (PF_MATCHA(psnk->psnk_src.neg, 5919 &psnk->psnk_src.addr.v.a.addr, 5920 &psnk->psnk_src.addr.v.a.mask, 5921 &sn->addr, sn->af) && 5922 PF_MATCHA(psnk->psnk_dst.neg, 5923 &psnk->psnk_dst.addr.v.a.addr, 5924 &psnk->psnk_dst.addr.v.a.mask, 5925 &sn->raddr, sn->af)) { 5926 pf_unlink_src_node(sn); 5927 LIST_INSERT_HEAD(&kill, sn, entry); 5928 sn->expire = 1; 5929 } 5930 PF_HASHROW_UNLOCK(sh); 5931 } 5932 5933 for (int i = 0; i <= pf_hashmask; i++) { 5934 struct pf_idhash *ih = &V_pf_idhash[i]; 5935 struct pf_kstate *s; 5936 5937 PF_HASHROW_LOCK(ih); 5938 LIST_FOREACH(s, &ih->states, entry) { 5939 if (s->src_node && s->src_node->expire == 1) 5940 s->src_node = NULL; 5941 if (s->nat_src_node && s->nat_src_node->expire == 1) 5942 s->nat_src_node = NULL; 5943 } 5944 PF_HASHROW_UNLOCK(ih); 5945 } 5946 5947 psnk->psnk_killed = pf_free_src_nodes(&kill); 5948 } 5949 5950 static int 5951 pf_keepcounters(struct pfioc_nv *nv) 5952 { 5953 nvlist_t *nvl = NULL; 5954 void *nvlpacked = NULL; 5955 int error = 0; 5956 5957 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5958 5959 if (nv->len > pf_ioctl_maxcount) 5960 ERROUT(ENOMEM); 5961 5962 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 5963 if (nvlpacked == NULL) 5964 ERROUT(ENOMEM); 5965 5966 error = copyin(nv->data, nvlpacked, nv->len); 5967 if (error) 5968 ERROUT(error); 5969 5970 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5971 if (nvl == NULL) 5972 ERROUT(EBADMSG); 5973 5974 if (! 
nvlist_exists_bool(nvl, "keep_counters")) 5975 ERROUT(EBADMSG); 5976 5977 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 5978 5979 on_error: 5980 nvlist_destroy(nvl); 5981 free(nvlpacked, M_TEMP); 5982 return (error); 5983 } 5984 5985 static unsigned int 5986 pf_clear_states(const struct pf_kstate_kill *kill) 5987 { 5988 struct pf_state_key_cmp match_key; 5989 struct pf_kstate *s; 5990 struct pfi_kkif *kif; 5991 int idx; 5992 unsigned int killed = 0, dir; 5993 5994 for (unsigned int i = 0; i <= pf_hashmask; i++) { 5995 struct pf_idhash *ih = &V_pf_idhash[i]; 5996 5997 relock_DIOCCLRSTATES: 5998 PF_HASHROW_LOCK(ih); 5999 LIST_FOREACH(s, &ih->states, entry) { 6000 /* For floating states look at the original kif. */ 6001 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 6002 6003 if (kill->psk_ifname[0] && 6004 strcmp(kill->psk_ifname, 6005 kif->pfik_name)) 6006 continue; 6007 6008 if (kill->psk_kill_match) { 6009 bzero(&match_key, sizeof(match_key)); 6010 6011 if (s->direction == PF_OUT) { 6012 dir = PF_IN; 6013 idx = PF_SK_STACK; 6014 } else { 6015 dir = PF_OUT; 6016 idx = PF_SK_WIRE; 6017 } 6018 6019 match_key.af = s->key[idx]->af; 6020 match_key.proto = s->key[idx]->proto; 6021 PF_ACPY(&match_key.addr[0], 6022 &s->key[idx]->addr[1], match_key.af); 6023 match_key.port[0] = s->key[idx]->port[1]; 6024 PF_ACPY(&match_key.addr[1], 6025 &s->key[idx]->addr[0], match_key.af); 6026 match_key.port[1] = s->key[idx]->port[0]; 6027 } 6028 6029 /* 6030 * Don't send out individual 6031 * delete messages. 6032 */ 6033 s->state_flags |= PFSTATE_NOSYNC; 6034 pf_unlink_state(s); 6035 killed++; 6036 6037 if (kill->psk_kill_match) 6038 killed += pf_kill_matching_state(&match_key, 6039 dir); 6040 6041 goto relock_DIOCCLRSTATES; 6042 } 6043 PF_HASHROW_UNLOCK(ih); 6044 } 6045 6046 if (V_pfsync_clear_states_ptr != NULL) 6047 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6048 6049 return (killed); 6050 } 6051 6052 static void 6053 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6054 { 6055 struct pf_kstate *s; 6056 6057 if (kill->psk_pfcmp.id) { 6058 if (kill->psk_pfcmp.creatorid == 0) 6059 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6060 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6061 kill->psk_pfcmp.creatorid))) { 6062 pf_unlink_state(s); 6063 *killed = 1; 6064 } 6065 return; 6066 } 6067 6068 for (unsigned int i = 0; i <= pf_hashmask; i++) 6069 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6070 6071 return; 6072 } 6073 6074 static int 6075 pf_killstates_nv(struct pfioc_nv *nv) 6076 { 6077 struct pf_kstate_kill kill; 6078 nvlist_t *nvl = NULL; 6079 void *nvlpacked = NULL; 6080 int error = 0; 6081 unsigned int killed = 0; 6082 6083 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6084 6085 if (nv->len > pf_ioctl_maxcount) 6086 ERROUT(ENOMEM); 6087 6088 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6089 if (nvlpacked == NULL) 6090 ERROUT(ENOMEM); 6091 6092 error = copyin(nv->data, nvlpacked, nv->len); 6093 if (error) 6094 ERROUT(error); 6095 6096 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6097 if (nvl == NULL) 6098 ERROUT(EBADMSG); 6099 6100 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6101 if (error) 6102 ERROUT(error); 6103 6104 pf_killstates(&kill, &killed); 6105 6106 free(nvlpacked, M_NVLIST); 6107 nvlpacked = NULL; 6108 nvlist_destroy(nvl); 6109 nvl = nvlist_create(0); 6110 if (nvl == NULL) 6111 ERROUT(ENOMEM); 6112 6113 nvlist_add_number(nvl, "killed", killed); 6114 6115 nvlpacked = nvlist_pack(nvl, &nv->len); 6116 if (nvlpacked 
== NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define ERROUT(x) ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_kstate *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define ERROUT(x) ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
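 * Rulesets are flushed by opening and immediately committing an empty
 * transaction for each ruleset type; tables, Ethernet rules, ALTQ
 * queues, states and source nodes are then cleared explicitly below.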
6264 */ 6265 static int 6266 shutdown_pf(void) 6267 { 6268 int error = 0; 6269 u_int32_t t[5]; 6270 char nn = '\0'; 6271 6272 do { 6273 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6274 != 0) { 6275 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6276 break; 6277 } 6278 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6279 != 0) { 6280 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6281 break; /* XXX: rollback? */ 6282 } 6283 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6284 != 0) { 6285 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6286 break; /* XXX: rollback? */ 6287 } 6288 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6289 != 0) { 6290 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6291 break; /* XXX: rollback? */ 6292 } 6293 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6294 != 0) { 6295 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6296 break; /* XXX: rollback? */ 6297 } 6298 6299 /* XXX: these should always succeed here */ 6300 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 6301 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 6302 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 6303 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 6304 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 6305 6306 if ((error = pf_clear_tables()) != 0) 6307 break; 6308 6309 if ((error = pf_begin_eth(&t[0], &nn)) != 0) { 6310 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); 6311 break; 6312 } 6313 pf_commit_eth(t[0], &nn); 6314 6315 #ifdef ALTQ 6316 if ((error = pf_begin_altq(&t[0])) != 0) { 6317 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 6318 break; 6319 } 6320 pf_commit_altq(t[0]); 6321 #endif 6322 6323 pf_clear_all_states(); 6324 6325 pf_clear_srcnodes(NULL); 6326 6327 /* status does not use malloced mem so no need to cleanup */ 6328 /* fingerprints and interfaces have their own cleanup code */ 6329 } while(0); 6330 6331 return (error); 6332 } 6333 6334 static pfil_return_t 6335 pf_check_return(int chk, struct mbuf **m) 6336 { 6337 6338 switch (chk) { 6339 case PF_PASS: 6340 if (*m == NULL) 6341 return (PFIL_CONSUMED); 6342 else 6343 return (PFIL_PASS); 6344 break; 6345 default: 6346 if (*m != NULL) { 6347 m_freem(*m); 6348 *m = NULL; 6349 } 6350 return (PFIL_DROPPED); 6351 } 6352 } 6353 6354 static pfil_return_t 6355 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6356 void *ruleset __unused, struct inpcb *inp) 6357 { 6358 int chk; 6359 6360 chk = pf_test_eth(PF_IN, flags, ifp, m, inp); 6361 6362 return (pf_check_return(chk, m)); 6363 } 6364 6365 static pfil_return_t 6366 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6367 void *ruleset __unused, struct inpcb *inp) 6368 { 6369 int chk; 6370 6371 chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); 6372 6373 return (pf_check_return(chk, m)); 6374 } 6375 6376 #ifdef INET 6377 static pfil_return_t 6378 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6379 void *ruleset __unused, struct inpcb *inp) 6380 { 6381 int chk; 6382 6383 chk = pf_test(PF_IN, flags, ifp, m, inp); 6384 6385 return (pf_check_return(chk, m)); 6386 } 6387 6388 static pfil_return_t 6389 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6390 void *ruleset __unused, struct inpcb *inp) 6391 { 6392 int chk; 6393 6394 chk = pf_test(PF_OUT, flags, ifp, m, inp); 6395 6396 return (pf_check_return(chk, m)); 6397 } 6398 #endif 6399 6400 #ifdef INET6 6401 static pfil_return_t 6402 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, 6403 void *ruleset __unused, struct 
/*
 * Translate a pf verdict into a pfil(9) verdict: PF_PASS maps to
 * PFIL_PASS, or to PFIL_CONSUMED if pf took ownership of the mbuf;
 * any other verdict frees the mbuf and maps to PFIL_DROPPED.
 */
static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * For loopback traffic, IPv6 uses the real interface in order to
	 * support scoped addresses.  To support stateful filtering we
	 * have to change this to lo0, as is the case for IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret __diagused;

	if (V_pf_pfil_eth_hooked)
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

	pha.pa_type = PFIL_TYPE_ETHERNET;
	pha.pa_func = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	pha.pa_func = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	V_pf_pfil_eth_hooked = 1;
}
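/*
 * Hooking into pfil(9) is a two-step operation, as seen in hook_pf_eth()
 * above and hook_pf() below: pfil_add_hook() creates the hook from a
 * filled-in pfil_hook_args, and pfil_link() attaches it to a packet head.
 * A minimal sketch of the same pattern for a single direction
 * ("example-in" is a placeholder rule name; everything else is the real
 * pfil(9) API used in this file, with error handling elided):
 *
 *	struct pfil_hook_args pha = {
 *		.pa_version = PFIL_VERSION,
 *		.pa_type = PFIL_TYPE_IP4,
 *		.pa_func = pf_check_in,
 *		.pa_flags = PFIL_IN,
 *		.pa_modname = "pf",
 *		.pa_rulname = "example-in",
 *	};
 *	struct pfil_link_args pla = {
 *		.pa_version = PFIL_VERSION,
 *		.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR,
 *		.pa_head = V_inet_pfil_head,
 *	};
 *
 *	pla.pa_hook = pfil_add_hook(&pha);
 *	(void)pfil_link(&pla);
 */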
static void
hook_pf(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret __diagused;

	if (V_pf_pfil_hooked)
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_func = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	pha.pa_func = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_func = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	pha.pa_func = pf_check6_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out6";
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif

	V_pf_pfil_hooked = 1;
}

static void
dehook_pf_eth(void)
{

	if (V_pf_pfil_eth_hooked == 0)
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	V_pf_pfil_eth_hooked = 0;
}

static void
dehook_pf(void)
{

	if (V_pf_pfil_hooked == 0)
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	V_pf_pfil_hooked = 0;
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&pf_ioctl_lock, "pf ioctl");
	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0,
	    "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
}
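/*
 * pf_unload() below stops the purge kthread with a small handshake on
 * pf_end_threads: the unload path sets it to 1 and sleeps on
 * pf_purge_proc; pf_purge_thread() (started from pf_load()) is expected
 * to notice the request, advance pf_end_threads to 2 and wake the
 * sleeper before exiting.  The loop tolerates spurious wakeups by
 * re-checking the counter.
 */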
static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	rm_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);
	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);
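/*
 * Ordering note, derived from the SI_SUB/SI_ORDER values above: on load,
 * the module event at SI_ORDER_SECOND runs pf_load() before the per-vnet
 * VNET_SYSINIT at SI_ORDER_THIRD runs pf_load_vnet().  Sysuninits run in
 * the reverse order, so every vnet_pf_uninit() finishes before
 * SYSUNINIT(pf_unload) tears down the global state.
 */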