/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <altq/altq.h>
#endif

static int		 pfattach(void);
static struct pf_pool	*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void		 pf_empty_pool(struct pf_palist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static u_int32_t	 pf_qname2qid(char *);
static void		 pf_qid_unref(u_int32_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_ruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);

VNET_DEFINE(struct pf_rule,	pf_default_rule);

#ifdef ALTQ
static VNET_DEFINE(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

TAILQ_HEAD(pf_tags, pf_tagname);
#define	V_pf_tags		VNET(pf_tags)
VNET_DEFINE(struct pf_tags, pf_tags);
#define	V_pf_qids		VNET(pf_qids)
VNET_DEFINE(struct pf_tags, pf_qids);
static MALLOC_DEFINE(M_PFTAG, "pf_tag", "pf(4) tag names");
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static u_int16_t	 pf_tagname2tag(char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_src_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
#endif
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir, struct inpcb *inp);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define	V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE(int,		pf_end_threads);

struct rwlock			pf_rules_lock;

/* pfsync */
pfsync_state_import_t		*pfsync_state_import_ptr = NULL;
pfsync_insert_state_t		*pfsync_insert_state_ptr = NULL;
pfsync_update_state_t		*pfsync_update_state_ptr = NULL;
pfsync_delete_state_t		*pfsync_delete_state_ptr = NULL;
pfsync_clear_states_t		*pfsync_clear_states_ptr = NULL;
pfsync_defer_t			*pfsync_defer_ptr = NULL;
/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

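/*
 * Module attach: initialize pf's data structures, install the default
 * rule (which is never garbage collected), seed the default timeouts
 * and limits, and start the purge kthread and the "pf send" swi.
 */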
static int
pfattach(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;
	int error;

	pf_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_normalize_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
	    "pf purge")) != 0)
		/* XXXGL: leaked all above. */
		return (error);
	if ((error = swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie)) != 0)
		/* XXXGL: leaked all above. */
		return (error);

	return (0);
}

static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_flag |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

void
pf_free_rule(struct pf_rule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kif_unref(rule->kif);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}

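/*
 * Tag names are mapped to small integer ids with reference counts.
 * An existing name returns its tag with the refcount bumped; otherwise
 * the lowest free id is allocated so that the list stays sorted and
 * compact.  tag_unref() drops a reference and frees the entry once the
 * count reaches zero.
 */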
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	PF_RULES_WASSERT();

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_PFTAG, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	PF_RULES_WASSERT();

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_PFTAG);
			}
			break;
		}
	}
}

static u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
static u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&V_pf_qids, (u_int16_t)qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	V_altqs_inactive_open = 0;
	return (error);
}

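/*
 * Commit the inactive altq list built under 'ticket': swap it with
 * the active list, attach and (if altq is running) enable the new
 * disciplines, then detach and free the replaced ones.
 */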
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(V_pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(V_pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0 &&
		    (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct ifnet	*ifp1;
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if (a2->qname[0] != 0) {
			if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
				error = EBUSY;
				free(a2, M_PFALTQ);
				break;
			}
			a2->altq_disc = NULL;
			TAILQ_FOREACH(a3, V_pf_altqs_inactive, entries) {
				if (strncmp(a3->ifname, a2->ifname,
				    IFNAMSIZ) == 0 && a3->qname[0] == 0) {
					a2->altq_disc = a3->altq_disc;
					break;
				}
			}
		}
		/* Deactivate the interface in question */
		a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
		if ((ifp1 = ifunit(a2->ifname)) == NULL ||
		    (remove && ifp1 == ifp)) {
			a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		} else {
			error = altq_add(a2);

			if (ticket != V_ticket_altqs_inactive)
				error = EBUSY;

			if (error) {
				free(a2, M_PFALTQ);
				break;
			}
		}

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

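/*
 * The main ruleset is folded into an MD5 digest (V_pf_status.pf_chksum)
 * so that pfsync peers can check that they run identical rulesets.  The
 * PF_MD5_UPD* macros feed individual rule fields, normalized to network
 * byte order where needed, into the running context.
 */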
#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

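/*
 * Commit the inactive ruleset opened under 'ticket': swap it with the
 * active ruleset, recompute the skip steps, and purge the replaced
 * rules.
 */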
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

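/*
 * Main pf(4) ioctl handler.  Commands are vetted twice before being
 * dispatched: at securelevel above 2 only read-style (and dummy)
 * requests are allowed, and descriptors opened without FWRITE are
 * restricted to GET-style operations.
 */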
static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
	case DIOCSTART:
		PF_RULES_WLOCK();
		if (V_pf_status.running)
			error = EEXIST;
		else {
			int cpu;

			PF_RULES_WUNLOCK();
			error = hook_pf();
			if (error) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil registration failed\n"));
				break;
			}
			PF_RULES_WLOCK();
			V_pf_status.running = 1;
			V_pf_status.since = time_second;

			CPU_FOREACH(cpu)
				V_pf_stateid[cpu] = time_second;

			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		PF_RULES_WUNLOCK();
		break;

	case DIOCSTOP:
		PF_RULES_WLOCK();
		if (!V_pf_status.running)
			error = ENOENT;
		else {
			V_pf_status.running = 0;
			PF_RULES_WUNLOCK();
			error = dehook_pf();
			if (error) {
				V_pf_status.running = 1;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: pfil unregistration failed\n"));
			}
			PF_RULES_WLOCK();
			V_pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		PF_RULES_WUNLOCK();
		break;

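	/*
	 * DIOCADDRULE stages a rule on the inactive ruleset of its anchor.
	 * Both pr->ticket and pr->pool_ticket must match the currently open
	 * transaction, otherwise the request fails with EBUSY.
	 */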
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule, *tail;
		struct pf_pooladdr	*pa;
		struct pfi_kif		*kif = NULL;
		int			 rs_num;

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
#ifndef INET
		if (pr->rule.af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pr->rule.af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		bcopy(&pr->rule, rule, sizeof(struct pf_rule));
		if (rule->ifname[0])
			kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
		rule->states_cur = counter_u64_alloc(M_WAITOK);
		rule->states_tot = counter_u64_alloc(M_WAITOK);
		rule->src_nodes = counter_u64_alloc(M_WAITOK);
		rule->cuid = td->td_ucred->cr_ruid;
		rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
		TAILQ_INIT(&rule->rpool.list);

#define	ERROUT(x)	{ error = (x); goto DIOCADDRULE_error; }

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
			    ruleset->rules[rs_num].inactive.ticket));
			ERROUT(EBUSY);
		}
		if (pr->pool_ticket != V_ticket_pabuf) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pool_ticket: %d != %d\n", pr->pool_ticket,
			    V_ticket_pabuf));
			ERROUT(EBUSY);
		}

		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_attach(kif, rule->ifname);
			pfi_kif_ref(rule->kif);
		} else
			rule->kif = NULL;

		if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
			error = EBUSY;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (!rule->log)
			rule->logif = 0;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
			error = ENOMEM;
		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
			error = ENOMEM;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
			if (pa->addr.type == PF_ADDR_TABLE) {
				pa->addr.p.tbl = pfr_attach_table(ruleset,
				    pa->addr.v.tblname);
				if (pa->addr.p.tbl == NULL)
					error = ENOMEM;
			}

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&V_pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_free_rule(rule);
			PF_RULES_WUNLOCK();
			break;
		}

		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCADDRULE_error:
		PF_RULES_WUNLOCK();
		counter_u64_free(rule->states_cur);
		counter_u64_free(rule->states_tot);
		counter_u64_free(rule->src_nodes);
		free(rule, M_PFRULE);
		if (kif)
			free(kif, PFI_MTYPE);
		break;
	}

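	/*
	 * DIOCGETRULES returns the rule count and a read ticket for the
	 * active ruleset; userland then fetches rules one by one with
	 * DIOCGETRULE using that ticket.
	 */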
	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*tail;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*rule;
		int			 rs_num, i;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(rule, &pr->rule, sizeof(struct pf_rule));
		/*
		 * XXXGL: this is what happens when internal kernel
		 * structures are used as ioctl API structures.
		 */
		pr->rule.states_cur =
		    (counter_u64_t )counter_u64_fetch(rule->states_cur);
		pr->rule.states_tot =
		    (counter_u64_t )counter_u64_fetch(rule->states_tot);
		pr->rule.src_nodes =
		    (counter_u64_t )counter_u64_fetch(rule->src_nodes);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}

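	/*
	 * DIOCCHANGERULE edits the active ruleset in place: depending on
	 * pcr->action a rule is inserted before or after an existing one,
	 * or removed.  Every change renumbers the rules and bumps the
	 * active ticket.
	 */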
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_rule		*oldrule = NULL, *newrule = NULL;
		struct pfi_kif		*kif = NULL;
		struct pf_pooladdr	*pa;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pcr->rule.af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pcr->rule.af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
			bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
			if (newrule->ifname[0])
				kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->rpool.list);
		}

#define	ERROUT(x)	{ error = (x); goto DIOCCHANGERULE_error; }

		PF_RULES_WLOCK();
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_attach(kif,
				    newrule->ifname);
				pfi_kif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = ENOMEM;
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
				if (pa->addr.type == PF_ADDR_TABLE) {
					pa->addr.p.tbl =
					    pfr_attach_table(ruleset,
					    pa->addr.v.tblname);
					if (pa->addr.p.tbl == NULL)
						error = ENOMEM;
				}

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_pool(&V_pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				break;
			}

			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&V_pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		PF_RULES_WUNLOCK();
		if (newrule != NULL) {
			counter_u64_free(newrule->states_cur);
			counter_u64_free(newrule->states_tot);
			counter_u64_free(newrule->src_nodes);
			free(newrule, M_PFRULE);
		}
		if (kif != NULL)
			free(kif, PFI_MTYPE);
		break;
	}

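	/*
	 * DIOCCLRSTATES flushes states, optionally only those bound to a
	 * given interface.  pf_unlink_state() drops the hash row lock, so
	 * the row is relocked and rescanned after every kill.
	 */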
	case DIOCCLRSTATES: {
		struct pf_state		*s;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		for (i = 0; i <= V_pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry)
				if (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name)) {
					/*
					 * Don't send out individual
					 * delete messages.
					 */
					s->state_flags |= PFSTATE_NOSYNC;
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCCLRSTATES;
				}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		if (pfsync_clear_states_ptr != NULL)
			pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname);
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state		*s;
		struct pf_state_key	*sk;
		struct pf_addr		*srcaddr, *dstaddr;
		u_int16_t		 srcport, dstport;
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		u_int			 i, killed = 0;

		if (psk->psk_pfcmp.id) {
			if (psk->psk_pfcmp.creatorid == 0)
				psk->psk_pfcmp.creatorid = V_pf_status.hostid;
			if ((s = pf_find_state_byid(psk->psk_pfcmp.id,
			    psk->psk_pfcmp.creatorid))) {
				pf_unlink_state(s, PF_ENTER_LOCKED);
				psk->psk_killed = 1;
			}
			break;
		}

		for (i = 0; i <= V_pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCKILLSTATES:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				sk = s->key[PF_SK_WIRE];
				if (s->direction == PF_OUT) {
					srcaddr = &sk->addr[1];
					dstaddr = &sk->addr[0];
					srcport = sk->port[1];
					dstport = sk->port[0];
				} else {
					srcaddr = &sk->addr[0];
					dstaddr = &sk->addr[1];
					srcport = sk->port[0];
					dstport = sk->port[1];
				}

				if ((!psk->psk_af || sk->af == psk->psk_af)
				    && (!psk->psk_proto || psk->psk_proto ==
				    sk->proto) &&
				    PF_MATCHA(psk->psk_src.neg,
				    &psk->psk_src.addr.v.a.addr,
				    &psk->psk_src.addr.v.a.mask,
				    srcaddr, sk->af) &&
				    PF_MATCHA(psk->psk_dst.neg,
				    &psk->psk_dst.addr.v.a.addr,
				    &psk->psk_dst.addr.v.a.mask,
				    dstaddr, sk->af) &&
				    (psk->psk_src.port_op == 0 ||
				    pf_match_port(psk->psk_src.port_op,
				    psk->psk_src.port[0], psk->psk_src.port[1],
				    srcport)) &&
				    (psk->psk_dst.port_op == 0 ||
				    pf_match_port(psk->psk_dst.port_op,
				    psk->psk_dst.port[0], psk->psk_dst.port[1],
				    dstport)) &&
				    (!psk->psk_label[0] ||
				    (s->rule.ptr->label[0] &&
				    !strcmp(psk->psk_label,
				    s->rule.ptr->label))) &&
				    (!psk->psk_ifname[0] ||
				    !strcmp(psk->psk_ifname,
				    s->kif->pfik_name))) {
					pf_unlink_state(s, PF_ENTER_LOCKED);
					killed++;
					goto relock_DIOCKILLSTATES;
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		psk->psk_killed = killed;
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		if (pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}

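	/*
	 * DIOCGETSTATES export protocol: with ps_len == 0 only the buffer
	 * size needed for all states is reported; otherwise as many states
	 * as fit are copied out and ps_len is set to the size actually used.
	 */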
	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*s;
		struct pfsync_state	*pstore, *p;
		int			 i, nr;

		if (ps->ps_len == 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK);
		nr = 0;

		for (i = 0; i <= V_pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {

				if (s->timeout == PFTM_UNLINKED)
					continue;

				if ((nr+1) * sizeof(*p) > ps->ps_len) {
					PF_HASHROW_UNLOCK(ih);
					goto DIOCGETSTATES_full;
				}
				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
		}
DIOCGETSTATES_full:
		error = copyout(pstore, ps->ps_states,
		    sizeof(struct pfsync_state) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		PF_RULES_RLOCK();
		bcopy(&V_pf_status, s, sizeof(struct pf_status));
		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(V_pf_status.ifname, IFNAMSIZ);
			break;
		}
		PF_RULES_WLOCK();
		strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		PF_RULES_WLOCK();
		bzero(V_pf_status.counters, sizeof(V_pf_status.counters));
		bzero(V_pf_status.fcounters, sizeof(V_pf_status.fcounters));
		bzero(V_pf_status.scounters, sizeof(V_pf_status.scounters));
		V_pf_status.since = time_second;
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}

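	/*
	 * DIOCNATLOOK resolves a connection through the state table and
	 * returns the addresses and ports on the other side of the
	 * translation.  The request's src and dst are given reversed
	 * relative to the state key, hence the swapped sidx/didx for
	 * inbound lookups.
	 */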
	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* XXXGL: not locked read */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pl->limit = V_pf_limits[pl->index].limit;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		V_pf_status.debug = *level;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset	*ruleset = &pf_main_ruleset;
		struct pf_rule		*rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed	*psp = (struct pf_ifspeed *)addr;
		struct pf_ifspeed	 ps;
		struct ifnet		*ifp;

		if (psp->ifname[0] != 0) {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ifp = ifunit(ps.ifname);
			if (ifp != NULL)
				psp->baudrate = ifp->if_baudrate;
			else
				error = EINVAL;
		} else
			error = EINVAL;
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
			if (altq->qname[0] == 0 && (altq->local_flags &
			    PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
			if (altq->qname[0] == 0 && (altq->local_flags &
			    PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq, *a;
		struct ifnet		*ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK);
		bcopy(&pa->altq, altq, sizeof(struct pf_altq));
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETALTQS: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETALTQ: {
		struct pfioc_altq	*pa = (struct pfioc_altq *)addr;
		struct pf_altq		*altq;
		u_int32_t		 nr;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(V_pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		bcopy(altq, &pa->altq, sizeof(struct pf_altq));
PF_RULES_RUNLOCK(); 2112 break; 2113 } 2114 2115 case DIOCCHANGEALTQ: 2116 /* CHANGEALTQ not supported yet! */ 2117 error = ENODEV; 2118 break; 2119 2120 case DIOCGETQSTATS: { 2121 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 2122 struct pf_altq *altq; 2123 u_int32_t nr; 2124 int nbytes; 2125 2126 PF_RULES_RLOCK(); 2127 if (pq->ticket != V_ticket_altqs_active) { 2128 PF_RULES_RUNLOCK(); 2129 error = EBUSY; 2130 break; 2131 } 2132 nbytes = pq->nbytes; 2133 nr = 0; 2134 altq = TAILQ_FIRST(V_pf_altqs_active); 2135 while ((altq != NULL) && (nr < pq->nr)) { 2136 altq = TAILQ_NEXT(altq, entries); 2137 nr++; 2138 } 2139 if (altq == NULL) { 2140 PF_RULES_RUNLOCK(); 2141 error = EBUSY; 2142 break; 2143 } 2144 2145 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 2146 PF_RULES_RUNLOCK(); 2147 error = ENXIO; 2148 break; 2149 } 2150 PF_RULES_RUNLOCK(); 2151 error = altq_getqstats(altq, pq->buf, &nbytes); 2152 if (error == 0) { 2153 pq->scheduler = altq->scheduler; 2154 pq->nbytes = nbytes; 2155 } 2156 break; 2157 } 2158 #endif /* ALTQ */ 2159 2160 case DIOCBEGINADDRS: { 2161 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2162 2163 PF_RULES_WLOCK(); 2164 pf_empty_pool(&V_pf_pabuf); 2165 pp->ticket = ++V_ticket_pabuf; 2166 PF_RULES_WUNLOCK(); 2167 break; 2168 } 2169 2170 case DIOCADDADDR: { 2171 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2172 struct pf_pooladdr *pa; 2173 struct pfi_kif *kif = NULL; 2174 2175 #ifndef INET 2176 if (pp->af == AF_INET) { 2177 error = EAFNOSUPPORT; 2178 break; 2179 } 2180 #endif /* INET */ 2181 #ifndef INET6 2182 if (pp->af == AF_INET6) { 2183 error = EAFNOSUPPORT; 2184 break; 2185 } 2186 #endif /* INET6 */ 2187 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 2188 pp->addr.addr.type != PF_ADDR_DYNIFTL && 2189 pp->addr.addr.type != PF_ADDR_TABLE) { 2190 error = EINVAL; 2191 break; 2192 } 2193 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 2194 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2195 if (pa->ifname[0]) 2196 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); 2197 PF_RULES_WLOCK(); 2198 if (pp->ticket != V_ticket_pabuf) { 2199 PF_RULES_WUNLOCK(); 2200 if (pa->ifname[0]) 2201 free(kif, PFI_MTYPE); 2202 free(pa, M_PFRULE); 2203 error = EBUSY; 2204 break; 2205 } 2206 if (pa->ifname[0]) { 2207 pa->kif = pfi_kif_attach(kif, pa->ifname); 2208 pfi_kif_ref(pa->kif); 2209 } else 2210 pa->kif = NULL; 2211 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 2212 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 2213 if (pa->ifname[0]) 2214 pfi_kif_unref(pa->kif); 2215 PF_RULES_WUNLOCK(); 2216 free(pa, M_PFRULE); 2217 break; 2218 } 2219 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 2220 PF_RULES_WUNLOCK(); 2221 break; 2222 } 2223 2224 case DIOCGETADDRS: { 2225 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2226 struct pf_pool *pool; 2227 struct pf_pooladdr *pa; 2228 2229 PF_RULES_RLOCK(); 2230 pp->nr = 0; 2231 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2232 pp->r_num, 0, 1, 0); 2233 if (pool == NULL) { 2234 PF_RULES_RUNLOCK(); 2235 error = EBUSY; 2236 break; 2237 } 2238 TAILQ_FOREACH(pa, &pool->list, entries) 2239 pp->nr++; 2240 PF_RULES_RUNLOCK(); 2241 break; 2242 } 2243 2244 case DIOCGETADDR: { 2245 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2246 struct pf_pool *pool; 2247 struct pf_pooladdr *pa; 2248 u_int32_t nr = 0; 2249 2250 PF_RULES_RLOCK(); 2251 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, 2252 pp->r_num, 0, 1, 1); 2253 if (pool == NULL) { 2254 PF_RULES_RUNLOCK(); 2255 error = 
EBUSY; 2256 break; 2257 } 2258 pa = TAILQ_FIRST(&pool->list); 2259 while ((pa != NULL) && (nr < pp->nr)) { 2260 pa = TAILQ_NEXT(pa, entries); 2261 nr++; 2262 } 2263 if (pa == NULL) { 2264 PF_RULES_RUNLOCK(); 2265 error = EBUSY; 2266 break; 2267 } 2268 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2269 pf_addr_copyout(&pp->addr.addr); 2270 PF_RULES_RUNLOCK(); 2271 break; 2272 } 2273 2274 case DIOCCHANGEADDR: { 2275 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2276 struct pf_pool *pool; 2277 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2278 struct pf_ruleset *ruleset; 2279 struct pfi_kif *kif = NULL; 2280 2281 if (pca->action < PF_CHANGE_ADD_HEAD || 2282 pca->action > PF_CHANGE_REMOVE) { 2283 error = EINVAL; 2284 break; 2285 } 2286 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2287 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2288 pca->addr.addr.type != PF_ADDR_TABLE) { 2289 error = EINVAL; 2290 break; 2291 } 2292 2293 if (pca->action != PF_CHANGE_REMOVE) { 2294 #ifndef INET 2295 if (pca->af == AF_INET) { 2296 error = EAFNOSUPPORT; 2297 break; 2298 } 2299 #endif /* INET */ 2300 #ifndef INET6 2301 if (pca->af == AF_INET6) { 2302 error = EAFNOSUPPORT; 2303 break; 2304 } 2305 #endif /* INET6 */ 2306 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 2307 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2308 if (newpa->ifname[0]) 2309 kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); 2310 newpa->kif = NULL; 2311 } 2312 2313 #define ERROUT(x) { error = (x); goto DIOCCHANGEADDR_error; } 2314 PF_RULES_WLOCK(); 2315 ruleset = pf_find_ruleset(pca->anchor); 2316 if (ruleset == NULL) 2317 ERROUT(EBUSY); 2318 2319 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, 2320 pca->r_num, pca->r_last, 1, 1); 2321 if (pool == NULL) 2322 ERROUT(EBUSY); 2323 2324 if (pca->action != PF_CHANGE_REMOVE) { 2325 if (newpa->ifname[0]) { 2326 newpa->kif = pfi_kif_attach(kif, newpa->ifname); 2327 pfi_kif_ref(newpa->kif); 2328 kif = NULL; 2329 } 2330 2331 switch (newpa->addr.type) { 2332 case PF_ADDR_DYNIFTL: 2333 error = pfi_dynaddr_setup(&newpa->addr, 2334 pca->af); 2335 break; 2336 case PF_ADDR_TABLE: 2337 newpa->addr.p.tbl = pfr_attach_table(ruleset, 2338 newpa->addr.v.tblname); 2339 if (newpa->addr.p.tbl == NULL) 2340 error = ENOMEM; 2341 break; 2342 } 2343 if (error) 2344 goto DIOCCHANGEADDR_error; 2345 } 2346 2347 switch (pca->action) { 2348 case PF_CHANGE_ADD_HEAD: 2349 oldpa = TAILQ_FIRST(&pool->list); 2350 break; 2351 case PF_CHANGE_ADD_TAIL: 2352 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2353 break; 2354 default: 2355 oldpa = TAILQ_FIRST(&pool->list); 2356 for (int i = 0; oldpa && i < pca->nr; i++) 2357 oldpa = TAILQ_NEXT(oldpa, entries); 2358 2359 if (oldpa == NULL) 2360 ERROUT(EINVAL); 2361 } 2362 2363 if (pca->action == PF_CHANGE_REMOVE) { 2364 TAILQ_REMOVE(&pool->list, oldpa, entries); 2365 switch (oldpa->addr.type) { 2366 case PF_ADDR_DYNIFTL: 2367 pfi_dynaddr_remove(oldpa->addr.p.dyn); 2368 break; 2369 case PF_ADDR_TABLE: 2370 pfr_detach_table(oldpa->addr.p.tbl); 2371 break; 2372 } 2373 if (oldpa->kif) 2374 pfi_kif_unref(oldpa->kif); 2375 free(oldpa, M_PFRULE); 2376 } else { 2377 if (oldpa == NULL) 2378 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2379 else if (pca->action == PF_CHANGE_ADD_HEAD || 2380 pca->action == PF_CHANGE_ADD_BEFORE) 2381 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2382 else 2383 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2384 newpa, entries); 2385 } 2386 2387 pool->cur = TAILQ_FIRST(&pool->list); 2388 PF_ACPY(&pool->counter, 
&pool->cur->addr.v.a.addr, pca->af); 2389 PF_RULES_WUNLOCK(); 2390 break; 2391 2392 #undef ERROUT 2393 DIOCCHANGEADDR_error: 2394 if (newpa->kif) 2395 pfi_kif_unref(newpa->kif); 2396 PF_RULES_WUNLOCK(); 2397 if (newpa != NULL) 2398 free(newpa, M_PFRULE); 2399 if (kif != NULL) 2400 free(kif, PFI_MTYPE); 2401 break; 2402 } 2403 2404 case DIOCGETRULESETS: { 2405 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2406 struct pf_ruleset *ruleset; 2407 struct pf_anchor *anchor; 2408 2409 PF_RULES_RLOCK(); 2410 pr->path[sizeof(pr->path) - 1] = 0; 2411 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2412 PF_RULES_RUNLOCK(); 2413 error = ENOENT; 2414 break; 2415 } 2416 pr->nr = 0; 2417 if (ruleset->anchor == NULL) { 2418 /* XXX kludge for pf_main_ruleset */ 2419 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 2420 if (anchor->parent == NULL) 2421 pr->nr++; 2422 } else { 2423 RB_FOREACH(anchor, pf_anchor_node, 2424 &ruleset->anchor->children) 2425 pr->nr++; 2426 } 2427 PF_RULES_RUNLOCK(); 2428 break; 2429 } 2430 2431 case DIOCGETRULESET: { 2432 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2433 struct pf_ruleset *ruleset; 2434 struct pf_anchor *anchor; 2435 u_int32_t nr = 0; 2436 2437 PF_RULES_RLOCK(); 2438 pr->path[sizeof(pr->path) - 1] = 0; 2439 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 2440 PF_RULES_RUNLOCK(); 2441 error = ENOENT; 2442 break; 2443 } 2444 pr->name[0] = 0; 2445 if (ruleset->anchor == NULL) { 2446 /* XXX kludge for pf_main_ruleset */ 2447 RB_FOREACH(anchor, pf_anchor_global, &V_pf_anchors) 2448 if (anchor->parent == NULL && nr++ == pr->nr) { 2449 strlcpy(pr->name, anchor->name, 2450 sizeof(pr->name)); 2451 break; 2452 } 2453 } else { 2454 RB_FOREACH(anchor, pf_anchor_node, 2455 &ruleset->anchor->children) 2456 if (nr++ == pr->nr) { 2457 strlcpy(pr->name, anchor->name, 2458 sizeof(pr->name)); 2459 break; 2460 } 2461 } 2462 if (!pr->name[0]) 2463 error = EBUSY; 2464 PF_RULES_RUNLOCK(); 2465 break; 2466 } 2467 2468 case DIOCRCLRTABLES: { 2469 struct pfioc_table *io = (struct pfioc_table *)addr; 2470 2471 if (io->pfrio_esize != 0) { 2472 error = ENODEV; 2473 break; 2474 } 2475 PF_RULES_WLOCK(); 2476 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2477 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2478 PF_RULES_WUNLOCK(); 2479 break; 2480 } 2481 2482 case DIOCRADDTABLES: { 2483 struct pfioc_table *io = (struct pfioc_table *)addr; 2484 struct pfr_table *pfrts; 2485 size_t totlen; 2486 2487 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2488 error = ENODEV; 2489 break; 2490 } 2491 totlen = io->pfrio_size * sizeof(struct pfr_table); 2492 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2493 error = copyin(io->pfrio_buffer, pfrts, totlen); 2494 if (error) { 2495 free(pfrts, M_TEMP); 2496 break; 2497 } 2498 PF_RULES_WLOCK(); 2499 error = pfr_add_tables(pfrts, io->pfrio_size, 2500 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2501 PF_RULES_WUNLOCK(); 2502 free(pfrts, M_TEMP); 2503 break; 2504 } 2505 2506 case DIOCRDELTABLES: { 2507 struct pfioc_table *io = (struct pfioc_table *)addr; 2508 struct pfr_table *pfrts; 2509 size_t totlen; 2510 2511 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2512 error = ENODEV; 2513 break; 2514 } 2515 totlen = io->pfrio_size * sizeof(struct pfr_table); 2516 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2517 error = copyin(io->pfrio_buffer, pfrts, totlen); 2518 if (error) { 2519 free(pfrts, M_TEMP); 2520 break; 2521 } 2522 PF_RULES_WLOCK(); 2523 error = pfr_del_tables(pfrts, io->pfrio_size, 2524 &io->pfrio_ndel, 
io->pfrio_flags | PFR_FLAG_USERIOCTL); 2525 PF_RULES_WUNLOCK(); 2526 free(pfrts, M_TEMP); 2527 break; 2528 } 2529 2530 case DIOCRGETTABLES: { 2531 struct pfioc_table *io = (struct pfioc_table *)addr; 2532 struct pfr_table *pfrts; 2533 size_t totlen; 2534 2535 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2536 error = ENODEV; 2537 break; 2538 } 2539 totlen = io->pfrio_size * sizeof(struct pfr_table); 2540 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2541 PF_RULES_RLOCK(); 2542 error = pfr_get_tables(&io->pfrio_table, pfrts, 2543 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2544 PF_RULES_RUNLOCK(); 2545 if (error == 0) 2546 error = copyout(pfrts, io->pfrio_buffer, totlen); 2547 free(pfrts, M_TEMP); 2548 break; 2549 } 2550 2551 case DIOCRGETTSTATS: { 2552 struct pfioc_table *io = (struct pfioc_table *)addr; 2553 struct pfr_tstats *pfrtstats; 2554 size_t totlen; 2555 2556 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2557 error = ENODEV; 2558 break; 2559 } 2560 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 2561 pfrtstats = malloc(totlen, M_TEMP, M_WAITOK); 2562 PF_RULES_WLOCK(); 2563 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 2564 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2565 PF_RULES_WUNLOCK(); 2566 if (error == 0) 2567 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 2568 free(pfrtstats, M_TEMP); 2569 break; 2570 } 2571 2572 case DIOCRCLRTSTATS: { 2573 struct pfioc_table *io = (struct pfioc_table *)addr; 2574 struct pfr_table *pfrts; 2575 size_t totlen; 2576 2577 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2578 error = ENODEV; 2579 break; 2580 } 2581 totlen = io->pfrio_size * sizeof(struct pfr_table); 2582 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2583 error = copyin(io->pfrio_buffer, pfrts, totlen); 2584 if (error) { 2585 free(pfrts, M_TEMP); 2586 break; 2587 } 2588 PF_RULES_WLOCK(); 2589 error = pfr_clr_tstats(pfrts, io->pfrio_size, 2590 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2591 PF_RULES_WUNLOCK(); 2592 free(pfrts, M_TEMP); 2593 break; 2594 } 2595 2596 case DIOCRSETTFLAGS: { 2597 struct pfioc_table *io = (struct pfioc_table *)addr; 2598 struct pfr_table *pfrts; 2599 size_t totlen; 2600 2601 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2602 error = ENODEV; 2603 break; 2604 } 2605 totlen = io->pfrio_size * sizeof(struct pfr_table); 2606 pfrts = malloc(totlen, M_TEMP, M_WAITOK); 2607 error = copyin(io->pfrio_buffer, pfrts, totlen); 2608 if (error) { 2609 free(pfrts, M_TEMP); 2610 break; 2611 } 2612 PF_RULES_WLOCK(); 2613 error = pfr_set_tflags(pfrts, io->pfrio_size, 2614 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2615 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2616 PF_RULES_WUNLOCK(); 2617 free(pfrts, M_TEMP); 2618 break; 2619 } 2620 2621 case DIOCRCLRADDRS: { 2622 struct pfioc_table *io = (struct pfioc_table *)addr; 2623 2624 if (io->pfrio_esize != 0) { 2625 error = ENODEV; 2626 break; 2627 } 2628 PF_RULES_WLOCK(); 2629 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2630 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2631 PF_RULES_WUNLOCK(); 2632 break; 2633 } 2634 2635 case DIOCRADDADDRS: { 2636 struct pfioc_table *io = (struct pfioc_table *)addr; 2637 struct pfr_addr *pfras; 2638 size_t totlen; 2639 2640 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2641 error = ENODEV; 2642 break; 2643 } 2644 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2645 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2646 error = copyin(io->pfrio_buffer, pfras, totlen); 2647 if (error) { 2648 
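/* copyin() of the user buffer failed; release the temporary copy before bailing out. */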
free(pfras, M_TEMP); 2649 break; 2650 } 2651 PF_RULES_WLOCK(); 2652 error = pfr_add_addrs(&io->pfrio_table, pfras, 2653 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2654 PFR_FLAG_USERIOCTL); 2655 PF_RULES_WUNLOCK(); 2656 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2657 error = copyout(pfras, io->pfrio_buffer, totlen); 2658 free(pfras, M_TEMP); 2659 break; 2660 } 2661 2662 case DIOCRDELADDRS: { 2663 struct pfioc_table *io = (struct pfioc_table *)addr; 2664 struct pfr_addr *pfras; 2665 size_t totlen; 2666 2667 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2668 error = ENODEV; 2669 break; 2670 } 2671 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2672 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2673 error = copyin(io->pfrio_buffer, pfras, totlen); 2674 if (error) { 2675 free(pfras, M_TEMP); 2676 break; 2677 } 2678 PF_RULES_WLOCK(); 2679 error = pfr_del_addrs(&io->pfrio_table, pfras, 2680 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2681 PFR_FLAG_USERIOCTL); 2682 PF_RULES_WUNLOCK(); 2683 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2684 error = copyout(pfras, io->pfrio_buffer, totlen); 2685 free(pfras, M_TEMP); 2686 break; 2687 } 2688 2689 case DIOCRSETADDRS: { 2690 struct pfioc_table *io = (struct pfioc_table *)addr; 2691 struct pfr_addr *pfras; 2692 size_t totlen; 2693 2694 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2695 error = ENODEV; 2696 break; 2697 } 2698 totlen = (io->pfrio_size + io->pfrio_size2) * 2699 sizeof(struct pfr_addr); 2700 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2701 error = copyin(io->pfrio_buffer, pfras, totlen); 2702 if (error) { 2703 free(pfras, M_TEMP); 2704 break; 2705 } 2706 PF_RULES_WLOCK(); 2707 error = pfr_set_addrs(&io->pfrio_table, pfras, 2708 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2709 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2710 PFR_FLAG_USERIOCTL, 0); 2711 PF_RULES_WUNLOCK(); 2712 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2713 error = copyout(pfras, io->pfrio_buffer, totlen); 2714 free(pfras, M_TEMP); 2715 break; 2716 } 2717 2718 case DIOCRGETADDRS: { 2719 struct pfioc_table *io = (struct pfioc_table *)addr; 2720 struct pfr_addr *pfras; 2721 size_t totlen; 2722 2723 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2724 error = ENODEV; 2725 break; 2726 } 2727 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2728 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2729 PF_RULES_RLOCK(); 2730 error = pfr_get_addrs(&io->pfrio_table, pfras, 2731 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2732 PF_RULES_RUNLOCK(); 2733 if (error == 0) 2734 error = copyout(pfras, io->pfrio_buffer, totlen); 2735 free(pfras, M_TEMP); 2736 break; 2737 } 2738 2739 case DIOCRGETASTATS: { 2740 struct pfioc_table *io = (struct pfioc_table *)addr; 2741 struct pfr_astats *pfrastats; 2742 size_t totlen; 2743 2744 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2745 error = ENODEV; 2746 break; 2747 } 2748 totlen = io->pfrio_size * sizeof(struct pfr_astats); 2749 pfrastats = malloc(totlen, M_TEMP, M_WAITOK); 2750 PF_RULES_RLOCK(); 2751 error = pfr_get_astats(&io->pfrio_table, pfrastats, 2752 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2753 PF_RULES_RUNLOCK(); 2754 if (error == 0) 2755 error = copyout(pfrastats, io->pfrio_buffer, totlen); 2756 free(pfrastats, M_TEMP); 2757 break; 2758 } 2759 2760 case DIOCRCLRASTATS: { 2761 struct pfioc_table *io = (struct pfioc_table *)addr; 2762 struct pfr_addr *pfras; 2763 size_t totlen; 2764 2765 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2766 
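/* The userland element size disagrees with the kernel's struct pfr_addr; reject the request as an ABI mismatch. */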
error = ENODEV; 2767 break; 2768 } 2769 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2770 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2771 error = copyin(io->pfrio_buffer, pfras, totlen); 2772 if (error) { 2773 free(pfras, M_TEMP); 2774 break; 2775 } 2776 PF_RULES_WLOCK(); 2777 error = pfr_clr_astats(&io->pfrio_table, pfras, 2778 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2779 PFR_FLAG_USERIOCTL); 2780 PF_RULES_WUNLOCK(); 2781 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 2782 error = copyout(pfras, io->pfrio_buffer, totlen); 2783 free(pfras, M_TEMP); 2784 break; 2785 } 2786 2787 case DIOCRTSTADDRS: { 2788 struct pfioc_table *io = (struct pfioc_table *)addr; 2789 struct pfr_addr *pfras; 2790 size_t totlen; 2791 2792 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2793 error = ENODEV; 2794 break; 2795 } 2796 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2797 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2798 error = copyin(io->pfrio_buffer, pfras, totlen); 2799 if (error) { 2800 free(pfras, M_TEMP); 2801 break; 2802 } 2803 PF_RULES_RLOCK(); 2804 error = pfr_tst_addrs(&io->pfrio_table, pfras, 2805 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2806 PFR_FLAG_USERIOCTL); 2807 PF_RULES_RUNLOCK(); 2808 if (error == 0) 2809 error = copyout(pfras, io->pfrio_buffer, totlen); 2810 free(pfras, M_TEMP); 2811 break; 2812 } 2813 2814 case DIOCRINADEFINE: { 2815 struct pfioc_table *io = (struct pfioc_table *)addr; 2816 struct pfr_addr *pfras; 2817 size_t totlen; 2818 2819 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2820 error = ENODEV; 2821 break; 2822 } 2823 totlen = io->pfrio_size * sizeof(struct pfr_addr); 2824 pfras = malloc(totlen, M_TEMP, M_WAITOK); 2825 error = copyin(io->pfrio_buffer, pfras, totlen); 2826 if (error) { 2827 free(pfras, M_TEMP); 2828 break; 2829 } 2830 PF_RULES_WLOCK(); 2831 error = pfr_ina_define(&io->pfrio_table, pfras, 2832 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2833 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2834 PF_RULES_WUNLOCK(); 2835 free(pfras, M_TEMP); 2836 break; 2837 } 2838 2839 case DIOCOSFPADD: { 2840 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2841 PF_RULES_WLOCK(); 2842 error = pf_osfp_add(io); 2843 PF_RULES_WUNLOCK(); 2844 break; 2845 } 2846 2847 case DIOCOSFPGET: { 2848 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2849 PF_RULES_RLOCK(); 2850 error = pf_osfp_get(io); 2851 PF_RULES_RUNLOCK(); 2852 break; 2853 } 2854 2855 case DIOCXBEGIN: { 2856 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2857 struct pfioc_trans_e *ioes, *ioe; 2858 size_t totlen; 2859 int i; 2860 2861 if (io->esize != sizeof(*ioe)) { 2862 error = ENODEV; 2863 break; 2864 } 2865 totlen = sizeof(struct pfioc_trans_e) * io->size; 2866 ioes = malloc(totlen, M_TEMP, M_WAITOK); 2867 error = copyin(io->array, ioes, totlen); 2868 if (error) { 2869 free(ioes, M_TEMP); 2870 break; 2871 } 2872 PF_RULES_WLOCK(); 2873 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 2874 switch (ioe->rs_num) { 2875 #ifdef ALTQ 2876 case PF_RULESET_ALTQ: 2877 if (ioe->anchor[0]) { 2878 PF_RULES_WUNLOCK(); 2879 free(ioes, M_TEMP); 2880 error = EINVAL; 2881 goto fail; 2882 } 2883 if ((error = pf_begin_altq(&ioe->ticket))) { 2884 PF_RULES_WUNLOCK(); 2885 free(ioes, M_TEMP); 2886 goto fail; 2887 } 2888 break; 2889 #endif /* ALTQ */ 2890 case PF_RULESET_TABLE: 2891 { 2892 struct pfr_table table; 2893 2894 bzero(&table, sizeof(table)); 2895 strlcpy(table.pfrt_anchor, ioe->anchor, 2896 sizeof(table.pfrt_anchor)); 2897 if ((error = 
pfr_ina_begin(&table, 2898 &ioe->ticket, NULL, 0))) { 2899 PF_RULES_WUNLOCK(); 2900 free(ioes, M_TEMP); 2901 goto fail; 2902 } 2903 break; 2904 } 2905 default: 2906 if ((error = pf_begin_rules(&ioe->ticket, 2907 ioe->rs_num, ioe->anchor))) { 2908 PF_RULES_WUNLOCK(); 2909 free(ioes, M_TEMP); 2910 goto fail; 2911 } 2912 break; 2913 } 2914 } 2915 PF_RULES_WUNLOCK(); 2916 error = copyout(ioes, io->array, totlen); 2917 free(ioes, M_TEMP); 2918 break; 2919 } 2920 2921 case DIOCXROLLBACK: { 2922 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2923 struct pfioc_trans_e *ioe, *ioes; 2924 size_t totlen; 2925 int i; 2926 2927 if (io->esize != sizeof(*ioe)) { 2928 error = ENODEV; 2929 break; 2930 } 2931 totlen = sizeof(struct pfioc_trans_e) * io->size; 2932 ioes = malloc(totlen, M_TEMP, M_WAITOK); 2933 error = copyin(io->array, ioes, totlen); 2934 if (error) { 2935 free(ioes, M_TEMP); 2936 break; 2937 } 2938 PF_RULES_WLOCK(); 2939 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 2940 switch (ioe->rs_num) { 2941 #ifdef ALTQ 2942 case PF_RULESET_ALTQ: 2943 if (ioe->anchor[0]) { 2944 PF_RULES_WUNLOCK(); 2945 free(ioes, M_TEMP); 2946 error = EINVAL; 2947 goto fail; 2948 } 2949 if ((error = pf_rollback_altq(ioe->ticket))) { 2950 PF_RULES_WUNLOCK(); 2951 free(ioes, M_TEMP); 2952 goto fail; /* really bad */ 2953 } 2954 break; 2955 #endif /* ALTQ */ 2956 case PF_RULESET_TABLE: 2957 { 2958 struct pfr_table table; 2959 2960 bzero(&table, sizeof(table)); 2961 strlcpy(table.pfrt_anchor, ioe->anchor, 2962 sizeof(table.pfrt_anchor)); 2963 if ((error = pfr_ina_rollback(&table, 2964 ioe->ticket, NULL, 0))) { 2965 PF_RULES_WUNLOCK(); 2966 free(ioes, M_TEMP); 2967 goto fail; /* really bad */ 2968 } 2969 break; 2970 } 2971 default: 2972 if ((error = pf_rollback_rules(ioe->ticket, 2973 ioe->rs_num, ioe->anchor))) { 2974 PF_RULES_WUNLOCK(); 2975 free(ioes, M_TEMP); 2976 goto fail; /* really bad */ 2977 } 2978 break; 2979 } 2980 } 2981 PF_RULES_WUNLOCK(); 2982 free(ioes, M_TEMP); 2983 break; 2984 } 2985 2986 case DIOCXCOMMIT: { 2987 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2988 struct pfioc_trans_e *ioe, *ioes; 2989 struct pf_ruleset *rs; 2990 size_t totlen; 2991 int i; 2992 2993 if (io->esize != sizeof(*ioe)) { 2994 error = ENODEV; 2995 break; 2996 } 2997 totlen = sizeof(struct pfioc_trans_e) * io->size; 2998 ioes = malloc(totlen, M_TEMP, M_WAITOK); 2999 error = copyin(io->array, ioes, totlen); 3000 if (error) { 3001 free(ioes, M_TEMP); 3002 break; 3003 } 3004 PF_RULES_WLOCK(); 3005 /* First makes sure everything will succeed. 
*/ 3006 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 3007 switch (ioe->rs_num) { 3008 #ifdef ALTQ 3009 case PF_RULESET_ALTQ: 3010 if (ioe->anchor[0]) { 3011 PF_RULES_WUNLOCK(); 3012 free(ioes, M_TEMP); 3013 error = EINVAL; 3014 goto fail; 3015 } 3016 if (!V_altqs_inactive_open || ioe->ticket != 3017 V_ticket_altqs_inactive) { 3018 PF_RULES_WUNLOCK(); 3019 free(ioes, M_TEMP); 3020 error = EBUSY; 3021 goto fail; 3022 } 3023 break; 3024 #endif /* ALTQ */ 3025 case PF_RULESET_TABLE: 3026 rs = pf_find_ruleset(ioe->anchor); 3027 if (rs == NULL || !rs->topen || ioe->ticket != 3028 rs->tticket) { 3029 PF_RULES_WUNLOCK(); 3030 free(ioes, M_TEMP); 3031 error = EBUSY; 3032 goto fail; 3033 } 3034 break; 3035 default: 3036 if (ioe->rs_num < 0 || ioe->rs_num >= 3037 PF_RULESET_MAX) { 3038 PF_RULES_WUNLOCK(); 3039 free(ioes, M_TEMP); 3040 error = EINVAL; 3041 goto fail; 3042 } 3043 rs = pf_find_ruleset(ioe->anchor); 3044 if (rs == NULL || 3045 !rs->rules[ioe->rs_num].inactive.open || 3046 rs->rules[ioe->rs_num].inactive.ticket != 3047 ioe->ticket) { 3048 PF_RULES_WUNLOCK(); 3049 free(ioes, M_TEMP); 3050 error = EBUSY; 3051 goto fail; 3052 } 3053 break; 3054 } 3055 } 3056 /* Now do the commit - no errors should happen here. */ 3057 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 3058 switch (ioe->rs_num) { 3059 #ifdef ALTQ 3060 case PF_RULESET_ALTQ: 3061 if ((error = pf_commit_altq(ioe->ticket))) { 3062 PF_RULES_WUNLOCK(); 3063 free(ioes, M_TEMP); 3064 goto fail; /* really bad */ 3065 } 3066 break; 3067 #endif /* ALTQ */ 3068 case PF_RULESET_TABLE: 3069 { 3070 struct pfr_table table; 3071 3072 bzero(&table, sizeof(table)); 3073 strlcpy(table.pfrt_anchor, ioe->anchor, 3074 sizeof(table.pfrt_anchor)); 3075 if ((error = pfr_ina_commit(&table, 3076 ioe->ticket, NULL, NULL, 0))) { 3077 PF_RULES_WUNLOCK(); 3078 free(ioes, M_TEMP); 3079 goto fail; /* really bad */ 3080 } 3081 break; 3082 } 3083 default: 3084 if ((error = pf_commit_rules(ioe->ticket, 3085 ioe->rs_num, ioe->anchor))) { 3086 PF_RULES_WUNLOCK(); 3087 free(ioes, M_TEMP); 3088 goto fail; /* really bad */ 3089 } 3090 break; 3091 } 3092 } 3093 PF_RULES_WUNLOCK(); 3094 free(ioes, M_TEMP); 3095 break; 3096 } 3097 3098 case DIOCGETSRCNODES: { 3099 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 3100 struct pf_srchash *sh; 3101 struct pf_src_node *n, *p, *pstore; 3102 uint32_t i, nr = 0; 3103 3104 if (psn->psn_len == 0) { 3105 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; 3106 i++, sh++) { 3107 PF_HASHROW_LOCK(sh); 3108 LIST_FOREACH(n, &sh->nodes, entry) 3109 nr++; 3110 PF_HASHROW_UNLOCK(sh); 3111 } 3112 psn->psn_len = sizeof(struct pf_src_node) * nr; 3113 break; 3114 } 3115 3116 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK); 3117 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; 3118 i++, sh++) { 3119 PF_HASHROW_LOCK(sh); 3120 LIST_FOREACH(n, &sh->nodes, entry) { 3121 int secs = time_uptime, diff; 3122 3123 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 3124 break; 3125 3126 bcopy(n, p, sizeof(struct pf_src_node)); 3127 if (n->rule.ptr != NULL) 3128 p->rule.nr = n->rule.ptr->nr; 3129 p->creation = secs - p->creation; 3130 if (p->expire > secs) 3131 p->expire -= secs; 3132 else 3133 p->expire = 0; 3134 3135 /* Adjust the connection rate estimate. 
				 */
				diff = secs - n->conn_rate.last;
				if (diff >= n->conn_rate.seconds)
					p->conn_rate.count = 0;
				else
					p->conn_rate.count -=
					    n->conn_rate.count * diff /
					    n->conn_rate.seconds;
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		V_pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = malloc(bufsiz, M_TEMP, M_WAITOK);
		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	CURVNET_RESTORE();

	return (error);
}

void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}

static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

/*
 * XXX - Check for version mismatch!!!
 */
static void
pf_clear_states(void)
{
	struct pf_state *s;
	u_int i;

	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->sync_state = PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			/* The row lock was dropped; restart the scan. */
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

static void
pf_clear_srcnodes(struct pf_src_node *n)
{
	struct pf_state *s;
	int i;

	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_src_node_list kill;

	LIST_INIT(&kill);
	/* First pass: unlink matching source nodes onto the kill list. */
	for (int i = 0; i <= V_pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_src_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				pf_unlink_src_node_locked(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	/* Second pass: detach the unlinked nodes from any states. */
	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1) {
#ifdef INVARIANTS
				s->src_node->states--;
#endif
				s->src_node = NULL;
			}
			if (s->nat_src_node && s->nat_src_node->expire == 1) {
#ifdef INVARIANTS
				s->nat_src_node->states--;
#endif
				s->nat_src_node = NULL;
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	V_pf_status.running = 0;

	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloced memory, so no cleanup is needed */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}

#ifdef INET
static int
pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}

static int
pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	/* We need a proper CSUM before we start (see OpenBSD ip_output). */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		in_delayed_cksum(*m);
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}

	chk = pf_test(PF_OUT, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}

	return (chk);
}
#endif

#ifdef INET6
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. To support stateful
	 * filtering we have changed this to lo0, as is the case for IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return (chk);
}

static int
pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	/* We need a proper CSUM before we start (see OpenBSD ip_output). */
	if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
#ifdef INET
		/* XXX-BZ copy&paste error from r126261? */
		in_delayed_cksum(*m);
#endif
		(*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, ifp, m, inp);
	CURVNET_RESTORE();
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return (chk);
}
#endif /* INET6 */

static int
hook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (V_pf_pfil_hooked)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
	pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL) {
#ifdef INET
		pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
		    pfh_inet);
		pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
		    pfh_inet);
#endif
		return (ESRCH); /* XXX */
	}
	pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
	pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
#endif

	V_pf_pfil_hooked = 1;
	return (0);
}

static int
dehook_pf(void)
{
#ifdef INET
	struct pfil_head *pfh_inet;
#endif
#ifdef INET6
	struct pfil_head *pfh_inet6;
#endif

	if (V_pf_pfil_hooked == 0)
		return (0);

#ifdef INET
	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
	if (pfh_inet == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet);
	pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet);
#endif
#ifdef INET6
	pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
	if (pfh_inet6 == NULL)
		return (ESRCH); /* XXX */
	pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
	    pfh_inet6);
	pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
	    pfh_inet6);
#endif

	V_pf_pfil_hooked = 0;
	return (0);
}

static int
pf_load(void)
{
	int error;

	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pf_pfil_hooked = 0;
		V_pf_end_threads = 0;
		TAILQ_INIT(&V_pf_tags);
		TAILQ_INIT(&V_pf_qids);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	rw_init(&pf_rules_lock, "pf rulesets");

	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	if ((error = pfattach()) != 0)
		return (error);

	return (0);
}

static int
pf_unload(void)
{
	int error = 0;

	PF_RULES_WLOCK();
	V_pf_status.running = 0;
	PF_RULES_WUNLOCK();
	swi_remove(V_pf_swi_cookie);
	error = dehook_pf();
	if (error) {
		/*
		 * Should not happen!
		 * XXX Due to error code ESRCH, kldunload will show
		 * a message like 'No such process'.
		 */
		printf("%s: pfil unregistration failed\n", __FUNCTION__);
		return (error);
	}
	PF_RULES_WLOCK();
	shutdown_pf();
	V_pf_end_threads = 1;
	while (V_pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
	}
	pf_normalize_cleanup();
	pfi_cleanup();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	PF_RULES_WUNLOCK();
	destroy_dev(pf_dev);
	rw_destroy(&pf_rules_lock);

	return (error);
}

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EBUSY;
		break;
	case MOD_UNLOAD:
		error = pf_unload();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
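/*
 * Usage note (illustrative sketch, not part of the module): userland
 * tools such as pfctl(8) drive the pfioctl() handler above through the
 * /dev/pf character device created in pf_load().  Assuming the usual
 * pf(4) headers (<sys/ioctl.h>, <net/pfvar.h>, <fcntl.h>, <stdio.h>,
 * <string.h>), a minimal consumer might look like:
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *	struct pfioc_limit pl;
 *
 *	memset(&pl, 0, sizeof(pl));
 *	pl.index = PF_LIMIT_STATES;
 *	if (dev >= 0 && ioctl(dev, DIOCGETLIMIT, &pl) == 0)
 *		printf("state limit: %u\n", pl.limit);
 *
 * DIOCGETLIMIT is served above under the rules read lock, while
 * write-side requests such as DIOCSETLIMIT take the rules write lock.
 */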