1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 5 * Copyright (c) 2014 Yandex LLC 6 * Copyright (c) 2014 Alexander V. Chernikov 7 * 8 * Supported by: Valeria Paoli 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 /* 36 * Control socket and rule management routines for ipfw. 37 * Control is currently implemented via IP_FW3 setsockopt() code. 38 */ 39 40 #include "opt_ipfw.h" 41 #include "opt_inet.h" 42 #ifndef INET 43 #error IPFIREWALL requires INET. 
44 #endif /* INET */ 45 #include "opt_inet6.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/malloc.h> 50 #include <sys/mbuf.h> /* struct m_tag used by nested headers */ 51 #include <sys/kernel.h> 52 #include <sys/lock.h> 53 #include <sys/priv.h> 54 #include <sys/proc.h> 55 #include <sys/rwlock.h> 56 #include <sys/rmlock.h> 57 #include <sys/socket.h> 58 #include <sys/socketvar.h> 59 #include <sys/sysctl.h> 60 #include <sys/syslog.h> 61 #include <sys/fnv_hash.h> 62 #include <net/if.h> 63 #include <net/route.h> 64 #include <net/vnet.h> 65 #include <vm/vm.h> 66 #include <vm/vm_extern.h> 67 68 #include <netinet/in.h> 69 #include <netinet/ip_var.h> /* hooks */ 70 #include <netinet/ip_fw.h> 71 72 #include <netpfil/ipfw/ip_fw_private.h> 73 #include <netpfil/ipfw/ip_fw_table.h> 74 75 #ifdef MAC 76 #include <security/mac/mac_framework.h> 77 #endif 78 79 static int ipfw_ctl(struct sockopt *sopt); 80 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, 81 struct rule_check_info *ci); 82 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size, 83 struct rule_check_info *ci); 84 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 85 struct rule_check_info *ci); 86 static int rewrite_rule_uidx(struct ip_fw_chain *chain, 87 struct rule_check_info *ci); 88 89 #define NAMEDOBJ_HASH_SIZE 32 90 91 struct namedobj_instance { 92 struct namedobjects_head *names; 93 struct namedobjects_head *values; 94 uint32_t nn_size; /* names hash size */ 95 uint32_t nv_size; /* number hash size */ 96 u_long *idx_mask; /* used items bitmask */ 97 uint32_t max_blocks; /* number of "long" blocks in bitmask */ 98 uint32_t count; /* number of items */ 99 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */ 100 objhash_hash_f *hash_f; 101 objhash_cmp_f *cmp_f; 102 }; 103 #define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */ 104 105 static uint32_t objhash_hash_name(struct namedobj_instance *ni, 106 const void *key, uint32_t kopt); 107 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val); 108 static int objhash_cmp_name(struct named_object *no, const void *name, 109 uint32_t set); 110 111 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); 112 113 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 114 struct sockopt_data *sd); 115 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 116 struct sockopt_data *sd); 117 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 118 struct sockopt_data *sd); 119 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 120 struct sockopt_data *sd); 121 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 122 struct sockopt_data *sd); 123 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 124 struct sockopt_data *sd); 125 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 126 struct sockopt_data *sd); 127 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 128 struct sockopt_data *sd); 129 130 /* ctl3 handler data */ 131 struct mtx ctl3_lock; 132 #define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF) 133 #define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock) 134 #define CTL3_LOCK() mtx_lock(&ctl3_lock) 135 #define CTL3_UNLOCK() mtx_unlock(&ctl3_lock) 136 137 static struct ipfw_sopt_handler *ctl3_handlers; 138 static size_t ctl3_hsize; 139 static uint64_t ctl3_refct, ctl3_gencnt; 140 #define CTL3_SMALLBUF 
4096 /* small page-size write buffer */ 141 #define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */ 142 143 static int ipfw_flush_sopt_data(struct sockopt_data *sd); 144 145 static struct ipfw_sopt_handler scodes[] = { 146 { IP_FW_XGET, 0, HDIR_GET, dump_config }, 147 { IP_FW_XADD, 0, HDIR_BOTH, add_rules }, 148 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules }, 149 { IP_FW_XZERO, 0, HDIR_SET, clear_rules }, 150 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules }, 151 { IP_FW_XMOVE, 0, HDIR_SET, move_rules }, 152 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets }, 153 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets }, 154 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets }, 155 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes }, 156 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects }, 157 }; 158 159 static int 160 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule); 161 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd, 162 uint16_t *puidx, uint8_t *ptype); 163 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 164 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti); 165 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, 166 struct tid_info *ti, struct obj_idx *pidx, int *unresolved); 167 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule); 168 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, 169 struct obj_idx *oib, struct obj_idx *end); 170 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 171 struct sockopt_data *sd); 172 173 /* 174 * Opcode object rewriter variables 175 */ 176 struct opcode_obj_rewrite *ctl3_rewriters; 177 static size_t ctl3_rsize; 178 179 /* 180 * static variables followed by global ones 181 */ 182 183 VNET_DEFINE_STATIC(uma_zone_t, ipfw_cntr_zone); 184 #define V_ipfw_cntr_zone VNET(ipfw_cntr_zone) 185 186 void 187 ipfw_init_counters(void) 188 { 189 190 V_ipfw_cntr_zone = uma_zcreate("IPFW counters", 191 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL, 192 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 193 } 194 195 void 196 ipfw_destroy_counters(void) 197 { 198 199 uma_zdestroy(V_ipfw_cntr_zone); 200 } 201 202 struct ip_fw * 203 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize) 204 { 205 struct ip_fw *rule; 206 207 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO); 208 rule->cntr = uma_zalloc_pcpu(V_ipfw_cntr_zone, M_WAITOK | M_ZERO); 209 rule->refcnt = 1; 210 211 return (rule); 212 } 213 214 void 215 ipfw_free_rule(struct ip_fw *rule) 216 { 217 218 /* 219 * We don't release refcnt here, since this function 220 * can be called without any locks held. The caller 221 * must release reference under IPFW_UH_WLOCK, and then 222 * call this function if refcount becomes 1. 223 */ 224 if (rule->refcnt > 1) 225 return; 226 uma_zfree_pcpu(V_ipfw_cntr_zone, rule->cntr); 227 free(rule, M_IPFW); 228 } 229 230 /* 231 * Find the smallest rule >= key, id. 
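 * For example, ipfw_find_rule(chain, 200, 0) returns the index of the
 * first rule with rulenum >= 200 (the default rule if no such rule exists).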
232 * We could use bsearch but it is so simple that we code it directly 233 */ 234 int 235 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id) 236 { 237 int i, lo, hi; 238 struct ip_fw *r; 239 240 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) { 241 i = (lo + hi) / 2; 242 r = chain->map[i]; 243 if (r->rulenum < key) 244 lo = i + 1; /* continue from the next one */ 245 else if (r->rulenum > key) 246 hi = i; /* this might be good */ 247 else if (r->id < id) 248 lo = i + 1; /* continue from the next one */ 249 else /* r->id >= id */ 250 hi = i; /* this might be good */ 251 } 252 return hi; 253 } 254 255 /* 256 * Builds skipto cache on rule set @map. 257 */ 258 static void 259 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map) 260 { 261 int *smap, rulenum; 262 int i, mi; 263 264 IPFW_UH_WLOCK_ASSERT(chain); 265 266 mi = 0; 267 rulenum = map[mi]->rulenum; 268 smap = chain->idxmap_back; 269 270 if (smap == NULL) 271 return; 272 273 for (i = 0; i < 65536; i++) { 274 smap[i] = mi; 275 /* Use the same rule index until i < rulenum */ 276 if (i != rulenum || i == 65535) 277 continue; 278 /* Find next rule with num > i */ 279 rulenum = map[++mi]->rulenum; 280 while (rulenum == i) 281 rulenum = map[++mi]->rulenum; 282 } 283 } 284 285 /* 286 * Swaps prepared (backup) index with current one. 287 */ 288 static void 289 swap_skipto_cache(struct ip_fw_chain *chain) 290 { 291 int *map; 292 293 IPFW_UH_WLOCK_ASSERT(chain); 294 IPFW_WLOCK_ASSERT(chain); 295 296 map = chain->idxmap; 297 chain->idxmap = chain->idxmap_back; 298 chain->idxmap_back = map; 299 } 300 301 /* 302 * Allocate and initialize skipto cache. 303 */ 304 void 305 ipfw_init_skipto_cache(struct ip_fw_chain *chain) 306 { 307 int *idxmap, *idxmap_back; 308 309 idxmap = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK | M_ZERO); 310 idxmap_back = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK); 311 312 /* 313 * Note we may be called at any time after initialization, 314 * for example, on first skipto rule, so we need to 315 * provide valid chain->idxmap on return 316 */ 317 318 IPFW_UH_WLOCK(chain); 319 if (chain->idxmap != NULL) { 320 IPFW_UH_WUNLOCK(chain); 321 free(idxmap, M_IPFW); 322 free(idxmap_back, M_IPFW); 323 return; 324 } 325 326 /* Set backup pointer first to permit building cache */ 327 chain->idxmap_back = idxmap_back; 328 update_skipto_cache(chain, chain->map); 329 IPFW_WLOCK(chain); 330 /* It is now safe to set chain->idxmap ptr */ 331 chain->idxmap = idxmap; 332 swap_skipto_cache(chain); 333 IPFW_WUNLOCK(chain); 334 IPFW_UH_WUNLOCK(chain); 335 } 336 337 /* 338 * Destroys skipto cache. 339 */ 340 void 341 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain) 342 { 343 344 if (chain->idxmap != NULL) 345 free(chain->idxmap, M_IPFW); 346 if (chain->idxmap != NULL) 347 free(chain->idxmap_back, M_IPFW); 348 } 349 350 /* 351 * allocate a new map, returns the chain locked. extra is the number 352 * of entries to add or delete. 353 */ 354 static struct ip_fw ** 355 get_map(struct ip_fw_chain *chain, int extra, int locked) 356 { 357 358 for (;;) { 359 struct ip_fw **map; 360 u_int i, mflags; 361 362 mflags = M_ZERO | ((locked != 0) ? 
M_NOWAIT : M_WAITOK); 363 364 i = chain->n_rules + extra; 365 map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags); 366 if (map == NULL) { 367 printf("%s: cannot allocate map\n", __FUNCTION__); 368 return NULL; 369 } 370 if (!locked) 371 IPFW_UH_WLOCK(chain); 372 if (i >= chain->n_rules + extra) /* good */ 373 return map; 374 /* otherwise we lost the race, free and retry */ 375 if (!locked) 376 IPFW_UH_WUNLOCK(chain); 377 free(map, M_IPFW); 378 } 379 } 380 381 /* 382 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK 383 */ 384 static struct ip_fw ** 385 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len) 386 { 387 struct ip_fw **old_map; 388 389 IPFW_WLOCK(chain); 390 chain->id++; 391 chain->n_rules = new_len; 392 old_map = chain->map; 393 chain->map = new_map; 394 swap_skipto_cache(chain); 395 IPFW_WUNLOCK(chain); 396 return old_map; 397 } 398 399 static void 400 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr) 401 { 402 struct timeval boottime; 403 404 cntr->size = sizeof(*cntr); 405 406 if (krule->cntr != NULL) { 407 cntr->pcnt = counter_u64_fetch(krule->cntr); 408 cntr->bcnt = counter_u64_fetch(krule->cntr + 1); 409 cntr->timestamp = krule->timestamp; 410 } 411 if (cntr->timestamp > 0) { 412 getboottime(&boottime); 413 cntr->timestamp += boottime.tv_sec; 414 } 415 } 416 417 static void 418 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr) 419 { 420 struct timeval boottime; 421 422 if (krule->cntr != NULL) { 423 cntr->pcnt = counter_u64_fetch(krule->cntr); 424 cntr->bcnt = counter_u64_fetch(krule->cntr + 1); 425 cntr->timestamp = krule->timestamp; 426 } 427 if (cntr->timestamp > 0) { 428 getboottime(&boottime); 429 cntr->timestamp += boottime.tv_sec; 430 } 431 } 432 433 /* 434 * Copies rule @urule from v1 userland format (current). 435 * to kernel @krule. 436 * Assume @krule is zeroed. 437 */ 438 static void 439 import_rule1(struct rule_check_info *ci) 440 { 441 struct ip_fw_rule *urule; 442 struct ip_fw *krule; 443 444 urule = (struct ip_fw_rule *)ci->urule; 445 krule = (struct ip_fw *)ci->krule; 446 447 /* copy header */ 448 krule->act_ofs = urule->act_ofs; 449 krule->cmd_len = urule->cmd_len; 450 krule->rulenum = urule->rulenum; 451 krule->set = urule->set; 452 krule->flags = urule->flags; 453 454 /* Save rulenum offset */ 455 ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum); 456 457 /* Copy opcodes */ 458 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t)); 459 } 460 461 /* 462 * Export rule into v1 format (Current). 463 * Layout: 464 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT) 465 * [ ip_fw_rule ] OR 466 * [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs). 467 * ] 468 * Assume @data is zeroed. 
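 * The caller is expected to size @data to hold the TLV, the optional
 * counters and the rule itself (see the dump code below).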
469 */ 470 static void 471 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs) 472 { 473 struct ip_fw_bcounter *cntr; 474 struct ip_fw_rule *urule; 475 ipfw_obj_tlv *tlv; 476 477 /* Fill in TLV header */ 478 tlv = (ipfw_obj_tlv *)data; 479 tlv->type = IPFW_TLV_RULE_ENT; 480 tlv->length = len; 481 482 if (rcntrs != 0) { 483 /* Copy counters */ 484 cntr = (struct ip_fw_bcounter *)(tlv + 1); 485 urule = (struct ip_fw_rule *)(cntr + 1); 486 export_cntr1_base(krule, cntr); 487 } else 488 urule = (struct ip_fw_rule *)(tlv + 1); 489 490 /* copy header */ 491 urule->act_ofs = krule->act_ofs; 492 urule->cmd_len = krule->cmd_len; 493 urule->rulenum = krule->rulenum; 494 urule->set = krule->set; 495 urule->flags = krule->flags; 496 urule->id = krule->id; 497 498 /* Copy opcodes */ 499 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t)); 500 } 501 502 /* 503 * Copies rule @urule from FreeBSD8 userland format (v0) 504 * to kernel @krule. 505 * Assume @krule is zeroed. 506 */ 507 static void 508 import_rule0(struct rule_check_info *ci) 509 { 510 struct ip_fw_rule0 *urule; 511 struct ip_fw *krule; 512 int cmdlen, l; 513 ipfw_insn *cmd; 514 ipfw_insn_limit *lcmd; 515 ipfw_insn_if *cmdif; 516 517 urule = (struct ip_fw_rule0 *)ci->urule; 518 krule = (struct ip_fw *)ci->krule; 519 520 /* copy header */ 521 krule->act_ofs = urule->act_ofs; 522 krule->cmd_len = urule->cmd_len; 523 krule->rulenum = urule->rulenum; 524 krule->set = urule->set; 525 if ((urule->_pad & 1) != 0) 526 krule->flags |= IPFW_RULE_NOOPT; 527 528 /* Save rulenum offset */ 529 ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum); 530 531 /* Copy opcodes */ 532 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t)); 533 534 /* 535 * Alter opcodes: 536 * 1) convert tablearg value from 65535 to 0 537 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room 538 * for targ). 
539 * 3) convert table number in iface opcodes to u16 540 * 4) convert old `nat global` into new 65535 541 */ 542 l = krule->cmd_len; 543 cmd = krule->cmd; 544 cmdlen = 0; 545 546 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 547 cmdlen = F_LEN(cmd); 548 549 switch (cmd->opcode) { 550 /* Opcodes supporting tablearg */ 551 case O_TAG: 552 case O_TAGGED: 553 case O_PIPE: 554 case O_QUEUE: 555 case O_DIVERT: 556 case O_TEE: 557 case O_SKIPTO: 558 case O_CALLRETURN: 559 case O_NETGRAPH: 560 case O_NGTEE: 561 case O_NAT: 562 if (cmd->arg1 == IP_FW_TABLEARG) 563 cmd->arg1 = IP_FW_TARG; 564 else if (cmd->arg1 == 0) 565 cmd->arg1 = IP_FW_NAT44_GLOBAL; 566 break; 567 case O_SETFIB: 568 case O_SETDSCP: 569 case O_SETMARK: 570 case O_MARK: 571 if (cmd->arg1 == IP_FW_TABLEARG) 572 cmd->arg1 = IP_FW_TARG; 573 else 574 cmd->arg1 |= 0x8000; 575 break; 576 case O_LIMIT: 577 lcmd = (ipfw_insn_limit *)cmd; 578 if (lcmd->conn_limit == IP_FW_TABLEARG) 579 lcmd->conn_limit = IP_FW_TARG; 580 break; 581 /* Interface tables */ 582 case O_XMIT: 583 case O_RECV: 584 case O_VIA: 585 /* Interface table, possibly */ 586 cmdif = (ipfw_insn_if *)cmd; 587 if (cmdif->name[0] != '\1') 588 break; 589 590 cmdif->p.kidx = (uint16_t)cmdif->p.glob; 591 break; 592 } 593 } 594 } 595 596 /* 597 * Copies rule @krule from kernel to FreeBSD8 userland format (v0) 598 */ 599 static void 600 export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len) 601 { 602 int cmdlen, l; 603 ipfw_insn *cmd; 604 ipfw_insn_limit *lcmd; 605 ipfw_insn_if *cmdif; 606 607 /* copy header */ 608 memset(urule, 0, len); 609 urule->act_ofs = krule->act_ofs; 610 urule->cmd_len = krule->cmd_len; 611 urule->rulenum = krule->rulenum; 612 urule->set = krule->set; 613 if ((krule->flags & IPFW_RULE_NOOPT) != 0) 614 urule->_pad |= 1; 615 616 /* Copy opcodes */ 617 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t)); 618 619 /* Export counters */ 620 export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt); 621 622 /* 623 * Alter opcodes: 624 * 1) convert tablearg value from 0 to 65535 625 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values. 626 * 3) convert table number in iface opcodes to int 627 */ 628 l = urule->cmd_len; 629 cmd = urule->cmd; 630 cmdlen = 0; 631 632 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 633 cmdlen = F_LEN(cmd); 634 635 switch (cmd->opcode) { 636 /* Opcodes supporting tablearg */ 637 case O_TAG: 638 case O_TAGGED: 639 case O_PIPE: 640 case O_QUEUE: 641 case O_DIVERT: 642 case O_TEE: 643 case O_SKIPTO: 644 case O_CALLRETURN: 645 case O_NETGRAPH: 646 case O_NGTEE: 647 case O_NAT: 648 if (cmd->arg1 == IP_FW_TARG) 649 cmd->arg1 = IP_FW_TABLEARG; 650 else if (cmd->arg1 == IP_FW_NAT44_GLOBAL) 651 cmd->arg1 = 0; 652 break; 653 case O_SETFIB: 654 case O_SETDSCP: 655 case O_SETMARK: 656 case O_MARK: 657 if (cmd->arg1 == IP_FW_TARG) 658 cmd->arg1 = IP_FW_TABLEARG; 659 else 660 cmd->arg1 &= ~0x8000; 661 break; 662 case O_LIMIT: 663 lcmd = (ipfw_insn_limit *)cmd; 664 if (lcmd->conn_limit == IP_FW_TARG) 665 lcmd->conn_limit = IP_FW_TABLEARG; 666 break; 667 /* Interface tables */ 668 case O_XMIT: 669 case O_RECV: 670 case O_VIA: 671 /* Interface table, possibly */ 672 cmdif = (ipfw_insn_if *)cmd; 673 if (cmdif->name[0] != '\1') 674 break; 675 676 cmdif->p.glob = cmdif->p.kidx; 677 break; 678 } 679 } 680 } 681 682 /* 683 * Add new rule(s) to the list possibly creating rule number for each. 684 * Update the rule_number in the input struct so the caller knows it as well. 
685 * Must be called without IPFW_UH held 686 */ 687 static int 688 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count) 689 { 690 int error, i, insert_before, tcount; 691 uint16_t rulenum, *pnum; 692 struct rule_check_info *ci; 693 struct ip_fw *krule; 694 struct ip_fw **map; /* the new array of pointers */ 695 696 /* Check if we need to do table/obj index remap */ 697 tcount = 0; 698 for (ci = rci, i = 0; i < count; ci++, i++) { 699 if (ci->object_opcodes == 0) 700 continue; 701 702 /* 703 * Rule has some object opcodes. 704 * We need to find (and create non-existing) 705 * kernel objects, and reference existing ones. 706 */ 707 error = rewrite_rule_uidx(chain, ci); 708 if (error != 0) { 709 /* 710 * rewrite failed, state for current rule 711 * has been reverted. Check if we need to 712 * revert more. 713 */ 714 if (tcount > 0) { 715 /* 716 * We have some more table rules 717 * we need to rollback. 718 */ 719 720 IPFW_UH_WLOCK(chain); 721 while (ci != rci) { 722 ci--; 723 if (ci->object_opcodes == 0) 724 continue; 725 unref_rule_objects(chain,ci->krule); 726 } 727 IPFW_UH_WUNLOCK(chain); 728 } 729 730 return (error); 731 } 732 733 tcount++; 734 } 735 736 /* get_map returns with IPFW_UH_WLOCK if successful */ 737 map = get_map(chain, count, 0 /* not locked */); 738 if (map == NULL) { 739 if (tcount > 0) { 740 /* Unbind tables */ 741 IPFW_UH_WLOCK(chain); 742 for (ci = rci, i = 0; i < count; ci++, i++) { 743 if (ci->object_opcodes == 0) 744 continue; 745 746 unref_rule_objects(chain, ci->krule); 747 } 748 IPFW_UH_WUNLOCK(chain); 749 } 750 751 return (ENOSPC); 752 } 753 754 if (V_autoinc_step < 1) 755 V_autoinc_step = 1; 756 else if (V_autoinc_step > 1000) 757 V_autoinc_step = 1000; 758 759 /* FIXME: Handle count > 1 */ 760 ci = rci; 761 krule = ci->krule; 762 rulenum = krule->rulenum; 763 764 /* find the insertion point, we will insert before */ 765 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE; 766 i = ipfw_find_rule(chain, insert_before, 0); 767 /* duplicate first part */ 768 if (i > 0) 769 bcopy(chain->map, map, i * sizeof(struct ip_fw *)); 770 map[i] = krule; 771 /* duplicate remaining part, we always have the default rule */ 772 bcopy(chain->map + i, map + i + 1, 773 sizeof(struct ip_fw *) *(chain->n_rules - i)); 774 if (rulenum == 0) { 775 /* Compute rule number and write it back */ 776 rulenum = i > 0 ? 
map[i-1]->rulenum : 0; 777 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step) 778 rulenum += V_autoinc_step; 779 krule->rulenum = rulenum; 780 /* Save number to userland rule */ 781 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff); 782 *pnum = rulenum; 783 } 784 785 krule->id = chain->id + 1; 786 update_skipto_cache(chain, map); 787 map = swap_map(chain, map, chain->n_rules + 1); 788 chain->static_len += RULEUSIZE0(krule); 789 IPFW_UH_WUNLOCK(chain); 790 if (map) 791 free(map, M_IPFW); 792 return (0); 793 } 794 795 int 796 ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule, 797 int locked) 798 { 799 struct ip_fw **map; 800 801 map = get_map(chain, 1, locked); 802 if (map == NULL) 803 return (ENOMEM); 804 if (chain->n_rules > 0) 805 bcopy(chain->map, map, 806 chain->n_rules * sizeof(struct ip_fw *)); 807 map[chain->n_rules] = rule; 808 rule->rulenum = IPFW_DEFAULT_RULE; 809 rule->set = RESVD_SET; 810 rule->id = chain->id + 1; 811 /* We add rule in the end of chain, no need to update skipto cache */ 812 map = swap_map(chain, map, chain->n_rules + 1); 813 chain->static_len += RULEUSIZE0(rule); 814 IPFW_UH_WUNLOCK(chain); 815 free(map, M_IPFW); 816 return (0); 817 } 818 819 /* 820 * Adds @rule to the list of rules to reap 821 */ 822 void 823 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head, 824 struct ip_fw *rule) 825 { 826 827 IPFW_UH_WLOCK_ASSERT(chain); 828 829 /* Unlink rule from everywhere */ 830 unref_rule_objects(chain, rule); 831 832 rule->next = *head; 833 *head = rule; 834 } 835 836 /* 837 * Reclaim storage associated with a list of rules. This is 838 * typically the list created using remove_rule. 839 * A NULL pointer on input is handled correctly. 840 */ 841 void 842 ipfw_reap_rules(struct ip_fw *head) 843 { 844 struct ip_fw *rule; 845 846 while ((rule = head) != NULL) { 847 head = head->next; 848 ipfw_free_rule(rule); 849 } 850 } 851 852 /* 853 * Rules to keep are 854 * (default || reserved || !match_set || !match_number) 855 * where 856 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE) 857 * // the default rule is always protected 858 * 859 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET) 860 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush") 861 * 862 * match_set ::= (cmd == 0 || rule->set == set) 863 * // set number is ignored for cmd == 0 864 * 865 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum) 866 * // number is ignored for cmd == 1 or n == 0 867 * 868 */ 869 int 870 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt) 871 { 872 873 /* Don't match default rule for modification queries */ 874 if (rule->rulenum == IPFW_DEFAULT_RULE && 875 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0) 876 return (0); 877 878 /* Don't match rules in reserved set for flush requests */ 879 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET) 880 return (0); 881 882 /* If we're filtering by set, don't match other sets */ 883 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set) 884 return (0); 885 886 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 && 887 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule)) 888 return (0); 889 890 return (1); 891 } 892 893 struct manage_sets_args { 894 uint16_t set; 895 uint8_t new_set; 896 }; 897 898 static int 899 swap_sets_cb(struct namedobj_instance *ni, struct named_object *no, 900 void *arg) 901 { 902 struct manage_sets_args *args; 903 904 args = (struct manage_sets_args *)arg; 905 if (no->set == (uint8_t)args->set) 906 no->set = args->new_set; 
907 else if (no->set == args->new_set) 908 no->set = (uint8_t)args->set; 909 return (0); 910 } 911 912 static int 913 move_sets_cb(struct namedobj_instance *ni, struct named_object *no, 914 void *arg) 915 { 916 struct manage_sets_args *args; 917 918 args = (struct manage_sets_args *)arg; 919 if (no->set == (uint8_t)args->set) 920 no->set = args->new_set; 921 return (0); 922 } 923 924 static int 925 test_sets_cb(struct namedobj_instance *ni, struct named_object *no, 926 void *arg) 927 { 928 struct manage_sets_args *args; 929 930 args = (struct manage_sets_args *)arg; 931 if (no->set != (uint8_t)args->set) 932 return (0); 933 if (ipfw_objhash_lookup_name_type(ni, args->new_set, 934 no->etlv, no->name) != NULL) 935 return (EEXIST); 936 return (0); 937 } 938 939 /* 940 * Generic function to handler moving and swapping sets. 941 */ 942 int 943 ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type, 944 uint16_t set, uint8_t new_set, enum ipfw_sets_cmd cmd) 945 { 946 struct manage_sets_args args; 947 struct named_object *no; 948 949 args.set = set; 950 args.new_set = new_set; 951 switch (cmd) { 952 case SWAP_ALL: 953 return (ipfw_objhash_foreach_type(ni, swap_sets_cb, 954 &args, type)); 955 case TEST_ALL: 956 return (ipfw_objhash_foreach_type(ni, test_sets_cb, 957 &args, type)); 958 case MOVE_ALL: 959 return (ipfw_objhash_foreach_type(ni, move_sets_cb, 960 &args, type)); 961 case COUNT_ONE: 962 /* 963 * @set used to pass kidx. 964 * When @new_set is zero - reset object counter, 965 * otherwise increment it. 966 */ 967 no = ipfw_objhash_lookup_kidx(ni, set); 968 if (new_set != 0) 969 no->ocnt++; 970 else 971 no->ocnt = 0; 972 return (0); 973 case TEST_ONE: 974 /* @set used to pass kidx */ 975 no = ipfw_objhash_lookup_kidx(ni, set); 976 /* 977 * First check number of references: 978 * when it differs, this mean other rules are holding 979 * reference to given object, so it is not possible to 980 * change its set. Note that refcnt may account references 981 * to some going-to-be-added rules. Since we don't know 982 * their numbers (and even if they will be added) it is 983 * perfectly OK to return error here. 984 */ 985 if (no->ocnt != no->refcnt) 986 return (EBUSY); 987 if (ipfw_objhash_lookup_name_type(ni, new_set, type, 988 no->name) != NULL) 989 return (EEXIST); 990 return (0); 991 case MOVE_ONE: 992 /* @set used to pass kidx */ 993 no = ipfw_objhash_lookup_kidx(ni, set); 994 no->set = new_set; 995 return (0); 996 } 997 return (EINVAL); 998 } 999 1000 /* 1001 * Delete rules matching range @rt. 1002 * Saves number of deleted rules in @ndel. 1003 * 1004 * Returns 0 on success. 1005 */ 1006 static int 1007 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel) 1008 { 1009 struct ip_fw *reap, *rule, **map; 1010 int end, start; 1011 int i, n, ndyn, ofs; 1012 1013 reap = NULL; 1014 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 1015 1016 /* 1017 * Stage 1: Determine range to inspect. 1018 * Range is half-inclusive, e.g [start, end). 1019 */ 1020 start = 0; 1021 end = chain->n_rules - 1; 1022 1023 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) { 1024 start = ipfw_find_rule(chain, rt->start_rule, 0); 1025 1026 if (rt->end_rule >= IPFW_DEFAULT_RULE) 1027 rt->end_rule = IPFW_DEFAULT_RULE - 1; 1028 end = ipfw_find_rule(chain, rt->end_rule, UINT32_MAX); 1029 } 1030 1031 if (rt->flags & IPFW_RCFLAG_DYNAMIC) { 1032 /* 1033 * Requested deleting only for dynamic states. 
1034 */ 1035 *ndel = 0; 1036 ipfw_expire_dyn_states(chain, rt); 1037 IPFW_UH_WUNLOCK(chain); 1038 return (0); 1039 } 1040 1041 /* Allocate new map of the same size */ 1042 map = get_map(chain, 0, 1 /* locked */); 1043 if (map == NULL) { 1044 IPFW_UH_WUNLOCK(chain); 1045 return (ENOMEM); 1046 } 1047 1048 n = 0; 1049 ndyn = 0; 1050 ofs = start; 1051 /* 1. bcopy the initial part of the map */ 1052 if (start > 0) 1053 bcopy(chain->map, map, start * sizeof(struct ip_fw *)); 1054 /* 2. copy active rules between start and end */ 1055 for (i = start; i < end; i++) { 1056 rule = chain->map[i]; 1057 if (ipfw_match_range(rule, rt) == 0) { 1058 map[ofs++] = rule; 1059 continue; 1060 } 1061 1062 n++; 1063 if (ipfw_is_dyn_rule(rule) != 0) 1064 ndyn++; 1065 } 1066 /* 3. copy the final part of the map */ 1067 bcopy(chain->map + end, map + ofs, 1068 (chain->n_rules - end) * sizeof(struct ip_fw *)); 1069 /* 4. recalculate skipto cache */ 1070 update_skipto_cache(chain, map); 1071 /* 5. swap the maps (under UH_WLOCK + WHLOCK) */ 1072 map = swap_map(chain, map, chain->n_rules - n); 1073 /* 6. Remove all dynamic states originated by deleted rules */ 1074 if (ndyn > 0) 1075 ipfw_expire_dyn_states(chain, rt); 1076 /* 7. now remove the rules deleted from the old map */ 1077 for (i = start; i < end; i++) { 1078 rule = map[i]; 1079 if (ipfw_match_range(rule, rt) == 0) 1080 continue; 1081 chain->static_len -= RULEUSIZE0(rule); 1082 ipfw_reap_add(chain, &reap, rule); 1083 } 1084 IPFW_UH_WUNLOCK(chain); 1085 1086 ipfw_reap_rules(reap); 1087 if (map != NULL) 1088 free(map, M_IPFW); 1089 *ndel = n; 1090 return (0); 1091 } 1092 1093 static int 1094 move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt) 1095 { 1096 struct opcode_obj_rewrite *rw; 1097 struct ip_fw *rule; 1098 ipfw_insn *cmd; 1099 int cmdlen, i, l, c; 1100 uint16_t kidx; 1101 1102 IPFW_UH_WLOCK_ASSERT(ch); 1103 1104 /* Stage 1: count number of references by given rules */ 1105 for (c = 0, i = 0; i < ch->n_rules - 1; i++) { 1106 rule = ch->map[i]; 1107 if (ipfw_match_range(rule, rt) == 0) 1108 continue; 1109 if (rule->set == rt->new_set) /* nothing to do */ 1110 continue; 1111 /* Search opcodes with named objects */ 1112 for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd; 1113 l > 0; l -= cmdlen, cmd += cmdlen) { 1114 cmdlen = F_LEN(cmd); 1115 rw = find_op_rw(cmd, &kidx, NULL); 1116 if (rw == NULL || rw->manage_sets == NULL) 1117 continue; 1118 /* 1119 * When manage_sets() returns non-zero value to 1120 * COUNT_ONE command, consider this as an object 1121 * doesn't support sets (e.g. disabled with sysctl). 1122 * So, skip checks for this object. 
			 */
			if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
				continue;
			c++;
		}
	}
	if (c == 0) /* No objects found */
		return (0);
	/* Stage 2: verify "ownership" */
	for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/* Test for ownership and conflicting names */
			c = rw->manage_sets(ch, kidx,
			    (uint8_t)rt->new_set, TEST_ONE);
		}
	}
	/* Stage 3: change set and cleanup */
	for (i = 0; i < ch->n_rules - 1; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/* cleanup object counter */
			rw->manage_sets(ch, kidx,
			    0 /* reset counter */, COUNT_ONE);
			if (c != 0)
				continue;
			/* change set */
			rw->manage_sets(ch, kidx,
			    (uint8_t)rt->new_set, MOVE_ONE);
		}
	}
	return (c);
}

/*
 * Moves rules matching range @rt to the set @rt->new_set.
 *
 * Returns 0 on success.
 */
static int
move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK(chain);

	/*
	 * Move rules with matching parameters to a new set.
	 * This one is much more complex. We have to ensure
	 * that all referenced tables (if any) are referenced
	 * by given rule subset only. Otherwise, we can't move
	 * them to new set and have to return error.
	 */
	if ((i = move_objects(chain, rt)) != 0) {
		IPFW_UH_WUNLOCK(chain);
		return (i);
	}

	/* XXX: We have to do swap holding WLOCK */
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		rule->set = rt->new_set;
	}

	IPFW_UH_WUNLOCK(chain);

	return (0);
}

/*
 * Returns pointer to action instruction, skips all possible rule
 * modifiers like O_LOG, O_TAG, O_ALTQ.
 */
ipfw_insn *
ipfw_get_action(struct ip_fw *rule)
{
	ipfw_insn *cmd;
	int l, cmdlen;

	cmd = ACTION_PTR(rule);
	l = rule->cmd_len - rule->act_ofs;
	while (l > 0) {
		switch (cmd->opcode) {
		case O_ALTQ:
		case O_LOG:
		case O_TAG:
			break;
		default:
			return (cmd);
		}
		cmdlen = F_LEN(cmd);
		l -= cmdlen;
		cmd += cmdlen;
	}
	panic("%s: rule (%p) has no action opcode", __func__, rule);
	return (NULL);
}

/*
 * Clear counters for a specific rule.
 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
 * so we only care that rules do not disappear.
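 * With a non-zero log_only argument only the O_LOG "log_left" budget is
 * refreshed and the packet/byte counters are left untouched.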
1249 */ 1250 static void 1251 clear_counters(struct ip_fw *rule, int log_only) 1252 { 1253 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule); 1254 1255 if (log_only == 0) 1256 IPFW_ZERO_RULE_COUNTER(rule); 1257 if (l->o.opcode == O_LOG) 1258 l->log_left = l->max_log; 1259 } 1260 1261 /* 1262 * Flushes rules counters and/or log values on matching range. 1263 * 1264 * Returns number of items cleared. 1265 */ 1266 static int 1267 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only) 1268 { 1269 struct ip_fw *rule; 1270 int num; 1271 int i; 1272 1273 num = 0; 1274 rt->flags |= IPFW_RCFLAG_DEFAULT; 1275 1276 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 1277 for (i = 0; i < chain->n_rules; i++) { 1278 rule = chain->map[i]; 1279 if (ipfw_match_range(rule, rt) == 0) 1280 continue; 1281 clear_counters(rule, log_only); 1282 num++; 1283 } 1284 IPFW_UH_WUNLOCK(chain); 1285 1286 return (num); 1287 } 1288 1289 static int 1290 check_range_tlv(ipfw_range_tlv *rt) 1291 { 1292 1293 if (rt->head.length != sizeof(*rt)) 1294 return (1); 1295 if (rt->start_rule > rt->end_rule) 1296 return (1); 1297 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS) 1298 return (1); 1299 1300 if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags) 1301 return (1); 1302 1303 return (0); 1304 } 1305 1306 /* 1307 * Delete rules matching specified parameters 1308 * Data layout (v0)(current): 1309 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1310 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1311 * 1312 * Saves number of deleted rules in ipfw_range_tlv->new_set. 1313 * 1314 * Returns 0 on success. 1315 */ 1316 static int 1317 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1318 struct sockopt_data *sd) 1319 { 1320 ipfw_range_header *rh; 1321 int error, ndel; 1322 1323 if (sd->valsize != sizeof(*rh)) 1324 return (EINVAL); 1325 1326 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1327 1328 if (check_range_tlv(&rh->range) != 0) 1329 return (EINVAL); 1330 1331 ndel = 0; 1332 if ((error = delete_range(chain, &rh->range, &ndel)) != 0) 1333 return (error); 1334 1335 /* Save number of rules deleted */ 1336 rh->range.new_set = ndel; 1337 return (0); 1338 } 1339 1340 /* 1341 * Move rules/sets matching specified parameters 1342 * Data layout (v0)(current): 1343 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1344 * 1345 * Returns 0 on success. 1346 */ 1347 static int 1348 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1349 struct sockopt_data *sd) 1350 { 1351 ipfw_range_header *rh; 1352 1353 if (sd->valsize != sizeof(*rh)) 1354 return (EINVAL); 1355 1356 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1357 1358 if (check_range_tlv(&rh->range) != 0) 1359 return (EINVAL); 1360 1361 return (move_range(chain, &rh->range)); 1362 } 1363 1364 /* 1365 * Clear rule accounting data matching specified parameters 1366 * Data layout (v0)(current): 1367 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1368 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1369 * 1370 * Saves number of cleared rules in ipfw_range_tlv->new_set. 1371 * 1372 * Returns 0 on success. 
 */
static int
clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_range_header *rh;
	int log_only, num;
	char *msg;

	if (sd->valsize != sizeof(*rh))
		return (EINVAL);

	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);

	if (check_range_tlv(&rh->range) != 0)
		return (EINVAL);

	log_only = (op3->opcode == IP_FW_XRESETLOG);

	num = clear_range(chain, &rh->range, log_only);

	if (rh->range.flags & IPFW_RCFLAG_ALL)
		msg = log_only ? "All logging counts reset" :
		    "Accounting cleared";
	else
		msg = log_only ? "logging count reset" : "cleared";

	if (V_fw_verbose) {
		int lev = LOG_SECURITY | LOG_NOTICE;
		log(lev, "ipfw: %s.\n", msg);
	}

	/* Save number of rules cleared */
	rh->range.new_set = num;
	return (0);
}

static void
enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	uint32_t v_set;

	IPFW_UH_WLOCK_ASSERT(chain);

	/* Change enabled/disabled sets mask */
	v_set = (V_set_disable | rt->set) & ~rt->new_set;
	v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
	IPFW_WLOCK(chain);
	V_set_disable = v_set;
	IPFW_WUNLOCK(chain);
}

static int
swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
{
	struct opcode_obj_rewrite *rw;
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK_ASSERT(chain);

	if (rt->set == rt->new_set) /* nothing to do */
		return (0);

	if (mv != 0) {
		/*
		 * Before moving the rules we need to check that
		 * there aren't any conflicting named objects.
		 */
		for (rw = ctl3_rewriters;
		    rw < ctl3_rewriters + ctl3_rsize; rw++) {
			if (rw->manage_sets == NULL)
				continue;
			i = rw->manage_sets(chain, (uint8_t)rt->set,
			    (uint8_t)rt->new_set, TEST_ALL);
			if (i != 0)
				return (EEXIST);
		}
	}
	/* Swap or move two sets */
	for (i = 0; i < chain->n_rules - 1; i++) {
		rule = chain->map[i];
		if (rule->set == (uint8_t)rt->set)
			rule->set = (uint8_t)rt->new_set;
		else if (rule->set == (uint8_t)rt->new_set && mv == 0)
			rule->set = (uint8_t)rt->set;
	}
	for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
		if (rw->manage_sets == NULL)
			continue;
		rw->manage_sets(chain, (uint8_t)rt->set,
		    (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL : SWAP_ALL);
	}
	return (0);
}

/*
 * Swaps or moves rule sets.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_range_tlv ]
 *
 * Returns 0 on success.
 */
static int
manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_range_header *rh;
	int ret;

	if (sd->valsize != sizeof(*rh))
		return (EINVAL);

	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);

	if (rh->range.head.length != sizeof(ipfw_range_tlv))
		return (1);
	/* enable_sets() expects bitmasks.
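	 * Here rt->set is a bitmask of sets to disable and rt->new_set a
	 * bitmask of sets to enable, e.g. set = (1 << 5), new_set = (1 << 6)
	 * disables set 5 and enables set 6; RESVD_SET stays enabled.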
*/ 1491 if (op3->opcode != IP_FW_SET_ENABLE && 1492 (rh->range.set >= IPFW_MAX_SETS || 1493 rh->range.new_set >= IPFW_MAX_SETS)) 1494 return (EINVAL); 1495 1496 ret = 0; 1497 IPFW_UH_WLOCK(chain); 1498 switch (op3->opcode) { 1499 case IP_FW_SET_SWAP: 1500 case IP_FW_SET_MOVE: 1501 ret = swap_sets(chain, &rh->range, 1502 op3->opcode == IP_FW_SET_MOVE); 1503 break; 1504 case IP_FW_SET_ENABLE: 1505 enable_sets(chain, &rh->range); 1506 break; 1507 } 1508 IPFW_UH_WUNLOCK(chain); 1509 1510 return (ret); 1511 } 1512 1513 /** 1514 * Remove all rules with given number, or do set manipulation. 1515 * Assumes chain != NULL && *chain != NULL. 1516 * 1517 * The argument is an uint32_t. The low 16 bit are the rule or set number; 1518 * the next 8 bits are the new set; the top 8 bits indicate the command: 1519 * 1520 * 0 delete rules numbered "rulenum" 1521 * 1 delete rules in set "rulenum" 1522 * 2 move rules "rulenum" to set "new_set" 1523 * 3 move rules from set "rulenum" to set "new_set" 1524 * 4 swap sets "rulenum" and "new_set" 1525 * 5 delete rules "rulenum" and set "new_set" 1526 */ 1527 static int 1528 del_entry(struct ip_fw_chain *chain, uint32_t arg) 1529 { 1530 uint32_t num; /* rule number or old_set */ 1531 uint8_t cmd, new_set; 1532 int do_del, ndel; 1533 int error = 0; 1534 ipfw_range_tlv rt; 1535 1536 num = arg & 0xffff; 1537 cmd = (arg >> 24) & 0xff; 1538 new_set = (arg >> 16) & 0xff; 1539 1540 if (cmd > 5 || new_set > RESVD_SET) 1541 return EINVAL; 1542 if (cmd == 0 || cmd == 2 || cmd == 5) { 1543 if (num >= IPFW_DEFAULT_RULE) 1544 return EINVAL; 1545 } else { 1546 if (num > RESVD_SET) /* old_set */ 1547 return EINVAL; 1548 } 1549 1550 /* Convert old requests into new representation */ 1551 memset(&rt, 0, sizeof(rt)); 1552 rt.start_rule = num; 1553 rt.end_rule = num; 1554 rt.set = num; 1555 rt.new_set = new_set; 1556 do_del = 0; 1557 1558 switch (cmd) { 1559 case 0: /* delete rules numbered "rulenum" */ 1560 if (num == 0) 1561 rt.flags |= IPFW_RCFLAG_ALL; 1562 else 1563 rt.flags |= IPFW_RCFLAG_RANGE; 1564 do_del = 1; 1565 break; 1566 case 1: /* delete rules in set "rulenum" */ 1567 rt.flags |= IPFW_RCFLAG_SET; 1568 do_del = 1; 1569 break; 1570 case 5: /* delete rules "rulenum" and set "new_set" */ 1571 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET; 1572 rt.set = new_set; 1573 rt.new_set = 0; 1574 do_del = 1; 1575 break; 1576 case 2: /* move rules "rulenum" to set "new_set" */ 1577 rt.flags |= IPFW_RCFLAG_RANGE; 1578 break; 1579 case 3: /* move rules from set "rulenum" to set "new_set" */ 1580 IPFW_UH_WLOCK(chain); 1581 error = swap_sets(chain, &rt, 1); 1582 IPFW_UH_WUNLOCK(chain); 1583 return (error); 1584 case 4: /* swap sets "rulenum" and "new_set" */ 1585 IPFW_UH_WLOCK(chain); 1586 error = swap_sets(chain, &rt, 0); 1587 IPFW_UH_WUNLOCK(chain); 1588 return (error); 1589 default: 1590 return (ENOTSUP); 1591 } 1592 1593 if (do_del != 0) { 1594 if ((error = delete_range(chain, &rt, &ndel)) != 0) 1595 return (error); 1596 1597 if (ndel == 0 && (cmd != 1 && num != 0)) 1598 return (EINVAL); 1599 1600 return (0); 1601 } 1602 1603 return (move_range(chain, &rt)); 1604 } 1605 1606 /** 1607 * Reset some or all counters on firewall rules. 1608 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number, 1609 * the next 8 bits are the set number, the top 8 bits are the command: 1610 * 0 work with rules from all set's; 1611 * 1 work with rules only from specified set. 1612 * Specified rule number is zero if we want to clear all entries. 
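 * For example (illustrative), clearing the counters of rule 100 in set 3
 * only:
 *	zero_entry(chain, (1 << 24) | (3 << 16) | 100, 0);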
1613 * log_only is 1 if we only want to reset logs, zero otherwise. 1614 */ 1615 static int 1616 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only) 1617 { 1618 struct ip_fw *rule; 1619 char *msg; 1620 int i; 1621 1622 uint16_t rulenum = arg & 0xffff; 1623 uint8_t set = (arg >> 16) & 0xff; 1624 uint8_t cmd = (arg >> 24) & 0xff; 1625 1626 if (cmd > 1) 1627 return (EINVAL); 1628 if (cmd == 1 && set > RESVD_SET) 1629 return (EINVAL); 1630 1631 IPFW_UH_RLOCK(chain); 1632 if (rulenum == 0) { 1633 V_norule_counter = 0; 1634 for (i = 0; i < chain->n_rules; i++) { 1635 rule = chain->map[i]; 1636 /* Skip rules not in our set. */ 1637 if (cmd == 1 && rule->set != set) 1638 continue; 1639 clear_counters(rule, log_only); 1640 } 1641 msg = log_only ? "All logging counts reset" : 1642 "Accounting cleared"; 1643 } else { 1644 int cleared = 0; 1645 for (i = 0; i < chain->n_rules; i++) { 1646 rule = chain->map[i]; 1647 if (rule->rulenum == rulenum) { 1648 if (cmd == 0 || rule->set == set) 1649 clear_counters(rule, log_only); 1650 cleared = 1; 1651 } 1652 if (rule->rulenum > rulenum) 1653 break; 1654 } 1655 if (!cleared) { /* we did not find any matching rules */ 1656 IPFW_UH_RUNLOCK(chain); 1657 return (EINVAL); 1658 } 1659 msg = log_only ? "logging count reset" : "cleared"; 1660 } 1661 IPFW_UH_RUNLOCK(chain); 1662 1663 if (V_fw_verbose) { 1664 int lev = LOG_SECURITY | LOG_NOTICE; 1665 1666 if (rulenum) 1667 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg); 1668 else 1669 log(lev, "ipfw: %s.\n", msg); 1670 } 1671 return (0); 1672 } 1673 1674 /* 1675 * Check rule head in FreeBSD11 format 1676 * 1677 */ 1678 static int 1679 check_ipfw_rule1(struct ip_fw_rule *rule, int size, 1680 struct rule_check_info *ci) 1681 { 1682 int l; 1683 1684 if (size < sizeof(*rule)) { 1685 printf("ipfw: rule too short\n"); 1686 return (EINVAL); 1687 } 1688 1689 /* Check for valid cmd_len */ 1690 l = roundup2(RULESIZE(rule), sizeof(uint64_t)); 1691 if (l != size) { 1692 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1693 return (EINVAL); 1694 } 1695 if (rule->act_ofs >= rule->cmd_len) { 1696 printf("ipfw: bogus action offset (%u > %u)\n", 1697 rule->act_ofs, rule->cmd_len - 1); 1698 return (EINVAL); 1699 } 1700 1701 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1702 return (EINVAL); 1703 1704 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1705 } 1706 1707 /* 1708 * Check rule head in FreeBSD8 format 1709 * 1710 */ 1711 static int 1712 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 1713 struct rule_check_info *ci) 1714 { 1715 int l; 1716 1717 if (size < sizeof(*rule)) { 1718 printf("ipfw: rule too short\n"); 1719 return (EINVAL); 1720 } 1721 1722 /* Check for valid cmd_len */ 1723 l = sizeof(*rule) + rule->cmd_len * 4 - 4; 1724 if (l != size) { 1725 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1726 return (EINVAL); 1727 } 1728 if (rule->act_ofs >= rule->cmd_len) { 1729 printf("ipfw: bogus action offset (%u > %u)\n", 1730 rule->act_ofs, rule->cmd_len - 1); 1731 return (EINVAL); 1732 } 1733 1734 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1735 return (EINVAL); 1736 1737 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1738 } 1739 1740 static int 1741 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci) 1742 { 1743 int cmdlen, l; 1744 int have_action; 1745 1746 have_action = 0; 1747 1748 /* 1749 * Now go for the individual checks. Very simple ones, basically only 1750 * instruction sizes. 
1751 */ 1752 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) { 1753 cmdlen = F_LEN(cmd); 1754 if (cmdlen > l) { 1755 printf("ipfw: opcode %d size truncated\n", 1756 cmd->opcode); 1757 return EINVAL; 1758 } 1759 switch (cmd->opcode) { 1760 case O_PROBE_STATE: 1761 case O_KEEP_STATE: 1762 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1763 goto bad_size; 1764 ci->object_opcodes++; 1765 break; 1766 case O_PROTO: 1767 case O_IP_SRC_ME: 1768 case O_IP_DST_ME: 1769 case O_LAYER2: 1770 case O_IN: 1771 case O_FRAG: 1772 case O_DIVERTED: 1773 case O_IPOPT: 1774 case O_IPTOS: 1775 case O_IPPRECEDENCE: 1776 case O_IPVER: 1777 case O_SOCKARG: 1778 case O_TCPFLAGS: 1779 case O_TCPOPTS: 1780 case O_ESTAB: 1781 case O_VERREVPATH: 1782 case O_VERSRCREACH: 1783 case O_ANTISPOOF: 1784 case O_IPSEC: 1785 #ifdef INET6 1786 case O_IP6_SRC_ME: 1787 case O_IP6_DST_ME: 1788 case O_EXT_HDR: 1789 case O_IP6: 1790 #endif 1791 case O_IP4: 1792 case O_TAG: 1793 case O_SKIP_ACTION: 1794 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1795 goto bad_size; 1796 break; 1797 1798 case O_EXTERNAL_ACTION: 1799 if (cmd->arg1 == 0 || 1800 cmdlen != F_INSN_SIZE(ipfw_insn)) { 1801 printf("ipfw: invalid external " 1802 "action opcode\n"); 1803 return (EINVAL); 1804 } 1805 ci->object_opcodes++; 1806 /* 1807 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA 1808 * opcode? 1809 */ 1810 if (l != cmdlen) { 1811 l -= cmdlen; 1812 cmd += cmdlen; 1813 cmdlen = F_LEN(cmd); 1814 if (cmd->opcode == O_EXTERNAL_DATA) 1815 goto check_action; 1816 if (cmd->opcode != O_EXTERNAL_INSTANCE) { 1817 printf("ipfw: invalid opcode " 1818 "next to external action %u\n", 1819 cmd->opcode); 1820 return (EINVAL); 1821 } 1822 if (cmd->arg1 == 0 || 1823 cmdlen != F_INSN_SIZE(ipfw_insn)) { 1824 printf("ipfw: invalid external " 1825 "action instance opcode\n"); 1826 return (EINVAL); 1827 } 1828 ci->object_opcodes++; 1829 } 1830 goto check_action; 1831 1832 case O_FIB: 1833 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1834 goto bad_size; 1835 if (cmd->arg1 >= rt_numfibs) { 1836 printf("ipfw: invalid fib number %d\n", 1837 cmd->arg1); 1838 return EINVAL; 1839 } 1840 break; 1841 1842 case O_SETFIB: 1843 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1844 goto bad_size; 1845 if ((cmd->arg1 != IP_FW_TARG) && 1846 ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) { 1847 printf("ipfw: invalid fib number %d\n", 1848 cmd->arg1 & 0x7FFF); 1849 return EINVAL; 1850 } 1851 goto check_action; 1852 1853 case O_UID: 1854 case O_GID: 1855 case O_JAIL: 1856 case O_IP_SRC: 1857 case O_IP_DST: 1858 case O_TCPSEQ: 1859 case O_TCPACK: 1860 case O_PROB: 1861 case O_ICMPTYPE: 1862 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1863 goto bad_size; 1864 break; 1865 1866 case O_LIMIT: 1867 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 1868 goto bad_size; 1869 ci->object_opcodes++; 1870 break; 1871 1872 case O_LOG: 1873 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 1874 goto bad_size; 1875 1876 ((ipfw_insn_log *)cmd)->log_left = 1877 ((ipfw_insn_log *)cmd)->max_log; 1878 1879 break; 1880 1881 case O_IP_SRC_MASK: 1882 case O_IP_DST_MASK: 1883 /* only odd command lengths */ 1884 if ((cmdlen & 1) == 0) 1885 goto bad_size; 1886 break; 1887 1888 case O_IP_SRC_SET: 1889 case O_IP_DST_SET: 1890 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 1891 printf("ipfw: invalid set size %d\n", 1892 cmd->arg1); 1893 return EINVAL; 1894 } 1895 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1896 (cmd->arg1+31)/32 ) 1897 goto bad_size; 1898 break; 1899 1900 case O_IP_SRC_LOOKUP: 1901 if (cmdlen > F_INSN_SIZE(ipfw_insn_u32)) 1902 goto bad_size; 1903 case O_IP_DST_LOOKUP: 
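			/* O_IP_SRC_LOOKUP falls through to the shared checks below */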
1904 if (cmd->arg1 >= V_fw_tables_max) { 1905 printf("ipfw: invalid table number %d\n", 1906 cmd->arg1); 1907 return (EINVAL); 1908 } 1909 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1910 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 && 1911 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1912 goto bad_size; 1913 ci->object_opcodes++; 1914 break; 1915 case O_IP_FLOW_LOOKUP: 1916 case O_MAC_DST_LOOKUP: 1917 case O_MAC_SRC_LOOKUP: 1918 if (cmd->arg1 >= V_fw_tables_max) { 1919 printf("ipfw: invalid table number %d\n", 1920 cmd->arg1); 1921 return (EINVAL); 1922 } 1923 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1924 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1925 goto bad_size; 1926 ci->object_opcodes++; 1927 break; 1928 case O_MACADDR2: 1929 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 1930 goto bad_size; 1931 break; 1932 1933 case O_NOP: 1934 case O_IPID: 1935 case O_IPTTL: 1936 case O_IPLEN: 1937 case O_TCPDATALEN: 1938 case O_TCPMSS: 1939 case O_TCPWIN: 1940 case O_TAGGED: 1941 if (cmdlen < 1 || cmdlen > 31) 1942 goto bad_size; 1943 break; 1944 1945 case O_DSCP: 1946 case O_MARK: 1947 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1) 1948 goto bad_size; 1949 break; 1950 1951 case O_MAC_TYPE: 1952 case O_IP_SRCPORT: 1953 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 1954 if (cmdlen < 2 || cmdlen > 31) 1955 goto bad_size; 1956 break; 1957 1958 case O_RECV: 1959 case O_XMIT: 1960 case O_VIA: 1961 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 1962 goto bad_size; 1963 ci->object_opcodes++; 1964 break; 1965 1966 case O_ALTQ: 1967 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq)) 1968 goto bad_size; 1969 break; 1970 1971 case O_PIPE: 1972 case O_QUEUE: 1973 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1974 goto bad_size; 1975 goto check_action; 1976 1977 case O_FORWARD_IP: 1978 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) 1979 goto bad_size; 1980 goto check_action; 1981 #ifdef INET6 1982 case O_FORWARD_IP6: 1983 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6)) 1984 goto bad_size; 1985 goto check_action; 1986 #endif /* INET6 */ 1987 1988 case O_DIVERT: 1989 case O_TEE: 1990 if (ip_divert_ptr == NULL) 1991 return EINVAL; 1992 else 1993 goto check_size; 1994 case O_NETGRAPH: 1995 case O_NGTEE: 1996 if (ng_ipfw_input_p == NULL) 1997 return EINVAL; 1998 else 1999 goto check_size; 2000 case O_NAT: 2001 if (!IPFW_NAT_LOADED) 2002 return EINVAL; 2003 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat)) 2004 goto bad_size; 2005 goto check_action; 2006 case O_CHECK_STATE: 2007 ci->object_opcodes++; 2008 goto check_size; 2009 case O_SETMARK: 2010 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 2011 goto bad_size; 2012 goto check_action; 2013 case O_REJECT: 2014 /* "unreach needfrag" has variable len. 
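			 * The longer ipfw_insn_u16 form is used to carry an
			 * explicit MTU for "unreach needfrag".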
*/ 2015 if ((cmdlen == F_INSN_SIZE(ipfw_insn) || 2016 cmdlen == F_INSN_SIZE(ipfw_insn_u16))) 2017 goto check_action; 2018 /* FALLTHROUGH */ 2019 case O_FORWARD_MAC: /* XXX not implemented yet */ 2020 case O_COUNT: 2021 case O_ACCEPT: 2022 case O_DENY: 2023 case O_SETDSCP: 2024 #ifdef INET6 2025 case O_UNREACH6: 2026 #endif 2027 case O_SKIPTO: 2028 case O_REASS: 2029 case O_CALLRETURN: 2030 check_size: 2031 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 2032 goto bad_size; 2033 check_action: 2034 if (have_action) { 2035 printf("ipfw: opcode %d, multiple actions" 2036 " not allowed\n", 2037 cmd->opcode); 2038 return (EINVAL); 2039 } 2040 have_action = 1; 2041 if (l != cmdlen) { 2042 printf("ipfw: opcode %d, action must be" 2043 " last opcode\n", 2044 cmd->opcode); 2045 return (EINVAL); 2046 } 2047 break; 2048 #ifdef INET6 2049 case O_IP6_SRC: 2050 case O_IP6_DST: 2051 if (cmdlen != F_INSN_SIZE(struct in6_addr) + 2052 F_INSN_SIZE(ipfw_insn)) 2053 goto bad_size; 2054 break; 2055 2056 case O_FLOW6ID: 2057 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 2058 ((ipfw_insn_u32 *)cmd)->o.arg1) 2059 goto bad_size; 2060 break; 2061 2062 case O_IP6_SRC_MASK: 2063 case O_IP6_DST_MASK: 2064 if ( !(cmdlen & 1) || cmdlen > 127) 2065 goto bad_size; 2066 break; 2067 case O_ICMP6TYPE: 2068 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) ) 2069 goto bad_size; 2070 break; 2071 #endif 2072 2073 default: 2074 switch (cmd->opcode) { 2075 #ifndef INET6 2076 case O_IP6_SRC_ME: 2077 case O_IP6_DST_ME: 2078 case O_EXT_HDR: 2079 case O_IP6: 2080 case O_UNREACH6: 2081 case O_IP6_SRC: 2082 case O_IP6_DST: 2083 case O_FLOW6ID: 2084 case O_IP6_SRC_MASK: 2085 case O_IP6_DST_MASK: 2086 case O_ICMP6TYPE: 2087 printf("ipfw: no IPv6 support in kernel\n"); 2088 return (EPROTONOSUPPORT); 2089 #endif 2090 default: 2091 printf("ipfw: opcode %d, unknown opcode\n", 2092 cmd->opcode); 2093 return (EINVAL); 2094 } 2095 } 2096 } 2097 if (have_action == 0) { 2098 printf("ipfw: missing action\n"); 2099 return (EINVAL); 2100 } 2101 return 0; 2102 2103 bad_size: 2104 printf("ipfw: opcode %d size %d wrong\n", 2105 cmd->opcode, cmdlen); 2106 return (EINVAL); 2107 } 2108 2109 /* 2110 * Translation of requests for compatibility with FreeBSD 7.2/8. 2111 * a static variable tells us if we have an old client from userland, 2112 * and if necessary we translate requests and responses between the 2113 * two formats. 2114 */ 2115 static int is7 = 0; 2116 2117 struct ip_fw7 { 2118 struct ip_fw7 *next; /* linked list of rules */ 2119 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */ 2120 /* 'next_rule' is used to pass up 'set_disable' status */ 2121 2122 uint16_t act_ofs; /* offset of action in 32-bit units */ 2123 uint16_t cmd_len; /* # of 32-bit words in cmd */ 2124 uint16_t rulenum; /* rule number */ 2125 uint8_t set; /* rule set (0..31) */ 2126 // #define RESVD_SET 31 /* set for default and persistent rules */ 2127 uint8_t _pad; /* padding */ 2128 // uint32_t id; /* rule id, only in v.8 */ 2129 /* These fields are present in all rules. 
*/ 2130 uint64_t pcnt; /* Packet counter */ 2131 uint64_t bcnt; /* Byte counter */ 2132 uint32_t timestamp; /* tv_sec of last match */ 2133 2134 ipfw_insn cmd[1]; /* storage for commands */ 2135 }; 2136 2137 static int convert_rule_to_7(struct ip_fw_rule0 *rule); 2138 static int convert_rule_to_8(struct ip_fw_rule0 *rule); 2139 2140 #ifndef RULESIZE7 2141 #define RULESIZE7(rule) (sizeof(struct ip_fw7) + \ 2142 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4) 2143 #endif 2144 2145 /* 2146 * Copy the static and dynamic rules to the supplied buffer 2147 * and return the amount of space actually used. 2148 * Must be run under IPFW_UH_RLOCK 2149 */ 2150 static size_t 2151 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space) 2152 { 2153 char *bp = buf; 2154 char *ep = bp + space; 2155 struct ip_fw *rule; 2156 struct ip_fw_rule0 *dst; 2157 struct timeval boottime; 2158 int error, i, l, warnflag; 2159 time_t boot_seconds; 2160 2161 warnflag = 0; 2162 2163 getboottime(&boottime); 2164 boot_seconds = boottime.tv_sec; 2165 for (i = 0; i < chain->n_rules; i++) { 2166 rule = chain->map[i]; 2167 2168 if (is7) { 2169 /* Convert rule to FreeBSd 7.2 format */ 2170 l = RULESIZE7(rule); 2171 if (bp + l + sizeof(uint32_t) <= ep) { 2172 bcopy(rule, bp, l + sizeof(uint32_t)); 2173 error = set_legacy_obj_kidx(chain, 2174 (struct ip_fw_rule0 *)bp); 2175 if (error != 0) 2176 return (0); 2177 error = convert_rule_to_7((struct ip_fw_rule0 *) bp); 2178 if (error) 2179 return 0; /*XXX correct? */ 2180 /* 2181 * XXX HACK. Store the disable mask in the "next" 2182 * pointer in a wild attempt to keep the ABI the same. 2183 * Why do we do this on EVERY rule? 2184 */ 2185 bcopy(&V_set_disable, 2186 &(((struct ip_fw7 *)bp)->next_rule), 2187 sizeof(V_set_disable)); 2188 if (((struct ip_fw7 *)bp)->timestamp) 2189 ((struct ip_fw7 *)bp)->timestamp += boot_seconds; 2190 bp += l; 2191 } 2192 continue; /* go to next rule */ 2193 } 2194 2195 l = RULEUSIZE0(rule); 2196 if (bp + l > ep) { /* should not happen */ 2197 printf("overflow dumping static rules\n"); 2198 break; 2199 } 2200 dst = (struct ip_fw_rule0 *)bp; 2201 export_rule0(rule, dst, l); 2202 error = set_legacy_obj_kidx(chain, dst); 2203 2204 /* 2205 * XXX HACK. Store the disable mask in the "next" 2206 * pointer in a wild attempt to keep the ABI the same. 2207 * Why do we do this on EVERY rule? 2208 * 2209 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask 2210 * so we need to fail _after_ saving at least one mask. 2211 */ 2212 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable)); 2213 if (dst->timestamp) 2214 dst->timestamp += boot_seconds; 2215 bp += l; 2216 2217 if (error != 0) { 2218 if (error == 2) { 2219 /* Non-fatal table rewrite error. */ 2220 warnflag = 1; 2221 continue; 2222 } 2223 printf("Stop on rule %d. 
Fail to convert table\n", 2224 rule->rulenum); 2225 break; 2226 } 2227 } 2228 if (warnflag != 0) 2229 printf("ipfw: process %s is using legacy interfaces," 2230 " consider rebuilding\n", ""); 2231 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */ 2232 return (bp - (char *)buf); 2233 } 2234 2235 struct dump_args { 2236 uint32_t b; /* start rule */ 2237 uint32_t e; /* end rule */ 2238 uint32_t rcount; /* number of rules */ 2239 uint32_t rsize; /* rules size */ 2240 uint32_t tcount; /* number of tables */ 2241 int rcounters; /* counters */ 2242 uint32_t *bmask; /* index bitmask of used named objects */ 2243 }; 2244 2245 void 2246 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv) 2247 { 2248 2249 ntlv->head.type = no->etlv; 2250 ntlv->head.length = sizeof(*ntlv); 2251 ntlv->idx = no->kidx; 2252 strlcpy(ntlv->name, no->name, sizeof(ntlv->name)); 2253 } 2254 2255 /* 2256 * Export named object info in instance @ni, identified by @kidx 2257 * to ipfw_obj_ntlv. TLV is allocated from @sd space. 2258 * 2259 * Returns 0 on success. 2260 */ 2261 static int 2262 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 2263 struct sockopt_data *sd) 2264 { 2265 struct named_object *no; 2266 ipfw_obj_ntlv *ntlv; 2267 2268 no = ipfw_objhash_lookup_kidx(ni, kidx); 2269 KASSERT(no != NULL, ("invalid object kernel index passed")); 2270 2271 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 2272 if (ntlv == NULL) 2273 return (ENOMEM); 2274 2275 ipfw_export_obj_ntlv(no, ntlv); 2276 return (0); 2277 } 2278 2279 static int 2280 export_named_objects(struct namedobj_instance *ni, struct dump_args *da, 2281 struct sockopt_data *sd) 2282 { 2283 int error, i; 2284 2285 for (i = 0; i < IPFW_TABLES_MAX && da->tcount > 0; i++) { 2286 if ((da->bmask[i / 32] & (1 << (i % 32))) == 0) 2287 continue; 2288 if ((error = export_objhash_ntlv(ni, i, sd)) != 0) 2289 return (error); 2290 da->tcount--; 2291 } 2292 return (0); 2293 } 2294 2295 static int 2296 dump_named_objects(struct ip_fw_chain *ch, struct dump_args *da, 2297 struct sockopt_data *sd) 2298 { 2299 ipfw_obj_ctlv *ctlv; 2300 int error; 2301 2302 MPASS(da->tcount > 0); 2303 /* Header first */ 2304 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 2305 if (ctlv == NULL) 2306 return (ENOMEM); 2307 ctlv->head.type = IPFW_TLV_TBLNAME_LIST; 2308 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) + 2309 sizeof(*ctlv); 2310 ctlv->count = da->tcount; 2311 ctlv->objsize = sizeof(ipfw_obj_ntlv); 2312 2313 /* Dump table names first (if any) */ 2314 error = export_named_objects(ipfw_get_table_objhash(ch), da, sd); 2315 if (error != 0) 2316 return (error); 2317 /* Then dump another named objects */ 2318 da->bmask += IPFW_TABLES_MAX / 32; 2319 return (export_named_objects(CHAIN_TO_SRV(ch), da, sd)); 2320 } 2321 2322 /* 2323 * Dumps static rules with table TLVs in buffer @sd. 2324 * 2325 * Returns 0 on success. 
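 *
 * Each exported rule is laid out as described for dump_config() below:
 *	ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
 *	[ struct ip_fw_bcounter ]	(only when counters were requested)
 *	struct ip_fw_rule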
2326 */ 2327 static int 2328 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da, 2329 struct sockopt_data *sd) 2330 { 2331 ipfw_obj_ctlv *ctlv; 2332 struct ip_fw *krule; 2333 caddr_t dst; 2334 int i, l; 2335 2336 /* Dump rules */ 2337 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 2338 if (ctlv == NULL) 2339 return (ENOMEM); 2340 ctlv->head.type = IPFW_TLV_RULE_LIST; 2341 ctlv->head.length = da->rsize + sizeof(*ctlv); 2342 ctlv->count = da->rcount; 2343 2344 for (i = da->b; i < da->e; i++) { 2345 krule = chain->map[i]; 2346 2347 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv); 2348 if (da->rcounters != 0) 2349 l += sizeof(struct ip_fw_bcounter); 2350 dst = (caddr_t)ipfw_get_sopt_space(sd, l); 2351 if (dst == NULL) 2352 return (ENOMEM); 2353 2354 export_rule1(krule, dst, l, da->rcounters); 2355 } 2356 2357 return (0); 2358 } 2359 2360 int 2361 ipfw_mark_object_kidx(uint32_t *bmask, uint16_t etlv, uint16_t kidx) 2362 { 2363 uint32_t bidx; 2364 2365 /* 2366 * Maintain separate bitmasks for table and non-table objects. 2367 */ 2368 bidx = (etlv == IPFW_TLV_TBL_NAME) ? 0: IPFW_TABLES_MAX / 32; 2369 bidx += kidx / 32; 2370 if ((bmask[bidx] & (1 << (kidx % 32))) != 0) 2371 return (0); 2372 2373 bmask[bidx] |= 1 << (kidx % 32); 2374 return (1); 2375 } 2376 2377 /* 2378 * Marks every object index used in @rule with bit in @bmask. 2379 * Used to generate bitmask of referenced tables/objects for given ruleset 2380 * or its part. 2381 */ 2382 static void 2383 mark_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 2384 struct dump_args *da) 2385 { 2386 struct opcode_obj_rewrite *rw; 2387 ipfw_insn *cmd; 2388 int cmdlen, l; 2389 uint16_t kidx; 2390 uint8_t subtype; 2391 2392 l = rule->cmd_len; 2393 cmd = rule->cmd; 2394 cmdlen = 0; 2395 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2396 cmdlen = F_LEN(cmd); 2397 2398 rw = find_op_rw(cmd, &kidx, &subtype); 2399 if (rw == NULL) 2400 continue; 2401 2402 if (ipfw_mark_object_kidx(da->bmask, rw->etlv, kidx)) 2403 da->tcount++; 2404 } 2405 } 2406 2407 /* 2408 * Dumps requested objects data 2409 * Data layout (version 0)(current): 2410 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags 2411 * size = ipfw_cfg_lheader.size 2412 * Reply: [ ipfw_cfg_lheader 2413 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 2414 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) 2415 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ] 2416 * ] (optional) 2417 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional) 2418 * ] 2419 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize. 2420 * The rest (size, count) are set to zero and needs to be ignored. 2421 * 2422 * Returns 0 on success. 2423 */ 2424 static int 2425 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2426 struct sockopt_data *sd) 2427 { 2428 struct dump_args da; 2429 ipfw_cfg_lheader *hdr; 2430 struct ip_fw *rule; 2431 size_t sz, rnum; 2432 uint32_t hdr_flags, *bmask; 2433 int error, i; 2434 2435 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr)); 2436 if (hdr == NULL) 2437 return (EINVAL); 2438 2439 error = 0; 2440 bmask = NULL; 2441 memset(&da, 0, sizeof(da)); 2442 /* 2443 * Allocate needed state. 
2444 * Note we allocate 2xspace mask, for table & srv 2445 */ 2446 if (hdr->flags & (IPFW_CFG_GET_STATIC | IPFW_CFG_GET_STATES)) 2447 da.bmask = bmask = malloc( 2448 sizeof(uint32_t) * IPFW_TABLES_MAX * 2 / 32, M_TEMP, 2449 M_WAITOK | M_ZERO); 2450 IPFW_UH_RLOCK(chain); 2451 2452 /* 2453 * STAGE 1: Determine size/count for objects in range. 2454 * Prepare used tables bitmask. 2455 */ 2456 sz = sizeof(ipfw_cfg_lheader); 2457 da.e = chain->n_rules; 2458 2459 if (hdr->end_rule != 0) { 2460 /* Handle custom range */ 2461 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE) 2462 rnum = IPFW_DEFAULT_RULE; 2463 da.b = ipfw_find_rule(chain, rnum, 0); 2464 rnum = (hdr->end_rule < IPFW_DEFAULT_RULE) ? 2465 hdr->end_rule + 1: IPFW_DEFAULT_RULE; 2466 da.e = ipfw_find_rule(chain, rnum, UINT32_MAX) + 1; 2467 } 2468 2469 if (hdr->flags & IPFW_CFG_GET_STATIC) { 2470 for (i = da.b; i < da.e; i++) { 2471 rule = chain->map[i]; 2472 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv); 2473 da.rcount++; 2474 /* Update bitmask of used objects for given range */ 2475 mark_rule_objects(chain, rule, &da); 2476 } 2477 /* Add counters if requested */ 2478 if (hdr->flags & IPFW_CFG_GET_COUNTERS) { 2479 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount; 2480 da.rcounters = 1; 2481 } 2482 sz += da.rsize + sizeof(ipfw_obj_ctlv); 2483 } 2484 2485 if (hdr->flags & IPFW_CFG_GET_STATES) { 2486 sz += sizeof(ipfw_obj_ctlv) + 2487 ipfw_dyn_get_count(bmask, &i) * sizeof(ipfw_obj_dyntlv); 2488 da.tcount += i; 2489 } 2490 2491 if (da.tcount > 0) 2492 sz += da.tcount * sizeof(ipfw_obj_ntlv) + 2493 sizeof(ipfw_obj_ctlv); 2494 2495 /* 2496 * Fill header anyway. 2497 * Note we have to save header fields to stable storage 2498 * buffer inside @sd can be flushed after dumping rules 2499 */ 2500 hdr->size = sz; 2501 hdr->set_mask = ~V_set_disable; 2502 hdr_flags = hdr->flags; 2503 hdr = NULL; 2504 2505 if (sd->valsize < sz) { 2506 error = ENOMEM; 2507 goto cleanup; 2508 } 2509 2510 /* STAGE2: Store actual data */ 2511 if (da.tcount > 0) { 2512 error = dump_named_objects(chain, &da, sd); 2513 if (error != 0) 2514 goto cleanup; 2515 } 2516 2517 if (hdr_flags & IPFW_CFG_GET_STATIC) { 2518 error = dump_static_rules(chain, &da, sd); 2519 if (error != 0) 2520 goto cleanup; 2521 } 2522 2523 if (hdr_flags & IPFW_CFG_GET_STATES) 2524 error = ipfw_dump_states(chain, sd); 2525 2526 cleanup: 2527 IPFW_UH_RUNLOCK(chain); 2528 2529 if (bmask != NULL) 2530 free(bmask, M_TEMP); 2531 2532 return (error); 2533 } 2534 2535 int 2536 ipfw_check_object_name_generic(const char *name) 2537 { 2538 int nsize; 2539 2540 nsize = sizeof(((ipfw_obj_ntlv *)0)->name); 2541 if (strnlen(name, nsize) == nsize) 2542 return (EINVAL); 2543 if (name[0] == '\0') 2544 return (EINVAL); 2545 return (0); 2546 } 2547 2548 /* 2549 * Creates non-existent objects referenced by rule. 2550 * 2551 * Return 0 on success. 2552 */ 2553 int 2554 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd, 2555 struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti) 2556 { 2557 struct opcode_obj_rewrite *rw; 2558 struct obj_idx *p; 2559 uint16_t kidx; 2560 int error; 2561 2562 /* 2563 * Compatibility stuff: do actual creation for non-existing, 2564 * but referenced objects. 
2565 */ 2566 for (p = oib; p < pidx; p++) { 2567 if (p->kidx != 0) 2568 continue; 2569 2570 ti->uidx = p->uidx; 2571 ti->type = p->type; 2572 ti->atype = 0; 2573 2574 rw = find_op_rw(cmd + p->off, NULL, NULL); 2575 KASSERT(rw != NULL, ("Unable to find handler for op %d", 2576 (cmd + p->off)->opcode)); 2577 2578 if (rw->create_object == NULL) 2579 error = EOPNOTSUPP; 2580 else 2581 error = rw->create_object(ch, ti, &kidx); 2582 if (error == 0) { 2583 p->kidx = kidx; 2584 continue; 2585 } 2586 2587 /* 2588 * Error happened. We have to rollback everything. 2589 * Drop all already acquired references. 2590 */ 2591 IPFW_UH_WLOCK(ch); 2592 unref_oib_objects(ch, cmd, oib, pidx); 2593 IPFW_UH_WUNLOCK(ch); 2594 2595 return (error); 2596 } 2597 2598 return (0); 2599 } 2600 2601 /* 2602 * Compatibility function for old ipfw(8) binaries. 2603 * Rewrites table/nat kernel indices with userland ones. 2604 * Convert tables matching '/^\d+$/' to their atoi() value. 2605 * Use number 65535 for other tables. 2606 * 2607 * Returns 0 on success. 2608 */ 2609 static int 2610 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule) 2611 { 2612 struct opcode_obj_rewrite *rw; 2613 struct named_object *no; 2614 ipfw_insn *cmd; 2615 char *end; 2616 long val; 2617 int cmdlen, error, l; 2618 uint16_t kidx, uidx; 2619 uint8_t subtype; 2620 2621 error = 0; 2622 2623 l = rule->cmd_len; 2624 cmd = rule->cmd; 2625 cmdlen = 0; 2626 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2627 cmdlen = F_LEN(cmd); 2628 2629 /* Check if is index in given opcode */ 2630 rw = find_op_rw(cmd, &kidx, &subtype); 2631 if (rw == NULL) 2632 continue; 2633 2634 /* Try to find referenced kernel object */ 2635 no = rw->find_bykidx(ch, kidx); 2636 if (no == NULL) 2637 continue; 2638 2639 val = strtol(no->name, &end, 10); 2640 if (*end == '\0' && val < 65535) { 2641 uidx = val; 2642 } else { 2643 /* 2644 * We are called via legacy opcode. 2645 * Save error and show table as fake number 2646 * not to make ipfw(8) hang. 2647 */ 2648 uidx = 65535; 2649 error = 2; 2650 } 2651 2652 rw->update(cmd, uidx); 2653 } 2654 2655 return (error); 2656 } 2657 2658 /* 2659 * Unreferences all already-referenced objects in given @cmd rule, 2660 * using information in @oib. 2661 * 2662 * Used to rollback partially converted rule on error. 2663 */ 2664 static void 2665 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib, 2666 struct obj_idx *end) 2667 { 2668 struct opcode_obj_rewrite *rw; 2669 struct named_object *no; 2670 struct obj_idx *p; 2671 2672 IPFW_UH_WLOCK_ASSERT(ch); 2673 2674 for (p = oib; p < end; p++) { 2675 if (p->kidx == 0) 2676 continue; 2677 2678 rw = find_op_rw(cmd + p->off, NULL, NULL); 2679 KASSERT(rw != NULL, ("Unable to find handler for op %d", 2680 (cmd + p->off)->opcode)); 2681 2682 /* Find & unref by existing idx */ 2683 no = rw->find_bykidx(ch, p->kidx); 2684 KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx)); 2685 no->refcnt--; 2686 } 2687 } 2688 2689 /* 2690 * Remove references from every object used in @rule. 2691 * Used at rule removal code. 
2692 */ 2693 static void 2694 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule) 2695 { 2696 struct opcode_obj_rewrite *rw; 2697 struct named_object *no; 2698 ipfw_insn *cmd; 2699 int cmdlen, l; 2700 uint16_t kidx; 2701 uint8_t subtype; 2702 2703 IPFW_UH_WLOCK_ASSERT(ch); 2704 2705 l = rule->cmd_len; 2706 cmd = rule->cmd; 2707 cmdlen = 0; 2708 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2709 cmdlen = F_LEN(cmd); 2710 2711 rw = find_op_rw(cmd, &kidx, &subtype); 2712 if (rw == NULL) 2713 continue; 2714 no = rw->find_bykidx(ch, kidx); 2715 2716 KASSERT(no != NULL, ("object id %d not found", kidx)); 2717 KASSERT(no->subtype == subtype, 2718 ("wrong type %d (%d) for object id %d", 2719 no->subtype, subtype, kidx)); 2720 KASSERT(no->refcnt > 0, ("refcount for object %d is %d", 2721 kidx, no->refcnt)); 2722 2723 if (no->refcnt == 1 && rw->destroy_object != NULL) 2724 rw->destroy_object(ch, no); 2725 else 2726 no->refcnt--; 2727 } 2728 } 2729 2730 /* 2731 * Find and reference object (if any) stored in instruction @cmd. 2732 * 2733 * Saves object info in @pidx, sets 2734 * - @unresolved to 1 if object should exists but not found 2735 * 2736 * Returns non-zero value in case of error. 2737 */ 2738 static int 2739 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti, 2740 struct obj_idx *pidx, int *unresolved) 2741 { 2742 struct named_object *no; 2743 struct opcode_obj_rewrite *rw; 2744 int error; 2745 2746 /* Check if this opcode is candidate for rewrite */ 2747 rw = find_op_rw(cmd, &ti->uidx, &ti->type); 2748 if (rw == NULL) 2749 return (0); 2750 2751 /* Need to rewrite. Save necessary fields */ 2752 pidx->uidx = ti->uidx; 2753 pidx->type = ti->type; 2754 2755 /* Try to find referenced kernel object */ 2756 error = rw->find_byname(ch, ti, &no); 2757 if (error != 0) 2758 return (error); 2759 if (no == NULL) { 2760 /* 2761 * Report about unresolved object for automaic 2762 * creation. 2763 */ 2764 *unresolved = 1; 2765 return (0); 2766 } 2767 2768 /* 2769 * Object is already exist. 2770 * Its subtype should match with expected value. 2771 */ 2772 if (ti->type != no->subtype) 2773 return (EINVAL); 2774 2775 /* Bump refcount and update kidx. */ 2776 no->refcnt++; 2777 rw->update(cmd, no->kidx); 2778 return (0); 2779 } 2780 2781 /* 2782 * Finds and bumps refcount for objects referenced by given @rule. 2783 * Auto-creates non-existing tables. 2784 * Fills in @oib array with userland/kernel indexes. 2785 * 2786 * Returns 0 on success. 2787 */ 2788 static int 2789 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 2790 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti) 2791 { 2792 struct obj_idx *pidx; 2793 ipfw_insn *cmd; 2794 int cmdlen, error, l, unresolved; 2795 2796 pidx = oib; 2797 l = rule->cmd_len; 2798 cmd = rule->cmd; 2799 cmdlen = 0; 2800 error = 0; 2801 2802 IPFW_UH_WLOCK(ch); 2803 2804 /* Increase refcount on each existing referenced table. */ 2805 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2806 cmdlen = F_LEN(cmd); 2807 unresolved = 0; 2808 2809 error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved); 2810 if (error != 0) 2811 break; 2812 /* 2813 * Compatibility stuff for old clients: 2814 * prepare to automaitcally create non-existing objects. 
2815 */ 2816 if (unresolved != 0) { 2817 pidx->off = rule->cmd_len - l; 2818 pidx++; 2819 } 2820 } 2821 2822 if (error != 0) { 2823 /* Unref everything we have already done */ 2824 unref_oib_objects(ch, rule->cmd, oib, pidx); 2825 IPFW_UH_WUNLOCK(ch); 2826 return (error); 2827 } 2828 IPFW_UH_WUNLOCK(ch); 2829 2830 /* Perform auto-creation for non-existing objects */ 2831 if (pidx != oib) 2832 error = create_objects_compat(ch, rule->cmd, oib, pidx, ti); 2833 2834 /* Calculate real number of dynamic objects */ 2835 ci->object_opcodes = (uint16_t)(pidx - oib); 2836 2837 return (error); 2838 } 2839 2840 /* 2841 * Checks is opcode is referencing table of appropriate type. 2842 * Adds reference count for found table if true. 2843 * Rewrites user-supplied opcode values with kernel ones. 2844 * 2845 * Returns 0 on success and appropriate error code otherwise. 2846 */ 2847 static int 2848 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci) 2849 { 2850 int error; 2851 ipfw_insn *cmd; 2852 struct obj_idx *p, *pidx_first, *pidx_last; 2853 struct tid_info ti; 2854 2855 /* 2856 * Prepare an array for storing opcode indices. 2857 * Use stack allocation by default. 2858 */ 2859 if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) { 2860 /* Stack */ 2861 pidx_first = ci->obuf; 2862 } else 2863 pidx_first = malloc( 2864 ci->object_opcodes * sizeof(struct obj_idx), 2865 M_IPFW, M_WAITOK | M_ZERO); 2866 2867 error = 0; 2868 memset(&ti, 0, sizeof(ti)); 2869 2870 /* Use set rule is assigned to. */ 2871 ti.set = ci->krule->set; 2872 if (ci->ctlv != NULL) { 2873 ti.tlvs = (void *)(ci->ctlv + 1); 2874 ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv); 2875 } 2876 2877 /* Reference all used tables and other objects */ 2878 error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti); 2879 if (error != 0) 2880 goto free; 2881 /* 2882 * Note that ref_rule_objects() might have updated ci->object_opcodes 2883 * to reflect actual number of object opcodes. 2884 */ 2885 2886 /* Perform rewrite of remaining opcodes */ 2887 p = pidx_first; 2888 pidx_last = pidx_first + ci->object_opcodes; 2889 for (p = pidx_first; p < pidx_last; p++) { 2890 cmd = ci->krule->cmd + p->off; 2891 update_opcode_kidx(cmd, p->kidx); 2892 } 2893 2894 free: 2895 if (pidx_first != ci->obuf) 2896 free(pidx_first, M_IPFW); 2897 2898 return (error); 2899 } 2900 2901 /* 2902 * Adds one or more rules to ipfw @chain. 2903 * Data layout (version 0)(current): 2904 * Request: 2905 * [ 2906 * ip_fw3_opheader 2907 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1) 2908 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3) 2909 * ] 2910 * Reply: 2911 * [ 2912 * ip_fw3_opheader 2913 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 2914 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] 2915 * ] 2916 * 2917 * Rules in reply are modified to store their actual ruleset number. 2918 * 2919 * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending 2920 * according to their idx field and there has to be no duplicates. 2921 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending. 2922 * (*3) Each ip_fw structure needs to be aligned to u64 boundary. 2923 * 2924 * Returns 0 on success. 
2925 */ 2926 static int 2927 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2928 struct sockopt_data *sd) 2929 { 2930 ipfw_obj_ctlv *ctlv, *rtlv, *tstate; 2931 ipfw_obj_ntlv *ntlv; 2932 int clen, error, idx; 2933 uint32_t count, read; 2934 struct ip_fw_rule *r; 2935 struct rule_check_info rci, *ci, *cbuf; 2936 int i, rsize; 2937 2938 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize); 2939 ctlv = (ipfw_obj_ctlv *)(op3 + 1); 2940 2941 read = sizeof(ip_fw3_opheader); 2942 rtlv = NULL; 2943 tstate = NULL; 2944 cbuf = NULL; 2945 memset(&rci, 0, sizeof(struct rule_check_info)); 2946 2947 if (read + sizeof(*ctlv) > sd->valsize) 2948 return (EINVAL); 2949 2950 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) { 2951 clen = ctlv->head.length; 2952 /* Check size and alignment */ 2953 if (clen > sd->valsize || clen < sizeof(*ctlv)) 2954 return (EINVAL); 2955 if ((clen % sizeof(uint64_t)) != 0) 2956 return (EINVAL); 2957 2958 /* 2959 * Some table names or other named objects. 2960 * Check for validness. 2961 */ 2962 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv); 2963 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv)) 2964 return (EINVAL); 2965 2966 /* 2967 * Check each TLV. 2968 * Ensure TLVs are sorted ascending and 2969 * there are no duplicates. 2970 */ 2971 idx = -1; 2972 ntlv = (ipfw_obj_ntlv *)(ctlv + 1); 2973 while (count > 0) { 2974 if (ntlv->head.length != sizeof(ipfw_obj_ntlv)) 2975 return (EINVAL); 2976 2977 error = ipfw_check_object_name_generic(ntlv->name); 2978 if (error != 0) 2979 return (error); 2980 2981 if (ntlv->idx <= idx) 2982 return (EINVAL); 2983 2984 idx = ntlv->idx; 2985 count--; 2986 ntlv++; 2987 } 2988 2989 tstate = ctlv; 2990 read += ctlv->head.length; 2991 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 2992 } 2993 2994 if (read + sizeof(*ctlv) > sd->valsize) 2995 return (EINVAL); 2996 2997 if (ctlv->head.type == IPFW_TLV_RULE_LIST) { 2998 clen = ctlv->head.length; 2999 if (clen + read > sd->valsize || clen < sizeof(*ctlv)) 3000 return (EINVAL); 3001 if ((clen % sizeof(uint64_t)) != 0) 3002 return (EINVAL); 3003 3004 /* 3005 * TODO: Permit adding multiple rules at once 3006 */ 3007 if (ctlv->count != 1) 3008 return (ENOTSUP); 3009 3010 clen -= sizeof(*ctlv); 3011 3012 if (ctlv->count > clen / sizeof(struct ip_fw_rule)) 3013 return (EINVAL); 3014 3015 /* Allocate state for each rule or use stack */ 3016 if (ctlv->count == 1) { 3017 memset(&rci, 0, sizeof(struct rule_check_info)); 3018 cbuf = &rci; 3019 } else 3020 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP, 3021 M_WAITOK | M_ZERO); 3022 ci = cbuf; 3023 3024 /* 3025 * Check each rule for validness. 
3026 * Ensure numbered rules are sorted ascending 3027 * and properly aligned 3028 */ 3029 idx = 0; 3030 r = (struct ip_fw_rule *)(ctlv + 1); 3031 count = 0; 3032 error = 0; 3033 while (clen > 0) { 3034 rsize = roundup2(RULESIZE(r), sizeof(uint64_t)); 3035 if (rsize > clen || ctlv->count <= count) { 3036 error = EINVAL; 3037 break; 3038 } 3039 3040 ci->ctlv = tstate; 3041 error = check_ipfw_rule1(r, rsize, ci); 3042 if (error != 0) 3043 break; 3044 3045 /* Check sorting */ 3046 if (r->rulenum != 0 && r->rulenum < idx) { 3047 printf("rulenum %d idx %d\n", r->rulenum, idx); 3048 error = EINVAL; 3049 break; 3050 } 3051 idx = r->rulenum; 3052 3053 ci->urule = (caddr_t)r; 3054 3055 rsize = roundup2(rsize, sizeof(uint64_t)); 3056 clen -= rsize; 3057 r = (struct ip_fw_rule *)((caddr_t)r + rsize); 3058 count++; 3059 ci++; 3060 } 3061 3062 if (ctlv->count != count || error != 0) { 3063 if (cbuf != &rci) 3064 free(cbuf, M_TEMP); 3065 return (EINVAL); 3066 } 3067 3068 rtlv = ctlv; 3069 read += ctlv->head.length; 3070 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 3071 } 3072 3073 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) { 3074 if (cbuf != NULL && cbuf != &rci) 3075 free(cbuf, M_TEMP); 3076 return (EINVAL); 3077 } 3078 3079 /* 3080 * Passed rules seems to be valid. 3081 * Allocate storage and try to add them to chain. 3082 */ 3083 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) { 3084 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule); 3085 ci->krule = ipfw_alloc_rule(chain, clen); 3086 import_rule1(ci); 3087 } 3088 3089 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) { 3090 /* Free allocate krules */ 3091 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) 3092 ipfw_free_rule(ci->krule); 3093 } 3094 3095 if (cbuf != NULL && cbuf != &rci) 3096 free(cbuf, M_TEMP); 3097 3098 return (error); 3099 } 3100 3101 /* 3102 * Lists all sopts currently registered. 3103 * Data layout (v0)(current): 3104 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 3105 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ] 3106 * 3107 * Returns 0 on success 3108 */ 3109 static int 3110 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 3111 struct sockopt_data *sd) 3112 { 3113 struct _ipfw_obj_lheader *olh; 3114 ipfw_sopt_info *i; 3115 struct ipfw_sopt_handler *sh; 3116 uint32_t count, n, size; 3117 3118 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 3119 if (olh == NULL) 3120 return (EINVAL); 3121 if (sd->valsize < olh->size) 3122 return (EINVAL); 3123 3124 CTL3_LOCK(); 3125 count = ctl3_hsize; 3126 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader); 3127 3128 /* Fill in header regadless of buffer size */ 3129 olh->count = count; 3130 olh->objsize = sizeof(ipfw_sopt_info); 3131 3132 if (size > olh->size) { 3133 olh->size = size; 3134 CTL3_UNLOCK(); 3135 return (ENOMEM); 3136 } 3137 olh->size = size; 3138 3139 for (n = 1; n <= count; n++) { 3140 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 3141 KASSERT(i != NULL, ("previously checked buffer is not enough")); 3142 sh = &ctl3_handlers[n]; 3143 i->opcode = sh->opcode; 3144 i->version = sh->version; 3145 i->refcnt = sh->refcnt; 3146 } 3147 CTL3_UNLOCK(); 3148 3149 return (0); 3150 } 3151 3152 /* 3153 * Compares two opcodes. 3154 * Used both in qsort() and bsearch(). 3155 * 3156 * Returns 0 if match is found. 
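 *
 * ctl3_rewriters[] is kept sorted by opcode (see ipfw_add_obj_rewriter()
 * below), so this comparator serves both qsort() and bsearch().  Several
 * entries may share an opcode and differ only in their classifier;
 * find_op_rw_range() widens a bsearch() hit into the whole [lo, hi] run
 * of equal opcodes so that find_op_rw() can try each classifier in turn.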
3157 */ 3158 static int 3159 compare_opcodes(const void *_a, const void *_b) 3160 { 3161 const struct opcode_obj_rewrite *a, *b; 3162 3163 a = (const struct opcode_obj_rewrite *)_a; 3164 b = (const struct opcode_obj_rewrite *)_b; 3165 3166 if (a->opcode < b->opcode) 3167 return (-1); 3168 else if (a->opcode > b->opcode) 3169 return (1); 3170 3171 return (0); 3172 } 3173 3174 /* 3175 * XXX: Rewrite bsearch() 3176 */ 3177 static int 3178 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo, 3179 struct opcode_obj_rewrite **phi) 3180 { 3181 struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw; 3182 3183 memset(&h, 0, sizeof(h)); 3184 h.opcode = op; 3185 3186 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters, 3187 ctl3_rsize, sizeof(h), compare_opcodes); 3188 if (rw == NULL) 3189 return (1); 3190 3191 /* Find the first element matching the same opcode */ 3192 lo = rw; 3193 for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--) 3194 ; 3195 3196 /* Find the last element matching the same opcode */ 3197 hi = rw; 3198 ctl3_max = ctl3_rewriters + ctl3_rsize; 3199 for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++) 3200 ; 3201 3202 *plo = lo; 3203 *phi = hi; 3204 3205 return (0); 3206 } 3207 3208 /* 3209 * Finds opcode object rewriter based on @code. 3210 * 3211 * Returns pointer to handler or NULL. 3212 */ 3213 static struct opcode_obj_rewrite * 3214 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 3215 { 3216 struct opcode_obj_rewrite *rw, *lo, *hi; 3217 uint16_t uidx; 3218 uint8_t subtype; 3219 3220 if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0) 3221 return (NULL); 3222 3223 for (rw = lo; rw <= hi; rw++) { 3224 if (rw->classifier(cmd, &uidx, &subtype) == 0) { 3225 if (puidx != NULL) 3226 *puidx = uidx; 3227 if (ptype != NULL) 3228 *ptype = subtype; 3229 return (rw); 3230 } 3231 } 3232 3233 return (NULL); 3234 } 3235 int 3236 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx) 3237 { 3238 3239 if (find_op_rw(cmd, puidx, NULL) == NULL) 3240 return (1); 3241 return (0); 3242 } 3243 3244 void 3245 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx) 3246 { 3247 struct opcode_obj_rewrite *rw; 3248 3249 rw = find_op_rw(cmd, NULL, NULL); 3250 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode)); 3251 rw->update(cmd, idx); 3252 } 3253 3254 void 3255 ipfw_init_obj_rewriter(void) 3256 { 3257 3258 ctl3_rewriters = NULL; 3259 ctl3_rsize = 0; 3260 } 3261 3262 void 3263 ipfw_destroy_obj_rewriter(void) 3264 { 3265 3266 if (ctl3_rewriters != NULL) 3267 free(ctl3_rewriters, M_IPFW); 3268 ctl3_rewriters = NULL; 3269 ctl3_rsize = 0; 3270 } 3271 3272 /* 3273 * Adds one or more opcode object rewrite handlers to the global array. 3274 * Function may sleep. 
3275 */ 3276 void 3277 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count) 3278 { 3279 size_t sz; 3280 struct opcode_obj_rewrite *tmp; 3281 3282 CTL3_LOCK(); 3283 3284 for (;;) { 3285 sz = ctl3_rsize + count; 3286 CTL3_UNLOCK(); 3287 tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO); 3288 CTL3_LOCK(); 3289 if (ctl3_rsize + count <= sz) 3290 break; 3291 3292 /* Retry */ 3293 free(tmp, M_IPFW); 3294 } 3295 3296 /* Merge old & new arrays */ 3297 sz = ctl3_rsize + count; 3298 memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw)); 3299 memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw)); 3300 qsort(tmp, sz, sizeof(*rw), compare_opcodes); 3301 /* Switch new and free old */ 3302 if (ctl3_rewriters != NULL) 3303 free(ctl3_rewriters, M_IPFW); 3304 ctl3_rewriters = tmp; 3305 ctl3_rsize = sz; 3306 3307 CTL3_UNLOCK(); 3308 } 3309 3310 /* 3311 * Removes one or more object rewrite handlers from the global array. 3312 */ 3313 int 3314 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count) 3315 { 3316 size_t sz; 3317 struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi; 3318 int i; 3319 3320 CTL3_LOCK(); 3321 3322 for (i = 0; i < count; i++) { 3323 if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0) 3324 continue; 3325 3326 for (ktmp = lo; ktmp <= hi; ktmp++) { 3327 if (ktmp->classifier != rw[i].classifier) 3328 continue; 3329 3330 ctl3_max = ctl3_rewriters + ctl3_rsize; 3331 sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp); 3332 memmove(ktmp, ktmp + 1, sz); 3333 ctl3_rsize--; 3334 break; 3335 } 3336 } 3337 3338 if (ctl3_rsize == 0) { 3339 if (ctl3_rewriters != NULL) 3340 free(ctl3_rewriters, M_IPFW); 3341 ctl3_rewriters = NULL; 3342 } 3343 3344 CTL3_UNLOCK(); 3345 3346 return (0); 3347 } 3348 3349 static int 3350 export_objhash_ntlv_internal(struct namedobj_instance *ni, 3351 struct named_object *no, void *arg) 3352 { 3353 struct sockopt_data *sd; 3354 ipfw_obj_ntlv *ntlv; 3355 3356 sd = (struct sockopt_data *)arg; 3357 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 3358 if (ntlv == NULL) 3359 return (ENOMEM); 3360 ipfw_export_obj_ntlv(no, ntlv); 3361 return (0); 3362 } 3363 3364 /* 3365 * Lists all service objects. 3366 * Data layout (v0)(current): 3367 * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size 3368 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ] 3369 * Returns 0 on success 3370 */ 3371 static int 3372 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 3373 struct sockopt_data *sd) 3374 { 3375 ipfw_obj_lheader *hdr; 3376 int count; 3377 3378 hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr)); 3379 if (hdr == NULL) 3380 return (EINVAL); 3381 3382 IPFW_UH_RLOCK(chain); 3383 count = ipfw_objhash_count(CHAIN_TO_SRV(chain)); 3384 hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv); 3385 if (sd->valsize < hdr->size) { 3386 IPFW_UH_RUNLOCK(chain); 3387 return (ENOMEM); 3388 } 3389 hdr->count = count; 3390 hdr->objsize = sizeof(ipfw_obj_ntlv); 3391 if (count > 0) 3392 ipfw_objhash_foreach(CHAIN_TO_SRV(chain), 3393 export_objhash_ntlv_internal, sd); 3394 IPFW_UH_RUNLOCK(chain); 3395 return (0); 3396 } 3397 3398 /* 3399 * Compares two sopt handlers (code, version and handler ptr). 3400 * Used both as qsort() and bsearch(). 3401 * Does not compare handler for latter case. 3402 * 3403 * Returns 0 if match is found. 
3404 */ 3405 static int 3406 compare_sh(const void *_a, const void *_b) 3407 { 3408 const struct ipfw_sopt_handler *a, *b; 3409 3410 a = (const struct ipfw_sopt_handler *)_a; 3411 b = (const struct ipfw_sopt_handler *)_b; 3412 3413 if (a->opcode < b->opcode) 3414 return (-1); 3415 else if (a->opcode > b->opcode) 3416 return (1); 3417 3418 if (a->version < b->version) 3419 return (-1); 3420 else if (a->version > b->version) 3421 return (1); 3422 3423 /* bsearch helper */ 3424 if (a->handler == NULL) 3425 return (0); 3426 3427 if ((uintptr_t)a->handler < (uintptr_t)b->handler) 3428 return (-1); 3429 else if ((uintptr_t)a->handler > (uintptr_t)b->handler) 3430 return (1); 3431 3432 return (0); 3433 } 3434 3435 /* 3436 * Finds sopt handler based on @code and @version. 3437 * 3438 * Returns pointer to handler or NULL. 3439 */ 3440 static struct ipfw_sopt_handler * 3441 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler) 3442 { 3443 struct ipfw_sopt_handler *sh, h; 3444 3445 memset(&h, 0, sizeof(h)); 3446 h.opcode = code; 3447 h.version = version; 3448 h.handler = handler; 3449 3450 sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers, 3451 ctl3_hsize, sizeof(h), compare_sh); 3452 3453 return (sh); 3454 } 3455 3456 static int 3457 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh) 3458 { 3459 struct ipfw_sopt_handler *sh; 3460 3461 CTL3_LOCK(); 3462 if ((sh = find_sh(opcode, version, NULL)) == NULL) { 3463 CTL3_UNLOCK(); 3464 printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n", 3465 opcode, version); 3466 return (EINVAL); 3467 } 3468 sh->refcnt++; 3469 ctl3_refct++; 3470 /* Copy handler data to requested buffer */ 3471 *psh = *sh; 3472 CTL3_UNLOCK(); 3473 3474 return (0); 3475 } 3476 3477 static void 3478 find_unref_sh(struct ipfw_sopt_handler *psh) 3479 { 3480 struct ipfw_sopt_handler *sh; 3481 3482 CTL3_LOCK(); 3483 sh = find_sh(psh->opcode, psh->version, NULL); 3484 KASSERT(sh != NULL, ("ctl3 handler disappeared")); 3485 sh->refcnt--; 3486 ctl3_refct--; 3487 CTL3_UNLOCK(); 3488 } 3489 3490 void 3491 ipfw_init_sopt_handler(void) 3492 { 3493 3494 CTL3_LOCK_INIT(); 3495 IPFW_ADD_SOPT_HANDLER(1, scodes); 3496 } 3497 3498 void 3499 ipfw_destroy_sopt_handler(void) 3500 { 3501 3502 IPFW_DEL_SOPT_HANDLER(1, scodes); 3503 CTL3_LOCK_DESTROY(); 3504 } 3505 3506 /* 3507 * Adds one or more sockopt handlers to the global array. 3508 * Function may sleep. 3509 */ 3510 void 3511 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3512 { 3513 size_t sz; 3514 struct ipfw_sopt_handler *tmp; 3515 3516 CTL3_LOCK(); 3517 3518 for (;;) { 3519 sz = ctl3_hsize + count; 3520 CTL3_UNLOCK(); 3521 tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO); 3522 CTL3_LOCK(); 3523 if (ctl3_hsize + count <= sz) 3524 break; 3525 3526 /* Retry */ 3527 free(tmp, M_IPFW); 3528 } 3529 3530 /* Merge old & new arrays */ 3531 sz = ctl3_hsize + count; 3532 memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh)); 3533 memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh)); 3534 qsort(tmp, sz, sizeof(*sh), compare_sh); 3535 /* Switch new and free old */ 3536 if (ctl3_handlers != NULL) 3537 free(ctl3_handlers, M_IPFW); 3538 ctl3_handlers = tmp; 3539 ctl3_hsize = sz; 3540 ctl3_gencnt++; 3541 3542 CTL3_UNLOCK(); 3543 } 3544 3545 /* 3546 * Removes one or more sockopt handlers from the global array. 
3547 */ 3548 int 3549 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3550 { 3551 size_t sz; 3552 struct ipfw_sopt_handler *tmp, *h; 3553 int i; 3554 3555 CTL3_LOCK(); 3556 3557 for (i = 0; i < count; i++) { 3558 tmp = &sh[i]; 3559 h = find_sh(tmp->opcode, tmp->version, tmp->handler); 3560 if (h == NULL) 3561 continue; 3562 3563 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h); 3564 memmove(h, h + 1, sz); 3565 ctl3_hsize--; 3566 } 3567 3568 if (ctl3_hsize == 0) { 3569 if (ctl3_handlers != NULL) 3570 free(ctl3_handlers, M_IPFW); 3571 ctl3_handlers = NULL; 3572 } 3573 3574 ctl3_gencnt++; 3575 3576 CTL3_UNLOCK(); 3577 3578 return (0); 3579 } 3580 3581 /* 3582 * Writes data accumulated in @sd to sockopt buffer. 3583 * Zeroes internal @sd buffer. 3584 */ 3585 static int 3586 ipfw_flush_sopt_data(struct sockopt_data *sd) 3587 { 3588 struct sockopt *sopt; 3589 int error; 3590 size_t sz; 3591 3592 sz = sd->koff; 3593 if (sz == 0) 3594 return (0); 3595 3596 sopt = sd->sopt; 3597 3598 if (sopt->sopt_dir == SOPT_GET) { 3599 error = copyout(sd->kbuf, sopt->sopt_val, sz); 3600 if (error != 0) 3601 return (error); 3602 } 3603 3604 memset(sd->kbuf, 0, sd->ksize); 3605 sd->ktotal += sz; 3606 sd->koff = 0; 3607 if (sd->ktotal + sd->ksize < sd->valsize) 3608 sd->kavail = sd->ksize; 3609 else 3610 sd->kavail = sd->valsize - sd->ktotal; 3611 3612 /* Update sopt buffer data */ 3613 sopt->sopt_valsize = sd->ktotal; 3614 sopt->sopt_val = sd->sopt_val + sd->ktotal; 3615 3616 return (0); 3617 } 3618 3619 /* 3620 * Ensures that @sd buffer has contiguous @neeeded number of 3621 * bytes. 3622 * 3623 * Returns pointer to requested space or NULL. 3624 */ 3625 caddr_t 3626 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed) 3627 { 3628 int error; 3629 caddr_t addr; 3630 3631 if (sd->kavail < needed) { 3632 /* 3633 * Flush data and try another time. 3634 */ 3635 error = ipfw_flush_sopt_data(sd); 3636 3637 if (sd->kavail < needed || error != 0) 3638 return (NULL); 3639 } 3640 3641 addr = sd->kbuf + sd->koff; 3642 sd->koff += needed; 3643 sd->kavail -= needed; 3644 return (addr); 3645 } 3646 3647 /* 3648 * Requests @needed contiguous bytes from @sd buffer. 3649 * Function is used to notify subsystem that we are 3650 * interesed in first @needed bytes (request header) 3651 * and the rest buffer can be safely zeroed. 3652 * 3653 * Returns pointer to requested space or NULL. 3654 */ 3655 caddr_t 3656 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed) 3657 { 3658 caddr_t addr; 3659 3660 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL) 3661 return (NULL); 3662 3663 if (sd->kavail > 0) 3664 memset(sd->kbuf + sd->koff, 0, sd->kavail); 3665 3666 return (addr); 3667 } 3668 3669 /* 3670 * New sockopt handler. 
3671 */ 3672 int 3673 ipfw_ctl3(struct sockopt *sopt) 3674 { 3675 int error, locked; 3676 size_t size, valsize; 3677 struct ip_fw_chain *chain; 3678 char xbuf[256]; 3679 struct sockopt_data sdata; 3680 struct ipfw_sopt_handler h; 3681 ip_fw3_opheader *op3 = NULL; 3682 3683 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW); 3684 if (error != 0) 3685 return (error); 3686 3687 if (sopt->sopt_name != IP_FW3) 3688 return (ipfw_ctl(sopt)); 3689 3690 chain = &V_layer3_chain; 3691 error = 0; 3692 3693 /* Save original valsize before it is altered via sooptcopyin() */ 3694 valsize = sopt->sopt_valsize; 3695 memset(&sdata, 0, sizeof(sdata)); 3696 /* Read op3 header first to determine actual operation */ 3697 op3 = (ip_fw3_opheader *)xbuf; 3698 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3)); 3699 if (error != 0) 3700 return (error); 3701 sopt->sopt_valsize = valsize; 3702 3703 /* 3704 * Find and reference command. 3705 */ 3706 error = find_ref_sh(op3->opcode, op3->version, &h); 3707 if (error != 0) 3708 return (error); 3709 3710 /* 3711 * Disallow modifications in really-really secure mode, but still allow 3712 * the logging counters to be reset. 3713 */ 3714 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) { 3715 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3716 if (error != 0) { 3717 find_unref_sh(&h); 3718 return (error); 3719 } 3720 } 3721 3722 /* 3723 * Fill in sockopt_data structure that may be useful for 3724 * IP_FW3 get requests. 3725 */ 3726 locked = 0; 3727 if (valsize <= sizeof(xbuf)) { 3728 /* use on-stack buffer */ 3729 sdata.kbuf = xbuf; 3730 sdata.ksize = sizeof(xbuf); 3731 sdata.kavail = valsize; 3732 } else { 3733 /* 3734 * Determine opcode type/buffer size: 3735 * allocate sliding-window buf for data export or 3736 * contiguous buffer for special ops. 3737 */ 3738 if ((h.dir & HDIR_SET) != 0) { 3739 /* Set request. Allocate contigous buffer. */ 3740 if (valsize > CTL3_LARGEBUF) { 3741 find_unref_sh(&h); 3742 return (EFBIG); 3743 } 3744 3745 size = valsize; 3746 } else { 3747 /* Get request. Allocate sliding window buffer */ 3748 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF; 3749 3750 if (size < valsize) { 3751 /* We have to wire user buffer */ 3752 error = vslock(sopt->sopt_val, valsize); 3753 if (error != 0) 3754 return (error); 3755 locked = 1; 3756 } 3757 } 3758 3759 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3760 sdata.ksize = size; 3761 sdata.kavail = size; 3762 } 3763 3764 sdata.sopt = sopt; 3765 sdata.sopt_val = sopt->sopt_val; 3766 sdata.valsize = valsize; 3767 3768 /* 3769 * Copy either all request (if valsize < bsize_max) 3770 * or first bsize_max bytes to guarantee most consumers 3771 * that all necessary data has been copied). 3772 * Anyway, copy not less than sizeof(ip_fw3_opheader). 
3773 */ 3774 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize, 3775 sizeof(ip_fw3_opheader))) != 0) 3776 return (error); 3777 op3 = (ip_fw3_opheader *)sdata.kbuf; 3778 3779 /* Finally, run handler */ 3780 error = h.handler(chain, op3, &sdata); 3781 find_unref_sh(&h); 3782 3783 /* Flush state and free buffers */ 3784 if (error == 0) 3785 error = ipfw_flush_sopt_data(&sdata); 3786 else 3787 ipfw_flush_sopt_data(&sdata); 3788 3789 if (locked != 0) 3790 vsunlock(sdata.sopt_val, valsize); 3791 3792 /* Restore original pointer and set number of bytes written */ 3793 sopt->sopt_val = sdata.sopt_val; 3794 sopt->sopt_valsize = sdata.ktotal; 3795 if (sdata.kbuf != xbuf) 3796 free(sdata.kbuf, M_TEMP); 3797 3798 return (error); 3799 } 3800 3801 /** 3802 * {set|get}sockopt parser. 3803 */ 3804 int 3805 ipfw_ctl(struct sockopt *sopt) 3806 { 3807 #define RULE_MAXSIZE (512*sizeof(u_int32_t)) 3808 int error; 3809 size_t size; 3810 struct ip_fw *buf; 3811 struct ip_fw_rule0 *rule; 3812 struct ip_fw_chain *chain; 3813 u_int32_t rulenum[2]; 3814 uint32_t opt; 3815 struct rule_check_info ci; 3816 IPFW_RLOCK_TRACKER; 3817 3818 chain = &V_layer3_chain; 3819 error = 0; 3820 3821 opt = sopt->sopt_name; 3822 3823 /* 3824 * Disallow modifications in really-really secure mode, but still allow 3825 * the logging counters to be reset. 3826 */ 3827 if (opt == IP_FW_ADD || 3828 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) { 3829 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3830 if (error != 0) 3831 return (error); 3832 } 3833 3834 switch (opt) { 3835 case IP_FW_GET: 3836 /* 3837 * pass up a copy of the current rules. Static rules 3838 * come first (the last of which has number IPFW_DEFAULT_RULE), 3839 * followed by a possibly empty list of dynamic rule. 3840 * The last dynamic rule has NULL in the "next" field. 3841 * 3842 * Note that the calculated size is used to bound the 3843 * amount of data returned to the user. The rule set may 3844 * change between calculating the size and returning the 3845 * data in which case we'll just return what fits. 3846 */ 3847 for (;;) { 3848 int len = 0, want; 3849 3850 size = chain->static_len; 3851 size += ipfw_dyn_len(); 3852 if (size >= sopt->sopt_valsize) 3853 break; 3854 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3855 IPFW_UH_RLOCK(chain); 3856 /* check again how much space we need */ 3857 want = chain->static_len + ipfw_dyn_len(); 3858 if (size >= want) 3859 len = ipfw_getrules(chain, buf, size); 3860 IPFW_UH_RUNLOCK(chain); 3861 if (size >= want) 3862 error = sooptcopyout(sopt, buf, len); 3863 free(buf, M_TEMP); 3864 if (size >= want) 3865 break; 3866 } 3867 break; 3868 3869 case IP_FW_FLUSH: 3870 /* locking is done within del_entry() */ 3871 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */ 3872 break; 3873 3874 case IP_FW_ADD: 3875 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK); 3876 error = sooptcopyin(sopt, rule, RULE_MAXSIZE, 3877 sizeof(struct ip_fw7) ); 3878 3879 memset(&ci, 0, sizeof(struct rule_check_info)); 3880 3881 /* 3882 * If the size of commands equals RULESIZE7 then we assume 3883 * a FreeBSD7.2 binary is talking to us (set is7=1). 3884 * is7 is persistent so the next 'ipfw list' command 3885 * will use this format. 3886 * NOTE: If wrong version is guessed (this can happen if 3887 * the first ipfw command is 'ipfw [pipe] list') 3888 * the ipfw binary may crash or loop infinitly... 
3889 */ 3890 size = sopt->sopt_valsize; 3891 if (size == RULESIZE7(rule)) { 3892 is7 = 1; 3893 error = convert_rule_to_8(rule); 3894 if (error) { 3895 free(rule, M_TEMP); 3896 return error; 3897 } 3898 size = RULESIZE(rule); 3899 } else 3900 is7 = 0; 3901 if (error == 0) 3902 error = check_ipfw_rule0(rule, size, &ci); 3903 if (error == 0) { 3904 /* locking is done within add_rule() */ 3905 struct ip_fw *krule; 3906 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule)); 3907 ci.urule = (caddr_t)rule; 3908 ci.krule = krule; 3909 import_rule0(&ci); 3910 error = commit_rules(chain, &ci, 1); 3911 if (error != 0) 3912 ipfw_free_rule(ci.krule); 3913 else if (sopt->sopt_dir == SOPT_GET) { 3914 if (is7) { 3915 error = convert_rule_to_7(rule); 3916 size = RULESIZE7(rule); 3917 if (error) { 3918 free(rule, M_TEMP); 3919 return error; 3920 } 3921 } 3922 error = sooptcopyout(sopt, rule, size); 3923 } 3924 } 3925 free(rule, M_TEMP); 3926 break; 3927 3928 case IP_FW_DEL: 3929 /* 3930 * IP_FW_DEL is used for deleting single rules or sets, 3931 * and (ab)used to atomically manipulate sets. Argument size 3932 * is used to distinguish between the two: 3933 * sizeof(u_int32_t) 3934 * delete single rule or set of rules, 3935 * or reassign rules (or sets) to a different set. 3936 * 2*sizeof(u_int32_t) 3937 * atomic disable/enable sets. 3938 * first u_int32_t contains sets to be disabled, 3939 * second u_int32_t contains sets to be enabled. 3940 */ 3941 error = sooptcopyin(sopt, rulenum, 3942 2*sizeof(u_int32_t), sizeof(u_int32_t)); 3943 if (error) 3944 break; 3945 size = sopt->sopt_valsize; 3946 if (size == sizeof(u_int32_t) && rulenum[0] != 0) { 3947 /* delete or reassign, locking done in del_entry() */ 3948 error = del_entry(chain, rulenum[0]); 3949 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */ 3950 IPFW_UH_WLOCK(chain); 3951 V_set_disable = 3952 (V_set_disable | rulenum[0]) & ~rulenum[1] & 3953 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */ 3954 IPFW_UH_WUNLOCK(chain); 3955 } else 3956 error = EINVAL; 3957 break; 3958 3959 case IP_FW_ZERO: 3960 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */ 3961 rulenum[0] = 0; 3962 if (sopt->sopt_val != 0) { 3963 error = sooptcopyin(sopt, rulenum, 3964 sizeof(u_int32_t), sizeof(u_int32_t)); 3965 if (error) 3966 break; 3967 } 3968 error = zero_entry(chain, rulenum[0], 3969 sopt->sopt_name == IP_FW_RESETLOG); 3970 break; 3971 3972 /*--- TABLE opcodes ---*/ 3973 case IP_FW_TABLE_ADD: 3974 case IP_FW_TABLE_DEL: 3975 { 3976 ipfw_table_entry ent; 3977 struct tentry_info tei; 3978 struct tid_info ti; 3979 struct table_value v; 3980 3981 error = sooptcopyin(sopt, &ent, 3982 sizeof(ent), sizeof(ent)); 3983 if (error) 3984 break; 3985 3986 memset(&tei, 0, sizeof(tei)); 3987 tei.paddr = &ent.addr; 3988 tei.subtype = AF_INET; 3989 tei.masklen = ent.masklen; 3990 ipfw_import_table_value_legacy(ent.value, &v); 3991 tei.pvalue = &v; 3992 memset(&ti, 0, sizeof(ti)); 3993 ti.uidx = ent.tbl; 3994 ti.type = IPFW_TABLE_CIDR; 3995 3996 error = (opt == IP_FW_TABLE_ADD) ? 
3997 add_table_entry(chain, &ti, &tei, 0, 1) : 3998 del_table_entry(chain, &ti, &tei, 0, 1); 3999 } 4000 break; 4001 4002 case IP_FW_TABLE_FLUSH: 4003 { 4004 u_int16_t tbl; 4005 struct tid_info ti; 4006 4007 error = sooptcopyin(sopt, &tbl, 4008 sizeof(tbl), sizeof(tbl)); 4009 if (error) 4010 break; 4011 memset(&ti, 0, sizeof(ti)); 4012 ti.uidx = tbl; 4013 error = flush_table(chain, &ti); 4014 } 4015 break; 4016 4017 case IP_FW_TABLE_GETSIZE: 4018 { 4019 u_int32_t tbl, cnt; 4020 struct tid_info ti; 4021 4022 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl), 4023 sizeof(tbl)))) 4024 break; 4025 memset(&ti, 0, sizeof(ti)); 4026 ti.uidx = tbl; 4027 IPFW_RLOCK(chain); 4028 error = ipfw_count_table(chain, &ti, &cnt); 4029 IPFW_RUNLOCK(chain); 4030 if (error) 4031 break; 4032 error = sooptcopyout(sopt, &cnt, sizeof(cnt)); 4033 } 4034 break; 4035 4036 case IP_FW_TABLE_LIST: 4037 { 4038 ipfw_table *tbl; 4039 struct tid_info ti; 4040 4041 if (sopt->sopt_valsize < sizeof(*tbl)) { 4042 error = EINVAL; 4043 break; 4044 } 4045 size = sopt->sopt_valsize; 4046 tbl = malloc(size, M_TEMP, M_WAITOK); 4047 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl)); 4048 if (error) { 4049 free(tbl, M_TEMP); 4050 break; 4051 } 4052 tbl->size = (size - sizeof(*tbl)) / 4053 sizeof(ipfw_table_entry); 4054 memset(&ti, 0, sizeof(ti)); 4055 ti.uidx = tbl->tbl; 4056 IPFW_RLOCK(chain); 4057 error = ipfw_dump_table_legacy(chain, &ti, tbl); 4058 IPFW_RUNLOCK(chain); 4059 if (error) { 4060 free(tbl, M_TEMP); 4061 break; 4062 } 4063 error = sooptcopyout(sopt, tbl, size); 4064 free(tbl, M_TEMP); 4065 } 4066 break; 4067 4068 /*--- NAT operations are protected by the IPFW_LOCK ---*/ 4069 case IP_FW_NAT_CFG: 4070 if (IPFW_NAT_LOADED) 4071 error = ipfw_nat_cfg_ptr(sopt); 4072 else { 4073 printf("IP_FW_NAT_CFG: %s\n", 4074 "ipfw_nat not present, please load it"); 4075 error = EINVAL; 4076 } 4077 break; 4078 4079 case IP_FW_NAT_DEL: 4080 if (IPFW_NAT_LOADED) 4081 error = ipfw_nat_del_ptr(sopt); 4082 else { 4083 printf("IP_FW_NAT_DEL: %s\n", 4084 "ipfw_nat not present, please load it"); 4085 error = EINVAL; 4086 } 4087 break; 4088 4089 case IP_FW_NAT_GET_CONFIG: 4090 if (IPFW_NAT_LOADED) 4091 error = ipfw_nat_get_cfg_ptr(sopt); 4092 else { 4093 printf("IP_FW_NAT_GET_CFG: %s\n", 4094 "ipfw_nat not present, please load it"); 4095 error = EINVAL; 4096 } 4097 break; 4098 4099 case IP_FW_NAT_GET_LOG: 4100 if (IPFW_NAT_LOADED) 4101 error = ipfw_nat_get_log_ptr(sopt); 4102 else { 4103 printf("IP_FW_NAT_GET_LOG: %s\n", 4104 "ipfw_nat not present, please load it"); 4105 error = EINVAL; 4106 } 4107 break; 4108 4109 default: 4110 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name); 4111 error = EINVAL; 4112 } 4113 4114 return (error); 4115 #undef RULE_MAXSIZE 4116 } 4117 #define RULE_MAXSIZE (256*sizeof(u_int32_t)) 4118 4119 /* Functions to convert rules 7.2 <==> 8.0 */ 4120 static int 4121 convert_rule_to_7(struct ip_fw_rule0 *rule) 4122 { 4123 /* Used to modify original rule */ 4124 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule; 4125 /* copy of original rule, version 8 */ 4126 struct ip_fw_rule0 *tmp; 4127 4128 /* Used to copy commands */ 4129 ipfw_insn *ccmd, *dst; 4130 int ll = 0, ccmdlen = 0; 4131 4132 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 4133 if (tmp == NULL) { 4134 return 1; //XXX error 4135 } 4136 bcopy(rule, tmp, RULE_MAXSIZE); 4137 4138 /* Copy fields */ 4139 //rule7->_pad = tmp->_pad; 4140 rule7->set = tmp->set; 4141 rule7->rulenum = tmp->rulenum; 4142 rule7->cmd_len = tmp->cmd_len; 4143 rule7->act_ofs = 
tmp->act_ofs; 4144 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule; 4145 rule7->cmd_len = tmp->cmd_len; 4146 rule7->pcnt = tmp->pcnt; 4147 rule7->bcnt = tmp->bcnt; 4148 rule7->timestamp = tmp->timestamp; 4149 4150 /* Copy commands */ 4151 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ; 4152 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 4153 ccmdlen = F_LEN(ccmd); 4154 4155 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 4156 4157 if (dst->opcode > O_NAT) 4158 /* O_REASS doesn't exists in 7.2 version, so 4159 * decrement opcode if it is after O_REASS 4160 */ 4161 dst->opcode--; 4162 4163 if (ccmdlen > ll) { 4164 printf("ipfw: opcode %d size truncated\n", 4165 ccmd->opcode); 4166 return EINVAL; 4167 } 4168 } 4169 free(tmp, M_TEMP); 4170 4171 return 0; 4172 } 4173 4174 static int 4175 convert_rule_to_8(struct ip_fw_rule0 *rule) 4176 { 4177 /* Used to modify original rule */ 4178 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule; 4179 4180 /* Used to copy commands */ 4181 ipfw_insn *ccmd, *dst; 4182 int ll = 0, ccmdlen = 0; 4183 4184 /* Copy of original rule */ 4185 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 4186 if (tmp == NULL) { 4187 return 1; //XXX error 4188 } 4189 4190 bcopy(rule7, tmp, RULE_MAXSIZE); 4191 4192 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ; 4193 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 4194 ccmdlen = F_LEN(ccmd); 4195 4196 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 4197 4198 if (dst->opcode > O_NAT) 4199 /* O_REASS doesn't exists in 7.2 version, so 4200 * increment opcode if it is after O_REASS 4201 */ 4202 dst->opcode++; 4203 4204 if (ccmdlen > ll) { 4205 printf("ipfw: opcode %d size truncated\n", 4206 ccmd->opcode); 4207 return EINVAL; 4208 } 4209 } 4210 4211 rule->_pad = tmp->_pad; 4212 rule->set = tmp->set; 4213 rule->rulenum = tmp->rulenum; 4214 rule->cmd_len = tmp->cmd_len; 4215 rule->act_ofs = tmp->act_ofs; 4216 rule->next_rule = (struct ip_fw *)tmp->next_rule; 4217 rule->cmd_len = tmp->cmd_len; 4218 rule->id = 0; /* XXX see if is ok = 0 */ 4219 rule->pcnt = tmp->pcnt; 4220 rule->bcnt = tmp->bcnt; 4221 rule->timestamp = tmp->timestamp; 4222 4223 free (tmp, M_TEMP); 4224 return 0; 4225 } 4226 4227 /* 4228 * Named object api 4229 * 4230 */ 4231 4232 void 4233 ipfw_init_srv(struct ip_fw_chain *ch) 4234 { 4235 4236 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT); 4237 ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT, 4238 M_IPFW, M_WAITOK | M_ZERO); 4239 } 4240 4241 void 4242 ipfw_destroy_srv(struct ip_fw_chain *ch) 4243 { 4244 4245 free(ch->srvstate, M_IPFW); 4246 ipfw_objhash_destroy(ch->srvmap); 4247 } 4248 4249 /* 4250 * Allocate new bitmask which can be used to enlarge/shrink 4251 * named instance index. 4252 */ 4253 void 4254 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks) 4255 { 4256 size_t size; 4257 int max_blocks; 4258 u_long *idx_mask; 4259 4260 KASSERT((items % BLOCK_ITEMS) == 0, 4261 ("bitmask size needs to power of 2 and greater or equal to %zu", 4262 BLOCK_ITEMS)); 4263 4264 max_blocks = items / BLOCK_ITEMS; 4265 size = items / 8; 4266 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK); 4267 /* Mark all as free */ 4268 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS); 4269 *idx_mask &= ~(u_long)1; /* Skip index 0 */ 4270 4271 *idx = idx_mask; 4272 *pblocks = max_blocks; 4273 } 4274 4275 /* 4276 * Copy current bitmask index to new one. 
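 *
 * Together with ipfw_objhash_bitmap_alloc/_swap/_free() this allows an
 * instance's index bitmap to be resized without allocating under a
 * lock.  Typical flow (sketch; locking shown only as placeholders):
 *
 *	void *new_idx;
 *	int new_blocks;
 *
 *	ipfw_objhash_bitmap_alloc(new_items, &new_idx, &new_blocks);
 *	... lock ...
 *	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
 *	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);
 *	... unlock ...
 *	ipfw_objhash_bitmap_free(new_idx, new_blocks);	/* now the old mask */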
4277 */ 4278 void 4279 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks) 4280 { 4281 int old_blocks, new_blocks; 4282 u_long *old_idx, *new_idx; 4283 int i; 4284 4285 old_idx = ni->idx_mask; 4286 old_blocks = ni->max_blocks; 4287 new_idx = *idx; 4288 new_blocks = *blocks; 4289 4290 for (i = 0; i < IPFW_MAX_SETS; i++) { 4291 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i], 4292 old_blocks * sizeof(u_long)); 4293 } 4294 } 4295 4296 /* 4297 * Swaps current @ni index with new one. 4298 */ 4299 void 4300 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks) 4301 { 4302 int old_blocks; 4303 u_long *old_idx; 4304 4305 old_idx = ni->idx_mask; 4306 old_blocks = ni->max_blocks; 4307 4308 ni->idx_mask = *idx; 4309 ni->max_blocks = *blocks; 4310 4311 /* Save old values */ 4312 *idx = old_idx; 4313 *blocks = old_blocks; 4314 } 4315 4316 void 4317 ipfw_objhash_bitmap_free(void *idx, int blocks) 4318 { 4319 4320 free(idx, M_IPFW); 4321 } 4322 4323 /* 4324 * Creates named hash instance. 4325 * Must be called without holding any locks. 4326 * Return pointer to new instance. 4327 */ 4328 struct namedobj_instance * 4329 ipfw_objhash_create(uint32_t items) 4330 { 4331 struct namedobj_instance *ni; 4332 int i; 4333 size_t size; 4334 4335 size = sizeof(struct namedobj_instance) + 4336 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE + 4337 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE; 4338 4339 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO); 4340 ni->nn_size = NAMEDOBJ_HASH_SIZE; 4341 ni->nv_size = NAMEDOBJ_HASH_SIZE; 4342 4343 ni->names = (struct namedobjects_head *)(ni +1); 4344 ni->values = &ni->names[ni->nn_size]; 4345 4346 for (i = 0; i < ni->nn_size; i++) 4347 TAILQ_INIT(&ni->names[i]); 4348 4349 for (i = 0; i < ni->nv_size; i++) 4350 TAILQ_INIT(&ni->values[i]); 4351 4352 /* Set default hashing/comparison functions */ 4353 ni->hash_f = objhash_hash_name; 4354 ni->cmp_f = objhash_cmp_name; 4355 4356 /* Allocate bitmask separately due to possible resize */ 4357 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks); 4358 4359 return (ni); 4360 } 4361 4362 void 4363 ipfw_objhash_destroy(struct namedobj_instance *ni) 4364 { 4365 4366 free(ni->idx_mask, M_IPFW); 4367 free(ni, M_IPFW); 4368 } 4369 4370 void 4371 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f, 4372 objhash_cmp_f *cmp_f) 4373 { 4374 4375 ni->hash_f = hash_f; 4376 ni->cmp_f = cmp_f; 4377 } 4378 4379 static uint32_t 4380 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set) 4381 { 4382 4383 return (fnv_32_str((const char *)name, FNV1_32_INIT)); 4384 } 4385 4386 static int 4387 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set) 4388 { 4389 4390 if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set)) 4391 return (0); 4392 4393 return (1); 4394 } 4395 4396 static uint32_t 4397 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val) 4398 { 4399 uint32_t v; 4400 4401 v = val % (ni->nv_size - 1); 4402 4403 return (v); 4404 } 4405 4406 struct named_object * 4407 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, 4408 const char *name) 4409 { 4410 struct named_object *no; 4411 uint32_t hash; 4412 4413 hash = ni->hash_f(ni, name, set) % ni->nn_size; 4414 4415 TAILQ_FOREACH(no, &ni->names[hash], nn_next) { 4416 if (ni->cmp_f(no, name, set) == 0) 4417 return (no); 4418 } 4419 4420 return (NULL); 4421 } 4422 4423 /* 4424 * Find named object by @uid. 
/*
 * Finds the name TLV with index @uidx.
 * Checks @tlvs for valid data inside.
 *
 * Returns a pointer to the found TLV or NULL.
 */
ipfw_obj_ntlv *
ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv)
{
	ipfw_obj_ntlv *ntlv;
	uintptr_t pa, pe;
	int l;

	pa = (uintptr_t)tlvs;
	pe = pa + len;
	l = 0;
	for (; pa < pe; pa += l) {
		ntlv = (ipfw_obj_ntlv *)pa;
		l = ntlv->head.length;

		if (l != sizeof(*ntlv))
			return (NULL);

		if (ntlv->idx != uidx)
			continue;
		/*
		 * When userland has specified a zero TLV type, do not
		 * compare it with @etlv: in some cases userland does not
		 * know which type the object should have.  Use only @uidx
		 * and the name to search for the named object.
		 */
		if (ntlv->head.type != 0 &&
		    ntlv->head.type != (uint16_t)etlv)
			continue;

		if (ipfw_check_object_name_generic(ntlv->name) != 0)
			return (NULL);

		return (ntlv);
	}

	return (NULL);
}

/*
 * Finds an object config based on either the legacy index
 * or the name in the ntlv.
 * Note that the @ti structure contains unchecked data from userland.
 *
 * Returns 0 on success and fills in @pno with the found config.
 */
int
ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
    uint32_t etlv, struct named_object **pno)
{
	char *name;
	ipfw_obj_ntlv *ntlv;
	uint32_t set;

	if (ti->tlvs == NULL)
		return (EINVAL);

	ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
	if (ntlv == NULL)
		return (EINVAL);
	name = ntlv->name;

	/*
	 * Use the set provided by @ti instead of the @ntlv one.
	 * This is needed due to the different sets behavior
	 * controlled by V_fw_tables_sets.
	 */
	set = ti->set;
	*pno = ipfw_objhash_lookup_name(ni, set, name);
	if (*pno == NULL)
		return (ESRCH);
	return (0);
}
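/*
 * Illustrative sketch (not part of the original file): resolving a userland
 * object reference.  A request handler fills a struct tid_info with the raw
 * TLV buffer and the index referenced by the rule, then asks the hash for
 * the matching config.  The function name, the guard macro and the
 * IPFW_TLV_EXAMPLE type value are hypothetical.
 */
#ifdef IPFW_OBJHASH_EXAMPLE
static int
example_resolve_object(struct namedobj_instance *ni, void *tlvs, int tlen,
    uint16_t uidx, uint32_t set, struct named_object **pno)
{
	struct tid_info ti;

	memset(&ti, 0, sizeof(ti));
	ti.tlvs = tlvs;		/* unchecked data from userland */
	ti.tlen = tlen;
	ti.uidx = uidx;		/* index used inside the rule */
	ti.set = set;		/* set resolved by the caller */

	return (ipfw_objhash_find_type(ni, &ti, IPFW_TLV_EXAMPLE, pno));
}
#endif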
/*
 * Finds a named object by name, also considering its TLV type.
 */
struct named_object *
ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
    uint32_t type, const char *name)
{
	struct named_object *no;
	uint32_t hash;

	hash = ni->hash_f(ni, name, set) % ni->nn_size;

	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
		if (ni->cmp_f(no, name, set) == 0 &&
		    no->etlv == (uint16_t)type)
			return (no);
	}

	return (NULL);
}

struct named_object *
ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
{
	struct named_object *no;
	uint32_t hash;

	hash = objhash_hash_idx(ni, kidx);

	TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
		if (no->kidx == kidx)
			return (no);
	}

	return (NULL);
}

int
ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
    struct named_object *b)
{

	if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
		return (1);

	return (0);
}

void
ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
{
	uint32_t hash;

	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
	TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);

	hash = objhash_hash_idx(ni, no->kidx);
	TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);

	ni->count++;
}

void
ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
{
	uint32_t hash;

	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
	TAILQ_REMOVE(&ni->names[hash], no, nn_next);

	hash = objhash_hash_idx(ni, no->kidx);
	TAILQ_REMOVE(&ni->values[hash], no, nv_next);

	ni->count--;
}

uint32_t
ipfw_objhash_count(struct namedobj_instance *ni)
{

	return (ni->count);
}

uint32_t
ipfw_objhash_count_type(struct namedobj_instance *ni, uint16_t type)
{
	struct named_object *no;
	uint32_t count;
	int i;

	count = 0;
	for (i = 0; i < ni->nn_size; i++) {
		TAILQ_FOREACH(no, &ni->names[i], nn_next) {
			if (no->etlv == type)
				count++;
		}
	}
	return (count);
}

/*
 * Runs @f for each named object found.
 * It is safe to delete objects from the callback.
 */
int
ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
{
	struct named_object *no, *no_tmp;
	int i, ret;

	for (i = 0; i < ni->nn_size; i++) {
		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
			ret = f(ni, no, arg);
			if (ret != 0)
				return (ret);
		}
	}
	return (0);
}

/*
 * Runs @f for each named object found with type @type.
 * It is safe to delete objects from the callback.
 */
int
ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f,
    void *arg, uint16_t type)
{
	struct named_object *no, *no_tmp;
	int i, ret;

	for (i = 0; i < ni->nn_size; i++) {
		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
			if (no->etlv != type)
				continue;
			ret = f(ni, no, arg);
			if (ret != 0)
				return (ret);
		}
	}
	return (0);
}
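/*
 * Illustrative sketch (not part of the original file): typical lifecycle of
 * a named object.  A kernel index is taken from the instance bitmask (see
 * the allocator helpers below), the object is linked into both hashes, and
 * the steps are reverted on teardown.  The guard macro, the function name
 * and the assumption that @name stays valid for the object's lifetime are
 * all hypothetical.
 */
#ifdef IPFW_OBJHASH_EXAMPLE
static int
example_object_lifecycle(struct namedobj_instance *ni, char *name,
    uint32_t set, uint16_t etlv)
{
	struct named_object *no;

	no = malloc(sizeof(*no), M_IPFW, M_WAITOK | M_ZERO);
	no->name = name;	/* assumed to outlive the object */
	no->set = set;
	no->etlv = etlv;

	/* Reserve a kernel index and link the object into both hashes. */
	if (ipfw_objhash_alloc_idx(ni, &no->kidx) != 0) {
		free(no, M_IPFW);
		return (ENOSPC);
	}
	ipfw_objhash_add(ni, no);

	/* The object is now visible to lookups. */
	KASSERT(ipfw_objhash_lookup_name(ni, set, name) == no,
	    ("object not found after insertion"));

	/* Teardown: unlink, release the index and free the memory. */
	ipfw_objhash_del(ni, no);
	ipfw_objhash_free_idx(ni, no->kidx);
	free(no, M_IPFW);

	return (0);
}
#endif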
4648 */ 4649 int 4650 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx) 4651 { 4652 u_long *mask; 4653 int i, v; 4654 4655 i = idx / BLOCK_ITEMS; 4656 v = idx % BLOCK_ITEMS; 4657 4658 if (i >= ni->max_blocks) 4659 return (1); 4660 4661 mask = &ni->idx_mask[i]; 4662 4663 if ((*mask & ((u_long)1 << v)) != 0) 4664 return (1); 4665 4666 /* Mark as free */ 4667 *mask |= (u_long)1 << v; 4668 4669 /* Update free offset */ 4670 if (ni->free_off[0] > i) 4671 ni->free_off[0] = i; 4672 4673 return (0); 4674 } 4675 4676 /* 4677 * Allocate new index in given instance and stores in in @pidx. 4678 * Returns 0 on success. 4679 */ 4680 int 4681 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx) 4682 { 4683 struct namedobj_instance *ni; 4684 u_long *mask; 4685 int i, off, v; 4686 4687 ni = (struct namedobj_instance *)n; 4688 4689 off = ni->free_off[0]; 4690 mask = &ni->idx_mask[off]; 4691 4692 for (i = off; i < ni->max_blocks; i++, mask++) { 4693 if ((v = ffsl(*mask)) == 0) 4694 continue; 4695 4696 /* Mark as busy */ 4697 *mask &= ~ ((u_long)1 << (v - 1)); 4698 4699 ni->free_off[0] = i; 4700 4701 v = BLOCK_ITEMS * i + v - 1; 4702 4703 *pidx = v; 4704 return (0); 4705 } 4706 4707 return (1); 4708 } 4709 4710 /* end of file */ 4711