1 /*- 2 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 3 * Copyright (c) 2014 Yandex LLC 4 * Copyright (c) 2014 Alexander V. Chernikov 5 * 6 * Supported by: Valeria Paoli 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Control socket and rule management routines for ipfw. 35 * Control is currently implemented via IP_FW3 setsockopt() code. 36 */ 37 38 #include "opt_ipfw.h" 39 #include "opt_inet.h" 40 #ifndef INET 41 #error IPFIREWALL requires INET. 
42 #endif /* INET */ 43 #include "opt_inet6.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/malloc.h> 48 #include <sys/mbuf.h> /* struct m_tag used by nested headers */ 49 #include <sys/kernel.h> 50 #include <sys/lock.h> 51 #include <sys/priv.h> 52 #include <sys/proc.h> 53 #include <sys/rwlock.h> 54 #include <sys/rmlock.h> 55 #include <sys/socket.h> 56 #include <sys/socketvar.h> 57 #include <sys/sysctl.h> 58 #include <sys/syslog.h> 59 #include <sys/fnv_hash.h> 60 #include <net/if.h> 61 #include <net/route.h> 62 #include <net/vnet.h> 63 #include <vm/vm.h> 64 #include <vm/vm_extern.h> 65 66 #include <netinet/in.h> 67 #include <netinet/ip_var.h> /* hooks */ 68 #include <netinet/ip_fw.h> 69 70 #include <netpfil/ipfw/ip_fw_private.h> 71 #include <netpfil/ipfw/ip_fw_table.h> 72 73 #ifdef MAC 74 #include <security/mac/mac_framework.h> 75 #endif 76 77 static int ipfw_ctl(struct sockopt *sopt); 78 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, 79 struct rule_check_info *ci); 80 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size, 81 struct rule_check_info *ci); 82 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 83 struct rule_check_info *ci); 84 static int rewrite_rule_uidx(struct ip_fw_chain *chain, 85 struct rule_check_info *ci); 86 87 #define NAMEDOBJ_HASH_SIZE 32 88 89 struct namedobj_instance { 90 struct namedobjects_head *names; 91 struct namedobjects_head *values; 92 uint32_t nn_size; /* names hash size */ 93 uint32_t nv_size; /* number hash size */ 94 u_long *idx_mask; /* used items bitmask */ 95 uint32_t max_blocks; /* number of "long" blocks in bitmask */ 96 uint32_t count; /* number of items */ 97 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */ 98 objhash_hash_f *hash_f; 99 objhash_cmp_f *cmp_f; 100 }; 101 #define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */ 102 103 static uint32_t objhash_hash_name(struct namedobj_instance *ni, 104 const void *key, uint32_t kopt); 105 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val); 106 static int objhash_cmp_name(struct named_object *no, const void *name, 107 uint32_t set); 108 109 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); 110 111 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 112 struct sockopt_data *sd); 113 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 114 struct sockopt_data *sd); 115 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 116 struct sockopt_data *sd); 117 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 118 struct sockopt_data *sd); 119 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 120 struct sockopt_data *sd); 121 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 122 struct sockopt_data *sd); 123 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 124 struct sockopt_data *sd); 125 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 126 struct sockopt_data *sd); 127 128 /* ctl3 handler data */ 129 struct mtx ctl3_lock; 130 #define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF) 131 #define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock) 132 #define CTL3_LOCK() mtx_lock(&ctl3_lock) 133 #define CTL3_UNLOCK() mtx_unlock(&ctl3_lock) 134 135 static struct ipfw_sopt_handler *ctl3_handlers; 136 static size_t ctl3_hsize; 137 static uint64_t ctl3_refct, ctl3_gencnt; 138 #define CTL3_SMALLBUF 4096 
/* small page-size write buffer */ 139 #define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */ 140 141 static int ipfw_flush_sopt_data(struct sockopt_data *sd); 142 143 static struct ipfw_sopt_handler scodes[] = { 144 { IP_FW_XGET, 0, HDIR_GET, dump_config }, 145 { IP_FW_XADD, 0, HDIR_BOTH, add_rules }, 146 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules }, 147 { IP_FW_XZERO, 0, HDIR_SET, clear_rules }, 148 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules }, 149 { IP_FW_XMOVE, 0, HDIR_SET, move_rules }, 150 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets }, 151 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets }, 152 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets }, 153 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes }, 154 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects }, 155 }; 156 157 static int 158 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule); 159 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd, 160 uint16_t *puidx, uint8_t *ptype); 161 static int mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule, 162 uint32_t *bmask); 163 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 164 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti); 165 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, 166 struct tid_info *ti, struct obj_idx *pidx, int *unresolved); 167 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule); 168 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, 169 struct obj_idx *oib, struct obj_idx *end); 170 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 171 struct sockopt_data *sd); 172 173 /* 174 * Opcode object rewriter variables 175 */ 176 struct opcode_obj_rewrite *ctl3_rewriters; 177 static size_t ctl3_rsize; 178 179 /* 180 * static variables followed by global ones 181 */ 182 183 static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone); 184 #define V_ipfw_cntr_zone VNET(ipfw_cntr_zone) 185 186 void 187 ipfw_init_counters() 188 { 189 190 V_ipfw_cntr_zone = uma_zcreate("IPFW counters", 191 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL, 192 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 193 } 194 195 void 196 ipfw_destroy_counters() 197 { 198 199 uma_zdestroy(V_ipfw_cntr_zone); 200 } 201 202 struct ip_fw * 203 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize) 204 { 205 struct ip_fw *rule; 206 207 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO); 208 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO); 209 210 return (rule); 211 } 212 213 static void 214 free_rule(struct ip_fw *rule) 215 { 216 217 uma_zfree(V_ipfw_cntr_zone, rule->cntr); 218 free(rule, M_IPFW); 219 } 220 221 222 /* 223 * Find the smallest rule >= key, id. 224 * We could use bsearch but it is so simple that we code it directly 225 */ 226 int 227 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id) 228 { 229 int i, lo, hi; 230 struct ip_fw *r; 231 232 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) { 233 i = (lo + hi) / 2; 234 r = chain->map[i]; 235 if (r->rulenum < key) 236 lo = i + 1; /* continue from the next one */ 237 else if (r->rulenum > key) 238 hi = i; /* this might be good */ 239 else if (r->id < id) 240 lo = i + 1; /* continue from the next one */ 241 else /* r->id >= id */ 242 hi = i; /* this might be good */ 243 } 244 return hi; 245 } 246 247 /* 248 * Builds skipto cache on rule set @map. 
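 *
 * The cache maps every possible rule number to the index of the first
 * rule whose rulenum is greater than or equal to it. For example, with
 * a hypothetical ruleset numbered 100, 100, 200 plus the default rule,
 * the cache holds 0 for slots 0..100, 2 for slots 101..200 and 3 for
 * the remaining slots.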
 */
static void
update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
{
	int *smap, rulenum;
	int i, mi;

	IPFW_UH_WLOCK_ASSERT(chain);

	mi = 0;
	rulenum = map[mi]->rulenum;
	smap = chain->idxmap_back;

	if (smap == NULL)
		return;

	for (i = 0; i < 65536; i++) {
		smap[i] = mi;
		/* Use the same rule index while i < rulenum */
		if (i != rulenum || i == 65535)
			continue;
		/* Find next rule with num > i */
		rulenum = map[++mi]->rulenum;
		while (rulenum == i)
			rulenum = map[++mi]->rulenum;
	}
}

/*
 * Swaps prepared (backup) index with current one.
 */
static void
swap_skipto_cache(struct ip_fw_chain *chain)
{
	int *map;

	IPFW_UH_WLOCK_ASSERT(chain);
	IPFW_WLOCK_ASSERT(chain);

	map = chain->idxmap;
	chain->idxmap = chain->idxmap_back;
	chain->idxmap_back = map;
}

/*
 * Allocate and initialize skipto cache.
 */
void
ipfw_init_skipto_cache(struct ip_fw_chain *chain)
{
	int *idxmap, *idxmap_back;

	idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW,
	    M_WAITOK | M_ZERO);
	idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW,
	    M_WAITOK | M_ZERO);

	/*
	 * Note we may be called at any time after initialization,
	 * for example, on first skipto rule, so we need to
	 * provide valid chain->idxmap on return.
	 */

	IPFW_UH_WLOCK(chain);
	if (chain->idxmap != NULL) {
		IPFW_UH_WUNLOCK(chain);
		free(idxmap, M_IPFW);
		free(idxmap_back, M_IPFW);
		return;
	}

	/* Set backup pointer first to permit building cache */
	chain->idxmap_back = idxmap_back;
	update_skipto_cache(chain, chain->map);
	IPFW_WLOCK(chain);
	/* It is now safe to set chain->idxmap ptr */
	chain->idxmap = idxmap;
	swap_skipto_cache(chain);
	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);
}

/*
 * Destroys skipto cache.
 */
void
ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
{

	if (chain->idxmap != NULL)
		free(chain->idxmap, M_IPFW);
	if (chain->idxmap_back != NULL)
		free(chain->idxmap_back, M_IPFW);
}


/*
 * allocate a new map, returns the chain locked. extra is the number
 * of entries to add or delete.
 */
static struct ip_fw **
get_map(struct ip_fw_chain *chain, int extra, int locked)
{

	for (;;) {
		struct ip_fw **map;
		int i, mflags;

		mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);

		i = chain->n_rules + extra;
		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
		if (map == NULL) {
			printf("%s: cannot allocate map\n", __FUNCTION__);
			return NULL;
		}
		if (!locked)
			IPFW_UH_WLOCK(chain);
		if (i >= chain->n_rules + extra) /* good */
			return map;
		/* otherwise we lost the race, free and retry */
		if (!locked)
			IPFW_UH_WUNLOCK(chain);
		free(map, M_IPFW);
	}
}

/*
 * swap the maps.
It is supposed to be called with IPFW_UH_WLOCK 378 */ 379 static struct ip_fw ** 380 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len) 381 { 382 struct ip_fw **old_map; 383 384 IPFW_WLOCK(chain); 385 chain->id++; 386 chain->n_rules = new_len; 387 old_map = chain->map; 388 chain->map = new_map; 389 swap_skipto_cache(chain); 390 IPFW_WUNLOCK(chain); 391 return old_map; 392 } 393 394 395 static void 396 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr) 397 { 398 399 cntr->size = sizeof(*cntr); 400 401 if (krule->cntr != NULL) { 402 cntr->pcnt = counter_u64_fetch(krule->cntr); 403 cntr->bcnt = counter_u64_fetch(krule->cntr + 1); 404 cntr->timestamp = krule->timestamp; 405 } 406 if (cntr->timestamp > 0) 407 cntr->timestamp += boottime.tv_sec; 408 } 409 410 static void 411 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr) 412 { 413 414 if (krule->cntr != NULL) { 415 cntr->pcnt = counter_u64_fetch(krule->cntr); 416 cntr->bcnt = counter_u64_fetch(krule->cntr + 1); 417 cntr->timestamp = krule->timestamp; 418 } 419 if (cntr->timestamp > 0) 420 cntr->timestamp += boottime.tv_sec; 421 } 422 423 /* 424 * Copies rule @urule from v1 userland format (current). 425 * to kernel @krule. 426 * Assume @krule is zeroed. 427 */ 428 static void 429 import_rule1(struct rule_check_info *ci) 430 { 431 struct ip_fw_rule *urule; 432 struct ip_fw *krule; 433 434 urule = (struct ip_fw_rule *)ci->urule; 435 krule = (struct ip_fw *)ci->krule; 436 437 /* copy header */ 438 krule->act_ofs = urule->act_ofs; 439 krule->cmd_len = urule->cmd_len; 440 krule->rulenum = urule->rulenum; 441 krule->set = urule->set; 442 krule->flags = urule->flags; 443 444 /* Save rulenum offset */ 445 ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum); 446 447 /* Copy opcodes */ 448 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t)); 449 } 450 451 /* 452 * Export rule into v1 format (Current). 453 * Layout: 454 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT) 455 * [ ip_fw_rule ] OR 456 * [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs). 457 * ] 458 * Assume @data is zeroed. 459 */ 460 static void 461 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs) 462 { 463 struct ip_fw_bcounter *cntr; 464 struct ip_fw_rule *urule; 465 ipfw_obj_tlv *tlv; 466 467 /* Fill in TLV header */ 468 tlv = (ipfw_obj_tlv *)data; 469 tlv->type = IPFW_TLV_RULE_ENT; 470 tlv->length = len; 471 472 if (rcntrs != 0) { 473 /* Copy counters */ 474 cntr = (struct ip_fw_bcounter *)(tlv + 1); 475 urule = (struct ip_fw_rule *)(cntr + 1); 476 export_cntr1_base(krule, cntr); 477 } else 478 urule = (struct ip_fw_rule *)(tlv + 1); 479 480 /* copy header */ 481 urule->act_ofs = krule->act_ofs; 482 urule->cmd_len = krule->cmd_len; 483 urule->rulenum = krule->rulenum; 484 urule->set = krule->set; 485 urule->flags = krule->flags; 486 urule->id = krule->id; 487 488 /* Copy opcodes */ 489 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t)); 490 } 491 492 493 /* 494 * Copies rule @urule from FreeBSD8 userland format (v0) 495 * to kernel @krule. 496 * Assume @krule is zeroed. 
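 *
 * Note the v0 layout keeps counters (pcnt/bcnt/timestamp) inline in the
 * rule; they are not imported here. Only header fields and opcodes are
 * copied, counters live in the separate per-CPU counter allocation.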
 */
static void
import_rule0(struct rule_check_info *ci)
{
	struct ip_fw_rule0 *urule;
	struct ip_fw *krule;
	int cmdlen, l;
	ipfw_insn *cmd;
	ipfw_insn_limit *lcmd;
	ipfw_insn_if *cmdif;

	urule = (struct ip_fw_rule0 *)ci->urule;
	krule = (struct ip_fw *)ci->krule;

	/* copy header */
	krule->act_ofs = urule->act_ofs;
	krule->cmd_len = urule->cmd_len;
	krule->rulenum = urule->rulenum;
	krule->set = urule->set;
	if ((urule->_pad & 1) != 0)
		krule->flags |= IPFW_RULE_NOOPT;

	/* Save rulenum offset */
	ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);

	/* Copy opcodes */
	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));

	/*
	 * Alter opcodes:
	 * 1) convert tablearg value from 65535 to 0
	 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room for targ).
	 * 3) convert table number in iface opcodes to u16
	 */
	l = krule->cmd_len;
	cmd = krule->cmd;
	cmdlen = 0;

	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		switch (cmd->opcode) {
		/* Opcodes supporting tablearg */
		case O_TAG:
		case O_TAGGED:
		case O_PIPE:
		case O_QUEUE:
		case O_DIVERT:
		case O_TEE:
		case O_SKIPTO:
		case O_CALLRETURN:
		case O_NETGRAPH:
		case O_NGTEE:
		case O_NAT:
			if (cmd->arg1 == 65535)
				cmd->arg1 = IP_FW_TARG;
			break;
		case O_SETFIB:
		case O_SETDSCP:
			if (cmd->arg1 == 65535)
				cmd->arg1 = IP_FW_TARG;
			else
				cmd->arg1 |= 0x8000;
			break;
		case O_LIMIT:
			lcmd = (ipfw_insn_limit *)cmd;
			if (lcmd->conn_limit == 65535)
				lcmd->conn_limit = IP_FW_TARG;
			break;
		/* Interface tables */
		case O_XMIT:
		case O_RECV:
		case O_VIA:
			/* Interface table, possibly */
			cmdif = (ipfw_insn_if *)cmd;
			if (cmdif->name[0] != '\1')
				break;

			cmdif->p.kidx = (uint16_t)cmdif->p.glob;
			break;
		}
	}
}

/*
 * Copies rule @krule from kernel to FreeBSD8 userland format (v0).
 */
static void
export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
{
	int cmdlen, l;
	ipfw_insn *cmd;
	ipfw_insn_limit *lcmd;
	ipfw_insn_if *cmdif;

	/* copy header */
	memset(urule, 0, len);
	urule->act_ofs = krule->act_ofs;
	urule->cmd_len = krule->cmd_len;
	urule->rulenum = krule->rulenum;
	urule->set = krule->set;
	if ((krule->flags & IPFW_RULE_NOOPT) != 0)
		urule->_pad |= 1;

	/* Copy opcodes */
	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));

	/* Export counters */
	export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);

	/*
	 * Alter opcodes:
	 * 1) convert tablearg value from 0 to 65535
	 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
611 * 3) convert table number in iface opcodes to int 612 */ 613 l = urule->cmd_len; 614 cmd = urule->cmd; 615 cmdlen = 0; 616 617 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 618 cmdlen = F_LEN(cmd); 619 620 switch (cmd->opcode) { 621 /* Opcodes supporting tablearg */ 622 case O_TAG: 623 case O_TAGGED: 624 case O_PIPE: 625 case O_QUEUE: 626 case O_DIVERT: 627 case O_TEE: 628 case O_SKIPTO: 629 case O_CALLRETURN: 630 case O_NETGRAPH: 631 case O_NGTEE: 632 case O_NAT: 633 if (cmd->arg1 == IP_FW_TARG) 634 cmd->arg1 = 65535; 635 break; 636 case O_SETFIB: 637 case O_SETDSCP: 638 if (cmd->arg1 == IP_FW_TARG) 639 cmd->arg1 = 65535; 640 else 641 cmd->arg1 &= ~0x8000; 642 break; 643 case O_LIMIT: 644 lcmd = (ipfw_insn_limit *)cmd; 645 if (lcmd->conn_limit == IP_FW_TARG) 646 lcmd->conn_limit = 65535; 647 break; 648 /* Interface tables */ 649 case O_XMIT: 650 case O_RECV: 651 case O_VIA: 652 /* Interface table, possibly */ 653 cmdif = (ipfw_insn_if *)cmd; 654 if (cmdif->name[0] != '\1') 655 break; 656 657 cmdif->p.glob = cmdif->p.kidx; 658 break; 659 } 660 } 661 } 662 663 /* 664 * Add new rule(s) to the list possibly creating rule number for each. 665 * Update the rule_number in the input struct so the caller knows it as well. 666 * Must be called without IPFW_UH held 667 */ 668 static int 669 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count) 670 { 671 int error, i, insert_before, tcount; 672 uint16_t rulenum, *pnum; 673 struct rule_check_info *ci; 674 struct ip_fw *krule; 675 struct ip_fw **map; /* the new array of pointers */ 676 677 /* Check if we need to do table/obj index remap */ 678 tcount = 0; 679 for (ci = rci, i = 0; i < count; ci++, i++) { 680 if (ci->object_opcodes == 0) 681 continue; 682 683 /* 684 * Rule has some object opcodes. 685 * We need to find (and create non-existing) 686 * kernel objects, and reference existing ones. 687 */ 688 error = rewrite_rule_uidx(chain, ci); 689 if (error != 0) { 690 691 /* 692 * rewrite failed, state for current rule 693 * has been reverted. Check if we need to 694 * revert more. 695 */ 696 if (tcount > 0) { 697 698 /* 699 * We have some more table rules 700 * we need to rollback. 701 */ 702 703 IPFW_UH_WLOCK(chain); 704 while (ci != rci) { 705 ci--; 706 if (ci->object_opcodes == 0) 707 continue; 708 unref_rule_objects(chain,ci->krule); 709 710 } 711 IPFW_UH_WUNLOCK(chain); 712 713 } 714 715 return (error); 716 } 717 718 tcount++; 719 } 720 721 /* get_map returns with IPFW_UH_WLOCK if successful */ 722 map = get_map(chain, count, 0 /* not locked */); 723 if (map == NULL) { 724 if (tcount > 0) { 725 /* Unbind tables */ 726 IPFW_UH_WLOCK(chain); 727 for (ci = rci, i = 0; i < count; ci++, i++) { 728 if (ci->object_opcodes == 0) 729 continue; 730 731 unref_rule_objects(chain, ci->krule); 732 } 733 IPFW_UH_WUNLOCK(chain); 734 } 735 736 return (ENOSPC); 737 } 738 739 if (V_autoinc_step < 1) 740 V_autoinc_step = 1; 741 else if (V_autoinc_step > 1000) 742 V_autoinc_step = 1000; 743 744 /* FIXME: Handle count > 1 */ 745 ci = rci; 746 krule = ci->krule; 747 rulenum = krule->rulenum; 748 749 /* find the insertion point, we will insert before */ 750 insert_before = rulenum ? 
rulenum + 1 : IPFW_DEFAULT_RULE; 751 i = ipfw_find_rule(chain, insert_before, 0); 752 /* duplicate first part */ 753 if (i > 0) 754 bcopy(chain->map, map, i * sizeof(struct ip_fw *)); 755 map[i] = krule; 756 /* duplicate remaining part, we always have the default rule */ 757 bcopy(chain->map + i, map + i + 1, 758 sizeof(struct ip_fw *) *(chain->n_rules - i)); 759 if (rulenum == 0) { 760 /* Compute rule number and write it back */ 761 rulenum = i > 0 ? map[i-1]->rulenum : 0; 762 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step) 763 rulenum += V_autoinc_step; 764 krule->rulenum = rulenum; 765 /* Save number to userland rule */ 766 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff); 767 *pnum = rulenum; 768 } 769 770 krule->id = chain->id + 1; 771 update_skipto_cache(chain, map); 772 map = swap_map(chain, map, chain->n_rules + 1); 773 chain->static_len += RULEUSIZE0(krule); 774 IPFW_UH_WUNLOCK(chain); 775 if (map) 776 free(map, M_IPFW); 777 return (0); 778 } 779 780 /* 781 * Adds @rule to the list of rules to reap 782 */ 783 void 784 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head, 785 struct ip_fw *rule) 786 { 787 788 IPFW_UH_WLOCK_ASSERT(chain); 789 790 /* Unlink rule from everywhere */ 791 unref_rule_objects(chain, rule); 792 793 *((struct ip_fw **)rule) = *head; 794 *head = rule; 795 } 796 797 /* 798 * Reclaim storage associated with a list of rules. This is 799 * typically the list created using remove_rule. 800 * A NULL pointer on input is handled correctly. 801 */ 802 void 803 ipfw_reap_rules(struct ip_fw *head) 804 { 805 struct ip_fw *rule; 806 807 while ((rule = head) != NULL) { 808 head = *((struct ip_fw **)head); 809 free_rule(rule); 810 } 811 } 812 813 /* 814 * Rules to keep are 815 * (default || reserved || !match_set || !match_number) 816 * where 817 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE) 818 * // the default rule is always protected 819 * 820 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET) 821 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush") 822 * 823 * match_set ::= (cmd == 0 || rule->set == set) 824 * // set number is ignored for cmd == 0 825 * 826 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum) 827 * // number is ignored for cmd == 1 or n == 0 828 * 829 */ 830 int 831 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt) 832 { 833 834 /* Don't match default rule for modification queries */ 835 if (rule->rulenum == IPFW_DEFAULT_RULE && 836 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0) 837 return (0); 838 839 /* Don't match rules in reserved set for flush requests */ 840 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET) 841 return (0); 842 843 /* If we're filtering by set, don't match other sets */ 844 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set) 845 return (0); 846 847 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 && 848 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule)) 849 return (0); 850 851 return (1); 852 } 853 854 /* 855 * Delete rules matching range @rt. 856 * Saves number of deleted rules in @ndel. 857 * 858 * Returns 0 on success. 859 */ 860 static int 861 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel) 862 { 863 struct ip_fw *reap, *rule, **map; 864 int end, start; 865 int i, n, ndyn, ofs; 866 867 reap = NULL; 868 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 869 870 /* 871 * Stage 1: Determine range to inspect. 872 * Range is half-inclusive, e.g [start, end). 
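 *
 * For example, a request with start_rule == end_rule == N selects
 * exactly the rules numbered N, or nothing if no such rule exists.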
 */
	start = 0;
	end = chain->n_rules - 1;

	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
		start = ipfw_find_rule(chain, rt->start_rule, 0);

		end = ipfw_find_rule(chain, rt->end_rule, 0);
		if (rt->end_rule != IPFW_DEFAULT_RULE)
			while (chain->map[end]->rulenum == rt->end_rule)
				end++;
	}

	/* Allocate new map of the same size */
	map = get_map(chain, 0, 1 /* locked */);
	if (map == NULL) {
		IPFW_UH_WUNLOCK(chain);
		return (ENOMEM);
	}

	n = 0;
	ndyn = 0;
	ofs = start;
	/* 1. bcopy the initial part of the map */
	if (start > 0)
		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
	/* 2. copy active rules between start and end */
	for (i = start; i < end; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0) {
			map[ofs++] = rule;
			continue;
		}

		n++;
		if (ipfw_is_dyn_rule(rule) != 0)
			ndyn++;
	}
	/* 3. copy the final part of the map */
	bcopy(chain->map + end, map + ofs,
	    (chain->n_rules - end) * sizeof(struct ip_fw *));
	/* 4. recalculate skipto cache */
	update_skipto_cache(chain, map);
	/* 5. swap the maps (under UH_WLOCK + WLOCK) */
	map = swap_map(chain, map, chain->n_rules - n);
	/* 6. Remove all dynamic states originated by deleted rules */
	if (ndyn > 0)
		ipfw_expire_dyn_rules(chain, rt);
	/* 7. now remove the rules deleted from the old map */
	for (i = start; i < end; i++) {
		rule = map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		chain->static_len -= RULEUSIZE0(rule);
		ipfw_reap_add(chain, &reap, rule);
	}
	IPFW_UH_WUNLOCK(chain);

	ipfw_reap_rules(reap);
	if (map != NULL)
		free(map, M_IPFW);
	*ndel = n;
	return (0);
}

/*
 * Changes the set of all rules matching range @rt
 * to the new set.
 *
 * Returns 0 on success.
 */
static int
move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK(chain);

	/*
	 * Move rules with matching parameters to a new set.
	 * This one is much more complex. We have to ensure
	 * that all referenced tables (if any) are referenced
	 * by given rule subset only. Otherwise, we can't move
	 * them to new set and have to return error.
	 */
	if (V_fw_tables_sets != 0) {
		if (ipfw_move_tables_sets(chain, rt, rt->new_set) != 0) {
			IPFW_UH_WUNLOCK(chain);
			return (EBUSY);
		}
	}

	/* XXX: We have to do swap holding WLOCK */
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		rule->set = rt->new_set;
	}

	IPFW_UH_WUNLOCK(chain);

	return (0);
}

/*
 * Clear counters for a specific rule.
 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
 * so we only care that rules do not disappear.
 */
static void
clear_counters(struct ip_fw *rule, int log_only)
{
	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);

	if (log_only == 0)
		IPFW_ZERO_RULE_COUNTER(rule);
	if (l->o.opcode == O_LOG)
		l->log_left = l->max_log;
}

/*
 * Flushes rules counters and/or log values on matching range.
 *
 * Returns number of items cleared.
999 */ 1000 static int 1001 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only) 1002 { 1003 struct ip_fw *rule; 1004 int num; 1005 int i; 1006 1007 num = 0; 1008 rt->flags |= IPFW_RCFLAG_DEFAULT; 1009 1010 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 1011 for (i = 0; i < chain->n_rules; i++) { 1012 rule = chain->map[i]; 1013 if (ipfw_match_range(rule, rt) == 0) 1014 continue; 1015 clear_counters(rule, log_only); 1016 num++; 1017 } 1018 IPFW_UH_WUNLOCK(chain); 1019 1020 return (num); 1021 } 1022 1023 static int 1024 check_range_tlv(ipfw_range_tlv *rt) 1025 { 1026 1027 if (rt->head.length != sizeof(*rt)) 1028 return (1); 1029 if (rt->start_rule > rt->end_rule) 1030 return (1); 1031 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS) 1032 return (1); 1033 1034 if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags) 1035 return (1); 1036 1037 return (0); 1038 } 1039 1040 /* 1041 * Delete rules matching specified parameters 1042 * Data layout (v0)(current): 1043 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1044 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1045 * 1046 * Saves number of deleted rules in ipfw_range_tlv->new_set. 1047 * 1048 * Returns 0 on success. 1049 */ 1050 static int 1051 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1052 struct sockopt_data *sd) 1053 { 1054 ipfw_range_header *rh; 1055 int error, ndel; 1056 1057 if (sd->valsize != sizeof(*rh)) 1058 return (EINVAL); 1059 1060 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1061 1062 if (check_range_tlv(&rh->range) != 0) 1063 return (EINVAL); 1064 1065 ndel = 0; 1066 if ((error = delete_range(chain, &rh->range, &ndel)) != 0) 1067 return (error); 1068 1069 /* Save number of rules deleted */ 1070 rh->range.new_set = ndel; 1071 return (0); 1072 } 1073 1074 /* 1075 * Move rules/sets matching specified parameters 1076 * Data layout (v0)(current): 1077 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1078 * 1079 * Returns 0 on success. 1080 */ 1081 static int 1082 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1083 struct sockopt_data *sd) 1084 { 1085 ipfw_range_header *rh; 1086 1087 if (sd->valsize != sizeof(*rh)) 1088 return (EINVAL); 1089 1090 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1091 1092 if (check_range_tlv(&rh->range) != 0) 1093 return (EINVAL); 1094 1095 return (move_range(chain, &rh->range)); 1096 } 1097 1098 /* 1099 * Clear rule accounting data matching specified parameters 1100 * Data layout (v0)(current): 1101 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1102 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1103 * 1104 * Saves number of cleared rules in ipfw_range_tlv->new_set. 1105 * 1106 * Returns 0 on success. 1107 */ 1108 static int 1109 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1110 struct sockopt_data *sd) 1111 { 1112 ipfw_range_header *rh; 1113 int log_only, num; 1114 char *msg; 1115 1116 if (sd->valsize != sizeof(*rh)) 1117 return (EINVAL); 1118 1119 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1120 1121 if (check_range_tlv(&rh->range) != 0) 1122 return (EINVAL); 1123 1124 log_only = (op3->opcode == IP_FW_XRESETLOG); 1125 1126 num = clear_range(chain, &rh->range, log_only); 1127 1128 if (rh->range.flags & IPFW_RCFLAG_ALL) 1129 msg = log_only ? "All logging counts reset" : 1130 "Accounting cleared"; 1131 else 1132 msg = log_only ? 
"logging count reset" : "cleared"; 1133 1134 if (V_fw_verbose) { 1135 int lev = LOG_SECURITY | LOG_NOTICE; 1136 log(lev, "ipfw: %s.\n", msg); 1137 } 1138 1139 /* Save number of rules cleared */ 1140 rh->range.new_set = num; 1141 return (0); 1142 } 1143 1144 static void 1145 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt) 1146 { 1147 uint32_t v_set; 1148 1149 IPFW_UH_WLOCK_ASSERT(chain); 1150 1151 /* Change enabled/disabled sets mask */ 1152 v_set = (V_set_disable | rt->set) & ~rt->new_set; 1153 v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */ 1154 IPFW_WLOCK(chain); 1155 V_set_disable = v_set; 1156 IPFW_WUNLOCK(chain); 1157 } 1158 1159 static void 1160 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv) 1161 { 1162 struct ip_fw *rule; 1163 int i; 1164 1165 IPFW_UH_WLOCK_ASSERT(chain); 1166 1167 /* Swap or move two sets */ 1168 for (i = 0; i < chain->n_rules - 1; i++) { 1169 rule = chain->map[i]; 1170 if (rule->set == rt->set) 1171 rule->set = rt->new_set; 1172 else if (rule->set == rt->new_set && mv == 0) 1173 rule->set = rt->set; 1174 } 1175 if (V_fw_tables_sets != 0) 1176 ipfw_swap_tables_sets(chain, rt->set, rt->new_set, mv); 1177 } 1178 1179 /* 1180 * Swaps or moves set 1181 * Data layout (v0)(current): 1182 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1183 * 1184 * Returns 0 on success. 1185 */ 1186 static int 1187 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1188 struct sockopt_data *sd) 1189 { 1190 ipfw_range_header *rh; 1191 1192 if (sd->valsize != sizeof(*rh)) 1193 return (EINVAL); 1194 1195 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1196 1197 if (rh->range.head.length != sizeof(ipfw_range_tlv)) 1198 return (1); 1199 1200 IPFW_UH_WLOCK(chain); 1201 switch (op3->opcode) { 1202 case IP_FW_SET_SWAP: 1203 case IP_FW_SET_MOVE: 1204 swap_sets(chain, &rh->range, op3->opcode == IP_FW_SET_MOVE); 1205 break; 1206 case IP_FW_SET_ENABLE: 1207 enable_sets(chain, &rh->range); 1208 break; 1209 } 1210 IPFW_UH_WUNLOCK(chain); 1211 1212 return (0); 1213 } 1214 1215 /** 1216 * Remove all rules with given number, or do set manipulation. 1217 * Assumes chain != NULL && *chain != NULL. 1218 * 1219 * The argument is an uint32_t. 
The low 16 bit are the rule or set number; 1220 * the next 8 bits are the new set; the top 8 bits indicate the command: 1221 * 1222 * 0 delete rules numbered "rulenum" 1223 * 1 delete rules in set "rulenum" 1224 * 2 move rules "rulenum" to set "new_set" 1225 * 3 move rules from set "rulenum" to set "new_set" 1226 * 4 swap sets "rulenum" and "new_set" 1227 * 5 delete rules "rulenum" and set "new_set" 1228 */ 1229 static int 1230 del_entry(struct ip_fw_chain *chain, uint32_t arg) 1231 { 1232 uint32_t num; /* rule number or old_set */ 1233 uint8_t cmd, new_set; 1234 int do_del, ndel; 1235 int error = 0; 1236 ipfw_range_tlv rt; 1237 1238 num = arg & 0xffff; 1239 cmd = (arg >> 24) & 0xff; 1240 new_set = (arg >> 16) & 0xff; 1241 1242 if (cmd > 5 || new_set > RESVD_SET) 1243 return EINVAL; 1244 if (cmd == 0 || cmd == 2 || cmd == 5) { 1245 if (num >= IPFW_DEFAULT_RULE) 1246 return EINVAL; 1247 } else { 1248 if (num > RESVD_SET) /* old_set */ 1249 return EINVAL; 1250 } 1251 1252 /* Convert old requests into new representation */ 1253 memset(&rt, 0, sizeof(rt)); 1254 rt.start_rule = num; 1255 rt.end_rule = num; 1256 rt.set = num; 1257 rt.new_set = new_set; 1258 do_del = 0; 1259 1260 switch (cmd) { 1261 case 0: /* delete rules numbered "rulenum" */ 1262 if (num == 0) 1263 rt.flags |= IPFW_RCFLAG_ALL; 1264 else 1265 rt.flags |= IPFW_RCFLAG_RANGE; 1266 do_del = 1; 1267 break; 1268 case 1: /* delete rules in set "rulenum" */ 1269 rt.flags |= IPFW_RCFLAG_SET; 1270 do_del = 1; 1271 break; 1272 case 5: /* delete rules "rulenum" and set "new_set" */ 1273 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET; 1274 rt.set = new_set; 1275 rt.new_set = 0; 1276 do_del = 1; 1277 break; 1278 case 2: /* move rules "rulenum" to set "new_set" */ 1279 rt.flags |= IPFW_RCFLAG_RANGE; 1280 break; 1281 case 3: /* move rules from set "rulenum" to set "new_set" */ 1282 IPFW_UH_WLOCK(chain); 1283 swap_sets(chain, &rt, 1); 1284 IPFW_UH_WUNLOCK(chain); 1285 return (0); 1286 case 4: /* swap sets "rulenum" and "new_set" */ 1287 IPFW_UH_WLOCK(chain); 1288 swap_sets(chain, &rt, 0); 1289 IPFW_UH_WUNLOCK(chain); 1290 return (0); 1291 default: 1292 return (ENOTSUP); 1293 } 1294 1295 if (do_del != 0) { 1296 if ((error = delete_range(chain, &rt, &ndel)) != 0) 1297 return (error); 1298 1299 if (ndel == 0 && (cmd != 1 && num != 0)) 1300 return (EINVAL); 1301 1302 return (0); 1303 } 1304 1305 return (move_range(chain, &rt)); 1306 } 1307 1308 /** 1309 * Reset some or all counters on firewall rules. 1310 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number, 1311 * the next 8 bits are the set number, the top 8 bits are the command: 1312 * 0 work with rules from all set's; 1313 * 1 work with rules only from specified set. 1314 * Specified rule number is zero if we want to clear all entries. 1315 * log_only is 1 if we only want to reset logs, zero otherwise. 1316 */ 1317 static int 1318 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only) 1319 { 1320 struct ip_fw *rule; 1321 char *msg; 1322 int i; 1323 1324 uint16_t rulenum = arg & 0xffff; 1325 uint8_t set = (arg >> 16) & 0xff; 1326 uint8_t cmd = (arg >> 24) & 0xff; 1327 1328 if (cmd > 1) 1329 return (EINVAL); 1330 if (cmd == 1 && set > RESVD_SET) 1331 return (EINVAL); 1332 1333 IPFW_UH_RLOCK(chain); 1334 if (rulenum == 0) { 1335 V_norule_counter = 0; 1336 for (i = 0; i < chain->n_rules; i++) { 1337 rule = chain->map[i]; 1338 /* Skip rules not in our set. 
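 * (only when cmd == 1, i.e. a single-set clear request)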
*/ 1339 if (cmd == 1 && rule->set != set) 1340 continue; 1341 clear_counters(rule, log_only); 1342 } 1343 msg = log_only ? "All logging counts reset" : 1344 "Accounting cleared"; 1345 } else { 1346 int cleared = 0; 1347 for (i = 0; i < chain->n_rules; i++) { 1348 rule = chain->map[i]; 1349 if (rule->rulenum == rulenum) { 1350 if (cmd == 0 || rule->set == set) 1351 clear_counters(rule, log_only); 1352 cleared = 1; 1353 } 1354 if (rule->rulenum > rulenum) 1355 break; 1356 } 1357 if (!cleared) { /* we did not find any matching rules */ 1358 IPFW_UH_RUNLOCK(chain); 1359 return (EINVAL); 1360 } 1361 msg = log_only ? "logging count reset" : "cleared"; 1362 } 1363 IPFW_UH_RUNLOCK(chain); 1364 1365 if (V_fw_verbose) { 1366 int lev = LOG_SECURITY | LOG_NOTICE; 1367 1368 if (rulenum) 1369 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg); 1370 else 1371 log(lev, "ipfw: %s.\n", msg); 1372 } 1373 return (0); 1374 } 1375 1376 1377 /* 1378 * Check rule head in FreeBSD11 format 1379 * 1380 */ 1381 static int 1382 check_ipfw_rule1(struct ip_fw_rule *rule, int size, 1383 struct rule_check_info *ci) 1384 { 1385 int l; 1386 1387 if (size < sizeof(*rule)) { 1388 printf("ipfw: rule too short\n"); 1389 return (EINVAL); 1390 } 1391 1392 /* Check for valid cmd_len */ 1393 l = roundup2(RULESIZE(rule), sizeof(uint64_t)); 1394 if (l != size) { 1395 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1396 return (EINVAL); 1397 } 1398 if (rule->act_ofs >= rule->cmd_len) { 1399 printf("ipfw: bogus action offset (%u > %u)\n", 1400 rule->act_ofs, rule->cmd_len - 1); 1401 return (EINVAL); 1402 } 1403 1404 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1405 return (EINVAL); 1406 1407 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1408 } 1409 1410 /* 1411 * Check rule head in FreeBSD8 format 1412 * 1413 */ 1414 static int 1415 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 1416 struct rule_check_info *ci) 1417 { 1418 int l; 1419 1420 if (size < sizeof(*rule)) { 1421 printf("ipfw: rule too short\n"); 1422 return (EINVAL); 1423 } 1424 1425 /* Check for valid cmd_len */ 1426 l = sizeof(*rule) + rule->cmd_len * 4 - 4; 1427 if (l != size) { 1428 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1429 return (EINVAL); 1430 } 1431 if (rule->act_ofs >= rule->cmd_len) { 1432 printf("ipfw: bogus action offset (%u > %u)\n", 1433 rule->act_ofs, rule->cmd_len - 1); 1434 return (EINVAL); 1435 } 1436 1437 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1438 return (EINVAL); 1439 1440 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1441 } 1442 1443 static int 1444 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci) 1445 { 1446 int cmdlen, l; 1447 int have_action; 1448 1449 have_action = 0; 1450 1451 /* 1452 * Now go for the individual checks. Very simple ones, basically only 1453 * instruction sizes. 
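 *
 * F_LEN() yields the opcode length in 32-bit words, so each case below
 * simply compares cmdlen against the F_INSN_SIZE() of the expected
 * instruction layout for that opcode.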
1454 */ 1455 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) { 1456 cmdlen = F_LEN(cmd); 1457 if (cmdlen > l) { 1458 printf("ipfw: opcode %d size truncated\n", 1459 cmd->opcode); 1460 return EINVAL; 1461 } 1462 switch (cmd->opcode) { 1463 case O_PROBE_STATE: 1464 case O_KEEP_STATE: 1465 case O_PROTO: 1466 case O_IP_SRC_ME: 1467 case O_IP_DST_ME: 1468 case O_LAYER2: 1469 case O_IN: 1470 case O_FRAG: 1471 case O_DIVERTED: 1472 case O_IPOPT: 1473 case O_IPTOS: 1474 case O_IPPRECEDENCE: 1475 case O_IPVER: 1476 case O_SOCKARG: 1477 case O_TCPFLAGS: 1478 case O_TCPOPTS: 1479 case O_ESTAB: 1480 case O_VERREVPATH: 1481 case O_VERSRCREACH: 1482 case O_ANTISPOOF: 1483 case O_IPSEC: 1484 #ifdef INET6 1485 case O_IP6_SRC_ME: 1486 case O_IP6_DST_ME: 1487 case O_EXT_HDR: 1488 case O_IP6: 1489 #endif 1490 case O_IP4: 1491 case O_TAG: 1492 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1493 goto bad_size; 1494 break; 1495 1496 case O_EXTERNAL_ACTION: 1497 if (cmd->arg1 == 0 || 1498 cmdlen != F_INSN_SIZE(ipfw_insn)) { 1499 printf("ipfw: invalid external " 1500 "action opcode\n"); 1501 return (EINVAL); 1502 } 1503 ci->object_opcodes++; 1504 /* Do we have O_EXTERNAL_INSTANCE opcode? */ 1505 if (l != cmdlen) { 1506 l -= cmdlen; 1507 cmd += cmdlen; 1508 cmdlen = F_LEN(cmd); 1509 if (cmd->opcode != O_EXTERNAL_INSTANCE) { 1510 printf("ipfw: invalid opcode " 1511 "next to external action %u\n", 1512 cmd->opcode); 1513 return (EINVAL); 1514 } 1515 if (cmd->arg1 == 0 || 1516 cmdlen != F_INSN_SIZE(ipfw_insn)) { 1517 printf("ipfw: invalid external " 1518 "action instance opcode\n"); 1519 return (EINVAL); 1520 } 1521 ci->object_opcodes++; 1522 } 1523 goto check_action; 1524 1525 case O_FIB: 1526 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1527 goto bad_size; 1528 if (cmd->arg1 >= rt_numfibs) { 1529 printf("ipfw: invalid fib number %d\n", 1530 cmd->arg1); 1531 return EINVAL; 1532 } 1533 break; 1534 1535 case O_SETFIB: 1536 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1537 goto bad_size; 1538 if ((cmd->arg1 != IP_FW_TARG) && 1539 ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) { 1540 printf("ipfw: invalid fib number %d\n", 1541 cmd->arg1 & 0x7FFF); 1542 return EINVAL; 1543 } 1544 goto check_action; 1545 1546 case O_UID: 1547 case O_GID: 1548 case O_JAIL: 1549 case O_IP_SRC: 1550 case O_IP_DST: 1551 case O_TCPSEQ: 1552 case O_TCPACK: 1553 case O_PROB: 1554 case O_ICMPTYPE: 1555 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1556 goto bad_size; 1557 break; 1558 1559 case O_LIMIT: 1560 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 1561 goto bad_size; 1562 break; 1563 1564 case O_LOG: 1565 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 1566 goto bad_size; 1567 1568 ((ipfw_insn_log *)cmd)->log_left = 1569 ((ipfw_insn_log *)cmd)->max_log; 1570 1571 break; 1572 1573 case O_IP_SRC_MASK: 1574 case O_IP_DST_MASK: 1575 /* only odd command lengths */ 1576 if ((cmdlen & 1) == 0) 1577 goto bad_size; 1578 break; 1579 1580 case O_IP_SRC_SET: 1581 case O_IP_DST_SET: 1582 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 1583 printf("ipfw: invalid set size %d\n", 1584 cmd->arg1); 1585 return EINVAL; 1586 } 1587 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1588 (cmd->arg1+31)/32 ) 1589 goto bad_size; 1590 break; 1591 1592 case O_IP_SRC_LOOKUP: 1593 case O_IP_DST_LOOKUP: 1594 if (cmd->arg1 >= V_fw_tables_max) { 1595 printf("ipfw: invalid table number %d\n", 1596 cmd->arg1); 1597 return (EINVAL); 1598 } 1599 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1600 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 && 1601 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1602 goto bad_size; 1603 ci->object_opcodes++; 1604 
break; 1605 case O_IP_FLOW_LOOKUP: 1606 if (cmd->arg1 >= V_fw_tables_max) { 1607 printf("ipfw: invalid table number %d\n", 1608 cmd->arg1); 1609 return (EINVAL); 1610 } 1611 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1612 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1613 goto bad_size; 1614 ci->object_opcodes++; 1615 break; 1616 case O_MACADDR2: 1617 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 1618 goto bad_size; 1619 break; 1620 1621 case O_NOP: 1622 case O_IPID: 1623 case O_IPTTL: 1624 case O_IPLEN: 1625 case O_TCPDATALEN: 1626 case O_TCPWIN: 1627 case O_TAGGED: 1628 if (cmdlen < 1 || cmdlen > 31) 1629 goto bad_size; 1630 break; 1631 1632 case O_DSCP: 1633 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1) 1634 goto bad_size; 1635 break; 1636 1637 case O_MAC_TYPE: 1638 case O_IP_SRCPORT: 1639 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 1640 if (cmdlen < 2 || cmdlen > 31) 1641 goto bad_size; 1642 break; 1643 1644 case O_RECV: 1645 case O_XMIT: 1646 case O_VIA: 1647 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 1648 goto bad_size; 1649 ci->object_opcodes++; 1650 break; 1651 1652 case O_ALTQ: 1653 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq)) 1654 goto bad_size; 1655 break; 1656 1657 case O_PIPE: 1658 case O_QUEUE: 1659 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1660 goto bad_size; 1661 goto check_action; 1662 1663 case O_FORWARD_IP: 1664 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) 1665 goto bad_size; 1666 goto check_action; 1667 #ifdef INET6 1668 case O_FORWARD_IP6: 1669 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6)) 1670 goto bad_size; 1671 goto check_action; 1672 #endif /* INET6 */ 1673 1674 case O_DIVERT: 1675 case O_TEE: 1676 if (ip_divert_ptr == NULL) 1677 return EINVAL; 1678 else 1679 goto check_size; 1680 case O_NETGRAPH: 1681 case O_NGTEE: 1682 if (ng_ipfw_input_p == NULL) 1683 return EINVAL; 1684 else 1685 goto check_size; 1686 case O_NAT: 1687 if (!IPFW_NAT_LOADED) 1688 return EINVAL; 1689 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat)) 1690 goto bad_size; 1691 goto check_action; 1692 case O_FORWARD_MAC: /* XXX not implemented yet */ 1693 case O_CHECK_STATE: 1694 case O_COUNT: 1695 case O_ACCEPT: 1696 case O_DENY: 1697 case O_REJECT: 1698 case O_SETDSCP: 1699 #ifdef INET6 1700 case O_UNREACH6: 1701 #endif 1702 case O_SKIPTO: 1703 case O_REASS: 1704 case O_CALLRETURN: 1705 check_size: 1706 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1707 goto bad_size; 1708 check_action: 1709 if (have_action) { 1710 printf("ipfw: opcode %d, multiple actions" 1711 " not allowed\n", 1712 cmd->opcode); 1713 return (EINVAL); 1714 } 1715 have_action = 1; 1716 if (l != cmdlen) { 1717 printf("ipfw: opcode %d, action must be" 1718 " last opcode\n", 1719 cmd->opcode); 1720 return (EINVAL); 1721 } 1722 break; 1723 #ifdef INET6 1724 case O_IP6_SRC: 1725 case O_IP6_DST: 1726 if (cmdlen != F_INSN_SIZE(struct in6_addr) + 1727 F_INSN_SIZE(ipfw_insn)) 1728 goto bad_size; 1729 break; 1730 1731 case O_FLOW6ID: 1732 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1733 ((ipfw_insn_u32 *)cmd)->o.arg1) 1734 goto bad_size; 1735 break; 1736 1737 case O_IP6_SRC_MASK: 1738 case O_IP6_DST_MASK: 1739 if ( !(cmdlen & 1) || cmdlen > 127) 1740 goto bad_size; 1741 break; 1742 case O_ICMP6TYPE: 1743 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) ) 1744 goto bad_size; 1745 break; 1746 #endif 1747 1748 default: 1749 switch (cmd->opcode) { 1750 #ifndef INET6 1751 case O_IP6_SRC_ME: 1752 case O_IP6_DST_ME: 1753 case O_EXT_HDR: 1754 case O_IP6: 1755 case O_UNREACH6: 1756 case O_IP6_SRC: 1757 case O_IP6_DST: 1758 case O_FLOW6ID: 1759 case O_IP6_SRC_MASK: 1760 case 
O_IP6_DST_MASK: 1761 case O_ICMP6TYPE: 1762 printf("ipfw: no IPv6 support in kernel\n"); 1763 return (EPROTONOSUPPORT); 1764 #endif 1765 default: 1766 printf("ipfw: opcode %d, unknown opcode\n", 1767 cmd->opcode); 1768 return (EINVAL); 1769 } 1770 } 1771 } 1772 if (have_action == 0) { 1773 printf("ipfw: missing action\n"); 1774 return (EINVAL); 1775 } 1776 return 0; 1777 1778 bad_size: 1779 printf("ipfw: opcode %d size %d wrong\n", 1780 cmd->opcode, cmdlen); 1781 return (EINVAL); 1782 } 1783 1784 1785 /* 1786 * Translation of requests for compatibility with FreeBSD 7.2/8. 1787 * a static variable tells us if we have an old client from userland, 1788 * and if necessary we translate requests and responses between the 1789 * two formats. 1790 */ 1791 static int is7 = 0; 1792 1793 struct ip_fw7 { 1794 struct ip_fw7 *next; /* linked list of rules */ 1795 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */ 1796 /* 'next_rule' is used to pass up 'set_disable' status */ 1797 1798 uint16_t act_ofs; /* offset of action in 32-bit units */ 1799 uint16_t cmd_len; /* # of 32-bit words in cmd */ 1800 uint16_t rulenum; /* rule number */ 1801 uint8_t set; /* rule set (0..31) */ 1802 // #define RESVD_SET 31 /* set for default and persistent rules */ 1803 uint8_t _pad; /* padding */ 1804 // uint32_t id; /* rule id, only in v.8 */ 1805 /* These fields are present in all rules. */ 1806 uint64_t pcnt; /* Packet counter */ 1807 uint64_t bcnt; /* Byte counter */ 1808 uint32_t timestamp; /* tv_sec of last match */ 1809 1810 ipfw_insn cmd[1]; /* storage for commands */ 1811 }; 1812 1813 static int convert_rule_to_7(struct ip_fw_rule0 *rule); 1814 static int convert_rule_to_8(struct ip_fw_rule0 *rule); 1815 1816 #ifndef RULESIZE7 1817 #define RULESIZE7(rule) (sizeof(struct ip_fw7) + \ 1818 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4) 1819 #endif 1820 1821 1822 /* 1823 * Copy the static and dynamic rules to the supplied buffer 1824 * and return the amount of space actually used. 1825 * Must be run under IPFW_UH_RLOCK 1826 */ 1827 static size_t 1828 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space) 1829 { 1830 char *bp = buf; 1831 char *ep = bp + space; 1832 struct ip_fw *rule; 1833 struct ip_fw_rule0 *dst; 1834 int error, i, l, warnflag; 1835 time_t boot_seconds; 1836 1837 warnflag = 0; 1838 1839 boot_seconds = boottime.tv_sec; 1840 for (i = 0; i < chain->n_rules; i++) { 1841 rule = chain->map[i]; 1842 1843 if (is7) { 1844 /* Convert rule to FreeBSd 7.2 format */ 1845 l = RULESIZE7(rule); 1846 if (bp + l + sizeof(uint32_t) <= ep) { 1847 bcopy(rule, bp, l + sizeof(uint32_t)); 1848 error = set_legacy_obj_kidx(chain, 1849 (struct ip_fw_rule0 *)bp); 1850 if (error != 0) 1851 return (0); 1852 error = convert_rule_to_7((struct ip_fw_rule0 *) bp); 1853 if (error) 1854 return 0; /*XXX correct? */ 1855 /* 1856 * XXX HACK. Store the disable mask in the "next" 1857 * pointer in a wild attempt to keep the ABI the same. 1858 * Why do we do this on EVERY rule? 
1859 */ 1860 bcopy(&V_set_disable, 1861 &(((struct ip_fw7 *)bp)->next_rule), 1862 sizeof(V_set_disable)); 1863 if (((struct ip_fw7 *)bp)->timestamp) 1864 ((struct ip_fw7 *)bp)->timestamp += boot_seconds; 1865 bp += l; 1866 } 1867 continue; /* go to next rule */ 1868 } 1869 1870 l = RULEUSIZE0(rule); 1871 if (bp + l > ep) { /* should not happen */ 1872 printf("overflow dumping static rules\n"); 1873 break; 1874 } 1875 dst = (struct ip_fw_rule0 *)bp; 1876 export_rule0(rule, dst, l); 1877 error = set_legacy_obj_kidx(chain, dst); 1878 1879 /* 1880 * XXX HACK. Store the disable mask in the "next" 1881 * pointer in a wild attempt to keep the ABI the same. 1882 * Why do we do this on EVERY rule? 1883 * 1884 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask 1885 * so we need to fail _after_ saving at least one mask. 1886 */ 1887 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable)); 1888 if (dst->timestamp) 1889 dst->timestamp += boot_seconds; 1890 bp += l; 1891 1892 if (error != 0) { 1893 if (error == 2) { 1894 /* Non-fatal table rewrite error. */ 1895 warnflag = 1; 1896 continue; 1897 } 1898 printf("Stop on rule %d. Fail to convert table\n", 1899 rule->rulenum); 1900 break; 1901 } 1902 } 1903 if (warnflag != 0) 1904 printf("ipfw: process %s is using legacy interfaces," 1905 " consider rebuilding\n", ""); 1906 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */ 1907 return (bp - (char *)buf); 1908 } 1909 1910 1911 struct dump_args { 1912 uint32_t b; /* start rule */ 1913 uint32_t e; /* end rule */ 1914 uint32_t rcount; /* number of rules */ 1915 uint32_t rsize; /* rules size */ 1916 uint32_t tcount; /* number of tables */ 1917 int rcounters; /* counters */ 1918 }; 1919 1920 void 1921 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv) 1922 { 1923 1924 ntlv->head.type = no->etlv; 1925 ntlv->head.length = sizeof(*ntlv); 1926 ntlv->idx = no->kidx; 1927 strlcpy(ntlv->name, no->name, sizeof(ntlv->name)); 1928 } 1929 1930 /* 1931 * Export named object info in instance @ni, identified by @kidx 1932 * to ipfw_obj_ntlv. TLV is allocated from @sd space. 1933 * 1934 * Returns 0 on success. 1935 */ 1936 static int 1937 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 1938 struct sockopt_data *sd) 1939 { 1940 struct named_object *no; 1941 ipfw_obj_ntlv *ntlv; 1942 1943 no = ipfw_objhash_lookup_kidx(ni, kidx); 1944 KASSERT(no != NULL, ("invalid object kernel index passed")); 1945 1946 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 1947 if (ntlv == NULL) 1948 return (ENOMEM); 1949 1950 ipfw_export_obj_ntlv(no, ntlv); 1951 return (0); 1952 } 1953 1954 /* 1955 * Dumps static rules with table TLVs in buffer @sd. 1956 * 1957 * Returns 0 on success. 
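 *
 * Layout sketch of the produced data: when any tables are referenced,
 * an ipfw_obj_ctlv(IPFW_TLV_TBLNAME_LIST) followed by one ipfw_obj_ntlv
 * per referenced object comes first, then an
 * ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) followed by the exported rules.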
1958 */ 1959 static int 1960 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da, 1961 uint32_t *bmask, struct sockopt_data *sd) 1962 { 1963 int error; 1964 int i, l; 1965 uint32_t tcount; 1966 ipfw_obj_ctlv *ctlv; 1967 struct ip_fw *krule; 1968 struct namedobj_instance *ni; 1969 caddr_t dst; 1970 1971 /* Dump table names first (if any) */ 1972 if (da->tcount > 0) { 1973 /* Header first */ 1974 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 1975 if (ctlv == NULL) 1976 return (ENOMEM); 1977 ctlv->head.type = IPFW_TLV_TBLNAME_LIST; 1978 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) + 1979 sizeof(*ctlv); 1980 ctlv->count = da->tcount; 1981 ctlv->objsize = sizeof(ipfw_obj_ntlv); 1982 } 1983 1984 i = 0; 1985 tcount = da->tcount; 1986 ni = ipfw_get_table_objhash(chain); 1987 while (tcount > 0) { 1988 if ((bmask[i / 32] & (1 << (i % 32))) == 0) { 1989 i++; 1990 continue; 1991 } 1992 1993 /* Jump to shared named object bitmask */ 1994 if (i >= IPFW_TABLES_MAX) { 1995 ni = CHAIN_TO_SRV(chain); 1996 i -= IPFW_TABLES_MAX; 1997 bmask += IPFW_TABLES_MAX / 32; 1998 } 1999 2000 if ((error = export_objhash_ntlv(ni, i, sd)) != 0) 2001 return (error); 2002 2003 i++; 2004 tcount--; 2005 } 2006 2007 /* Dump rules */ 2008 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 2009 if (ctlv == NULL) 2010 return (ENOMEM); 2011 ctlv->head.type = IPFW_TLV_RULE_LIST; 2012 ctlv->head.length = da->rsize + sizeof(*ctlv); 2013 ctlv->count = da->rcount; 2014 2015 for (i = da->b; i < da->e; i++) { 2016 krule = chain->map[i]; 2017 2018 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv); 2019 if (da->rcounters != 0) 2020 l += sizeof(struct ip_fw_bcounter); 2021 dst = (caddr_t)ipfw_get_sopt_space(sd, l); 2022 if (dst == NULL) 2023 return (ENOMEM); 2024 2025 export_rule1(krule, dst, l, da->rcounters); 2026 } 2027 2028 return (0); 2029 } 2030 2031 /* 2032 * Marks every object index used in @rule with bit in @bmask. 2033 * Used to generate bitmask of referenced tables/objects for given ruleset 2034 * or its part. 2035 * 2036 * Returns number of newly-referenced objects. 2037 */ 2038 static int 2039 mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule, 2040 uint32_t *bmask) 2041 { 2042 struct opcode_obj_rewrite *rw; 2043 ipfw_insn *cmd; 2044 int bidx, cmdlen, l, count; 2045 uint16_t kidx; 2046 uint8_t subtype; 2047 2048 l = rule->cmd_len; 2049 cmd = rule->cmd; 2050 cmdlen = 0; 2051 count = 0; 2052 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2053 cmdlen = F_LEN(cmd); 2054 2055 rw = find_op_rw(cmd, &kidx, &subtype); 2056 if (rw == NULL) 2057 continue; 2058 2059 bidx = kidx / 32; 2060 /* 2061 * Maintain separate bitmasks for table and 2062 * non-table objects. 2063 */ 2064 if (rw->etlv != IPFW_TLV_TBL_NAME) 2065 bidx += IPFW_TABLES_MAX / 32; 2066 2067 if ((bmask[bidx] & (1 << (kidx % 32))) == 0) 2068 count++; 2069 2070 bmask[bidx] |= 1 << (kidx % 32); 2071 } 2072 2073 return (count); 2074 } 2075 2076 /* 2077 * Dumps requested objects data 2078 * Data layout (version 0)(current): 2079 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags 2080 * size = ipfw_cfg_lheader.size 2081 * Reply: [ ipfw_cfg_lheader 2082 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 2083 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) 2084 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ] 2085 * ] (optional) 2086 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional) 2087 * ] 2088 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize. 
2089 * The rest (size, count) are set to zero and needs to be ignored. 2090 * 2091 * Returns 0 on success. 2092 */ 2093 static int 2094 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2095 struct sockopt_data *sd) 2096 { 2097 ipfw_cfg_lheader *hdr; 2098 struct ip_fw *rule; 2099 size_t sz, rnum; 2100 uint32_t hdr_flags; 2101 int error, i; 2102 struct dump_args da; 2103 uint32_t *bmask; 2104 2105 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr)); 2106 if (hdr == NULL) 2107 return (EINVAL); 2108 2109 error = 0; 2110 bmask = NULL; 2111 /* Allocate needed state. Note we allocate 2xspace mask, for table&srv */ 2112 if (hdr->flags & IPFW_CFG_GET_STATIC) 2113 bmask = malloc(IPFW_TABLES_MAX / 4, M_TEMP, M_WAITOK | M_ZERO); 2114 2115 IPFW_UH_RLOCK(chain); 2116 2117 /* 2118 * STAGE 1: Determine size/count for objects in range. 2119 * Prepare used tables bitmask. 2120 */ 2121 sz = sizeof(ipfw_cfg_lheader); 2122 memset(&da, 0, sizeof(da)); 2123 2124 da.b = 0; 2125 da.e = chain->n_rules; 2126 2127 if (hdr->end_rule != 0) { 2128 /* Handle custom range */ 2129 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE) 2130 rnum = IPFW_DEFAULT_RULE; 2131 da.b = ipfw_find_rule(chain, rnum, 0); 2132 rnum = hdr->end_rule; 2133 rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE; 2134 da.e = ipfw_find_rule(chain, rnum, 0) + 1; 2135 } 2136 2137 if (hdr->flags & IPFW_CFG_GET_STATIC) { 2138 for (i = da.b; i < da.e; i++) { 2139 rule = chain->map[i]; 2140 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv); 2141 da.rcount++; 2142 /* Update bitmask of used objects for given range */ 2143 da.tcount += mark_object_kidx(chain, rule, bmask); 2144 } 2145 /* Add counters if requested */ 2146 if (hdr->flags & IPFW_CFG_GET_COUNTERS) { 2147 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount; 2148 da.rcounters = 1; 2149 } 2150 2151 if (da.tcount > 0) 2152 sz += da.tcount * sizeof(ipfw_obj_ntlv) + 2153 sizeof(ipfw_obj_ctlv); 2154 sz += da.rsize + sizeof(ipfw_obj_ctlv); 2155 } 2156 2157 if (hdr->flags & IPFW_CFG_GET_STATES) 2158 sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) + 2159 sizeof(ipfw_obj_ctlv); 2160 2161 2162 /* 2163 * Fill header anyway. 2164 * Note we have to save header fields to stable storage 2165 * buffer inside @sd can be flushed after dumping rules 2166 */ 2167 hdr->size = sz; 2168 hdr->set_mask = ~V_set_disable; 2169 hdr_flags = hdr->flags; 2170 hdr = NULL; 2171 2172 if (sd->valsize < sz) { 2173 error = ENOMEM; 2174 goto cleanup; 2175 } 2176 2177 /* STAGE2: Store actual data */ 2178 if (hdr_flags & IPFW_CFG_GET_STATIC) { 2179 error = dump_static_rules(chain, &da, bmask, sd); 2180 if (error != 0) 2181 goto cleanup; 2182 } 2183 2184 if (hdr_flags & IPFW_CFG_GET_STATES) 2185 error = ipfw_dump_states(chain, sd); 2186 2187 cleanup: 2188 IPFW_UH_RUNLOCK(chain); 2189 2190 if (bmask != NULL) 2191 free(bmask, M_TEMP); 2192 2193 return (error); 2194 } 2195 2196 int 2197 ipfw_check_object_name_generic(const char *name) 2198 { 2199 int nsize; 2200 2201 nsize = sizeof(((ipfw_obj_ntlv *)0)->name); 2202 if (strnlen(name, nsize) == nsize) 2203 return (EINVAL); 2204 if (name[0] == '\0') 2205 return (EINVAL); 2206 return (0); 2207 } 2208 2209 /* 2210 * Creates non-existent objects referenced by rule. 2211 * 2212 * Return 0 on success. 
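 *
 * If create_object() fails for any entry, every reference already taken
 * for this rule is dropped via unref_oib_objects() before the error is
 * returned.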
2213 */ 2214 int 2215 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd, 2216 struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti) 2217 { 2218 struct opcode_obj_rewrite *rw; 2219 struct obj_idx *p; 2220 uint16_t kidx; 2221 int error; 2222 2223 /* 2224 * Compatibility stuff: do actual creation for non-existing, 2225 * but referenced objects. 2226 */ 2227 for (p = oib; p < pidx; p++) { 2228 if (p->kidx != 0) 2229 continue; 2230 2231 ti->uidx = p->uidx; 2232 ti->type = p->type; 2233 ti->atype = 0; 2234 2235 rw = find_op_rw(cmd + p->off, NULL, NULL); 2236 KASSERT(rw != NULL, ("Unable to find handler for op %d", 2237 (cmd + p->off)->opcode)); 2238 2239 error = rw->create_object(ch, ti, &kidx); 2240 if (error == 0) { 2241 p->kidx = kidx; 2242 continue; 2243 } 2244 2245 /* 2246 * Error happened. We have to rollback everything. 2247 * Drop all already acquired references. 2248 */ 2249 IPFW_UH_WLOCK(ch); 2250 unref_oib_objects(ch, cmd, oib, pidx); 2251 IPFW_UH_WUNLOCK(ch); 2252 2253 return (error); 2254 } 2255 2256 return (0); 2257 } 2258 2259 /* 2260 * Compatibility function for old ipfw(8) binaries. 2261 * Rewrites table/nat kernel indices with userland ones. 2262 * Convert tables matching '/^\d+$/' to their atoi() value. 2263 * Use number 65535 for other tables. 2264 * 2265 * Returns 0 on success. 2266 */ 2267 static int 2268 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule) 2269 { 2270 struct opcode_obj_rewrite *rw; 2271 struct named_object *no; 2272 ipfw_insn *cmd; 2273 char *end; 2274 long val; 2275 int cmdlen, error, l; 2276 uint16_t kidx, uidx; 2277 uint8_t subtype; 2278 2279 error = 0; 2280 2281 l = rule->cmd_len; 2282 cmd = rule->cmd; 2283 cmdlen = 0; 2284 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2285 cmdlen = F_LEN(cmd); 2286 2287 /* Check if is index in given opcode */ 2288 rw = find_op_rw(cmd, &kidx, &subtype); 2289 if (rw == NULL) 2290 continue; 2291 2292 /* Try to find referenced kernel object */ 2293 no = rw->find_bykidx(ch, kidx); 2294 if (no == NULL) 2295 continue; 2296 2297 val = strtol(no->name, &end, 10); 2298 if (*end == '\0' && val < 65535) { 2299 uidx = val; 2300 } else { 2301 2302 /* 2303 * We are called via legacy opcode. 2304 * Save error and show table as fake number 2305 * not to make ipfw(8) hang. 2306 */ 2307 uidx = 65535; 2308 error = 2; 2309 } 2310 2311 rw->update(cmd, uidx); 2312 } 2313 2314 return (error); 2315 } 2316 2317 2318 /* 2319 * Unreferences all already-referenced objects in given @cmd rule, 2320 * using information in @oib. 2321 * 2322 * Used to rollback partially converted rule on error. 2323 */ 2324 static void 2325 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib, 2326 struct obj_idx *end) 2327 { 2328 struct opcode_obj_rewrite *rw; 2329 struct named_object *no; 2330 struct obj_idx *p; 2331 2332 IPFW_UH_WLOCK_ASSERT(ch); 2333 2334 for (p = oib; p < end; p++) { 2335 if (p->kidx == 0) 2336 continue; 2337 2338 rw = find_op_rw(cmd + p->off, NULL, NULL); 2339 KASSERT(rw != NULL, ("Unable to find handler for op %d", 2340 (cmd + p->off)->opcode)); 2341 2342 /* Find & unref by existing idx */ 2343 no = rw->find_bykidx(ch, p->kidx); 2344 KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx)); 2345 no->refcnt--; 2346 } 2347 } 2348 2349 /* 2350 * Remove references from every object used in @rule. 2351 * Used at rule removal code. 
2352 */ 2353 static void 2354 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule) 2355 { 2356 struct opcode_obj_rewrite *rw; 2357 struct named_object *no; 2358 ipfw_insn *cmd; 2359 int cmdlen, l; 2360 uint16_t kidx; 2361 uint8_t subtype; 2362 2363 IPFW_UH_WLOCK_ASSERT(ch); 2364 2365 l = rule->cmd_len; 2366 cmd = rule->cmd; 2367 cmdlen = 0; 2368 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2369 cmdlen = F_LEN(cmd); 2370 2371 rw = find_op_rw(cmd, &kidx, &subtype); 2372 if (rw == NULL) 2373 continue; 2374 no = rw->find_bykidx(ch, kidx); 2375 2376 KASSERT(no != NULL, ("table id %d not found", kidx)); 2377 KASSERT(no->subtype == subtype, 2378 ("wrong type %d (%d) for table id %d", 2379 no->subtype, subtype, kidx)); 2380 KASSERT(no->refcnt > 0, ("refcount for table %d is %d", 2381 kidx, no->refcnt)); 2382 2383 if (no->refcnt == 1 && rw->destroy_object != NULL) 2384 rw->destroy_object(ch, no); 2385 else 2386 no->refcnt--; 2387 } 2388 } 2389 2390 2391 /* 2392 * Find and reference object (if any) stored in instruction @cmd. 2393 * 2394 * Saves object info in @pidx, sets 2395 * - @unresolved to 1 if object should exist but was not found 2396 * 2397 * Returns non-zero value in case of error. 2398 */ 2399 static int 2400 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti, 2401 struct obj_idx *pidx, int *unresolved) 2402 { 2403 struct named_object *no; 2404 struct opcode_obj_rewrite *rw; 2405 int error; 2406 2407 /* Check if this opcode is a candidate for rewrite */ 2408 rw = find_op_rw(cmd, &ti->uidx, &ti->type); 2409 if (rw == NULL) 2410 return (0); 2411 2412 /* Need to rewrite. Save necessary fields */ 2413 pidx->uidx = ti->uidx; 2414 pidx->type = ti->type; 2415 2416 /* Try to find referenced kernel object */ 2417 error = rw->find_byname(ch, ti, &no); 2418 if (error != 0) 2419 return (error); 2420 if (no == NULL) { 2421 /* 2422 * Report the unresolved object for automatic 2423 * creation. 2424 */ 2425 *unresolved = 1; 2426 return (0); 2427 } 2428 2429 /* Found. Bump refcount and update kidx. */ 2430 no->refcnt++; 2431 rw->update(cmd, no->kidx); 2432 return (0); 2433 } 2434 2435 /* 2436 * Finds and bumps refcount for objects referenced by given @rule. 2437 * Auto-creates non-existing tables. 2438 * Fills in @oib array with userland/kernel indexes. 2439 * 2440 * Returns 0 on success. 2441 */ 2442 static int 2443 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 2444 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti) 2445 { 2446 struct obj_idx *pidx; 2447 ipfw_insn *cmd; 2448 int cmdlen, error, l, unresolved; 2449 2450 pidx = oib; 2451 l = rule->cmd_len; 2452 cmd = rule->cmd; 2453 cmdlen = 0; 2454 error = 0; 2455 2456 IPFW_UH_WLOCK(ch); 2457 2458 /* Increase refcount on each existing referenced table. */ 2459 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2460 cmdlen = F_LEN(cmd); 2461 unresolved = 0; 2462 2463 error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved); 2464 if (error != 0) 2465 break; 2466 /* 2467 * Compatibility stuff for old clients: 2468 * prepare to automatically create non-existing objects.
2469 */ 2470 if (unresolved != 0) { 2471 pidx->off = rule->cmd_len - l; 2472 pidx++; 2473 } 2474 } 2475 2476 if (error != 0) { 2477 /* Unref everything we have already done */ 2478 unref_oib_objects(ch, rule->cmd, oib, pidx); 2479 IPFW_UH_WUNLOCK(ch); 2480 return (error); 2481 } 2482 IPFW_UH_WUNLOCK(ch); 2483 2484 /* Perform auto-creation for non-existing objects */ 2485 if (pidx != oib) 2486 error = create_objects_compat(ch, rule->cmd, oib, pidx, ti); 2487 2488 /* Calculate real number of dynamic objects */ 2489 ci->object_opcodes = (uint16_t)(pidx - oib); 2490 2491 return (error); 2492 } 2493 2494 /* 2495 * Checks if opcode is referencing a table of appropriate type. 2496 * Adds reference count for found table if true. 2497 * Rewrites user-supplied opcode values with kernel ones. 2498 * 2499 * Returns 0 on success and appropriate error code otherwise. 2500 */ 2501 static int 2502 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci) 2503 { 2504 int error; 2505 ipfw_insn *cmd; 2506 uint8_t type; 2507 struct obj_idx *p, *pidx_first, *pidx_last; 2508 struct tid_info ti; 2509 2510 /* 2511 * Prepare an array for storing opcode indices. 2512 * Use stack allocation by default. 2513 */ 2514 if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) { 2515 /* Stack */ 2516 pidx_first = ci->obuf; 2517 } else 2518 pidx_first = malloc( 2519 ci->object_opcodes * sizeof(struct obj_idx), 2520 M_IPFW, M_WAITOK | M_ZERO); 2521 2522 error = 0; 2523 type = 0; 2524 memset(&ti, 0, sizeof(ti)); 2525 2526 /* 2527 * Use default set for looking up tables (old way) or 2528 * use the set the rule is assigned to (new way). 2529 */ 2530 ti.set = (V_fw_tables_sets != 0) ? ci->krule->set : 0; 2531 if (ci->ctlv != NULL) { 2532 ti.tlvs = (void *)(ci->ctlv + 1); 2533 ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv); 2534 } 2535 2536 /* Reference all used tables and other objects */ 2537 error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti); 2538 if (error != 0) 2539 goto free; 2540 /* 2541 * Note that ref_rule_objects() might have updated ci->object_opcodes 2542 * to reflect actual number of object opcodes. 2543 */ 2544 2545 /* Perform rewrite of remaining opcodes */ 2546 p = pidx_first; 2547 pidx_last = pidx_first + ci->object_opcodes; 2548 for (p = pidx_first; p < pidx_last; p++) { 2549 cmd = ci->krule->cmd + p->off; 2550 update_opcode_kidx(cmd, p->kidx); 2551 } 2552 2553 free: 2554 if (pidx_first != ci->obuf) 2555 free(pidx_first, M_IPFW); 2556 2557 return (error); 2558 } 2559 2560 /* 2561 * Adds one or more rules to ipfw @chain. 2562 * Data layout (version 0)(current): 2563 * Request: 2564 * [ 2565 * ip_fw3_opheader 2566 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1) 2567 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3) 2568 * ] 2569 * Reply: 2570 * [ 2571 * ip_fw3_opheader 2572 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 2573 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] 2574 * ] 2575 * 2576 * Rules in reply are modified to store their actual ruleset number. 2577 * 2578 * (*1) TLVs inside IPFW_TLV_TBL_LIST need to be sorted in ascending order 2579 * according to their idx field and there must be no duplicates. 2580 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST need to be sorted in ascending order. 2581 * (*3) Each ip_fw structure needs to be aligned to a u64 boundary. 2582 * 2583 * Returns 0 on success.
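 *
 * A minimal request under the layout above (sketch, assuming a single rule
 * is being added, which is all the code below currently accepts) is:
 *   ip_fw3_opheader
 *   ipfw_obj_ctlv { type = IPFW_TLV_RULE_LIST, count = 1 }
 *   one ip_fw_rule structure, padded to an 8-byte boundary
 * with an optional IPFW_TLV_TBLNAME_LIST ctlv (one ipfw_obj_ntlv per named
 * table) placed between the header and the rule list.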
2584 */ 2585 static int 2586 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2587 struct sockopt_data *sd) 2588 { 2589 ipfw_obj_ctlv *ctlv, *rtlv, *tstate; 2590 ipfw_obj_ntlv *ntlv; 2591 int clen, error, idx; 2592 uint32_t count, read; 2593 struct ip_fw_rule *r; 2594 struct rule_check_info rci, *ci, *cbuf; 2595 int i, rsize; 2596 2597 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize); 2598 ctlv = (ipfw_obj_ctlv *)(op3 + 1); 2599 2600 read = sizeof(ip_fw3_opheader); 2601 rtlv = NULL; 2602 tstate = NULL; 2603 cbuf = NULL; 2604 memset(&rci, 0, sizeof(struct rule_check_info)); 2605 2606 if (read + sizeof(*ctlv) > sd->valsize) 2607 return (EINVAL); 2608 2609 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) { 2610 clen = ctlv->head.length; 2611 /* Check size and alignment */ 2612 if (clen > sd->valsize || clen < sizeof(*ctlv)) 2613 return (EINVAL); 2614 if ((clen % sizeof(uint64_t)) != 0) 2615 return (EINVAL); 2616 2617 /* 2618 * Some table names or other named objects. 2619 * Check for validness. 2620 */ 2621 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv); 2622 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv)) 2623 return (EINVAL); 2624 2625 /* 2626 * Check each TLV. 2627 * Ensure TLVs are sorted ascending and 2628 * there are no duplicates. 2629 */ 2630 idx = -1; 2631 ntlv = (ipfw_obj_ntlv *)(ctlv + 1); 2632 while (count > 0) { 2633 if (ntlv->head.length != sizeof(ipfw_obj_ntlv)) 2634 return (EINVAL); 2635 2636 error = ipfw_check_object_name_generic(ntlv->name); 2637 if (error != 0) 2638 return (error); 2639 2640 if (ntlv->idx <= idx) 2641 return (EINVAL); 2642 2643 idx = ntlv->idx; 2644 count--; 2645 ntlv++; 2646 } 2647 2648 tstate = ctlv; 2649 read += ctlv->head.length; 2650 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 2651 } 2652 2653 if (read + sizeof(*ctlv) > sd->valsize) 2654 return (EINVAL); 2655 2656 if (ctlv->head.type == IPFW_TLV_RULE_LIST) { 2657 clen = ctlv->head.length; 2658 if (clen + read > sd->valsize || clen < sizeof(*ctlv)) 2659 return (EINVAL); 2660 if ((clen % sizeof(uint64_t)) != 0) 2661 return (EINVAL); 2662 2663 /* 2664 * TODO: Permit adding multiple rules at once 2665 */ 2666 if (ctlv->count != 1) 2667 return (ENOTSUP); 2668 2669 clen -= sizeof(*ctlv); 2670 2671 if (ctlv->count > clen / sizeof(struct ip_fw_rule)) 2672 return (EINVAL); 2673 2674 /* Allocate state for each rule or use stack */ 2675 if (ctlv->count == 1) { 2676 memset(&rci, 0, sizeof(struct rule_check_info)); 2677 cbuf = &rci; 2678 } else 2679 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP, 2680 M_WAITOK | M_ZERO); 2681 ci = cbuf; 2682 2683 /* 2684 * Check each rule for validness. 
2685 * Ensure numbered rules are sorted ascending 2686 * and properly aligned. 2687 */ 2688 idx = 0; 2689 r = (struct ip_fw_rule *)(ctlv + 1); 2690 count = 0; 2691 error = 0; 2692 while (clen > 0) { 2693 rsize = roundup2(RULESIZE(r), sizeof(uint64_t)); 2694 if (rsize > clen || ctlv->count <= count) { 2695 error = EINVAL; 2696 break; 2697 } 2698 2699 ci->ctlv = tstate; 2700 error = check_ipfw_rule1(r, rsize, ci); 2701 if (error != 0) 2702 break; 2703 2704 /* Check sorting */ 2705 if (r->rulenum != 0 && r->rulenum < idx) { 2706 printf("rulenum %d idx %d\n", r->rulenum, idx); 2707 error = EINVAL; 2708 break; 2709 } 2710 idx = r->rulenum; 2711 2712 ci->urule = (caddr_t)r; 2713 2714 rsize = roundup2(rsize, sizeof(uint64_t)); 2715 clen -= rsize; 2716 r = (struct ip_fw_rule *)((caddr_t)r + rsize); 2717 count++; 2718 ci++; 2719 } 2720 2721 if (ctlv->count != count || error != 0) { 2722 if (cbuf != &rci) 2723 free(cbuf, M_TEMP); 2724 return (EINVAL); 2725 } 2726 2727 rtlv = ctlv; 2728 read += ctlv->head.length; 2729 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 2730 } 2731 2732 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) { 2733 if (cbuf != NULL && cbuf != &rci) 2734 free(cbuf, M_TEMP); 2735 return (EINVAL); 2736 } 2737 2738 /* 2739 * Passed rules seem to be valid. 2740 * Allocate storage and try to add them to chain. 2741 */ 2742 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) { 2743 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule); 2744 ci->krule = ipfw_alloc_rule(chain, clen); 2745 import_rule1(ci); 2746 } 2747 2748 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) { 2749 /* Free allocated krules */ 2750 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) 2751 free(ci->krule, M_IPFW); 2752 } 2753 2754 if (cbuf != NULL && cbuf != &rci) 2755 free(cbuf, M_TEMP); 2756 2757 return (error); 2758 } 2759 2760 /* 2761 * Lists all sopts currently registered. 2762 * Data layout (v0)(current): 2763 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 2764 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ] 2765 * 2766 * Returns 0 on success 2767 */ 2768 static int 2769 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2770 struct sockopt_data *sd) 2771 { 2772 struct _ipfw_obj_lheader *olh; 2773 ipfw_sopt_info *i; 2774 struct ipfw_sopt_handler *sh; 2775 uint32_t count, n, size; 2776 2777 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 2778 if (olh == NULL) 2779 return (EINVAL); 2780 if (sd->valsize < olh->size) 2781 return (EINVAL); 2782 2783 CTL3_LOCK(); 2784 count = ctl3_hsize; 2785 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader); 2786 2787 /* Fill in header regardless of buffer size */ 2788 olh->count = count; 2789 olh->objsize = sizeof(ipfw_sopt_info); 2790 2791 if (size > olh->size) { 2792 olh->size = size; 2793 CTL3_UNLOCK(); 2794 return (ENOMEM); 2795 } 2796 olh->size = size; 2797 2798 for (n = 0; n < count; n++) { 2799 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 2800 KASSERT(i != NULL, ("previously checked buffer is not enough")); 2801 sh = &ctl3_handlers[n]; 2802 i->opcode = sh->opcode; 2803 i->version = sh->version; 2804 i->refcnt = sh->refcnt; 2805 } 2806 CTL3_UNLOCK(); 2807 2808 return (0); 2809 } 2810 2811 /* 2812 * Compares two opcodes. 2813 * Used both in qsort() and bsearch(). 2814 * 2815 * Returns 0 if match is found.
2816 */ 2817 static int 2818 compare_opcodes(const void *_a, const void *_b) 2819 { 2820 const struct opcode_obj_rewrite *a, *b; 2821 2822 a = (const struct opcode_obj_rewrite *)_a; 2823 b = (const struct opcode_obj_rewrite *)_b; 2824 2825 if (a->opcode < b->opcode) 2826 return (-1); 2827 else if (a->opcode > b->opcode) 2828 return (1); 2829 2830 return (0); 2831 } 2832 2833 /* 2834 * XXX: Rewrite bsearch() 2835 */ 2836 static int 2837 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo, 2838 struct opcode_obj_rewrite **phi) 2839 { 2840 struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw; 2841 2842 memset(&h, 0, sizeof(h)); 2843 h.opcode = op; 2844 2845 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters, 2846 ctl3_rsize, sizeof(h), compare_opcodes); 2847 if (rw == NULL) 2848 return (1); 2849 2850 /* Find the first element matching the same opcode */ 2851 lo = rw; 2852 for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--) 2853 ; 2854 2855 /* Find the last element matching the same opcode */ 2856 hi = rw; 2857 ctl3_max = ctl3_rewriters + ctl3_rsize; 2858 for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++) 2859 ; 2860 2861 *plo = lo; 2862 *phi = hi; 2863 2864 return (0); 2865 } 2866 2867 /* 2868 * Finds opcode object rewriter based on @code. 2869 * 2870 * Returns pointer to handler or NULL. 2871 */ 2872 static struct opcode_obj_rewrite * 2873 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2874 { 2875 struct opcode_obj_rewrite *rw, *lo, *hi; 2876 uint16_t uidx; 2877 uint8_t subtype; 2878 2879 if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0) 2880 return (NULL); 2881 2882 for (rw = lo; rw <= hi; rw++) { 2883 if (rw->classifier(cmd, &uidx, &subtype) == 0) { 2884 if (puidx != NULL) 2885 *puidx = uidx; 2886 if (ptype != NULL) 2887 *ptype = subtype; 2888 return (rw); 2889 } 2890 } 2891 2892 return (NULL); 2893 } 2894 int 2895 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx) 2896 { 2897 2898 if (find_op_rw(cmd, puidx, NULL) == 0) 2899 return (1); 2900 return (0); 2901 } 2902 2903 void 2904 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx) 2905 { 2906 struct opcode_obj_rewrite *rw; 2907 2908 rw = find_op_rw(cmd, NULL, NULL); 2909 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode)); 2910 rw->update(cmd, idx); 2911 } 2912 2913 void 2914 ipfw_init_obj_rewriter() 2915 { 2916 2917 ctl3_rewriters = NULL; 2918 ctl3_rsize = 0; 2919 } 2920 2921 void 2922 ipfw_destroy_obj_rewriter() 2923 { 2924 2925 if (ctl3_rewriters != NULL) 2926 free(ctl3_rewriters, M_IPFW); 2927 ctl3_rewriters = NULL; 2928 ctl3_rsize = 0; 2929 } 2930 2931 /* 2932 * Adds one or more opcode object rewrite handlers to the global array. 2933 * Function may sleep. 
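 *
 * Typical usage (illustrative sketch; my_rewriters is a hypothetical
 * consumer-side array, not something defined in this file):
 *
 *	static struct opcode_obj_rewrite my_rewriters[] = { ... };
 *	ipfw_add_obj_rewriter(my_rewriters, nitems(my_rewriters));
 *
 * The entries are merged into the sorted ctl3_rewriters array so that
 * find_op_rw() can locate them via bsearch().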
2934 */ 2935 void 2936 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count) 2937 { 2938 size_t sz; 2939 struct opcode_obj_rewrite *tmp; 2940 2941 CTL3_LOCK(); 2942 2943 for (;;) { 2944 sz = ctl3_rsize + count; 2945 CTL3_UNLOCK(); 2946 tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO); 2947 CTL3_LOCK(); 2948 if (ctl3_rsize + count <= sz) 2949 break; 2950 2951 /* Retry */ 2952 free(tmp, M_IPFW); 2953 } 2954 2955 /* Merge old & new arrays */ 2956 sz = ctl3_rsize + count; 2957 memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw)); 2958 memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw)); 2959 qsort(tmp, sz, sizeof(*rw), compare_opcodes); 2960 /* Switch new and free old */ 2961 if (ctl3_rewriters != NULL) 2962 free(ctl3_rewriters, M_IPFW); 2963 ctl3_rewriters = tmp; 2964 ctl3_rsize = sz; 2965 2966 CTL3_UNLOCK(); 2967 } 2968 2969 /* 2970 * Removes one or more object rewrite handlers from the global array. 2971 */ 2972 int 2973 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count) 2974 { 2975 size_t sz; 2976 struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi; 2977 int i; 2978 2979 CTL3_LOCK(); 2980 2981 for (i = 0; i < count; i++) { 2982 if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0) 2983 continue; 2984 2985 for (ktmp = lo; ktmp <= hi; ktmp++) { 2986 if (ktmp->classifier != rw[i].classifier) 2987 continue; 2988 2989 ctl3_max = ctl3_rewriters + ctl3_rsize; 2990 sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp); 2991 memmove(ktmp, ktmp + 1, sz); 2992 ctl3_rsize--; 2993 break; 2994 } 2995 2996 } 2997 2998 if (ctl3_rsize == 0) { 2999 if (ctl3_rewriters != NULL) 3000 free(ctl3_rewriters, M_IPFW); 3001 ctl3_rewriters = NULL; 3002 } 3003 3004 CTL3_UNLOCK(); 3005 3006 return (0); 3007 } 3008 3009 static void 3010 export_objhash_ntlv_internal(struct namedobj_instance *ni, 3011 struct named_object *no, void *arg) 3012 { 3013 struct sockopt_data *sd; 3014 ipfw_obj_ntlv *ntlv; 3015 3016 sd = (struct sockopt_data *)arg; 3017 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 3018 if (ntlv == NULL) 3019 return; 3020 ipfw_export_obj_ntlv(no, ntlv); 3021 } 3022 3023 /* 3024 * Lists all service objects. 3025 * Data layout (v0)(current): 3026 * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size 3027 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ] 3028 * Returns 0 on success 3029 */ 3030 static int 3031 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 3032 struct sockopt_data *sd) 3033 { 3034 ipfw_obj_lheader *hdr; 3035 int count; 3036 3037 hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr)); 3038 if (hdr == NULL) 3039 return (EINVAL); 3040 3041 IPFW_UH_RLOCK(chain); 3042 count = ipfw_objhash_count(CHAIN_TO_SRV(chain)); 3043 hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv); 3044 if (sd->valsize < hdr->size) { 3045 IPFW_UH_RUNLOCK(chain); 3046 return (ENOMEM); 3047 } 3048 hdr->count = count; 3049 hdr->objsize = sizeof(ipfw_obj_ntlv); 3050 if (count > 0) 3051 ipfw_objhash_foreach(CHAIN_TO_SRV(chain), 3052 export_objhash_ntlv_internal, sd); 3053 IPFW_UH_RUNLOCK(chain); 3054 return (0); 3055 } 3056 3057 /* 3058 * Compares two sopt handlers (code, version and handler ptr). 3059 * Used both as qsort() and bsearch(). 3060 * Does not compare handler for latter case. 3061 * 3062 * Returns 0 if match is found. 
3063 */ 3064 static int 3065 compare_sh(const void *_a, const void *_b) 3066 { 3067 const struct ipfw_sopt_handler *a, *b; 3068 3069 a = (const struct ipfw_sopt_handler *)_a; 3070 b = (const struct ipfw_sopt_handler *)_b; 3071 3072 if (a->opcode < b->opcode) 3073 return (-1); 3074 else if (a->opcode > b->opcode) 3075 return (1); 3076 3077 if (a->version < b->version) 3078 return (-1); 3079 else if (a->version > b->version) 3080 return (1); 3081 3082 /* bsearch helper */ 3083 if (a->handler == NULL) 3084 return (0); 3085 3086 if ((uintptr_t)a->handler < (uintptr_t)b->handler) 3087 return (-1); 3088 else if ((uintptr_t)a->handler > (uintptr_t)b->handler) 3089 return (1); 3090 3091 return (0); 3092 } 3093 3094 /* 3095 * Finds sopt handler based on @code and @version. 3096 * 3097 * Returns pointer to handler or NULL. 3098 */ 3099 static struct ipfw_sopt_handler * 3100 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler) 3101 { 3102 struct ipfw_sopt_handler *sh, h; 3103 3104 memset(&h, 0, sizeof(h)); 3105 h.opcode = code; 3106 h.version = version; 3107 h.handler = handler; 3108 3109 sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers, 3110 ctl3_hsize, sizeof(h), compare_sh); 3111 3112 return (sh); 3113 } 3114 3115 static int 3116 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh) 3117 { 3118 struct ipfw_sopt_handler *sh; 3119 3120 CTL3_LOCK(); 3121 if ((sh = find_sh(opcode, version, NULL)) == NULL) { 3122 CTL3_UNLOCK(); 3123 printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n", 3124 opcode, version); 3125 return (EINVAL); 3126 } 3127 sh->refcnt++; 3128 ctl3_refct++; 3129 /* Copy handler data to requested buffer */ 3130 *psh = *sh; 3131 CTL3_UNLOCK(); 3132 3133 return (0); 3134 } 3135 3136 static void 3137 find_unref_sh(struct ipfw_sopt_handler *psh) 3138 { 3139 struct ipfw_sopt_handler *sh; 3140 3141 CTL3_LOCK(); 3142 sh = find_sh(psh->opcode, psh->version, NULL); 3143 KASSERT(sh != NULL, ("ctl3 handler disappeared")); 3144 sh->refcnt--; 3145 ctl3_refct--; 3146 CTL3_UNLOCK(); 3147 } 3148 3149 void 3150 ipfw_init_sopt_handler() 3151 { 3152 3153 CTL3_LOCK_INIT(); 3154 IPFW_ADD_SOPT_HANDLER(1, scodes); 3155 } 3156 3157 void 3158 ipfw_destroy_sopt_handler() 3159 { 3160 3161 IPFW_DEL_SOPT_HANDLER(1, scodes); 3162 CTL3_LOCK_DESTROY(); 3163 } 3164 3165 /* 3166 * Adds one or more sockopt handlers to the global array. 3167 * Function may sleep. 3168 */ 3169 void 3170 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3171 { 3172 size_t sz; 3173 struct ipfw_sopt_handler *tmp; 3174 3175 CTL3_LOCK(); 3176 3177 for (;;) { 3178 sz = ctl3_hsize + count; 3179 CTL3_UNLOCK(); 3180 tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO); 3181 CTL3_LOCK(); 3182 if (ctl3_hsize + count <= sz) 3183 break; 3184 3185 /* Retry */ 3186 free(tmp, M_IPFW); 3187 } 3188 3189 /* Merge old & new arrays */ 3190 sz = ctl3_hsize + count; 3191 memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh)); 3192 memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh)); 3193 qsort(tmp, sz, sizeof(*sh), compare_sh); 3194 /* Switch new and free old */ 3195 if (ctl3_handlers != NULL) 3196 free(ctl3_handlers, M_IPFW); 3197 ctl3_handlers = tmp; 3198 ctl3_hsize = sz; 3199 ctl3_gencnt++; 3200 3201 CTL3_UNLOCK(); 3202 } 3203 3204 /* 3205 * Removes one or more sockopt handlers from the global array. 
3206 */ 3207 int 3208 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3209 { 3210 size_t sz; 3211 struct ipfw_sopt_handler *tmp, *h; 3212 int i; 3213 3214 CTL3_LOCK(); 3215 3216 for (i = 0; i < count; i++) { 3217 tmp = &sh[i]; 3218 h = find_sh(tmp->opcode, tmp->version, tmp->handler); 3219 if (h == NULL) 3220 continue; 3221 3222 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h); 3223 memmove(h, h + 1, sz); 3224 ctl3_hsize--; 3225 } 3226 3227 if (ctl3_hsize == 0) { 3228 if (ctl3_handlers != NULL) 3229 free(ctl3_handlers, M_IPFW); 3230 ctl3_handlers = NULL; 3231 } 3232 3233 ctl3_gencnt++; 3234 3235 CTL3_UNLOCK(); 3236 3237 return (0); 3238 } 3239 3240 /* 3241 * Writes data accumulated in @sd to sockopt buffer. 3242 * Zeroes internal @sd buffer. 3243 */ 3244 static int 3245 ipfw_flush_sopt_data(struct sockopt_data *sd) 3246 { 3247 struct sockopt *sopt; 3248 int error; 3249 size_t sz; 3250 3251 sz = sd->koff; 3252 if (sz == 0) 3253 return (0); 3254 3255 sopt = sd->sopt; 3256 3257 if (sopt->sopt_dir == SOPT_GET) { 3258 error = copyout(sd->kbuf, sopt->sopt_val, sz); 3259 if (error != 0) 3260 return (error); 3261 } 3262 3263 memset(sd->kbuf, 0, sd->ksize); 3264 sd->ktotal += sz; 3265 sd->koff = 0; 3266 if (sd->ktotal + sd->ksize < sd->valsize) 3267 sd->kavail = sd->ksize; 3268 else 3269 sd->kavail = sd->valsize - sd->ktotal; 3270 3271 /* Update sopt buffer data */ 3272 sopt->sopt_valsize = sd->ktotal; 3273 sopt->sopt_val = sd->sopt_val + sd->ktotal; 3274 3275 return (0); 3276 } 3277 3278 /* 3279 * Ensures that @sd buffer has a contiguous run of @needed 3280 * bytes. 3281 * 3282 * Returns pointer to requested space or NULL. 3283 */ 3284 caddr_t 3285 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed) 3286 { 3287 int error; 3288 caddr_t addr; 3289 3290 if (sd->kavail < needed) { 3291 /* 3292 * Flush data and try again. 3293 */ 3294 error = ipfw_flush_sopt_data(sd); 3295 3296 if (sd->kavail < needed || error != 0) 3297 return (NULL); 3298 } 3299 3300 addr = sd->kbuf + sd->koff; 3301 sd->koff += needed; 3302 sd->kavail -= needed; 3303 return (addr); 3304 } 3305 3306 /* 3307 * Requests @needed contiguous bytes from @sd buffer. 3308 * Function is used to notify subsystem that we are 3309 * interested in first @needed bytes (request header) 3310 * and the rest of the buffer can be safely zeroed. 3311 * 3312 * Returns pointer to requested space or NULL. 3313 */ 3314 caddr_t 3315 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed) 3316 { 3317 caddr_t addr; 3318 3319 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL) 3320 return (NULL); 3321 3322 if (sd->kavail > 0) 3323 memset(sd->kbuf + sd->koff, 0, sd->kavail); 3324 3325 return (addr); 3326 } 3327 3328 /* 3329 * New sockopt handler.
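 * Handles IP_FW3 requests: checks privileges, falls back to ipfw_ctl() for
 * legacy socket options, looks up and references the ctl3 handler matching
 * the opcode/version found in the ip_fw3_opheader, prepares a sockopt_data
 * buffer (on-stack for small requests, malloc'ed and possibly backed by a
 * wired user buffer otherwise), runs the handler and finally flushes any
 * accumulated reply data back to userland.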
3330 */ 3331 int 3332 ipfw_ctl3(struct sockopt *sopt) 3333 { 3334 int error, locked; 3335 size_t size, valsize; 3336 struct ip_fw_chain *chain; 3337 char xbuf[256]; 3338 struct sockopt_data sdata; 3339 struct ipfw_sopt_handler h; 3340 ip_fw3_opheader *op3 = NULL; 3341 3342 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW); 3343 if (error != 0) 3344 return (error); 3345 3346 if (sopt->sopt_name != IP_FW3) 3347 return (ipfw_ctl(sopt)); 3348 3349 chain = &V_layer3_chain; 3350 error = 0; 3351 3352 /* Save original valsize before it is altered via sooptcopyin() */ 3353 valsize = sopt->sopt_valsize; 3354 memset(&sdata, 0, sizeof(sdata)); 3355 /* Read op3 header first to determine actual operation */ 3356 op3 = (ip_fw3_opheader *)xbuf; 3357 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3)); 3358 if (error != 0) 3359 return (error); 3360 sopt->sopt_valsize = valsize; 3361 3362 /* 3363 * Find and reference command. 3364 */ 3365 error = find_ref_sh(op3->opcode, op3->version, &h); 3366 if (error != 0) 3367 return (error); 3368 3369 /* 3370 * Disallow modifications in really-really secure mode, but still allow 3371 * the logging counters to be reset. 3372 */ 3373 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) { 3374 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3375 if (error != 0) { 3376 find_unref_sh(&h); 3377 return (error); 3378 } 3379 } 3380 3381 /* 3382 * Fill in sockopt_data structure that may be useful for 3383 * IP_FW3 get requests. 3384 */ 3385 locked = 0; 3386 if (valsize <= sizeof(xbuf)) { 3387 /* use on-stack buffer */ 3388 sdata.kbuf = xbuf; 3389 sdata.ksize = sizeof(xbuf); 3390 sdata.kavail = valsize; 3391 } else { 3392 3393 /* 3394 * Determine opcode type/buffer size: 3395 * allocate sliding-window buf for data export or 3396 * contiguous buffer for special ops. 3397 */ 3398 if ((h.dir & HDIR_SET) != 0) { 3399 /* Set request. Allocate contiguous buffer. */ 3400 if (valsize > CTL3_LARGEBUF) { 3401 find_unref_sh(&h); 3402 return (EFBIG); 3403 } 3404 3405 size = valsize; 3406 } else { 3407 /* Get request. Allocate sliding window buffer */ 3408 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF; 3409 3410 if (size < valsize) { 3411 /* We have to wire user buffer */ 3412 error = vslock(sopt->sopt_val, valsize); 3413 if (error != 0) 3414 return (error); 3415 locked = 1; 3416 } 3417 } 3418 3419 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3420 sdata.ksize = size; 3421 sdata.kavail = size; 3422 } 3423 3424 sdata.sopt = sopt; 3425 sdata.sopt_val = sopt->sopt_val; 3426 sdata.valsize = valsize; 3427 3428 /* 3429 * Copy either the entire request (if valsize < bsize_max) 3430 * or the first bsize_max bytes, to guarantee to most consumers 3431 * that all necessary data has been copied. 3432 * In any case, copy no less than sizeof(ip_fw3_opheader).
3433 */ 3434 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize, 3435 sizeof(ip_fw3_opheader))) != 0) 3436 return (error); 3437 op3 = (ip_fw3_opheader *)sdata.kbuf; 3438 3439 /* Finally, run handler */ 3440 error = h.handler(chain, op3, &sdata); 3441 find_unref_sh(&h); 3442 3443 /* Flush state and free buffers */ 3444 if (error == 0) 3445 error = ipfw_flush_sopt_data(&sdata); 3446 else 3447 ipfw_flush_sopt_data(&sdata); 3448 3449 if (locked != 0) 3450 vsunlock(sdata.sopt_val, valsize); 3451 3452 /* Restore original pointer and set number of bytes written */ 3453 sopt->sopt_val = sdata.sopt_val; 3454 sopt->sopt_valsize = sdata.ktotal; 3455 if (sdata.kbuf != xbuf) 3456 free(sdata.kbuf, M_TEMP); 3457 3458 return (error); 3459 } 3460 3461 /** 3462 * {set|get}sockopt parser. 3463 */ 3464 int 3465 ipfw_ctl(struct sockopt *sopt) 3466 { 3467 #define RULE_MAXSIZE (512*sizeof(u_int32_t)) 3468 int error; 3469 size_t size, valsize; 3470 struct ip_fw *buf; 3471 struct ip_fw_rule0 *rule; 3472 struct ip_fw_chain *chain; 3473 u_int32_t rulenum[2]; 3474 uint32_t opt; 3475 struct rule_check_info ci; 3476 IPFW_RLOCK_TRACKER; 3477 3478 chain = &V_layer3_chain; 3479 error = 0; 3480 3481 /* Save original valsize before it is altered via sooptcopyin() */ 3482 valsize = sopt->sopt_valsize; 3483 opt = sopt->sopt_name; 3484 3485 /* 3486 * Disallow modifications in really-really secure mode, but still allow 3487 * the logging counters to be reset. 3488 */ 3489 if (opt == IP_FW_ADD || 3490 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) { 3491 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3492 if (error != 0) 3493 return (error); 3494 } 3495 3496 switch (opt) { 3497 case IP_FW_GET: 3498 /* 3499 * pass up a copy of the current rules. Static rules 3500 * come first (the last of which has number IPFW_DEFAULT_RULE), 3501 * followed by a possibly empty list of dynamic rule. 3502 * The last dynamic rule has NULL in the "next" field. 3503 * 3504 * Note that the calculated size is used to bound the 3505 * amount of data returned to the user. The rule set may 3506 * change between calculating the size and returning the 3507 * data in which case we'll just return what fits. 3508 */ 3509 for (;;) { 3510 int len = 0, want; 3511 3512 size = chain->static_len; 3513 size += ipfw_dyn_len(); 3514 if (size >= sopt->sopt_valsize) 3515 break; 3516 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3517 IPFW_UH_RLOCK(chain); 3518 /* check again how much space we need */ 3519 want = chain->static_len + ipfw_dyn_len(); 3520 if (size >= want) 3521 len = ipfw_getrules(chain, buf, size); 3522 IPFW_UH_RUNLOCK(chain); 3523 if (size >= want) 3524 error = sooptcopyout(sopt, buf, len); 3525 free(buf, M_TEMP); 3526 if (size >= want) 3527 break; 3528 } 3529 break; 3530 3531 case IP_FW_FLUSH: 3532 /* locking is done within del_entry() */ 3533 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */ 3534 break; 3535 3536 case IP_FW_ADD: 3537 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK); 3538 error = sooptcopyin(sopt, rule, RULE_MAXSIZE, 3539 sizeof(struct ip_fw7) ); 3540 3541 memset(&ci, 0, sizeof(struct rule_check_info)); 3542 3543 /* 3544 * If the size of commands equals RULESIZE7 then we assume 3545 * a FreeBSD7.2 binary is talking to us (set is7=1). 3546 * is7 is persistent so the next 'ipfw list' command 3547 * will use this format. 3548 * NOTE: If wrong version is guessed (this can happen if 3549 * the first ipfw command is 'ipfw [pipe] list') 3550 * the ipfw binary may crash or loop infinitly... 
3551 */ 3552 size = sopt->sopt_valsize; 3553 if (size == RULESIZE7(rule)) { 3554 is7 = 1; 3555 error = convert_rule_to_8(rule); 3556 if (error) { 3557 free(rule, M_TEMP); 3558 return error; 3559 } 3560 size = RULESIZE(rule); 3561 } else 3562 is7 = 0; 3563 if (error == 0) 3564 error = check_ipfw_rule0(rule, size, &ci); 3565 if (error == 0) { 3566 /* locking is done within add_rule() */ 3567 struct ip_fw *krule; 3568 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule)); 3569 ci.urule = (caddr_t)rule; 3570 ci.krule = krule; 3571 import_rule0(&ci); 3572 error = commit_rules(chain, &ci, 1); 3573 if (!error && sopt->sopt_dir == SOPT_GET) { 3574 if (is7) { 3575 error = convert_rule_to_7(rule); 3576 size = RULESIZE7(rule); 3577 if (error) { 3578 free(rule, M_TEMP); 3579 return error; 3580 } 3581 } 3582 error = sooptcopyout(sopt, rule, size); 3583 } 3584 } 3585 free(rule, M_TEMP); 3586 break; 3587 3588 case IP_FW_DEL: 3589 /* 3590 * IP_FW_DEL is used for deleting single rules or sets, 3591 * and (ab)used to atomically manipulate sets. Argument size 3592 * is used to distinguish between the two: 3593 * sizeof(u_int32_t) 3594 * delete single rule or set of rules, 3595 * or reassign rules (or sets) to a different set. 3596 * 2*sizeof(u_int32_t) 3597 * atomic disable/enable sets. 3598 * first u_int32_t contains sets to be disabled, 3599 * second u_int32_t contains sets to be enabled. 3600 */ 3601 error = sooptcopyin(sopt, rulenum, 3602 2*sizeof(u_int32_t), sizeof(u_int32_t)); 3603 if (error) 3604 break; 3605 size = sopt->sopt_valsize; 3606 if (size == sizeof(u_int32_t) && rulenum[0] != 0) { 3607 /* delete or reassign, locking done in del_entry() */ 3608 error = del_entry(chain, rulenum[0]); 3609 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */ 3610 IPFW_UH_WLOCK(chain); 3611 V_set_disable = 3612 (V_set_disable | rulenum[0]) & ~rulenum[1] & 3613 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */ 3614 IPFW_UH_WUNLOCK(chain); 3615 } else 3616 error = EINVAL; 3617 break; 3618 3619 case IP_FW_ZERO: 3620 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */ 3621 rulenum[0] = 0; 3622 if (sopt->sopt_val != 0) { 3623 error = sooptcopyin(sopt, rulenum, 3624 sizeof(u_int32_t), sizeof(u_int32_t)); 3625 if (error) 3626 break; 3627 } 3628 error = zero_entry(chain, rulenum[0], 3629 sopt->sopt_name == IP_FW_RESETLOG); 3630 break; 3631 3632 /*--- TABLE opcodes ---*/ 3633 case IP_FW_TABLE_ADD: 3634 case IP_FW_TABLE_DEL: 3635 { 3636 ipfw_table_entry ent; 3637 struct tentry_info tei; 3638 struct tid_info ti; 3639 struct table_value v; 3640 3641 error = sooptcopyin(sopt, &ent, 3642 sizeof(ent), sizeof(ent)); 3643 if (error) 3644 break; 3645 3646 memset(&tei, 0, sizeof(tei)); 3647 tei.paddr = &ent.addr; 3648 tei.subtype = AF_INET; 3649 tei.masklen = ent.masklen; 3650 ipfw_import_table_value_legacy(ent.value, &v); 3651 tei.pvalue = &v; 3652 memset(&ti, 0, sizeof(ti)); 3653 ti.uidx = ent.tbl; 3654 ti.type = IPFW_TABLE_CIDR; 3655 3656 error = (opt == IP_FW_TABLE_ADD) ? 
3657 add_table_entry(chain, &ti, &tei, 0, 1) : 3658 del_table_entry(chain, &ti, &tei, 0, 1); 3659 } 3660 break; 3661 3662 3663 case IP_FW_TABLE_FLUSH: 3664 { 3665 u_int16_t tbl; 3666 struct tid_info ti; 3667 3668 error = sooptcopyin(sopt, &tbl, 3669 sizeof(tbl), sizeof(tbl)); 3670 if (error) 3671 break; 3672 memset(&ti, 0, sizeof(ti)); 3673 ti.uidx = tbl; 3674 error = flush_table(chain, &ti); 3675 } 3676 break; 3677 3678 case IP_FW_TABLE_GETSIZE: 3679 { 3680 u_int32_t tbl, cnt; 3681 struct tid_info ti; 3682 3683 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl), 3684 sizeof(tbl)))) 3685 break; 3686 memset(&ti, 0, sizeof(ti)); 3687 ti.uidx = tbl; 3688 IPFW_RLOCK(chain); 3689 error = ipfw_count_table(chain, &ti, &cnt); 3690 IPFW_RUNLOCK(chain); 3691 if (error) 3692 break; 3693 error = sooptcopyout(sopt, &cnt, sizeof(cnt)); 3694 } 3695 break; 3696 3697 case IP_FW_TABLE_LIST: 3698 { 3699 ipfw_table *tbl; 3700 struct tid_info ti; 3701 3702 if (sopt->sopt_valsize < sizeof(*tbl)) { 3703 error = EINVAL; 3704 break; 3705 } 3706 size = sopt->sopt_valsize; 3707 tbl = malloc(size, M_TEMP, M_WAITOK); 3708 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl)); 3709 if (error) { 3710 free(tbl, M_TEMP); 3711 break; 3712 } 3713 tbl->size = (size - sizeof(*tbl)) / 3714 sizeof(ipfw_table_entry); 3715 memset(&ti, 0, sizeof(ti)); 3716 ti.uidx = tbl->tbl; 3717 IPFW_RLOCK(chain); 3718 error = ipfw_dump_table_legacy(chain, &ti, tbl); 3719 IPFW_RUNLOCK(chain); 3720 if (error) { 3721 free(tbl, M_TEMP); 3722 break; 3723 } 3724 error = sooptcopyout(sopt, tbl, size); 3725 free(tbl, M_TEMP); 3726 } 3727 break; 3728 3729 /*--- NAT operations are protected by the IPFW_LOCK ---*/ 3730 case IP_FW_NAT_CFG: 3731 if (IPFW_NAT_LOADED) 3732 error = ipfw_nat_cfg_ptr(sopt); 3733 else { 3734 printf("IP_FW_NAT_CFG: %s\n", 3735 "ipfw_nat not present, please load it"); 3736 error = EINVAL; 3737 } 3738 break; 3739 3740 case IP_FW_NAT_DEL: 3741 if (IPFW_NAT_LOADED) 3742 error = ipfw_nat_del_ptr(sopt); 3743 else { 3744 printf("IP_FW_NAT_DEL: %s\n", 3745 "ipfw_nat not present, please load it"); 3746 error = EINVAL; 3747 } 3748 break; 3749 3750 case IP_FW_NAT_GET_CONFIG: 3751 if (IPFW_NAT_LOADED) 3752 error = ipfw_nat_get_cfg_ptr(sopt); 3753 else { 3754 printf("IP_FW_NAT_GET_CFG: %s\n", 3755 "ipfw_nat not present, please load it"); 3756 error = EINVAL; 3757 } 3758 break; 3759 3760 case IP_FW_NAT_GET_LOG: 3761 if (IPFW_NAT_LOADED) 3762 error = ipfw_nat_get_log_ptr(sopt); 3763 else { 3764 printf("IP_FW_NAT_GET_LOG: %s\n", 3765 "ipfw_nat not present, please load it"); 3766 error = EINVAL; 3767 } 3768 break; 3769 3770 default: 3771 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name); 3772 error = EINVAL; 3773 } 3774 3775 return (error); 3776 #undef RULE_MAXSIZE 3777 } 3778 #define RULE_MAXSIZE (256*sizeof(u_int32_t)) 3779 3780 /* Functions to convert rules 7.2 <==> 8.0 */ 3781 static int 3782 convert_rule_to_7(struct ip_fw_rule0 *rule) 3783 { 3784 /* Used to modify original rule */ 3785 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule; 3786 /* copy of original rule, version 8 */ 3787 struct ip_fw_rule0 *tmp; 3788 3789 /* Used to copy commands */ 3790 ipfw_insn *ccmd, *dst; 3791 int ll = 0, ccmdlen = 0; 3792 3793 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 3794 if (tmp == NULL) { 3795 return 1; //XXX error 3796 } 3797 bcopy(rule, tmp, RULE_MAXSIZE); 3798 3799 /* Copy fields */ 3800 //rule7->_pad = tmp->_pad; 3801 rule7->set = tmp->set; 3802 rule7->rulenum = tmp->rulenum; 3803 rule7->cmd_len = tmp->cmd_len; 3804 rule7->act_ofs = 
tmp->act_ofs; 3805 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule; 3806 rule7->cmd_len = tmp->cmd_len; 3807 rule7->pcnt = tmp->pcnt; 3808 rule7->bcnt = tmp->bcnt; 3809 rule7->timestamp = tmp->timestamp; 3810 3811 /* Copy commands */ 3812 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ; 3813 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 3814 ccmdlen = F_LEN(ccmd); 3815 3816 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 3817 3818 if (dst->opcode > O_NAT) 3819 /* O_REASS doesn't exists in 7.2 version, so 3820 * decrement opcode if it is after O_REASS 3821 */ 3822 dst->opcode--; 3823 3824 if (ccmdlen > ll) { 3825 printf("ipfw: opcode %d size truncated\n", 3826 ccmd->opcode); 3827 return EINVAL; 3828 } 3829 } 3830 free(tmp, M_TEMP); 3831 3832 return 0; 3833 } 3834 3835 static int 3836 convert_rule_to_8(struct ip_fw_rule0 *rule) 3837 { 3838 /* Used to modify original rule */ 3839 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule; 3840 3841 /* Used to copy commands */ 3842 ipfw_insn *ccmd, *dst; 3843 int ll = 0, ccmdlen = 0; 3844 3845 /* Copy of original rule */ 3846 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 3847 if (tmp == NULL) { 3848 return 1; //XXX error 3849 } 3850 3851 bcopy(rule7, tmp, RULE_MAXSIZE); 3852 3853 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ; 3854 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 3855 ccmdlen = F_LEN(ccmd); 3856 3857 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 3858 3859 if (dst->opcode > O_NAT) 3860 /* O_REASS doesn't exists in 7.2 version, so 3861 * increment opcode if it is after O_REASS 3862 */ 3863 dst->opcode++; 3864 3865 if (ccmdlen > ll) { 3866 printf("ipfw: opcode %d size truncated\n", 3867 ccmd->opcode); 3868 return EINVAL; 3869 } 3870 } 3871 3872 rule->_pad = tmp->_pad; 3873 rule->set = tmp->set; 3874 rule->rulenum = tmp->rulenum; 3875 rule->cmd_len = tmp->cmd_len; 3876 rule->act_ofs = tmp->act_ofs; 3877 rule->next_rule = (struct ip_fw *)tmp->next_rule; 3878 rule->cmd_len = tmp->cmd_len; 3879 rule->id = 0; /* XXX see if is ok = 0 */ 3880 rule->pcnt = tmp->pcnt; 3881 rule->bcnt = tmp->bcnt; 3882 rule->timestamp = tmp->timestamp; 3883 3884 free (tmp, M_TEMP); 3885 return 0; 3886 } 3887 3888 /* 3889 * Named object api 3890 * 3891 */ 3892 3893 void 3894 ipfw_init_srv(struct ip_fw_chain *ch) 3895 { 3896 3897 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT); 3898 ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT, 3899 M_IPFW, M_WAITOK | M_ZERO); 3900 } 3901 3902 void 3903 ipfw_destroy_srv(struct ip_fw_chain *ch) 3904 { 3905 3906 free(ch->srvstate, M_IPFW); 3907 ipfw_objhash_destroy(ch->srvmap); 3908 } 3909 3910 /* 3911 * Allocate new bitmask which can be used to enlarge/shrink 3912 * named instance index. 3913 */ 3914 void 3915 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks) 3916 { 3917 size_t size; 3918 int max_blocks; 3919 u_long *idx_mask; 3920 3921 KASSERT((items % BLOCK_ITEMS) == 0, 3922 ("bitmask size needs to power of 2 and greater or equal to %zu", 3923 BLOCK_ITEMS)); 3924 3925 max_blocks = items / BLOCK_ITEMS; 3926 size = items / 8; 3927 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK); 3928 /* Mark all as free */ 3929 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS); 3930 *idx_mask &= ~(u_long)1; /* Skip index 0 */ 3931 3932 *idx = idx_mask; 3933 *pblocks = max_blocks; 3934 } 3935 3936 /* 3937 * Copy current bitmask index to new one. 
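 * (Typically used as part of the resize sequence: allocate a new bitmask
 * with ipfw_objhash_bitmap_alloc(), merge the current one into it here,
 * publish it with ipfw_objhash_bitmap_swap() and free the old copy with
 * ipfw_objhash_bitmap_free().)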
3938 */ 3939 void 3940 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks) 3941 { 3942 int old_blocks, new_blocks; 3943 u_long *old_idx, *new_idx; 3944 int i; 3945 3946 old_idx = ni->idx_mask; 3947 old_blocks = ni->max_blocks; 3948 new_idx = *idx; 3949 new_blocks = *blocks; 3950 3951 for (i = 0; i < IPFW_MAX_SETS; i++) { 3952 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i], 3953 old_blocks * sizeof(u_long)); 3954 } 3955 } 3956 3957 /* 3958 * Swaps current @ni index with new one. 3959 */ 3960 void 3961 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks) 3962 { 3963 int old_blocks; 3964 u_long *old_idx; 3965 3966 old_idx = ni->idx_mask; 3967 old_blocks = ni->max_blocks; 3968 3969 ni->idx_mask = *idx; 3970 ni->max_blocks = *blocks; 3971 3972 /* Save old values */ 3973 *idx = old_idx; 3974 *blocks = old_blocks; 3975 } 3976 3977 void 3978 ipfw_objhash_bitmap_free(void *idx, int blocks) 3979 { 3980 3981 free(idx, M_IPFW); 3982 } 3983 3984 /* 3985 * Creates named hash instance. 3986 * Must be called without holding any locks. 3987 * Return pointer to new instance. 3988 */ 3989 struct namedobj_instance * 3990 ipfw_objhash_create(uint32_t items) 3991 { 3992 struct namedobj_instance *ni; 3993 int i; 3994 size_t size; 3995 3996 size = sizeof(struct namedobj_instance) + 3997 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE + 3998 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE; 3999 4000 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO); 4001 ni->nn_size = NAMEDOBJ_HASH_SIZE; 4002 ni->nv_size = NAMEDOBJ_HASH_SIZE; 4003 4004 ni->names = (struct namedobjects_head *)(ni +1); 4005 ni->values = &ni->names[ni->nn_size]; 4006 4007 for (i = 0; i < ni->nn_size; i++) 4008 TAILQ_INIT(&ni->names[i]); 4009 4010 for (i = 0; i < ni->nv_size; i++) 4011 TAILQ_INIT(&ni->values[i]); 4012 4013 /* Set default hashing/comparison functions */ 4014 ni->hash_f = objhash_hash_name; 4015 ni->cmp_f = objhash_cmp_name; 4016 4017 /* Allocate bitmask separately due to possible resize */ 4018 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks); 4019 4020 return (ni); 4021 } 4022 4023 void 4024 ipfw_objhash_destroy(struct namedobj_instance *ni) 4025 { 4026 4027 free(ni->idx_mask, M_IPFW); 4028 free(ni, M_IPFW); 4029 } 4030 4031 void 4032 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f, 4033 objhash_cmp_f *cmp_f) 4034 { 4035 4036 ni->hash_f = hash_f; 4037 ni->cmp_f = cmp_f; 4038 } 4039 4040 static uint32_t 4041 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set) 4042 { 4043 4044 return (fnv_32_str((const char *)name, FNV1_32_INIT)); 4045 } 4046 4047 static int 4048 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set) 4049 { 4050 4051 if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set)) 4052 return (0); 4053 4054 return (1); 4055 } 4056 4057 static uint32_t 4058 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val) 4059 { 4060 uint32_t v; 4061 4062 v = val % (ni->nv_size - 1); 4063 4064 return (v); 4065 } 4066 4067 struct named_object * 4068 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name) 4069 { 4070 struct named_object *no; 4071 uint32_t hash; 4072 4073 hash = ni->hash_f(ni, name, set) % ni->nn_size; 4074 4075 TAILQ_FOREACH(no, &ni->names[hash], nn_next) { 4076 if (ni->cmp_f(no, name, set) == 0) 4077 return (no); 4078 } 4079 4080 return (NULL); 4081 } 4082 4083 /* 4084 * Find named object by @uid. 
4085 * Check @tlvs for valid data inside. 4086 * 4087 * Returns pointer to found TLV or NULL. 4088 */ 4089 static ipfw_obj_ntlv * 4090 find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv) 4091 { 4092 ipfw_obj_ntlv *ntlv; 4093 uintptr_t pa, pe; 4094 int l; 4095 4096 pa = (uintptr_t)tlvs; 4097 pe = pa + len; 4098 l = 0; 4099 for (; pa < pe; pa += l) { 4100 ntlv = (ipfw_obj_ntlv *)pa; 4101 l = ntlv->head.length; 4102 4103 if (l != sizeof(*ntlv)) 4104 return (NULL); 4105 4106 if (ntlv->idx != uidx) 4107 continue; 4108 /* 4109 * When userland has specified a zero TLV type, do 4110 * not compare it with @etlv. In some cases userland 4111 * doesn't know what type it should have. Use only 4112 * uidx and name to search for the named_object. 4113 */ 4114 if (ntlv->head.type != 0 && 4115 ntlv->head.type != (uint16_t)etlv) 4116 continue; 4117 4118 if (ipfw_check_object_name_generic(ntlv->name) != 0) 4119 return (NULL); 4120 4121 return (ntlv); 4122 } 4123 4124 return (NULL); 4125 } 4126 4127 /* 4128 * Finds object config based on either legacy index 4129 * or name in ntlv. 4130 * Note @ti structure contains unchecked data from userland. 4131 * 4132 * Returns 0 on success and fills in @pno with found config. 4133 */ 4134 int 4135 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti, 4136 uint32_t etlv, struct named_object **pno) 4137 { 4138 char *name; 4139 ipfw_obj_ntlv *ntlv; 4140 uint32_t set; 4141 4142 if (ti->tlvs == NULL) 4143 return (EINVAL); 4144 4145 ntlv = find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv); 4146 if (ntlv == NULL) 4147 return (EINVAL); 4148 name = ntlv->name; 4149 4150 /* 4151 * Use the set provided by @ti instead of the @ntlv one. 4152 * This is needed due to different set behavior 4153 * controlled by V_fw_tables_sets. 4154 */ 4155 set = ti->set; 4156 *pno = ipfw_objhash_lookup_name(ni, set, name); 4157 if (*pno == NULL) 4158 return (ESRCH); 4159 return (0); 4160 } 4161 4162 /* 4163 * Find named object by name, considering also its TLV type.
4164 */ 4165 struct named_object * 4166 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set, 4167 uint32_t type, const char *name) 4168 { 4169 struct named_object *no; 4170 uint32_t hash; 4171 4172 hash = ni->hash_f(ni, name, set) % ni->nn_size; 4173 4174 TAILQ_FOREACH(no, &ni->names[hash], nn_next) { 4175 if (ni->cmp_f(no, name, set) == 0 && 4176 no->etlv == (uint16_t)type) 4177 return (no); 4178 } 4179 4180 return (NULL); 4181 } 4182 4183 struct named_object * 4184 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx) 4185 { 4186 struct named_object *no; 4187 uint32_t hash; 4188 4189 hash = objhash_hash_idx(ni, kidx); 4190 4191 TAILQ_FOREACH(no, &ni->values[hash], nv_next) { 4192 if (no->kidx == kidx) 4193 return (no); 4194 } 4195 4196 return (NULL); 4197 } 4198 4199 int 4200 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a, 4201 struct named_object *b) 4202 { 4203 4204 if ((strcmp(a->name, b->name) == 0) && a->set == b->set) 4205 return (1); 4206 4207 return (0); 4208 } 4209 4210 void 4211 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no) 4212 { 4213 uint32_t hash; 4214 4215 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size; 4216 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next); 4217 4218 hash = objhash_hash_idx(ni, no->kidx); 4219 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next); 4220 4221 ni->count++; 4222 } 4223 4224 void 4225 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no) 4226 { 4227 uint32_t hash; 4228 4229 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size; 4230 TAILQ_REMOVE(&ni->names[hash], no, nn_next); 4231 4232 hash = objhash_hash_idx(ni, no->kidx); 4233 TAILQ_REMOVE(&ni->values[hash], no, nv_next); 4234 4235 ni->count--; 4236 } 4237 4238 uint32_t 4239 ipfw_objhash_count(struct namedobj_instance *ni) 4240 { 4241 4242 return (ni->count); 4243 } 4244 4245 /* 4246 * Runs @func for each found named object. 4247 * It is safe to delete objects from callback 4248 */ 4249 void 4250 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg) 4251 { 4252 struct named_object *no, *no_tmp; 4253 int i; 4254 4255 for (i = 0; i < ni->nn_size; i++) { 4256 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) 4257 f(ni, no, arg); 4258 } 4259 } 4260 4261 /* 4262 * Removes index from given set. 4263 * Returns 0 on success. 4264 */ 4265 int 4266 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx) 4267 { 4268 u_long *mask; 4269 int i, v; 4270 4271 i = idx / BLOCK_ITEMS; 4272 v = idx % BLOCK_ITEMS; 4273 4274 if (i >= ni->max_blocks) 4275 return (1); 4276 4277 mask = &ni->idx_mask[i]; 4278 4279 if ((*mask & ((u_long)1 << v)) != 0) 4280 return (1); 4281 4282 /* Mark as free */ 4283 *mask |= (u_long)1 << v; 4284 4285 /* Update free offset */ 4286 if (ni->free_off[0] > i) 4287 ni->free_off[0] = i; 4288 4289 return (0); 4290 } 4291 4292 /* 4293 * Allocate new index in given instance and stores in in @pidx. 4294 * Returns 0 on success. 
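 *
 * Allocation scans the free-bit mask starting from the cached free_off
 * hint: ffsl() finds the first set (free) bit in a block, that bit is
 * cleared to mark the index busy, and the resulting index is
 * BLOCK_ITEMS * block + bit - 1. Returns 1 when no free index is left.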
4295 */ 4296 int 4297 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx) 4298 { 4299 struct namedobj_instance *ni; 4300 u_long *mask; 4301 int i, off, v; 4302 4303 ni = (struct namedobj_instance *)n; 4304 4305 off = ni->free_off[0]; 4306 mask = &ni->idx_mask[off]; 4307 4308 for (i = off; i < ni->max_blocks; i++, mask++) { 4309 if ((v = ffsl(*mask)) == 0) 4310 continue; 4311 4312 /* Mark as busy */ 4313 *mask &= ~ ((u_long)1 << (v - 1)); 4314 4315 ni->free_off[0] = i; 4316 4317 v = BLOCK_ITEMS * i + v - 1; 4318 4319 *pidx = v; 4320 return (0); 4321 } 4322 4323 return (1); 4324 } 4325 4326 /* end of file */ 4327