1 /*- 2 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 3 * Copyright (c) 2014 Yandex LLC 4 * Copyright (c) 2014 Alexander V. Chernikov 5 * 6 * Supported by: Valeria Paoli 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Control socket and rule management routines for ipfw. 35 * Control is currently implemented via IP_FW3 setsockopt() code. 36 */ 37 38 #include "opt_ipfw.h" 39 #include "opt_inet.h" 40 #ifndef INET 41 #error IPFIREWALL requires INET. 
42 #endif /* INET */ 43 #include "opt_inet6.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/malloc.h> 48 #include <sys/mbuf.h> /* struct m_tag used by nested headers */ 49 #include <sys/kernel.h> 50 #include <sys/lock.h> 51 #include <sys/priv.h> 52 #include <sys/proc.h> 53 #include <sys/rwlock.h> 54 #include <sys/rmlock.h> 55 #include <sys/socket.h> 56 #include <sys/socketvar.h> 57 #include <sys/sysctl.h> 58 #include <sys/syslog.h> 59 #include <sys/fnv_hash.h> 60 #include <net/if.h> 61 #include <net/route.h> 62 #include <net/vnet.h> 63 #include <vm/vm.h> 64 #include <vm/vm_extern.h> 65 66 #include <netinet/in.h> 67 #include <netinet/ip_var.h> /* hooks */ 68 #include <netinet/ip_fw.h> 69 70 #include <netpfil/ipfw/ip_fw_private.h> 71 #include <netpfil/ipfw/ip_fw_table.h> 72 73 #ifdef MAC 74 #include <security/mac/mac_framework.h> 75 #endif 76 77 static int ipfw_ctl(struct sockopt *sopt); 78 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, 79 struct rule_check_info *ci); 80 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size, 81 struct rule_check_info *ci); 82 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 83 struct rule_check_info *ci); 84 static int rewrite_rule_uidx(struct ip_fw_chain *chain, 85 struct rule_check_info *ci); 86 87 #define NAMEDOBJ_HASH_SIZE 32 88 89 struct namedobj_instance { 90 struct namedobjects_head *names; 91 struct namedobjects_head *values; 92 uint32_t nn_size; /* names hash size */ 93 uint32_t nv_size; /* number hash size */ 94 u_long *idx_mask; /* used items bitmask */ 95 uint32_t max_blocks; /* number of "long" blocks in bitmask */ 96 uint32_t count; /* number of items */ 97 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */ 98 objhash_hash_f *hash_f; 99 objhash_cmp_f *cmp_f; 100 }; 101 #define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */ 102 103 static uint32_t objhash_hash_name(struct namedobj_instance *ni, 104 const void *key, uint32_t kopt); 105 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val); 106 static int objhash_cmp_name(struct named_object *no, const void *name, 107 uint32_t set); 108 109 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); 110 111 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 112 struct sockopt_data *sd); 113 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 114 struct sockopt_data *sd); 115 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 116 struct sockopt_data *sd); 117 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 118 struct sockopt_data *sd); 119 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 120 struct sockopt_data *sd); 121 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 122 struct sockopt_data *sd); 123 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 124 struct sockopt_data *sd); 125 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 126 struct sockopt_data *sd); 127 128 /* ctl3 handler data */ 129 struct mtx ctl3_lock; 130 #define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF) 131 #define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock) 132 #define CTL3_LOCK() mtx_lock(&ctl3_lock) 133 #define CTL3_UNLOCK() mtx_unlock(&ctl3_lock) 134 135 static struct ipfw_sopt_handler *ctl3_handlers; 136 static size_t ctl3_hsize; 137 static uint64_t ctl3_refct, ctl3_gencnt; 138 #define CTL3_SMALLBUF 4096 
/* small page-size write buffer */ 139 #define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */ 140 141 static int ipfw_flush_sopt_data(struct sockopt_data *sd); 142 143 static struct ipfw_sopt_handler scodes[] = { 144 { IP_FW_XGET, 0, HDIR_GET, dump_config }, 145 { IP_FW_XADD, 0, HDIR_BOTH, add_rules }, 146 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules }, 147 { IP_FW_XZERO, 0, HDIR_SET, clear_rules }, 148 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules }, 149 { IP_FW_XMOVE, 0, HDIR_SET, move_rules }, 150 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets }, 151 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets }, 152 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets }, 153 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes }, 154 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects }, 155 }; 156 157 static int 158 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule); 159 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd, 160 uint16_t *puidx, uint8_t *ptype); 161 static int mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule, 162 uint32_t *bmask); 163 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 164 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti); 165 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, 166 struct tid_info *ti, struct obj_idx *pidx, int *unresolved); 167 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule); 168 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, 169 struct obj_idx *oib, struct obj_idx *end); 170 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 171 struct sockopt_data *sd); 172 173 /* 174 * Opcode object rewriter variables 175 */ 176 struct opcode_obj_rewrite *ctl3_rewriters; 177 static size_t ctl3_rsize; 178 179 /* 180 * static variables followed by global ones 181 */ 182 183 static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone); 184 #define V_ipfw_cntr_zone VNET(ipfw_cntr_zone) 185 186 void 187 ipfw_init_counters() 188 { 189 190 V_ipfw_cntr_zone = uma_zcreate("IPFW counters", 191 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL, 192 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 193 } 194 195 void 196 ipfw_destroy_counters() 197 { 198 199 uma_zdestroy(V_ipfw_cntr_zone); 200 } 201 202 struct ip_fw * 203 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize) 204 { 205 struct ip_fw *rule; 206 207 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO); 208 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO); 209 210 return (rule); 211 } 212 213 static void 214 free_rule(struct ip_fw *rule) 215 { 216 217 uma_zfree(V_ipfw_cntr_zone, rule->cntr); 218 free(rule, M_IPFW); 219 } 220 221 222 /* 223 * Find the smallest rule >= key, id. 224 * We could use bsearch but it is so simple that we code it directly 225 */ 226 int 227 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id) 228 { 229 int i, lo, hi; 230 struct ip_fw *r; 231 232 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) { 233 i = (lo + hi) / 2; 234 r = chain->map[i]; 235 if (r->rulenum < key) 236 lo = i + 1; /* continue from the next one */ 237 else if (r->rulenum > key) 238 hi = i; /* this might be good */ 239 else if (r->id < id) 240 lo = i + 1; /* continue from the next one */ 241 else /* r->id >= id */ 242 hi = i; /* this might be good */ 243 } 244 return hi; 245 } 246 247 /* 248 * Builds skipto cache on rule set @map. 
 */
static void
update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
{
	int *smap, rulenum;
	int i, mi;

	IPFW_UH_WLOCK_ASSERT(chain);

	mi = 0;
	rulenum = map[mi]->rulenum;
	smap = chain->idxmap_back;

	if (smap == NULL)
		return;

	for (i = 0; i < 65536; i++) {
		smap[i] = mi;
		/* Use the same rule index while i < rulenum */
		if (i != rulenum || i == 65535)
			continue;
		/* Find next rule with num > i */
		rulenum = map[++mi]->rulenum;
		while (rulenum == i)
			rulenum = map[++mi]->rulenum;
	}
}

/*
 * Swaps prepared (backup) index with the current one.
 */
static void
swap_skipto_cache(struct ip_fw_chain *chain)
{
	int *map;

	IPFW_UH_WLOCK_ASSERT(chain);
	IPFW_WLOCK_ASSERT(chain);

	map = chain->idxmap;
	chain->idxmap = chain->idxmap_back;
	chain->idxmap_back = map;
}

/*
 * Allocate and initialize skipto cache.
 */
void
ipfw_init_skipto_cache(struct ip_fw_chain *chain)
{
	int *idxmap, *idxmap_back;

	idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW,
	    M_WAITOK | M_ZERO);
	idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW,
	    M_WAITOK | M_ZERO);

	/*
	 * Note we may be called at any time after initialization,
	 * for example, on the first skipto rule, so we need to
	 * provide a valid chain->idxmap on return.
	 */

	IPFW_UH_WLOCK(chain);
	if (chain->idxmap != NULL) {
		IPFW_UH_WUNLOCK(chain);
		free(idxmap, M_IPFW);
		free(idxmap_back, M_IPFW);
		return;
	}

	/* Set backup pointer first to permit building cache */
	chain->idxmap_back = idxmap_back;
	update_skipto_cache(chain, chain->map);
	IPFW_WLOCK(chain);
	/* It is now safe to set chain->idxmap ptr */
	chain->idxmap = idxmap;
	swap_skipto_cache(chain);
	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);
}

/*
 * Destroys skipto cache.
 */
void
ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
{

	if (chain->idxmap != NULL)
		free(chain->idxmap, M_IPFW);
	if (chain->idxmap_back != NULL)
		free(chain->idxmap_back, M_IPFW);
}


/*
 * Allocate a new map; returns with the chain locked. @extra is the number
 * of entries to add or delete.
 */
static struct ip_fw **
get_map(struct ip_fw_chain *chain, int extra, int locked)
{

	for (;;) {
		struct ip_fw **map;
		int i, mflags;

		mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);

		i = chain->n_rules + extra;
		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
		if (map == NULL) {
			printf("%s: cannot allocate map\n", __FUNCTION__);
			return NULL;
		}
		if (!locked)
			IPFW_UH_WLOCK(chain);
		if (i >= chain->n_rules + extra)	/* good */
			return map;
		/* otherwise we lost the race, free and retry */
		if (!locked)
			IPFW_UH_WUNLOCK(chain);
		free(map, M_IPFW);
	}
}
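/*
 * Illustrative note (not part of this file's logic): once the cache is
 * built, the packet path can resolve a skipto target in O(1), roughly as
 *
 *	f_pos = chain->idxmap[rulenum];
 *
 * i.e. idxmap[] maps a 16-bit rule number to the index of the first rule
 * whose number is >= rulenum, assuming update_skipto_cache() was run after
 * the last ruleset change; without the cache a binary search via
 * ipfw_find_rule() would be needed instead.
 */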
/*
 * Swap the maps. It must be called with IPFW_UH_WLOCK held.
 */
static struct ip_fw **
swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
{
	struct ip_fw **old_map;

	IPFW_WLOCK(chain);
	chain->id++;
	chain->n_rules = new_len;
	old_map = chain->map;
	chain->map = new_map;
	swap_skipto_cache(chain);
	IPFW_WUNLOCK(chain);
	return old_map;
}


static void
export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
{

	cntr->size = sizeof(*cntr);

	if (krule->cntr != NULL) {
		cntr->pcnt = counter_u64_fetch(krule->cntr);
		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
		cntr->timestamp = krule->timestamp;
	}
	if (cntr->timestamp > 0)
		cntr->timestamp += boottime.tv_sec;
}

static void
export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
{

	if (krule->cntr != NULL) {
		cntr->pcnt = counter_u64_fetch(krule->cntr);
		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
		cntr->timestamp = krule->timestamp;
	}
	if (cntr->timestamp > 0)
		cntr->timestamp += boottime.tv_sec;
}

/*
 * Copies rule @urule from v1 userland format (current)
 * to kernel @krule.
 * Assumes @krule is zeroed.
 */
static void
import_rule1(struct rule_check_info *ci)
{
	struct ip_fw_rule *urule;
	struct ip_fw *krule;

	urule = (struct ip_fw_rule *)ci->urule;
	krule = (struct ip_fw *)ci->krule;

	/* copy header */
	krule->act_ofs = urule->act_ofs;
	krule->cmd_len = urule->cmd_len;
	krule->rulenum = urule->rulenum;
	krule->set = urule->set;
	krule->flags = urule->flags;

	/* Save rulenum offset */
	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);

	/* Copy opcodes */
	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
}

/*
 * Export rule into v1 format (current).
 * Layout:
 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
 *   [ ip_fw_rule ] OR
 *   [ ip_fw_bcounter ip_fw_rule ] (depends on rcntrs).
 * ]
 * Assumes @data is zeroed.
 */
static void
export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
{
	struct ip_fw_bcounter *cntr;
	struct ip_fw_rule *urule;
	ipfw_obj_tlv *tlv;

	/* Fill in TLV header */
	tlv = (ipfw_obj_tlv *)data;
	tlv->type = IPFW_TLV_RULE_ENT;
	tlv->length = len;

	if (rcntrs != 0) {
		/* Copy counters */
		cntr = (struct ip_fw_bcounter *)(tlv + 1);
		urule = (struct ip_fw_rule *)(cntr + 1);
		export_cntr1_base(krule, cntr);
	} else
		urule = (struct ip_fw_rule *)(tlv + 1);

	/* copy header */
	urule->act_ofs = krule->act_ofs;
	urule->cmd_len = krule->cmd_len;
	urule->rulenum = krule->rulenum;
	urule->set = krule->set;
	urule->flags = krule->flags;
	urule->id = krule->id;

	/* Copy opcodes */
	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
}

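/*
 * Sketch of how a consumer might walk one record of the layout that
 * export_rule1() produces (illustrative only, not a kernel interface;
 * "ptr" and "have_counters" are hypothetical names):
 *
 *	ipfw_obj_tlv *tlv = (ipfw_obj_tlv *)ptr;
 *	struct ip_fw_bcounter *cntr;
 *	struct ip_fw_rule *r;
 *
 *	if (have_counters) {
 *		cntr = (struct ip_fw_bcounter *)(tlv + 1);
 *		r = (struct ip_fw_rule *)((caddr_t)cntr + cntr->size);
 *	} else
 *		r = (struct ip_fw_rule *)(tlv + 1);
 *	ptr += tlv->length;	(advance to the next IPFW_TLV_RULE_ENT)
 */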
/*
 * Copies rule @urule from FreeBSD8 userland format (v0)
 * to kernel @krule.
 * Assumes @krule is zeroed.
 */
static void
import_rule0(struct rule_check_info *ci)
{
	struct ip_fw_rule0 *urule;
	struct ip_fw *krule;
	int cmdlen, l;
	ipfw_insn *cmd;
	ipfw_insn_limit *lcmd;
	ipfw_insn_if *cmdif;

	urule = (struct ip_fw_rule0 *)ci->urule;
	krule = (struct ip_fw *)ci->krule;

	/* copy header */
	krule->act_ofs = urule->act_ofs;
	krule->cmd_len = urule->cmd_len;
	krule->rulenum = urule->rulenum;
	krule->set = urule->set;
	if ((urule->_pad & 1) != 0)
		krule->flags |= IPFW_RULE_NOOPT;

	/* Save rulenum offset */
	ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);

	/* Copy opcodes */
	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));

	/*
	 * Alter opcodes:
	 * 1) convert tablearg value from 65535 to 0
	 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room for targ).
	 * 3) convert table number in iface opcodes to u16
	 */
	l = krule->cmd_len;
	cmd = krule->cmd;
	cmdlen = 0;

	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		switch (cmd->opcode) {
		/* Opcodes supporting tablearg */
		case O_TAG:
		case O_TAGGED:
		case O_PIPE:
		case O_QUEUE:
		case O_DIVERT:
		case O_TEE:
		case O_SKIPTO:
		case O_CALLRETURN:
		case O_NETGRAPH:
		case O_NGTEE:
		case O_NAT:
			if (cmd->arg1 == 65535)
				cmd->arg1 = IP_FW_TARG;
			break;
		case O_SETFIB:
		case O_SETDSCP:
			if (cmd->arg1 == 65535)
				cmd->arg1 = IP_FW_TARG;
			else
				cmd->arg1 |= 0x8000;
			break;
		case O_LIMIT:
			lcmd = (ipfw_insn_limit *)cmd;
			if (lcmd->conn_limit == 65535)
				lcmd->conn_limit = IP_FW_TARG;
			break;
		/* Interface tables */
		case O_XMIT:
		case O_RECV:
		case O_VIA:
			/* Interface table, possibly */
			cmdif = (ipfw_insn_if *)cmd;
			if (cmdif->name[0] != '\1')
				break;

			cmdif->p.kidx = (uint16_t)cmdif->p.glob;
			break;
		}
	}
}

/*
 * Copies rule @krule from kernel to FreeBSD8 userland format (v0).
 */
static void
export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
{
	int cmdlen, l;
	ipfw_insn *cmd;
	ipfw_insn_limit *lcmd;
	ipfw_insn_if *cmdif;

	/* copy header */
	memset(urule, 0, len);
	urule->act_ofs = krule->act_ofs;
	urule->cmd_len = krule->cmd_len;
	urule->rulenum = krule->rulenum;
	urule->set = krule->set;
	if ((krule->flags & IPFW_RULE_NOOPT) != 0)
		urule->_pad |= 1;

	/* Copy opcodes */
	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));

	/* Export counters */
	export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);

	/*
	 * Alter opcodes:
	 * 1) convert tablearg value from 0 to 65535
	 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
611 * 3) convert table number in iface opcodes to int 612 */ 613 l = urule->cmd_len; 614 cmd = urule->cmd; 615 cmdlen = 0; 616 617 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 618 cmdlen = F_LEN(cmd); 619 620 switch (cmd->opcode) { 621 /* Opcodes supporting tablearg */ 622 case O_TAG: 623 case O_TAGGED: 624 case O_PIPE: 625 case O_QUEUE: 626 case O_DIVERT: 627 case O_TEE: 628 case O_SKIPTO: 629 case O_CALLRETURN: 630 case O_NETGRAPH: 631 case O_NGTEE: 632 case O_NAT: 633 if (cmd->arg1 == IP_FW_TARG) 634 cmd->arg1 = 65535; 635 break; 636 case O_SETFIB: 637 case O_SETDSCP: 638 if (cmd->arg1 == IP_FW_TARG) 639 cmd->arg1 = 65535; 640 else 641 cmd->arg1 &= ~0x8000; 642 break; 643 case O_LIMIT: 644 lcmd = (ipfw_insn_limit *)cmd; 645 if (lcmd->conn_limit == IP_FW_TARG) 646 lcmd->conn_limit = 65535; 647 break; 648 /* Interface tables */ 649 case O_XMIT: 650 case O_RECV: 651 case O_VIA: 652 /* Interface table, possibly */ 653 cmdif = (ipfw_insn_if *)cmd; 654 if (cmdif->name[0] != '\1') 655 break; 656 657 cmdif->p.glob = cmdif->p.kidx; 658 break; 659 } 660 } 661 } 662 663 /* 664 * Add new rule(s) to the list possibly creating rule number for each. 665 * Update the rule_number in the input struct so the caller knows it as well. 666 * Must be called without IPFW_UH held 667 */ 668 static int 669 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count) 670 { 671 int error, i, insert_before, tcount; 672 uint16_t rulenum, *pnum; 673 struct rule_check_info *ci; 674 struct ip_fw *krule; 675 struct ip_fw **map; /* the new array of pointers */ 676 677 /* Check if we need to do table/obj index remap */ 678 tcount = 0; 679 for (ci = rci, i = 0; i < count; ci++, i++) { 680 if (ci->object_opcodes == 0) 681 continue; 682 683 /* 684 * Rule has some object opcodes. 685 * We need to find (and create non-existing) 686 * kernel objects, and reference existing ones. 687 */ 688 error = rewrite_rule_uidx(chain, ci); 689 if (error != 0) { 690 691 /* 692 * rewrite failed, state for current rule 693 * has been reverted. Check if we need to 694 * revert more. 695 */ 696 if (tcount > 0) { 697 698 /* 699 * We have some more table rules 700 * we need to rollback. 701 */ 702 703 IPFW_UH_WLOCK(chain); 704 while (ci != rci) { 705 ci--; 706 if (ci->object_opcodes == 0) 707 continue; 708 unref_rule_objects(chain,ci->krule); 709 710 } 711 IPFW_UH_WUNLOCK(chain); 712 713 } 714 715 return (error); 716 } 717 718 tcount++; 719 } 720 721 /* get_map returns with IPFW_UH_WLOCK if successful */ 722 map = get_map(chain, count, 0 /* not locked */); 723 if (map == NULL) { 724 if (tcount > 0) { 725 /* Unbind tables */ 726 IPFW_UH_WLOCK(chain); 727 for (ci = rci, i = 0; i < count; ci++, i++) { 728 if (ci->object_opcodes == 0) 729 continue; 730 731 unref_rule_objects(chain, ci->krule); 732 } 733 IPFW_UH_WUNLOCK(chain); 734 } 735 736 return (ENOSPC); 737 } 738 739 if (V_autoinc_step < 1) 740 V_autoinc_step = 1; 741 else if (V_autoinc_step > 1000) 742 V_autoinc_step = 1000; 743 744 /* FIXME: Handle count > 1 */ 745 ci = rci; 746 krule = ci->krule; 747 rulenum = krule->rulenum; 748 749 /* find the insertion point, we will insert before */ 750 insert_before = rulenum ? 
rulenum + 1 : IPFW_DEFAULT_RULE; 751 i = ipfw_find_rule(chain, insert_before, 0); 752 /* duplicate first part */ 753 if (i > 0) 754 bcopy(chain->map, map, i * sizeof(struct ip_fw *)); 755 map[i] = krule; 756 /* duplicate remaining part, we always have the default rule */ 757 bcopy(chain->map + i, map + i + 1, 758 sizeof(struct ip_fw *) *(chain->n_rules - i)); 759 if (rulenum == 0) { 760 /* Compute rule number and write it back */ 761 rulenum = i > 0 ? map[i-1]->rulenum : 0; 762 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step) 763 rulenum += V_autoinc_step; 764 krule->rulenum = rulenum; 765 /* Save number to userland rule */ 766 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff); 767 *pnum = rulenum; 768 } 769 770 krule->id = chain->id + 1; 771 update_skipto_cache(chain, map); 772 map = swap_map(chain, map, chain->n_rules + 1); 773 chain->static_len += RULEUSIZE0(krule); 774 IPFW_UH_WUNLOCK(chain); 775 if (map) 776 free(map, M_IPFW); 777 return (0); 778 } 779 780 /* 781 * Adds @rule to the list of rules to reap 782 */ 783 void 784 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head, 785 struct ip_fw *rule) 786 { 787 788 IPFW_UH_WLOCK_ASSERT(chain); 789 790 /* Unlink rule from everywhere */ 791 unref_rule_objects(chain, rule); 792 793 *((struct ip_fw **)rule) = *head; 794 *head = rule; 795 } 796 797 /* 798 * Reclaim storage associated with a list of rules. This is 799 * typically the list created using remove_rule. 800 * A NULL pointer on input is handled correctly. 801 */ 802 void 803 ipfw_reap_rules(struct ip_fw *head) 804 { 805 struct ip_fw *rule; 806 807 while ((rule = head) != NULL) { 808 head = *((struct ip_fw **)head); 809 free_rule(rule); 810 } 811 } 812 813 /* 814 * Rules to keep are 815 * (default || reserved || !match_set || !match_number) 816 * where 817 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE) 818 * // the default rule is always protected 819 * 820 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET) 821 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush") 822 * 823 * match_set ::= (cmd == 0 || rule->set == set) 824 * // set number is ignored for cmd == 0 825 * 826 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum) 827 * // number is ignored for cmd == 1 or n == 0 828 * 829 */ 830 int 831 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt) 832 { 833 834 /* Don't match default rule for modification queries */ 835 if (rule->rulenum == IPFW_DEFAULT_RULE && 836 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0) 837 return (0); 838 839 /* Don't match rules in reserved set for flush requests */ 840 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET) 841 return (0); 842 843 /* If we're filtering by set, don't match other sets */ 844 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set) 845 return (0); 846 847 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 && 848 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule)) 849 return (0); 850 851 return (1); 852 } 853 854 /* 855 * Delete rules matching range @rt. 856 * Saves number of deleted rules in @ndel. 857 * 858 * Returns 0 on success. 859 */ 860 static int 861 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel) 862 { 863 struct ip_fw *reap, *rule, **map; 864 int end, start; 865 int i, n, ndyn, ofs; 866 867 reap = NULL; 868 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 869 870 /* 871 * Stage 1: Determine range to inspect. 872 * Range is half-inclusive, e.g [start, end). 
 */
	start = 0;
	end = chain->n_rules - 1;

	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
		start = ipfw_find_rule(chain, rt->start_rule, 0);

		end = ipfw_find_rule(chain, rt->end_rule, 0);
		if (rt->end_rule != IPFW_DEFAULT_RULE)
			while (chain->map[end]->rulenum == rt->end_rule)
				end++;
	}

	/* Allocate new map of the same size */
	map = get_map(chain, 0, 1 /* locked */);
	if (map == NULL) {
		IPFW_UH_WUNLOCK(chain);
		return (ENOMEM);
	}

	n = 0;
	ndyn = 0;
	ofs = start;
	/* 1. bcopy the initial part of the map */
	if (start > 0)
		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
	/* 2. copy active rules between start and end */
	for (i = start; i < end; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0) {
			map[ofs++] = rule;
			continue;
		}

		n++;
		if (ipfw_is_dyn_rule(rule) != 0)
			ndyn++;
	}
	/* 3. copy the final part of the map */
	bcopy(chain->map + end, map + ofs,
	    (chain->n_rules - end) * sizeof(struct ip_fw *));
	/* 4. recalculate skipto cache */
	update_skipto_cache(chain, map);
	/* 5. swap the maps (under UH_WLOCK + WLOCK) */
	map = swap_map(chain, map, chain->n_rules - n);
	/* 6. Remove all dynamic states created by deleted rules */
	if (ndyn > 0)
		ipfw_expire_dyn_rules(chain, rt);
	/* 7. now remove the rules deleted from the old map */
	for (i = start; i < end; i++) {
		rule = map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		chain->static_len -= RULEUSIZE0(rule);
		ipfw_reap_add(chain, &reap, rule);
	}
	IPFW_UH_WUNLOCK(chain);

	ipfw_reap_rules(reap);
	if (map != NULL)
		free(map, M_IPFW);
	*ndel = n;
	return (0);
}

/*
 * Changes the set of rules matching range @rt to @rt->new_set.
 *
 * Returns 0 on success.
 */
static int
move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK(chain);

	/*
	 * Move rules with matching parameters to a new set.
	 * This one is much more complex. We have to ensure
	 * that all referenced tables (if any) are referenced
	 * by the given rule subset only. Otherwise, we can't move
	 * them to the new set and have to return an error.
	 */
	if (V_fw_tables_sets != 0) {
		if (ipfw_move_tables_sets(chain, rt, rt->new_set) != 0) {
			IPFW_UH_WUNLOCK(chain);
			return (EBUSY);
		}
	}

	/* XXX: We have to do the swap holding WLOCK */
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		rule->set = rt->new_set;
	}

	IPFW_UH_WUNLOCK(chain);

	return (0);
}

/*
 * Clear counters for a specific rule.
 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
 * so we only care that rules do not disappear.
 */
static void
clear_counters(struct ip_fw *rule, int log_only)
{
	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);

	if (log_only == 0)
		IPFW_ZERO_RULE_COUNTER(rule);
	if (l->o.opcode == O_LOG)
		l->log_left = l->max_log;
}

/*
 * Flushes rule counters and/or log values on matching range.
 *
 * Returns number of items cleared.
999 */ 1000 static int 1001 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only) 1002 { 1003 struct ip_fw *rule; 1004 int num; 1005 int i; 1006 1007 num = 0; 1008 rt->flags |= IPFW_RCFLAG_DEFAULT; 1009 1010 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 1011 for (i = 0; i < chain->n_rules; i++) { 1012 rule = chain->map[i]; 1013 if (ipfw_match_range(rule, rt) == 0) 1014 continue; 1015 clear_counters(rule, log_only); 1016 num++; 1017 } 1018 IPFW_UH_WUNLOCK(chain); 1019 1020 return (num); 1021 } 1022 1023 static int 1024 check_range_tlv(ipfw_range_tlv *rt) 1025 { 1026 1027 if (rt->head.length != sizeof(*rt)) 1028 return (1); 1029 if (rt->start_rule > rt->end_rule) 1030 return (1); 1031 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS) 1032 return (1); 1033 1034 if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags) 1035 return (1); 1036 1037 return (0); 1038 } 1039 1040 /* 1041 * Delete rules matching specified parameters 1042 * Data layout (v0)(current): 1043 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1044 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1045 * 1046 * Saves number of deleted rules in ipfw_range_tlv->new_set. 1047 * 1048 * Returns 0 on success. 1049 */ 1050 static int 1051 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1052 struct sockopt_data *sd) 1053 { 1054 ipfw_range_header *rh; 1055 int error, ndel; 1056 1057 if (sd->valsize != sizeof(*rh)) 1058 return (EINVAL); 1059 1060 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1061 1062 if (check_range_tlv(&rh->range) != 0) 1063 return (EINVAL); 1064 1065 ndel = 0; 1066 if ((error = delete_range(chain, &rh->range, &ndel)) != 0) 1067 return (error); 1068 1069 /* Save number of rules deleted */ 1070 rh->range.new_set = ndel; 1071 return (0); 1072 } 1073 1074 /* 1075 * Move rules/sets matching specified parameters 1076 * Data layout (v0)(current): 1077 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1078 * 1079 * Returns 0 on success. 1080 */ 1081 static int 1082 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1083 struct sockopt_data *sd) 1084 { 1085 ipfw_range_header *rh; 1086 1087 if (sd->valsize != sizeof(*rh)) 1088 return (EINVAL); 1089 1090 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1091 1092 if (check_range_tlv(&rh->range) != 0) 1093 return (EINVAL); 1094 1095 return (move_range(chain, &rh->range)); 1096 } 1097 1098 /* 1099 * Clear rule accounting data matching specified parameters 1100 * Data layout (v0)(current): 1101 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1102 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1103 * 1104 * Saves number of cleared rules in ipfw_range_tlv->new_set. 1105 * 1106 * Returns 0 on success. 1107 */ 1108 static int 1109 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1110 struct sockopt_data *sd) 1111 { 1112 ipfw_range_header *rh; 1113 int log_only, num; 1114 char *msg; 1115 1116 if (sd->valsize != sizeof(*rh)) 1117 return (EINVAL); 1118 1119 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1120 1121 if (check_range_tlv(&rh->range) != 0) 1122 return (EINVAL); 1123 1124 log_only = (op3->opcode == IP_FW_XRESETLOG); 1125 1126 num = clear_range(chain, &rh->range, log_only); 1127 1128 if (rh->range.flags & IPFW_RCFLAG_ALL) 1129 msg = log_only ? "All logging counts reset" : 1130 "Accounting cleared"; 1131 else 1132 msg = log_only ? 
"logging count reset" : "cleared"; 1133 1134 if (V_fw_verbose) { 1135 int lev = LOG_SECURITY | LOG_NOTICE; 1136 log(lev, "ipfw: %s.\n", msg); 1137 } 1138 1139 /* Save number of rules cleared */ 1140 rh->range.new_set = num; 1141 return (0); 1142 } 1143 1144 static void 1145 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt) 1146 { 1147 uint32_t v_set; 1148 1149 IPFW_UH_WLOCK_ASSERT(chain); 1150 1151 /* Change enabled/disabled sets mask */ 1152 v_set = (V_set_disable | rt->set) & ~rt->new_set; 1153 v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */ 1154 IPFW_WLOCK(chain); 1155 V_set_disable = v_set; 1156 IPFW_WUNLOCK(chain); 1157 } 1158 1159 static void 1160 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv) 1161 { 1162 struct ip_fw *rule; 1163 int i; 1164 1165 IPFW_UH_WLOCK_ASSERT(chain); 1166 1167 /* Swap or move two sets */ 1168 for (i = 0; i < chain->n_rules - 1; i++) { 1169 rule = chain->map[i]; 1170 if (rule->set == rt->set) 1171 rule->set = rt->new_set; 1172 else if (rule->set == rt->new_set && mv == 0) 1173 rule->set = rt->set; 1174 } 1175 if (V_fw_tables_sets != 0) 1176 ipfw_swap_tables_sets(chain, rt->set, rt->new_set, mv); 1177 } 1178 1179 /* 1180 * Swaps or moves set 1181 * Data layout (v0)(current): 1182 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1183 * 1184 * Returns 0 on success. 1185 */ 1186 static int 1187 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1188 struct sockopt_data *sd) 1189 { 1190 ipfw_range_header *rh; 1191 1192 if (sd->valsize != sizeof(*rh)) 1193 return (EINVAL); 1194 1195 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1196 1197 if (rh->range.head.length != sizeof(ipfw_range_tlv)) 1198 return (1); 1199 1200 IPFW_UH_WLOCK(chain); 1201 switch (op3->opcode) { 1202 case IP_FW_SET_SWAP: 1203 case IP_FW_SET_MOVE: 1204 swap_sets(chain, &rh->range, op3->opcode == IP_FW_SET_MOVE); 1205 break; 1206 case IP_FW_SET_ENABLE: 1207 enable_sets(chain, &rh->range); 1208 break; 1209 } 1210 IPFW_UH_WUNLOCK(chain); 1211 1212 return (0); 1213 } 1214 1215 /** 1216 * Remove all rules with given number, or do set manipulation. 1217 * Assumes chain != NULL && *chain != NULL. 1218 * 1219 * The argument is an uint32_t. 
The low 16 bit are the rule or set number; 1220 * the next 8 bits are the new set; the top 8 bits indicate the command: 1221 * 1222 * 0 delete rules numbered "rulenum" 1223 * 1 delete rules in set "rulenum" 1224 * 2 move rules "rulenum" to set "new_set" 1225 * 3 move rules from set "rulenum" to set "new_set" 1226 * 4 swap sets "rulenum" and "new_set" 1227 * 5 delete rules "rulenum" and set "new_set" 1228 */ 1229 static int 1230 del_entry(struct ip_fw_chain *chain, uint32_t arg) 1231 { 1232 uint32_t num; /* rule number or old_set */ 1233 uint8_t cmd, new_set; 1234 int do_del, ndel; 1235 int error = 0; 1236 ipfw_range_tlv rt; 1237 1238 num = arg & 0xffff; 1239 cmd = (arg >> 24) & 0xff; 1240 new_set = (arg >> 16) & 0xff; 1241 1242 if (cmd > 5 || new_set > RESVD_SET) 1243 return EINVAL; 1244 if (cmd == 0 || cmd == 2 || cmd == 5) { 1245 if (num >= IPFW_DEFAULT_RULE) 1246 return EINVAL; 1247 } else { 1248 if (num > RESVD_SET) /* old_set */ 1249 return EINVAL; 1250 } 1251 1252 /* Convert old requests into new representation */ 1253 memset(&rt, 0, sizeof(rt)); 1254 rt.start_rule = num; 1255 rt.end_rule = num; 1256 rt.set = num; 1257 rt.new_set = new_set; 1258 do_del = 0; 1259 1260 switch (cmd) { 1261 case 0: /* delete rules numbered "rulenum" */ 1262 if (num == 0) 1263 rt.flags |= IPFW_RCFLAG_ALL; 1264 else 1265 rt.flags |= IPFW_RCFLAG_RANGE; 1266 do_del = 1; 1267 break; 1268 case 1: /* delete rules in set "rulenum" */ 1269 rt.flags |= IPFW_RCFLAG_SET; 1270 do_del = 1; 1271 break; 1272 case 5: /* delete rules "rulenum" and set "new_set" */ 1273 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET; 1274 rt.set = new_set; 1275 rt.new_set = 0; 1276 do_del = 1; 1277 break; 1278 case 2: /* move rules "rulenum" to set "new_set" */ 1279 rt.flags |= IPFW_RCFLAG_RANGE; 1280 break; 1281 case 3: /* move rules from set "rulenum" to set "new_set" */ 1282 IPFW_UH_WLOCK(chain); 1283 swap_sets(chain, &rt, 1); 1284 IPFW_UH_WUNLOCK(chain); 1285 return (0); 1286 case 4: /* swap sets "rulenum" and "new_set" */ 1287 IPFW_UH_WLOCK(chain); 1288 swap_sets(chain, &rt, 0); 1289 IPFW_UH_WUNLOCK(chain); 1290 return (0); 1291 default: 1292 return (ENOTSUP); 1293 } 1294 1295 if (do_del != 0) { 1296 if ((error = delete_range(chain, &rt, &ndel)) != 0) 1297 return (error); 1298 1299 if (ndel == 0 && (cmd != 1 && num != 0)) 1300 return (EINVAL); 1301 1302 return (0); 1303 } 1304 1305 return (move_range(chain, &rt)); 1306 } 1307 1308 /** 1309 * Reset some or all counters on firewall rules. 1310 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number, 1311 * the next 8 bits are the set number, the top 8 bits are the command: 1312 * 0 work with rules from all set's; 1313 * 1 work with rules only from specified set. 1314 * Specified rule number is zero if we want to clear all entries. 1315 * log_only is 1 if we only want to reset logs, zero otherwise. 1316 */ 1317 static int 1318 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only) 1319 { 1320 struct ip_fw *rule; 1321 char *msg; 1322 int i; 1323 1324 uint16_t rulenum = arg & 0xffff; 1325 uint8_t set = (arg >> 16) & 0xff; 1326 uint8_t cmd = (arg >> 24) & 0xff; 1327 1328 if (cmd > 1) 1329 return (EINVAL); 1330 if (cmd == 1 && set > RESVD_SET) 1331 return (EINVAL); 1332 1333 IPFW_UH_RLOCK(chain); 1334 if (rulenum == 0) { 1335 V_norule_counter = 0; 1336 for (i = 0; i < chain->n_rules; i++) { 1337 rule = chain->map[i]; 1338 /* Skip rules not in our set. 
*/ 1339 if (cmd == 1 && rule->set != set) 1340 continue; 1341 clear_counters(rule, log_only); 1342 } 1343 msg = log_only ? "All logging counts reset" : 1344 "Accounting cleared"; 1345 } else { 1346 int cleared = 0; 1347 for (i = 0; i < chain->n_rules; i++) { 1348 rule = chain->map[i]; 1349 if (rule->rulenum == rulenum) { 1350 if (cmd == 0 || rule->set == set) 1351 clear_counters(rule, log_only); 1352 cleared = 1; 1353 } 1354 if (rule->rulenum > rulenum) 1355 break; 1356 } 1357 if (!cleared) { /* we did not find any matching rules */ 1358 IPFW_UH_RUNLOCK(chain); 1359 return (EINVAL); 1360 } 1361 msg = log_only ? "logging count reset" : "cleared"; 1362 } 1363 IPFW_UH_RUNLOCK(chain); 1364 1365 if (V_fw_verbose) { 1366 int lev = LOG_SECURITY | LOG_NOTICE; 1367 1368 if (rulenum) 1369 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg); 1370 else 1371 log(lev, "ipfw: %s.\n", msg); 1372 } 1373 return (0); 1374 } 1375 1376 1377 /* 1378 * Check rule head in FreeBSD11 format 1379 * 1380 */ 1381 static int 1382 check_ipfw_rule1(struct ip_fw_rule *rule, int size, 1383 struct rule_check_info *ci) 1384 { 1385 int l; 1386 1387 if (size < sizeof(*rule)) { 1388 printf("ipfw: rule too short\n"); 1389 return (EINVAL); 1390 } 1391 1392 /* Check for valid cmd_len */ 1393 l = roundup2(RULESIZE(rule), sizeof(uint64_t)); 1394 if (l != size) { 1395 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1396 return (EINVAL); 1397 } 1398 if (rule->act_ofs >= rule->cmd_len) { 1399 printf("ipfw: bogus action offset (%u > %u)\n", 1400 rule->act_ofs, rule->cmd_len - 1); 1401 return (EINVAL); 1402 } 1403 1404 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1405 return (EINVAL); 1406 1407 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1408 } 1409 1410 /* 1411 * Check rule head in FreeBSD8 format 1412 * 1413 */ 1414 static int 1415 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 1416 struct rule_check_info *ci) 1417 { 1418 int l; 1419 1420 if (size < sizeof(*rule)) { 1421 printf("ipfw: rule too short\n"); 1422 return (EINVAL); 1423 } 1424 1425 /* Check for valid cmd_len */ 1426 l = sizeof(*rule) + rule->cmd_len * 4 - 4; 1427 if (l != size) { 1428 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1429 return (EINVAL); 1430 } 1431 if (rule->act_ofs >= rule->cmd_len) { 1432 printf("ipfw: bogus action offset (%u > %u)\n", 1433 rule->act_ofs, rule->cmd_len - 1); 1434 return (EINVAL); 1435 } 1436 1437 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1438 return (EINVAL); 1439 1440 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1441 } 1442 1443 static int 1444 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci) 1445 { 1446 int cmdlen, l; 1447 int have_action; 1448 1449 have_action = 0; 1450 1451 /* 1452 * Now go for the individual checks. Very simple ones, basically only 1453 * instruction sizes. 
1454 */ 1455 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) { 1456 cmdlen = F_LEN(cmd); 1457 if (cmdlen > l) { 1458 printf("ipfw: opcode %d size truncated\n", 1459 cmd->opcode); 1460 return EINVAL; 1461 } 1462 switch (cmd->opcode) { 1463 case O_PROBE_STATE: 1464 case O_KEEP_STATE: 1465 case O_PROTO: 1466 case O_IP_SRC_ME: 1467 case O_IP_DST_ME: 1468 case O_LAYER2: 1469 case O_IN: 1470 case O_FRAG: 1471 case O_DIVERTED: 1472 case O_IPOPT: 1473 case O_IPTOS: 1474 case O_IPPRECEDENCE: 1475 case O_IPVER: 1476 case O_SOCKARG: 1477 case O_TCPFLAGS: 1478 case O_TCPOPTS: 1479 case O_ESTAB: 1480 case O_VERREVPATH: 1481 case O_VERSRCREACH: 1482 case O_ANTISPOOF: 1483 case O_IPSEC: 1484 #ifdef INET6 1485 case O_IP6_SRC_ME: 1486 case O_IP6_DST_ME: 1487 case O_EXT_HDR: 1488 case O_IP6: 1489 #endif 1490 case O_IP4: 1491 case O_TAG: 1492 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1493 goto bad_size; 1494 break; 1495 1496 case O_EXTERNAL_ACTION: 1497 if (cmd->arg1 == 0 || 1498 cmdlen != F_INSN_SIZE(ipfw_insn)) { 1499 printf("ipfw: invalid external " 1500 "action opcode\n"); 1501 return (EINVAL); 1502 } 1503 ci->object_opcodes++; 1504 /* Do we have O_EXTERNAL_INSTANCE opcode? */ 1505 if (l != cmdlen) { 1506 l -= cmdlen; 1507 cmd += cmdlen; 1508 cmdlen = F_LEN(cmd); 1509 if (cmd->opcode != O_EXTERNAL_INSTANCE) { 1510 printf("ipfw: invalid opcode " 1511 "next to external action %u\n", 1512 cmd->opcode); 1513 return (EINVAL); 1514 } 1515 if (cmd->arg1 == 0 || 1516 cmdlen != F_INSN_SIZE(ipfw_insn)) { 1517 printf("ipfw: invalid external " 1518 "action instance opcode\n"); 1519 return (EINVAL); 1520 } 1521 ci->object_opcodes++; 1522 } 1523 goto check_action; 1524 1525 case O_FIB: 1526 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1527 goto bad_size; 1528 if (cmd->arg1 >= rt_numfibs) { 1529 printf("ipfw: invalid fib number %d\n", 1530 cmd->arg1); 1531 return EINVAL; 1532 } 1533 break; 1534 1535 case O_SETFIB: 1536 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1537 goto bad_size; 1538 if ((cmd->arg1 != IP_FW_TARG) && 1539 ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) { 1540 printf("ipfw: invalid fib number %d\n", 1541 cmd->arg1 & 0x7FFF); 1542 return EINVAL; 1543 } 1544 goto check_action; 1545 1546 case O_UID: 1547 case O_GID: 1548 case O_JAIL: 1549 case O_IP_SRC: 1550 case O_IP_DST: 1551 case O_TCPSEQ: 1552 case O_TCPACK: 1553 case O_PROB: 1554 case O_ICMPTYPE: 1555 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1556 goto bad_size; 1557 break; 1558 1559 case O_LIMIT: 1560 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 1561 goto bad_size; 1562 break; 1563 1564 case O_LOG: 1565 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 1566 goto bad_size; 1567 1568 ((ipfw_insn_log *)cmd)->log_left = 1569 ((ipfw_insn_log *)cmd)->max_log; 1570 1571 break; 1572 1573 case O_IP_SRC_MASK: 1574 case O_IP_DST_MASK: 1575 /* only odd command lengths */ 1576 if ((cmdlen & 1) == 0) 1577 goto bad_size; 1578 break; 1579 1580 case O_IP_SRC_SET: 1581 case O_IP_DST_SET: 1582 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 1583 printf("ipfw: invalid set size %d\n", 1584 cmd->arg1); 1585 return EINVAL; 1586 } 1587 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1588 (cmd->arg1+31)/32 ) 1589 goto bad_size; 1590 break; 1591 1592 case O_IP_SRC_LOOKUP: 1593 case O_IP_DST_LOOKUP: 1594 if (cmd->arg1 >= V_fw_tables_max) { 1595 printf("ipfw: invalid table number %d\n", 1596 cmd->arg1); 1597 return (EINVAL); 1598 } 1599 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1600 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 && 1601 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1602 goto bad_size; 1603 ci->object_opcodes++; 1604 
break; 1605 case O_IP_FLOW_LOOKUP: 1606 if (cmd->arg1 >= V_fw_tables_max) { 1607 printf("ipfw: invalid table number %d\n", 1608 cmd->arg1); 1609 return (EINVAL); 1610 } 1611 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1612 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1613 goto bad_size; 1614 ci->object_opcodes++; 1615 break; 1616 case O_MACADDR2: 1617 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 1618 goto bad_size; 1619 break; 1620 1621 case O_NOP: 1622 case O_IPID: 1623 case O_IPTTL: 1624 case O_IPLEN: 1625 case O_TCPDATALEN: 1626 case O_TCPWIN: 1627 case O_TAGGED: 1628 if (cmdlen < 1 || cmdlen > 31) 1629 goto bad_size; 1630 break; 1631 1632 case O_DSCP: 1633 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1) 1634 goto bad_size; 1635 break; 1636 1637 case O_MAC_TYPE: 1638 case O_IP_SRCPORT: 1639 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 1640 if (cmdlen < 2 || cmdlen > 31) 1641 goto bad_size; 1642 break; 1643 1644 case O_RECV: 1645 case O_XMIT: 1646 case O_VIA: 1647 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 1648 goto bad_size; 1649 ci->object_opcodes++; 1650 break; 1651 1652 case O_ALTQ: 1653 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq)) 1654 goto bad_size; 1655 break; 1656 1657 case O_PIPE: 1658 case O_QUEUE: 1659 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1660 goto bad_size; 1661 goto check_action; 1662 1663 case O_FORWARD_IP: 1664 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) 1665 goto bad_size; 1666 goto check_action; 1667 #ifdef INET6 1668 case O_FORWARD_IP6: 1669 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6)) 1670 goto bad_size; 1671 goto check_action; 1672 #endif /* INET6 */ 1673 1674 case O_DIVERT: 1675 case O_TEE: 1676 if (ip_divert_ptr == NULL) 1677 return EINVAL; 1678 else 1679 goto check_size; 1680 case O_NETGRAPH: 1681 case O_NGTEE: 1682 if (ng_ipfw_input_p == NULL) 1683 return EINVAL; 1684 else 1685 goto check_size; 1686 case O_NAT: 1687 if (!IPFW_NAT_LOADED) 1688 return EINVAL; 1689 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat)) 1690 goto bad_size; 1691 goto check_action; 1692 case O_FORWARD_MAC: /* XXX not implemented yet */ 1693 case O_CHECK_STATE: 1694 case O_COUNT: 1695 case O_ACCEPT: 1696 case O_DENY: 1697 case O_REJECT: 1698 case O_SETDSCP: 1699 #ifdef INET6 1700 case O_UNREACH6: 1701 #endif 1702 case O_SKIPTO: 1703 case O_REASS: 1704 case O_CALLRETURN: 1705 check_size: 1706 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1707 goto bad_size; 1708 check_action: 1709 if (have_action) { 1710 printf("ipfw: opcode %d, multiple actions" 1711 " not allowed\n", 1712 cmd->opcode); 1713 return (EINVAL); 1714 } 1715 have_action = 1; 1716 if (l != cmdlen) { 1717 printf("ipfw: opcode %d, action must be" 1718 " last opcode\n", 1719 cmd->opcode); 1720 return (EINVAL); 1721 } 1722 break; 1723 #ifdef INET6 1724 case O_IP6_SRC: 1725 case O_IP6_DST: 1726 if (cmdlen != F_INSN_SIZE(struct in6_addr) + 1727 F_INSN_SIZE(ipfw_insn)) 1728 goto bad_size; 1729 break; 1730 1731 case O_FLOW6ID: 1732 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1733 ((ipfw_insn_u32 *)cmd)->o.arg1) 1734 goto bad_size; 1735 break; 1736 1737 case O_IP6_SRC_MASK: 1738 case O_IP6_DST_MASK: 1739 if ( !(cmdlen & 1) || cmdlen > 127) 1740 goto bad_size; 1741 break; 1742 case O_ICMP6TYPE: 1743 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) ) 1744 goto bad_size; 1745 break; 1746 #endif 1747 1748 default: 1749 switch (cmd->opcode) { 1750 #ifndef INET6 1751 case O_IP6_SRC_ME: 1752 case O_IP6_DST_ME: 1753 case O_EXT_HDR: 1754 case O_IP6: 1755 case O_UNREACH6: 1756 case O_IP6_SRC: 1757 case O_IP6_DST: 1758 case O_FLOW6ID: 1759 case O_IP6_SRC_MASK: 1760 case 
O_IP6_DST_MASK: 1761 case O_ICMP6TYPE: 1762 printf("ipfw: no IPv6 support in kernel\n"); 1763 return (EPROTONOSUPPORT); 1764 #endif 1765 default: 1766 printf("ipfw: opcode %d, unknown opcode\n", 1767 cmd->opcode); 1768 return (EINVAL); 1769 } 1770 } 1771 } 1772 if (have_action == 0) { 1773 printf("ipfw: missing action\n"); 1774 return (EINVAL); 1775 } 1776 return 0; 1777 1778 bad_size: 1779 printf("ipfw: opcode %d size %d wrong\n", 1780 cmd->opcode, cmdlen); 1781 return (EINVAL); 1782 } 1783 1784 1785 /* 1786 * Translation of requests for compatibility with FreeBSD 7.2/8. 1787 * a static variable tells us if we have an old client from userland, 1788 * and if necessary we translate requests and responses between the 1789 * two formats. 1790 */ 1791 static int is7 = 0; 1792 1793 struct ip_fw7 { 1794 struct ip_fw7 *next; /* linked list of rules */ 1795 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */ 1796 /* 'next_rule' is used to pass up 'set_disable' status */ 1797 1798 uint16_t act_ofs; /* offset of action in 32-bit units */ 1799 uint16_t cmd_len; /* # of 32-bit words in cmd */ 1800 uint16_t rulenum; /* rule number */ 1801 uint8_t set; /* rule set (0..31) */ 1802 // #define RESVD_SET 31 /* set for default and persistent rules */ 1803 uint8_t _pad; /* padding */ 1804 // uint32_t id; /* rule id, only in v.8 */ 1805 /* These fields are present in all rules. */ 1806 uint64_t pcnt; /* Packet counter */ 1807 uint64_t bcnt; /* Byte counter */ 1808 uint32_t timestamp; /* tv_sec of last match */ 1809 1810 ipfw_insn cmd[1]; /* storage for commands */ 1811 }; 1812 1813 static int convert_rule_to_7(struct ip_fw_rule0 *rule); 1814 static int convert_rule_to_8(struct ip_fw_rule0 *rule); 1815 1816 #ifndef RULESIZE7 1817 #define RULESIZE7(rule) (sizeof(struct ip_fw7) + \ 1818 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4) 1819 #endif 1820 1821 1822 /* 1823 * Copy the static and dynamic rules to the supplied buffer 1824 * and return the amount of space actually used. 1825 * Must be run under IPFW_UH_RLOCK 1826 */ 1827 static size_t 1828 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space) 1829 { 1830 char *bp = buf; 1831 char *ep = bp + space; 1832 struct ip_fw *rule; 1833 struct ip_fw_rule0 *dst; 1834 int error, i, l, warnflag; 1835 time_t boot_seconds; 1836 1837 warnflag = 0; 1838 1839 boot_seconds = boottime.tv_sec; 1840 for (i = 0; i < chain->n_rules; i++) { 1841 rule = chain->map[i]; 1842 1843 if (is7) { 1844 /* Convert rule to FreeBSd 7.2 format */ 1845 l = RULESIZE7(rule); 1846 if (bp + l + sizeof(uint32_t) <= ep) { 1847 bcopy(rule, bp, l + sizeof(uint32_t)); 1848 error = set_legacy_obj_kidx(chain, 1849 (struct ip_fw_rule0 *)bp); 1850 if (error != 0) 1851 return (0); 1852 error = convert_rule_to_7((struct ip_fw_rule0 *) bp); 1853 if (error) 1854 return 0; /*XXX correct? */ 1855 /* 1856 * XXX HACK. Store the disable mask in the "next" 1857 * pointer in a wild attempt to keep the ABI the same. 1858 * Why do we do this on EVERY rule? 
1859 */ 1860 bcopy(&V_set_disable, 1861 &(((struct ip_fw7 *)bp)->next_rule), 1862 sizeof(V_set_disable)); 1863 if (((struct ip_fw7 *)bp)->timestamp) 1864 ((struct ip_fw7 *)bp)->timestamp += boot_seconds; 1865 bp += l; 1866 } 1867 continue; /* go to next rule */ 1868 } 1869 1870 l = RULEUSIZE0(rule); 1871 if (bp + l > ep) { /* should not happen */ 1872 printf("overflow dumping static rules\n"); 1873 break; 1874 } 1875 dst = (struct ip_fw_rule0 *)bp; 1876 export_rule0(rule, dst, l); 1877 error = set_legacy_obj_kidx(chain, dst); 1878 1879 /* 1880 * XXX HACK. Store the disable mask in the "next" 1881 * pointer in a wild attempt to keep the ABI the same. 1882 * Why do we do this on EVERY rule? 1883 * 1884 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask 1885 * so we need to fail _after_ saving at least one mask. 1886 */ 1887 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable)); 1888 if (dst->timestamp) 1889 dst->timestamp += boot_seconds; 1890 bp += l; 1891 1892 if (error != 0) { 1893 if (error == 2) { 1894 /* Non-fatal table rewrite error. */ 1895 warnflag = 1; 1896 continue; 1897 } 1898 printf("Stop on rule %d. Fail to convert table\n", 1899 rule->rulenum); 1900 break; 1901 } 1902 } 1903 if (warnflag != 0) 1904 printf("ipfw: process %s is using legacy interfaces," 1905 " consider rebuilding\n", ""); 1906 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */ 1907 return (bp - (char *)buf); 1908 } 1909 1910 1911 struct dump_args { 1912 uint32_t b; /* start rule */ 1913 uint32_t e; /* end rule */ 1914 uint32_t rcount; /* number of rules */ 1915 uint32_t rsize; /* rules size */ 1916 uint32_t tcount; /* number of tables */ 1917 int rcounters; /* counters */ 1918 }; 1919 1920 void 1921 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv) 1922 { 1923 1924 ntlv->head.type = no->etlv; 1925 ntlv->head.length = sizeof(*ntlv); 1926 ntlv->idx = no->kidx; 1927 strlcpy(ntlv->name, no->name, sizeof(ntlv->name)); 1928 } 1929 1930 /* 1931 * Export named object info in instance @ni, identified by @kidx 1932 * to ipfw_obj_ntlv. TLV is allocated from @sd space. 1933 * 1934 * Returns 0 on success. 1935 */ 1936 static int 1937 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx, 1938 struct sockopt_data *sd) 1939 { 1940 struct named_object *no; 1941 ipfw_obj_ntlv *ntlv; 1942 1943 no = ipfw_objhash_lookup_kidx(ni, kidx); 1944 KASSERT(no != NULL, ("invalid object kernel index passed")); 1945 1946 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 1947 if (ntlv == NULL) 1948 return (ENOMEM); 1949 1950 ipfw_export_obj_ntlv(no, ntlv); 1951 return (0); 1952 } 1953 1954 /* 1955 * Dumps static rules with table TLVs in buffer @sd. 1956 * 1957 * Returns 0 on success. 
1958 */ 1959 static int 1960 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da, 1961 uint32_t *bmask, struct sockopt_data *sd) 1962 { 1963 int error; 1964 int i, l; 1965 uint32_t tcount; 1966 ipfw_obj_ctlv *ctlv; 1967 struct ip_fw *krule; 1968 struct namedobj_instance *ni; 1969 caddr_t dst; 1970 1971 /* Dump table names first (if any) */ 1972 if (da->tcount > 0) { 1973 /* Header first */ 1974 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 1975 if (ctlv == NULL) 1976 return (ENOMEM); 1977 ctlv->head.type = IPFW_TLV_TBLNAME_LIST; 1978 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) + 1979 sizeof(*ctlv); 1980 ctlv->count = da->tcount; 1981 ctlv->objsize = sizeof(ipfw_obj_ntlv); 1982 } 1983 1984 i = 0; 1985 tcount = da->tcount; 1986 ni = ipfw_get_table_objhash(chain); 1987 while (tcount > 0) { 1988 if ((bmask[i / 32] & (1 << (i % 32))) == 0) { 1989 i++; 1990 continue; 1991 } 1992 1993 /* Jump to shared named object bitmask */ 1994 if (i >= IPFW_TABLES_MAX) { 1995 ni = CHAIN_TO_SRV(chain); 1996 i -= IPFW_TABLES_MAX; 1997 bmask += IPFW_TABLES_MAX / 32; 1998 } 1999 2000 if ((error = export_objhash_ntlv(ni, i, sd)) != 0) 2001 return (error); 2002 2003 i++; 2004 tcount--; 2005 } 2006 2007 /* Dump rules */ 2008 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 2009 if (ctlv == NULL) 2010 return (ENOMEM); 2011 ctlv->head.type = IPFW_TLV_RULE_LIST; 2012 ctlv->head.length = da->rsize + sizeof(*ctlv); 2013 ctlv->count = da->rcount; 2014 2015 for (i = da->b; i < da->e; i++) { 2016 krule = chain->map[i]; 2017 2018 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv); 2019 if (da->rcounters != 0) 2020 l += sizeof(struct ip_fw_bcounter); 2021 dst = (caddr_t)ipfw_get_sopt_space(sd, l); 2022 if (dst == NULL) 2023 return (ENOMEM); 2024 2025 export_rule1(krule, dst, l, da->rcounters); 2026 } 2027 2028 return (0); 2029 } 2030 2031 /* 2032 * Marks every object index used in @rule with bit in @bmask. 2033 * Used to generate bitmask of referenced tables/objects for given ruleset 2034 * or its part. 2035 * 2036 * Returns number of newly-referenced objects. 2037 */ 2038 static int 2039 mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule, 2040 uint32_t *bmask) 2041 { 2042 struct opcode_obj_rewrite *rw; 2043 ipfw_insn *cmd; 2044 int bidx, cmdlen, l, count; 2045 uint16_t kidx; 2046 uint8_t subtype; 2047 2048 l = rule->cmd_len; 2049 cmd = rule->cmd; 2050 cmdlen = 0; 2051 count = 0; 2052 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2053 cmdlen = F_LEN(cmd); 2054 2055 rw = find_op_rw(cmd, &kidx, &subtype); 2056 if (rw == NULL) 2057 continue; 2058 2059 bidx = kidx / 32; 2060 /* 2061 * Maintain separate bitmasks for table and 2062 * non-table objects. 2063 */ 2064 if (rw->etlv != IPFW_TLV_TBL_NAME) 2065 bidx += IPFW_TABLES_MAX / 32; 2066 2067 if ((bmask[bidx] & (1 << (kidx % 32))) == 0) 2068 count++; 2069 2070 bmask[bidx] |= 1 << (kidx % 32); 2071 } 2072 2073 return (count); 2074 } 2075 2076 /* 2077 * Dumps requested objects data 2078 * Data layout (version 0)(current): 2079 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags 2080 * size = ipfw_cfg_lheader.size 2081 * Reply: [ ipfw_cfg_lheader 2082 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 2083 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) 2084 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ] 2085 * ] (optional) 2086 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional) 2087 * ] 2088 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize. 
2089 * The rest (size, count) are set to zero and needs to be ignored. 2090 * 2091 * Returns 0 on success. 2092 */ 2093 static int 2094 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2095 struct sockopt_data *sd) 2096 { 2097 ipfw_cfg_lheader *hdr; 2098 struct ip_fw *rule; 2099 size_t sz, rnum; 2100 uint32_t hdr_flags; 2101 int error, i; 2102 struct dump_args da; 2103 uint32_t *bmask; 2104 2105 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr)); 2106 if (hdr == NULL) 2107 return (EINVAL); 2108 2109 error = 0; 2110 bmask = NULL; 2111 /* Allocate needed state. Note we allocate 2xspace mask, for table&srv */ 2112 if (hdr->flags & IPFW_CFG_GET_STATIC) 2113 bmask = malloc(IPFW_TABLES_MAX / 4, M_TEMP, M_WAITOK | M_ZERO); 2114 2115 IPFW_UH_RLOCK(chain); 2116 2117 /* 2118 * STAGE 1: Determine size/count for objects in range. 2119 * Prepare used tables bitmask. 2120 */ 2121 sz = sizeof(ipfw_cfg_lheader); 2122 memset(&da, 0, sizeof(da)); 2123 2124 da.b = 0; 2125 da.e = chain->n_rules; 2126 2127 if (hdr->end_rule != 0) { 2128 /* Handle custom range */ 2129 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE) 2130 rnum = IPFW_DEFAULT_RULE; 2131 da.b = ipfw_find_rule(chain, rnum, 0); 2132 rnum = hdr->end_rule; 2133 rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE; 2134 da.e = ipfw_find_rule(chain, rnum, 0) + 1; 2135 } 2136 2137 if (hdr->flags & IPFW_CFG_GET_STATIC) { 2138 for (i = da.b; i < da.e; i++) { 2139 rule = chain->map[i]; 2140 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv); 2141 da.rcount++; 2142 /* Update bitmask of used objects for given range */ 2143 da.tcount += mark_object_kidx(chain, rule, bmask); 2144 } 2145 /* Add counters if requested */ 2146 if (hdr->flags & IPFW_CFG_GET_COUNTERS) { 2147 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount; 2148 da.rcounters = 1; 2149 } 2150 2151 if (da.tcount > 0) 2152 sz += da.tcount * sizeof(ipfw_obj_ntlv) + 2153 sizeof(ipfw_obj_ctlv); 2154 sz += da.rsize + sizeof(ipfw_obj_ctlv); 2155 } 2156 2157 if (hdr->flags & IPFW_CFG_GET_STATES) 2158 sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) + 2159 sizeof(ipfw_obj_ctlv); 2160 2161 2162 /* 2163 * Fill header anyway. 2164 * Note we have to save header fields to stable storage 2165 * buffer inside @sd can be flushed after dumping rules 2166 */ 2167 hdr->size = sz; 2168 hdr->set_mask = ~V_set_disable; 2169 hdr_flags = hdr->flags; 2170 hdr = NULL; 2171 2172 if (sd->valsize < sz) { 2173 error = ENOMEM; 2174 goto cleanup; 2175 } 2176 2177 /* STAGE2: Store actual data */ 2178 if (hdr_flags & IPFW_CFG_GET_STATIC) { 2179 error = dump_static_rules(chain, &da, bmask, sd); 2180 if (error != 0) 2181 goto cleanup; 2182 } 2183 2184 if (hdr_flags & IPFW_CFG_GET_STATES) 2185 error = ipfw_dump_states(chain, sd); 2186 2187 cleanup: 2188 IPFW_UH_RUNLOCK(chain); 2189 2190 if (bmask != NULL) 2191 free(bmask, M_TEMP); 2192 2193 return (error); 2194 } 2195 2196 int 2197 ipfw_check_object_name_generic(const char *name) 2198 { 2199 int nsize; 2200 2201 nsize = sizeof(((ipfw_obj_ntlv *)0)->name); 2202 if (strnlen(name, nsize) == nsize) 2203 return (EINVAL); 2204 if (name[0] == '\0') 2205 return (EINVAL); 2206 return (0); 2207 } 2208 2209 /* 2210 * Creates non-existent objects referenced by rule. 2211 * 2212 * Return 0 on success. 
2213 */ 2214 int 2215 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd, 2216 struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti) 2217 { 2218 struct opcode_obj_rewrite *rw; 2219 struct obj_idx *p; 2220 uint16_t kidx; 2221 int error; 2222 2223 /* 2224 * Compatibility stuff: do actual creation for non-existing, 2225 * but referenced objects. 2226 */ 2227 for (p = oib; p < pidx; p++) { 2228 if (p->kidx != 0) 2229 continue; 2230 2231 ti->uidx = p->uidx; 2232 ti->type = p->type; 2233 ti->atype = 0; 2234 2235 rw = find_op_rw(cmd + p->off, NULL, NULL); 2236 KASSERT(rw != NULL, ("Unable to find handler for op %d", 2237 (cmd + p->off)->opcode)); 2238 2239 if (rw->create_object == NULL) 2240 error = EOPNOTSUPP; 2241 else 2242 error = rw->create_object(ch, ti, &kidx); 2243 if (error == 0) { 2244 p->kidx = kidx; 2245 continue; 2246 } 2247 2248 /* 2249 * Error happened. We have to rollback everything. 2250 * Drop all already acquired references. 2251 */ 2252 IPFW_UH_WLOCK(ch); 2253 unref_oib_objects(ch, cmd, oib, pidx); 2254 IPFW_UH_WUNLOCK(ch); 2255 2256 return (error); 2257 } 2258 2259 return (0); 2260 } 2261 2262 /* 2263 * Compatibility function for old ipfw(8) binaries. 2264 * Rewrites table/nat kernel indices with userland ones. 2265 * Convert tables matching '/^\d+$/' to their atoi() value. 2266 * Use number 65535 for other tables. 2267 * 2268 * Returns 0 on success. 2269 */ 2270 static int 2271 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule) 2272 { 2273 struct opcode_obj_rewrite *rw; 2274 struct named_object *no; 2275 ipfw_insn *cmd; 2276 char *end; 2277 long val; 2278 int cmdlen, error, l; 2279 uint16_t kidx, uidx; 2280 uint8_t subtype; 2281 2282 error = 0; 2283 2284 l = rule->cmd_len; 2285 cmd = rule->cmd; 2286 cmdlen = 0; 2287 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2288 cmdlen = F_LEN(cmd); 2289 2290 /* Check if is index in given opcode */ 2291 rw = find_op_rw(cmd, &kidx, &subtype); 2292 if (rw == NULL) 2293 continue; 2294 2295 /* Try to find referenced kernel object */ 2296 no = rw->find_bykidx(ch, kidx); 2297 if (no == NULL) 2298 continue; 2299 2300 val = strtol(no->name, &end, 10); 2301 if (*end == '\0' && val < 65535) { 2302 uidx = val; 2303 } else { 2304 2305 /* 2306 * We are called via legacy opcode. 2307 * Save error and show table as fake number 2308 * not to make ipfw(8) hang. 2309 */ 2310 uidx = 65535; 2311 error = 2; 2312 } 2313 2314 rw->update(cmd, uidx); 2315 } 2316 2317 return (error); 2318 } 2319 2320 2321 /* 2322 * Unreferences all already-referenced objects in given @cmd rule, 2323 * using information in @oib. 2324 * 2325 * Used to rollback partially converted rule on error. 2326 */ 2327 static void 2328 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib, 2329 struct obj_idx *end) 2330 { 2331 struct opcode_obj_rewrite *rw; 2332 struct named_object *no; 2333 struct obj_idx *p; 2334 2335 IPFW_UH_WLOCK_ASSERT(ch); 2336 2337 for (p = oib; p < end; p++) { 2338 if (p->kidx == 0) 2339 continue; 2340 2341 rw = find_op_rw(cmd + p->off, NULL, NULL); 2342 KASSERT(rw != NULL, ("Unable to find handler for op %d", 2343 (cmd + p->off)->opcode)); 2344 2345 /* Find & unref by existing idx */ 2346 no = rw->find_bykidx(ch, p->kidx); 2347 KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx)); 2348 no->refcnt--; 2349 } 2350 } 2351 2352 /* 2353 * Remove references from every object used in @rule. 2354 * Used at rule removal code. 
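 * A sketch of the legacy numeric-name mapping used by set_legacy_obj_kidx()
 * above follows this comment.
 */

/*
 * Illustrative, self-contained sketch of the convention implemented above:
 * tables whose names are purely decimal and below 65535 are reported to old
 * binaries under that number, everything else under the sentinel 65535.
 * The ex_ name is invented; the extra non-negative check only matters for
 * this standalone version.
 */
#include <stdint.h>
#include <stdlib.h>

static uint16_t
ex_legacy_table_number(const char *name)
{
	char *end;
	long val;

	val = strtol(name, &end, 10);
	if (*end == '\0' && val >= 0 && val < 65535)
		return ((uint16_t)val);
	return (65535);		/* shown as a fake number, as above */
}

/*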
2355 */ 2356 static void 2357 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule) 2358 { 2359 struct opcode_obj_rewrite *rw; 2360 struct named_object *no; 2361 ipfw_insn *cmd; 2362 int cmdlen, l; 2363 uint16_t kidx; 2364 uint8_t subtype; 2365 2366 IPFW_UH_WLOCK_ASSERT(ch); 2367 2368 l = rule->cmd_len; 2369 cmd = rule->cmd; 2370 cmdlen = 0; 2371 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2372 cmdlen = F_LEN(cmd); 2373 2374 rw = find_op_rw(cmd, &kidx, &subtype); 2375 if (rw == NULL) 2376 continue; 2377 no = rw->find_bykidx(ch, kidx); 2378 2379 KASSERT(no != NULL, ("table id %d not found", kidx)); 2380 KASSERT(no->subtype == subtype, 2381 ("wrong type %d (%d) for table id %d", 2382 no->subtype, subtype, kidx)); 2383 KASSERT(no->refcnt > 0, ("refcount for table %d is %d", 2384 kidx, no->refcnt)); 2385 2386 if (no->refcnt == 1 && rw->destroy_object != NULL) 2387 rw->destroy_object(ch, no); 2388 else 2389 no->refcnt--; 2390 } 2391 } 2392 2393 2394 /* 2395 * Find and reference object (if any) stored in instruction @cmd. 2396 * 2397 * Saves object info in @pidx, sets 2398 * - @unresolved to 1 if object should exists but not found 2399 * 2400 * Returns non-zero value in case of error. 2401 */ 2402 static int 2403 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti, 2404 struct obj_idx *pidx, int *unresolved) 2405 { 2406 struct named_object *no; 2407 struct opcode_obj_rewrite *rw; 2408 int error; 2409 2410 /* Check if this opcode is candidate for rewrite */ 2411 rw = find_op_rw(cmd, &ti->uidx, &ti->type); 2412 if (rw == NULL) 2413 return (0); 2414 2415 /* Need to rewrite. Save necessary fields */ 2416 pidx->uidx = ti->uidx; 2417 pidx->type = ti->type; 2418 2419 /* Try to find referenced kernel object */ 2420 error = rw->find_byname(ch, ti, &no); 2421 if (error != 0) 2422 return (error); 2423 if (no == NULL) { 2424 /* 2425 * Report about unresolved object for automaic 2426 * creation. 2427 */ 2428 *unresolved = 1; 2429 return (0); 2430 } 2431 2432 /* Found. Bump refcount and update kidx. */ 2433 no->refcnt++; 2434 rw->update(cmd, no->kidx); 2435 return (0); 2436 } 2437 2438 /* 2439 * Finds and bumps refcount for objects referenced by given @rule. 2440 * Auto-creates non-existing tables. 2441 * Fills in @oib array with userland/kernel indexes. 2442 * 2443 * Returns 0 on success. 2444 */ 2445 static int 2446 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule, 2447 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti) 2448 { 2449 struct obj_idx *pidx; 2450 ipfw_insn *cmd; 2451 int cmdlen, error, l, unresolved; 2452 2453 pidx = oib; 2454 l = rule->cmd_len; 2455 cmd = rule->cmd; 2456 cmdlen = 0; 2457 error = 0; 2458 2459 IPFW_UH_WLOCK(ch); 2460 2461 /* Increase refcount on each existing referenced table. */ 2462 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 2463 cmdlen = F_LEN(cmd); 2464 unresolved = 0; 2465 2466 error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved); 2467 if (error != 0) 2468 break; 2469 /* 2470 * Compatibility stuff for old clients: 2471 * prepare to automaitcally create non-existing objects. 
2472 */ 2473 if (unresolved != 0) { 2474 pidx->off = rule->cmd_len - l; 2475 pidx++; 2476 } 2477 } 2478 2479 if (error != 0) { 2480 /* Unref everything we have already done */ 2481 unref_oib_objects(ch, rule->cmd, oib, pidx); 2482 IPFW_UH_WUNLOCK(ch); 2483 return (error); 2484 } 2485 IPFW_UH_WUNLOCK(ch); 2486 2487 /* Perform auto-creation for non-existing objects */ 2488 if (pidx != oib) 2489 error = create_objects_compat(ch, rule->cmd, oib, pidx, ti); 2490 2491 /* Calculate real number of dynamic objects */ 2492 ci->object_opcodes = (uint16_t)(pidx - oib); 2493 2494 return (error); 2495 } 2496 2497 /* 2498 * Checks is opcode is referencing table of appropriate type. 2499 * Adds reference count for found table if true. 2500 * Rewrites user-supplied opcode values with kernel ones. 2501 * 2502 * Returns 0 on success and appropriate error code otherwise. 2503 */ 2504 static int 2505 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci) 2506 { 2507 int error; 2508 ipfw_insn *cmd; 2509 uint8_t type; 2510 struct obj_idx *p, *pidx_first, *pidx_last; 2511 struct tid_info ti; 2512 2513 /* 2514 * Prepare an array for storing opcode indices. 2515 * Use stack allocation by default. 2516 */ 2517 if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) { 2518 /* Stack */ 2519 pidx_first = ci->obuf; 2520 } else 2521 pidx_first = malloc( 2522 ci->object_opcodes * sizeof(struct obj_idx), 2523 M_IPFW, M_WAITOK | M_ZERO); 2524 2525 error = 0; 2526 type = 0; 2527 memset(&ti, 0, sizeof(ti)); 2528 2529 /* 2530 * Use default set for looking up tables (old way) or 2531 * use set rule is assigned to (new way). 2532 */ 2533 ti.set = (V_fw_tables_sets != 0) ? ci->krule->set : 0; 2534 if (ci->ctlv != NULL) { 2535 ti.tlvs = (void *)(ci->ctlv + 1); 2536 ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv); 2537 } 2538 2539 /* Reference all used tables and other objects */ 2540 error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti); 2541 if (error != 0) 2542 goto free; 2543 /* 2544 * Note that ref_rule_objects() might have updated ci->object_opcodes 2545 * to reflect actual number of object opcodes. 2546 */ 2547 2548 /* Perform rewrite of remaining opcodes */ 2549 p = pidx_first; 2550 pidx_last = pidx_first + ci->object_opcodes; 2551 for (p = pidx_first; p < pidx_last; p++) { 2552 cmd = ci->krule->cmd + p->off; 2553 update_opcode_kidx(cmd, p->kidx); 2554 } 2555 2556 free: 2557 if (pidx_first != ci->obuf) 2558 free(pidx_first, M_IPFW); 2559 2560 return (error); 2561 } 2562 2563 /* 2564 * Adds one or more rules to ipfw @chain. 2565 * Data layout (version 0)(current): 2566 * Request: 2567 * [ 2568 * ip_fw3_opheader 2569 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1) 2570 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3) 2571 * ] 2572 * Reply: 2573 * [ 2574 * ip_fw3_opheader 2575 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 2576 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] 2577 * ] 2578 * 2579 * Rules in reply are modified to store their actual ruleset number. 2580 * 2581 * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending 2582 * according to their idx field and there has to be no duplicates. 2583 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending. 2584 * (*3) Each ip_fw structure needs to be aligned to u64 boundary. 2585 * 2586 * Returns 0 on success. 
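 * A compact standalone sketch of constraints (*1)-(*3) follows this comment.
 */

/*
 * Illustrative userland sketch of the framing rules documented above:
 * strictly ascending name-TLV indices, non-decreasing rule numbers (zero
 * means "number me automatically") and 8-byte padding per rule entry.
 * The ex_/EX_ identifiers and the flat input arrays are invented for the
 * example; the authoritative checks live in add_rules() itself.
 */
#include <stddef.h>
#include <stdint.h>

#define	EX_ROUNDUP8(x)	(((x) + 7UL) & ~7UL)

static size_t
ex_check_request(const uint16_t *tlv_idx, int ncount,
    const uint32_t *rulenum, const size_t *rulelen, int rcount)
{
	size_t total;
	int i;

	/* (*1): name TLVs must be strictly ascending, hence unique. */
	for (i = 1; i < ncount; i++) {
		if (tlv_idx[i] <= tlv_idx[i - 1])
			return (0);
	}

	/* (*2): numbered rules must not decrease. */
	for (i = 1; i < rcount; i++) {
		if (rulenum[i] != 0 && rulenum[i] < rulenum[i - 1])
			return (0);
	}

	/* (*3): every rule entry is padded to a 64-bit boundary. */
	total = 0;
	for (i = 0; i < rcount; i++)
		total += EX_ROUNDUP8(rulelen[i]);

	return (total);		/* aligned payload length, 0 on error */
}

/*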
2587 */ 2588 static int 2589 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2590 struct sockopt_data *sd) 2591 { 2592 ipfw_obj_ctlv *ctlv, *rtlv, *tstate; 2593 ipfw_obj_ntlv *ntlv; 2594 int clen, error, idx; 2595 uint32_t count, read; 2596 struct ip_fw_rule *r; 2597 struct rule_check_info rci, *ci, *cbuf; 2598 int i, rsize; 2599 2600 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize); 2601 ctlv = (ipfw_obj_ctlv *)(op3 + 1); 2602 2603 read = sizeof(ip_fw3_opheader); 2604 rtlv = NULL; 2605 tstate = NULL; 2606 cbuf = NULL; 2607 memset(&rci, 0, sizeof(struct rule_check_info)); 2608 2609 if (read + sizeof(*ctlv) > sd->valsize) 2610 return (EINVAL); 2611 2612 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) { 2613 clen = ctlv->head.length; 2614 /* Check size and alignment */ 2615 if (clen > sd->valsize || clen < sizeof(*ctlv)) 2616 return (EINVAL); 2617 if ((clen % sizeof(uint64_t)) != 0) 2618 return (EINVAL); 2619 2620 /* 2621 * Some table names or other named objects. 2622 * Check for validness. 2623 */ 2624 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv); 2625 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv)) 2626 return (EINVAL); 2627 2628 /* 2629 * Check each TLV. 2630 * Ensure TLVs are sorted ascending and 2631 * there are no duplicates. 2632 */ 2633 idx = -1; 2634 ntlv = (ipfw_obj_ntlv *)(ctlv + 1); 2635 while (count > 0) { 2636 if (ntlv->head.length != sizeof(ipfw_obj_ntlv)) 2637 return (EINVAL); 2638 2639 error = ipfw_check_object_name_generic(ntlv->name); 2640 if (error != 0) 2641 return (error); 2642 2643 if (ntlv->idx <= idx) 2644 return (EINVAL); 2645 2646 idx = ntlv->idx; 2647 count--; 2648 ntlv++; 2649 } 2650 2651 tstate = ctlv; 2652 read += ctlv->head.length; 2653 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 2654 } 2655 2656 if (read + sizeof(*ctlv) > sd->valsize) 2657 return (EINVAL); 2658 2659 if (ctlv->head.type == IPFW_TLV_RULE_LIST) { 2660 clen = ctlv->head.length; 2661 if (clen + read > sd->valsize || clen < sizeof(*ctlv)) 2662 return (EINVAL); 2663 if ((clen % sizeof(uint64_t)) != 0) 2664 return (EINVAL); 2665 2666 /* 2667 * TODO: Permit adding multiple rules at once 2668 */ 2669 if (ctlv->count != 1) 2670 return (ENOTSUP); 2671 2672 clen -= sizeof(*ctlv); 2673 2674 if (ctlv->count > clen / sizeof(struct ip_fw_rule)) 2675 return (EINVAL); 2676 2677 /* Allocate state for each rule or use stack */ 2678 if (ctlv->count == 1) { 2679 memset(&rci, 0, sizeof(struct rule_check_info)); 2680 cbuf = &rci; 2681 } else 2682 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP, 2683 M_WAITOK | M_ZERO); 2684 ci = cbuf; 2685 2686 /* 2687 * Check each rule for validness. 
2688 * Ensure numbered rules are sorted ascending 2689 * and properly aligned 2690 */ 2691 idx = 0; 2692 r = (struct ip_fw_rule *)(ctlv + 1); 2693 count = 0; 2694 error = 0; 2695 while (clen > 0) { 2696 rsize = roundup2(RULESIZE(r), sizeof(uint64_t)); 2697 if (rsize > clen || ctlv->count <= count) { 2698 error = EINVAL; 2699 break; 2700 } 2701 2702 ci->ctlv = tstate; 2703 error = check_ipfw_rule1(r, rsize, ci); 2704 if (error != 0) 2705 break; 2706 2707 /* Check sorting */ 2708 if (r->rulenum != 0 && r->rulenum < idx) { 2709 printf("rulenum %d idx %d\n", r->rulenum, idx); 2710 error = EINVAL; 2711 break; 2712 } 2713 idx = r->rulenum; 2714 2715 ci->urule = (caddr_t)r; 2716 2717 rsize = roundup2(rsize, sizeof(uint64_t)); 2718 clen -= rsize; 2719 r = (struct ip_fw_rule *)((caddr_t)r + rsize); 2720 count++; 2721 ci++; 2722 } 2723 2724 if (ctlv->count != count || error != 0) { 2725 if (cbuf != &rci) 2726 free(cbuf, M_TEMP); 2727 return (EINVAL); 2728 } 2729 2730 rtlv = ctlv; 2731 read += ctlv->head.length; 2732 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 2733 } 2734 2735 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) { 2736 if (cbuf != NULL && cbuf != &rci) 2737 free(cbuf, M_TEMP); 2738 return (EINVAL); 2739 } 2740 2741 /* 2742 * Passed rules seems to be valid. 2743 * Allocate storage and try to add them to chain. 2744 */ 2745 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) { 2746 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule); 2747 ci->krule = ipfw_alloc_rule(chain, clen); 2748 import_rule1(ci); 2749 } 2750 2751 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) { 2752 /* Free allocate krules */ 2753 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) 2754 free_rule(ci->krule); 2755 } 2756 2757 if (cbuf != NULL && cbuf != &rci) 2758 free(cbuf, M_TEMP); 2759 2760 return (error); 2761 } 2762 2763 /* 2764 * Lists all sopts currently registered. 2765 * Data layout (v0)(current): 2766 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 2767 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ] 2768 * 2769 * Returns 0 on success 2770 */ 2771 static int 2772 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2773 struct sockopt_data *sd) 2774 { 2775 struct _ipfw_obj_lheader *olh; 2776 ipfw_sopt_info *i; 2777 struct ipfw_sopt_handler *sh; 2778 uint32_t count, n, size; 2779 2780 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 2781 if (olh == NULL) 2782 return (EINVAL); 2783 if (sd->valsize < olh->size) 2784 return (EINVAL); 2785 2786 CTL3_LOCK(); 2787 count = ctl3_hsize; 2788 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader); 2789 2790 /* Fill in header regadless of buffer size */ 2791 olh->count = count; 2792 olh->objsize = sizeof(ipfw_sopt_info); 2793 2794 if (size > olh->size) { 2795 olh->size = size; 2796 CTL3_UNLOCK(); 2797 return (ENOMEM); 2798 } 2799 olh->size = size; 2800 2801 for (n = 1; n <= count; n++) { 2802 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 2803 KASSERT(i != NULL, ("previously checked buffer is not enough")); 2804 sh = &ctl3_handlers[n]; 2805 i->opcode = sh->opcode; 2806 i->version = sh->version; 2807 i->refcnt = sh->refcnt; 2808 } 2809 CTL3_UNLOCK(); 2810 2811 return (0); 2812 } 2813 2814 /* 2815 * Compares two opcodes. 2816 * Used both in qsort() and bsearch(). 2817 * 2818 * Returns 0 if match is found. 
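 * A self-contained sketch of the sort/search pattern built on comparators
 * like this one follows.
 */

/*
 * Illustrative sketch of the idiom used by find_op_rw_range() below: keep
 * the array sorted with qsort(), locate one hit with bsearch(), then widen
 * the hit to the full run of entries sharing the key.  The ex_ structure
 * and names are invented for the example.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ex_rec {
	uint16_t	key;
	int		payload;
};

static int
ex_cmp_rec(const void *_a, const void *_b)
{
	const struct ex_rec *a = _a, *b = _b;

	return ((a->key > b->key) - (a->key < b->key));
}

static int
ex_find_range(struct ex_rec *arr, size_t n, uint16_t key,
    struct ex_rec **plo, struct ex_rec **phi)
{
	struct ex_rec k, *hit, *lo, *hi;

	memset(&k, 0, sizeof(k));
	k.key = key;

	/* @arr must have been sorted with qsort() and this same comparator. */
	hit = bsearch(&k, arr, n, sizeof(k), ex_cmp_rec);
	if (hit == NULL)
		return (1);

	for (lo = hit; lo > arr && (lo - 1)->key == key; lo--)
		;
	for (hi = hit; hi + 1 < arr + n && (hi + 1)->key == key; hi++)
		;
	*plo = lo;
	*phi = hi;
	return (0);
}

/*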
2819 */ 2820 static int 2821 compare_opcodes(const void *_a, const void *_b) 2822 { 2823 const struct opcode_obj_rewrite *a, *b; 2824 2825 a = (const struct opcode_obj_rewrite *)_a; 2826 b = (const struct opcode_obj_rewrite *)_b; 2827 2828 if (a->opcode < b->opcode) 2829 return (-1); 2830 else if (a->opcode > b->opcode) 2831 return (1); 2832 2833 return (0); 2834 } 2835 2836 /* 2837 * XXX: Rewrite bsearch() 2838 */ 2839 static int 2840 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo, 2841 struct opcode_obj_rewrite **phi) 2842 { 2843 struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw; 2844 2845 memset(&h, 0, sizeof(h)); 2846 h.opcode = op; 2847 2848 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters, 2849 ctl3_rsize, sizeof(h), compare_opcodes); 2850 if (rw == NULL) 2851 return (1); 2852 2853 /* Find the first element matching the same opcode */ 2854 lo = rw; 2855 for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--) 2856 ; 2857 2858 /* Find the last element matching the same opcode */ 2859 hi = rw; 2860 ctl3_max = ctl3_rewriters + ctl3_rsize; 2861 for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++) 2862 ; 2863 2864 *plo = lo; 2865 *phi = hi; 2866 2867 return (0); 2868 } 2869 2870 /* 2871 * Finds opcode object rewriter based on @code. 2872 * 2873 * Returns pointer to handler or NULL. 2874 */ 2875 static struct opcode_obj_rewrite * 2876 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2877 { 2878 struct opcode_obj_rewrite *rw, *lo, *hi; 2879 uint16_t uidx; 2880 uint8_t subtype; 2881 2882 if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0) 2883 return (NULL); 2884 2885 for (rw = lo; rw <= hi; rw++) { 2886 if (rw->classifier(cmd, &uidx, &subtype) == 0) { 2887 if (puidx != NULL) 2888 *puidx = uidx; 2889 if (ptype != NULL) 2890 *ptype = subtype; 2891 return (rw); 2892 } 2893 } 2894 2895 return (NULL); 2896 } 2897 int 2898 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx) 2899 { 2900 2901 if (find_op_rw(cmd, puidx, NULL) == 0) 2902 return (1); 2903 return (0); 2904 } 2905 2906 void 2907 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx) 2908 { 2909 struct opcode_obj_rewrite *rw; 2910 2911 rw = find_op_rw(cmd, NULL, NULL); 2912 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode)); 2913 rw->update(cmd, idx); 2914 } 2915 2916 void 2917 ipfw_init_obj_rewriter() 2918 { 2919 2920 ctl3_rewriters = NULL; 2921 ctl3_rsize = 0; 2922 } 2923 2924 void 2925 ipfw_destroy_obj_rewriter() 2926 { 2927 2928 if (ctl3_rewriters != NULL) 2929 free(ctl3_rewriters, M_IPFW); 2930 ctl3_rewriters = NULL; 2931 ctl3_rsize = 0; 2932 } 2933 2934 /* 2935 * Adds one or more opcode object rewrite handlers to the global array. 2936 * Function may sleep. 
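 * A standalone sketch of the drop-lock/allocate/re-check pattern used here
 * follows this comment.
 */

/*
 * Illustrative userland sketch (pthreads stand in for the ctl3 mutex):
 * since the allocation may sleep, it is done with the lock dropped, and the
 * size is re-checked afterwards; if the array grew in the meantime the
 * buffer is discarded and the attempt repeated.  The ex_ globals and names
 * are invented, allocation-failure handling is omitted, and the kernel code
 * additionally re-sorts the merged array.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t	ex_lock = PTHREAD_MUTEX_INITIALIZER;
static int		*ex_arr;
static size_t		ex_count;

static void
ex_append(const int *items, size_t count)
{
	size_t sz;
	int *tmp;

	pthread_mutex_lock(&ex_lock);
	for (;;) {
		sz = ex_count + count;
		pthread_mutex_unlock(&ex_lock);
		tmp = calloc(sz, sizeof(*tmp));	/* may block, like M_WAITOK */
		pthread_mutex_lock(&ex_lock);
		if (ex_count + count <= sz)
			break;
		/* The array grew while the lock was dropped; retry. */
		free(tmp);
	}

	/* Merge old and new contents and switch the pointers. */
	sz = ex_count + count;
	if (ex_count > 0)
		memcpy(tmp, ex_arr, ex_count * sizeof(*tmp));
	memcpy(tmp + ex_count, items, count * sizeof(*tmp));
	free(ex_arr);
	ex_arr = tmp;
	ex_count = sz;
	pthread_mutex_unlock(&ex_lock);
}

/*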
2937 */ 2938 void 2939 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count) 2940 { 2941 size_t sz; 2942 struct opcode_obj_rewrite *tmp; 2943 2944 CTL3_LOCK(); 2945 2946 for (;;) { 2947 sz = ctl3_rsize + count; 2948 CTL3_UNLOCK(); 2949 tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO); 2950 CTL3_LOCK(); 2951 if (ctl3_rsize + count <= sz) 2952 break; 2953 2954 /* Retry */ 2955 free(tmp, M_IPFW); 2956 } 2957 2958 /* Merge old & new arrays */ 2959 sz = ctl3_rsize + count; 2960 memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw)); 2961 memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw)); 2962 qsort(tmp, sz, sizeof(*rw), compare_opcodes); 2963 /* Switch new and free old */ 2964 if (ctl3_rewriters != NULL) 2965 free(ctl3_rewriters, M_IPFW); 2966 ctl3_rewriters = tmp; 2967 ctl3_rsize = sz; 2968 2969 CTL3_UNLOCK(); 2970 } 2971 2972 /* 2973 * Removes one or more object rewrite handlers from the global array. 2974 */ 2975 int 2976 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count) 2977 { 2978 size_t sz; 2979 struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi; 2980 int i; 2981 2982 CTL3_LOCK(); 2983 2984 for (i = 0; i < count; i++) { 2985 if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0) 2986 continue; 2987 2988 for (ktmp = lo; ktmp <= hi; ktmp++) { 2989 if (ktmp->classifier != rw[i].classifier) 2990 continue; 2991 2992 ctl3_max = ctl3_rewriters + ctl3_rsize; 2993 sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp); 2994 memmove(ktmp, ktmp + 1, sz); 2995 ctl3_rsize--; 2996 break; 2997 } 2998 2999 } 3000 3001 if (ctl3_rsize == 0) { 3002 if (ctl3_rewriters != NULL) 3003 free(ctl3_rewriters, M_IPFW); 3004 ctl3_rewriters = NULL; 3005 } 3006 3007 CTL3_UNLOCK(); 3008 3009 return (0); 3010 } 3011 3012 static int 3013 export_objhash_ntlv_internal(struct namedobj_instance *ni, 3014 struct named_object *no, void *arg) 3015 { 3016 struct sockopt_data *sd; 3017 ipfw_obj_ntlv *ntlv; 3018 3019 sd = (struct sockopt_data *)arg; 3020 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 3021 if (ntlv == NULL) 3022 return (ENOMEM); 3023 ipfw_export_obj_ntlv(no, ntlv); 3024 return (0); 3025 } 3026 3027 /* 3028 * Lists all service objects. 3029 * Data layout (v0)(current): 3030 * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size 3031 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ] 3032 * Returns 0 on success 3033 */ 3034 static int 3035 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 3036 struct sockopt_data *sd) 3037 { 3038 ipfw_obj_lheader *hdr; 3039 int count; 3040 3041 hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr)); 3042 if (hdr == NULL) 3043 return (EINVAL); 3044 3045 IPFW_UH_RLOCK(chain); 3046 count = ipfw_objhash_count(CHAIN_TO_SRV(chain)); 3047 hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv); 3048 if (sd->valsize < hdr->size) { 3049 IPFW_UH_RUNLOCK(chain); 3050 return (ENOMEM); 3051 } 3052 hdr->count = count; 3053 hdr->objsize = sizeof(ipfw_obj_ntlv); 3054 if (count > 0) 3055 ipfw_objhash_foreach(CHAIN_TO_SRV(chain), 3056 export_objhash_ntlv_internal, sd); 3057 IPFW_UH_RUNLOCK(chain); 3058 return (0); 3059 } 3060 3061 /* 3062 * Compares two sopt handlers (code, version and handler ptr). 3063 * Used both as qsort() and bsearch(). 3064 * Does not compare handler for latter case. 3065 * 3066 * Returns 0 if match is found. 
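 * A short standalone sketch of this "wildcard key" comparison follows.
 */

/*
 * Illustrative sketch of the trick used by compare_sh()/find_sh() below: a
 * NULL function pointer in the search key acts as a wildcard, so bsearch()
 * can look an entry up by opcode/version alone.  The wildcard only ever
 * appears in the key, never in the sorted array, so qsort() still sees a
 * consistent ordering.  The ex_ names are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>

struct ex_handler {
	uint16_t	opcode;
	uint8_t		version;
	void		(*fn)(void);
};

static int
ex_cmp_handler(const void *_a, const void *_b)
{
	const struct ex_handler *a = _a, *b = _b;

	if (a->opcode != b->opcode)
		return (a->opcode > b->opcode ? 1 : -1);
	if (a->version != b->version)
		return (a->version > b->version ? 1 : -1);
	if (a->fn == NULL || b->fn == NULL)
		return (0);		/* wildcard: match on opcode/version */
	if (a->fn != b->fn)
		return ((uintptr_t)a->fn > (uintptr_t)b->fn ? 1 : -1);
	return (0);
}

/*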
3067 */ 3068 static int 3069 compare_sh(const void *_a, const void *_b) 3070 { 3071 const struct ipfw_sopt_handler *a, *b; 3072 3073 a = (const struct ipfw_sopt_handler *)_a; 3074 b = (const struct ipfw_sopt_handler *)_b; 3075 3076 if (a->opcode < b->opcode) 3077 return (-1); 3078 else if (a->opcode > b->opcode) 3079 return (1); 3080 3081 if (a->version < b->version) 3082 return (-1); 3083 else if (a->version > b->version) 3084 return (1); 3085 3086 /* bsearch helper */ 3087 if (a->handler == NULL) 3088 return (0); 3089 3090 if ((uintptr_t)a->handler < (uintptr_t)b->handler) 3091 return (-1); 3092 else if ((uintptr_t)a->handler > (uintptr_t)b->handler) 3093 return (1); 3094 3095 return (0); 3096 } 3097 3098 /* 3099 * Finds sopt handler based on @code and @version. 3100 * 3101 * Returns pointer to handler or NULL. 3102 */ 3103 static struct ipfw_sopt_handler * 3104 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler) 3105 { 3106 struct ipfw_sopt_handler *sh, h; 3107 3108 memset(&h, 0, sizeof(h)); 3109 h.opcode = code; 3110 h.version = version; 3111 h.handler = handler; 3112 3113 sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers, 3114 ctl3_hsize, sizeof(h), compare_sh); 3115 3116 return (sh); 3117 } 3118 3119 static int 3120 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh) 3121 { 3122 struct ipfw_sopt_handler *sh; 3123 3124 CTL3_LOCK(); 3125 if ((sh = find_sh(opcode, version, NULL)) == NULL) { 3126 CTL3_UNLOCK(); 3127 printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n", 3128 opcode, version); 3129 return (EINVAL); 3130 } 3131 sh->refcnt++; 3132 ctl3_refct++; 3133 /* Copy handler data to requested buffer */ 3134 *psh = *sh; 3135 CTL3_UNLOCK(); 3136 3137 return (0); 3138 } 3139 3140 static void 3141 find_unref_sh(struct ipfw_sopt_handler *psh) 3142 { 3143 struct ipfw_sopt_handler *sh; 3144 3145 CTL3_LOCK(); 3146 sh = find_sh(psh->opcode, psh->version, NULL); 3147 KASSERT(sh != NULL, ("ctl3 handler disappeared")); 3148 sh->refcnt--; 3149 ctl3_refct--; 3150 CTL3_UNLOCK(); 3151 } 3152 3153 void 3154 ipfw_init_sopt_handler() 3155 { 3156 3157 CTL3_LOCK_INIT(); 3158 IPFW_ADD_SOPT_HANDLER(1, scodes); 3159 } 3160 3161 void 3162 ipfw_destroy_sopt_handler() 3163 { 3164 3165 IPFW_DEL_SOPT_HANDLER(1, scodes); 3166 CTL3_LOCK_DESTROY(); 3167 } 3168 3169 /* 3170 * Adds one or more sockopt handlers to the global array. 3171 * Function may sleep. 3172 */ 3173 void 3174 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3175 { 3176 size_t sz; 3177 struct ipfw_sopt_handler *tmp; 3178 3179 CTL3_LOCK(); 3180 3181 for (;;) { 3182 sz = ctl3_hsize + count; 3183 CTL3_UNLOCK(); 3184 tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO); 3185 CTL3_LOCK(); 3186 if (ctl3_hsize + count <= sz) 3187 break; 3188 3189 /* Retry */ 3190 free(tmp, M_IPFW); 3191 } 3192 3193 /* Merge old & new arrays */ 3194 sz = ctl3_hsize + count; 3195 memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh)); 3196 memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh)); 3197 qsort(tmp, sz, sizeof(*sh), compare_sh); 3198 /* Switch new and free old */ 3199 if (ctl3_handlers != NULL) 3200 free(ctl3_handlers, M_IPFW); 3201 ctl3_handlers = tmp; 3202 ctl3_hsize = sz; 3203 ctl3_gencnt++; 3204 3205 CTL3_UNLOCK(); 3206 } 3207 3208 /* 3209 * Removes one or more sockopt handlers from the global array. 
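 * A minimal sketch of the memmove()-based compaction used for that follows.
 */

/*
 * Illustrative sketch: removing element @i from a packed array of @*np
 * elements of @elsz bytes each by sliding the tail down one slot, the way
 * the handler tables below are compacted.  The ex_ name is invented.
 */
#include <stddef.h>
#include <string.h>

static void
ex_array_remove(void *arr, size_t *np, size_t elsz, size_t i)
{
	char *base = arr;

	if (i >= *np)
		return;
	memmove(base + i * elsz, base + (i + 1) * elsz,
	    (*np - (i + 1)) * elsz);
	(*np)--;
}

/*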
3210 */ 3211 int 3212 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 3213 { 3214 size_t sz; 3215 struct ipfw_sopt_handler *tmp, *h; 3216 int i; 3217 3218 CTL3_LOCK(); 3219 3220 for (i = 0; i < count; i++) { 3221 tmp = &sh[i]; 3222 h = find_sh(tmp->opcode, tmp->version, tmp->handler); 3223 if (h == NULL) 3224 continue; 3225 3226 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h); 3227 memmove(h, h + 1, sz); 3228 ctl3_hsize--; 3229 } 3230 3231 if (ctl3_hsize == 0) { 3232 if (ctl3_handlers != NULL) 3233 free(ctl3_handlers, M_IPFW); 3234 ctl3_handlers = NULL; 3235 } 3236 3237 ctl3_gencnt++; 3238 3239 CTL3_UNLOCK(); 3240 3241 return (0); 3242 } 3243 3244 /* 3245 * Writes data accumulated in @sd to sockopt buffer. 3246 * Zeroes internal @sd buffer. 3247 */ 3248 static int 3249 ipfw_flush_sopt_data(struct sockopt_data *sd) 3250 { 3251 struct sockopt *sopt; 3252 int error; 3253 size_t sz; 3254 3255 sz = sd->koff; 3256 if (sz == 0) 3257 return (0); 3258 3259 sopt = sd->sopt; 3260 3261 if (sopt->sopt_dir == SOPT_GET) { 3262 error = copyout(sd->kbuf, sopt->sopt_val, sz); 3263 if (error != 0) 3264 return (error); 3265 } 3266 3267 memset(sd->kbuf, 0, sd->ksize); 3268 sd->ktotal += sz; 3269 sd->koff = 0; 3270 if (sd->ktotal + sd->ksize < sd->valsize) 3271 sd->kavail = sd->ksize; 3272 else 3273 sd->kavail = sd->valsize - sd->ktotal; 3274 3275 /* Update sopt buffer data */ 3276 sopt->sopt_valsize = sd->ktotal; 3277 sopt->sopt_val = sd->sopt_val + sd->ktotal; 3278 3279 return (0); 3280 } 3281 3282 /* 3283 * Ensures that @sd buffer has contiguous @neeeded number of 3284 * bytes. 3285 * 3286 * Returns pointer to requested space or NULL. 3287 */ 3288 caddr_t 3289 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed) 3290 { 3291 int error; 3292 caddr_t addr; 3293 3294 if (sd->kavail < needed) { 3295 /* 3296 * Flush data and try another time. 3297 */ 3298 error = ipfw_flush_sopt_data(sd); 3299 3300 if (sd->kavail < needed || error != 0) 3301 return (NULL); 3302 } 3303 3304 addr = sd->kbuf + sd->koff; 3305 sd->koff += needed; 3306 sd->kavail -= needed; 3307 return (addr); 3308 } 3309 3310 /* 3311 * Requests @needed contiguous bytes from @sd buffer. 3312 * Function is used to notify subsystem that we are 3313 * interesed in first @needed bytes (request header) 3314 * and the rest buffer can be safely zeroed. 3315 * 3316 * Returns pointer to requested space or NULL. 3317 */ 3318 caddr_t 3319 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed) 3320 { 3321 caddr_t addr; 3322 3323 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL) 3324 return (NULL); 3325 3326 if (sd->kavail > 0) 3327 memset(sd->kbuf + sd->koff, 0, sd->kavail); 3328 3329 return (addr); 3330 } 3331 3332 /* 3333 * New sockopt handler. 
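 * A userland sketch of the sliding-window buffer it relies on (see
 * ipfw_get_sopt_space()/ipfw_flush_sopt_data() above) follows this comment.
 */

/*
 * Illustrative, self-contained model of the sliding-window idea: replies
 * of arbitrary size are produced through a small staging buffer that is
 * flushed into the (larger) destination whenever the next object does not
 * fit.  The ex_ names are invented and the bookkeeping is reduced to the
 * minimum; the kernel version additionally tracks the remaining userland
 * buffer space and zeroes the staging area on every flush.
 */
#include <stddef.h>
#include <string.h>

struct ex_window {
	char	*dst;		/* final destination buffer */
	size_t	 dstlen;	/* total room in dst */
	size_t	 total;		/* bytes flushed to dst so far */
	char	 stage[64];	/* small staging buffer */
	size_t	 off;		/* used bytes in stage */
};

static int
ex_flush(struct ex_window *w)
{

	if (w->off == 0)
		return (0);
	if (w->total + w->off > w->dstlen)
		return (-1);		/* destination is too small */
	memcpy(w->dst + w->total, w->stage, w->off);
	w->total += w->off;
	w->off = 0;
	return (0);
}

static void *
ex_get_space(struct ex_window *w, size_t needed)
{
	void *p;

	if (needed > sizeof(w->stage))
		return (NULL);
	if (sizeof(w->stage) - w->off < needed && ex_flush(w) != 0)
		return (NULL);
	p = w->stage + w->off;
	w->off += needed;
	return (p);
}

/*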
3334 */ 3335 int 3336 ipfw_ctl3(struct sockopt *sopt) 3337 { 3338 int error, locked; 3339 size_t size, valsize; 3340 struct ip_fw_chain *chain; 3341 char xbuf[256]; 3342 struct sockopt_data sdata; 3343 struct ipfw_sopt_handler h; 3344 ip_fw3_opheader *op3 = NULL; 3345 3346 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW); 3347 if (error != 0) 3348 return (error); 3349 3350 if (sopt->sopt_name != IP_FW3) 3351 return (ipfw_ctl(sopt)); 3352 3353 chain = &V_layer3_chain; 3354 error = 0; 3355 3356 /* Save original valsize before it is altered via sooptcopyin() */ 3357 valsize = sopt->sopt_valsize; 3358 memset(&sdata, 0, sizeof(sdata)); 3359 /* Read op3 header first to determine actual operation */ 3360 op3 = (ip_fw3_opheader *)xbuf; 3361 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3)); 3362 if (error != 0) 3363 return (error); 3364 sopt->sopt_valsize = valsize; 3365 3366 /* 3367 * Find and reference command. 3368 */ 3369 error = find_ref_sh(op3->opcode, op3->version, &h); 3370 if (error != 0) 3371 return (error); 3372 3373 /* 3374 * Disallow modifications in really-really secure mode, but still allow 3375 * the logging counters to be reset. 3376 */ 3377 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) { 3378 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3379 if (error != 0) { 3380 find_unref_sh(&h); 3381 return (error); 3382 } 3383 } 3384 3385 /* 3386 * Fill in sockopt_data structure that may be useful for 3387 * IP_FW3 get requests. 3388 */ 3389 locked = 0; 3390 if (valsize <= sizeof(xbuf)) { 3391 /* use on-stack buffer */ 3392 sdata.kbuf = xbuf; 3393 sdata.ksize = sizeof(xbuf); 3394 sdata.kavail = valsize; 3395 } else { 3396 3397 /* 3398 * Determine opcode type/buffer size: 3399 * allocate sliding-window buf for data export or 3400 * contiguous buffer for special ops. 3401 */ 3402 if ((h.dir & HDIR_SET) != 0) { 3403 /* Set request. Allocate contigous buffer. */ 3404 if (valsize > CTL3_LARGEBUF) { 3405 find_unref_sh(&h); 3406 return (EFBIG); 3407 } 3408 3409 size = valsize; 3410 } else { 3411 /* Get request. Allocate sliding window buffer */ 3412 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF; 3413 3414 if (size < valsize) { 3415 /* We have to wire user buffer */ 3416 error = vslock(sopt->sopt_val, valsize); 3417 if (error != 0) 3418 return (error); 3419 locked = 1; 3420 } 3421 } 3422 3423 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3424 sdata.ksize = size; 3425 sdata.kavail = size; 3426 } 3427 3428 sdata.sopt = sopt; 3429 sdata.sopt_val = sopt->sopt_val; 3430 sdata.valsize = valsize; 3431 3432 /* 3433 * Copy either all request (if valsize < bsize_max) 3434 * or first bsize_max bytes to guarantee most consumers 3435 * that all necessary data has been copied). 3436 * Anyway, copy not less than sizeof(ip_fw3_opheader). 
3437 */ 3438 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize, 3439 sizeof(ip_fw3_opheader))) != 0) 3440 return (error); 3441 op3 = (ip_fw3_opheader *)sdata.kbuf; 3442 3443 /* Finally, run handler */ 3444 error = h.handler(chain, op3, &sdata); 3445 find_unref_sh(&h); 3446 3447 /* Flush state and free buffers */ 3448 if (error == 0) 3449 error = ipfw_flush_sopt_data(&sdata); 3450 else 3451 ipfw_flush_sopt_data(&sdata); 3452 3453 if (locked != 0) 3454 vsunlock(sdata.sopt_val, valsize); 3455 3456 /* Restore original pointer and set number of bytes written */ 3457 sopt->sopt_val = sdata.sopt_val; 3458 sopt->sopt_valsize = sdata.ktotal; 3459 if (sdata.kbuf != xbuf) 3460 free(sdata.kbuf, M_TEMP); 3461 3462 return (error); 3463 } 3464 3465 /** 3466 * {set|get}sockopt parser. 3467 */ 3468 int 3469 ipfw_ctl(struct sockopt *sopt) 3470 { 3471 #define RULE_MAXSIZE (512*sizeof(u_int32_t)) 3472 int error; 3473 size_t size, valsize; 3474 struct ip_fw *buf; 3475 struct ip_fw_rule0 *rule; 3476 struct ip_fw_chain *chain; 3477 u_int32_t rulenum[2]; 3478 uint32_t opt; 3479 struct rule_check_info ci; 3480 IPFW_RLOCK_TRACKER; 3481 3482 chain = &V_layer3_chain; 3483 error = 0; 3484 3485 /* Save original valsize before it is altered via sooptcopyin() */ 3486 valsize = sopt->sopt_valsize; 3487 opt = sopt->sopt_name; 3488 3489 /* 3490 * Disallow modifications in really-really secure mode, but still allow 3491 * the logging counters to be reset. 3492 */ 3493 if (opt == IP_FW_ADD || 3494 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) { 3495 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 3496 if (error != 0) 3497 return (error); 3498 } 3499 3500 switch (opt) { 3501 case IP_FW_GET: 3502 /* 3503 * pass up a copy of the current rules. Static rules 3504 * come first (the last of which has number IPFW_DEFAULT_RULE), 3505 * followed by a possibly empty list of dynamic rule. 3506 * The last dynamic rule has NULL in the "next" field. 3507 * 3508 * Note that the calculated size is used to bound the 3509 * amount of data returned to the user. The rule set may 3510 * change between calculating the size and returning the 3511 * data in which case we'll just return what fits. 3512 */ 3513 for (;;) { 3514 int len = 0, want; 3515 3516 size = chain->static_len; 3517 size += ipfw_dyn_len(); 3518 if (size >= sopt->sopt_valsize) 3519 break; 3520 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 3521 IPFW_UH_RLOCK(chain); 3522 /* check again how much space we need */ 3523 want = chain->static_len + ipfw_dyn_len(); 3524 if (size >= want) 3525 len = ipfw_getrules(chain, buf, size); 3526 IPFW_UH_RUNLOCK(chain); 3527 if (size >= want) 3528 error = sooptcopyout(sopt, buf, len); 3529 free(buf, M_TEMP); 3530 if (size >= want) 3531 break; 3532 } 3533 break; 3534 3535 case IP_FW_FLUSH: 3536 /* locking is done within del_entry() */ 3537 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */ 3538 break; 3539 3540 case IP_FW_ADD: 3541 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK); 3542 error = sooptcopyin(sopt, rule, RULE_MAXSIZE, 3543 sizeof(struct ip_fw7) ); 3544 3545 memset(&ci, 0, sizeof(struct rule_check_info)); 3546 3547 /* 3548 * If the size of commands equals RULESIZE7 then we assume 3549 * a FreeBSD7.2 binary is talking to us (set is7=1). 3550 * is7 is persistent so the next 'ipfw list' command 3551 * will use this format. 3552 * NOTE: If wrong version is guessed (this can happen if 3553 * the first ipfw command is 'ipfw [pipe] list') 3554 * the ipfw binary may crash or loop infinitly... 
3555 */ 3556 size = sopt->sopt_valsize; 3557 if (size == RULESIZE7(rule)) { 3558 is7 = 1; 3559 error = convert_rule_to_8(rule); 3560 if (error) { 3561 free(rule, M_TEMP); 3562 return error; 3563 } 3564 size = RULESIZE(rule); 3565 } else 3566 is7 = 0; 3567 if (error == 0) 3568 error = check_ipfw_rule0(rule, size, &ci); 3569 if (error == 0) { 3570 /* locking is done within add_rule() */ 3571 struct ip_fw *krule; 3572 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule)); 3573 ci.urule = (caddr_t)rule; 3574 ci.krule = krule; 3575 import_rule0(&ci); 3576 error = commit_rules(chain, &ci, 1); 3577 if (error != 0) 3578 free_rule(ci.krule); 3579 else if (sopt->sopt_dir == SOPT_GET) { 3580 if (is7) { 3581 error = convert_rule_to_7(rule); 3582 size = RULESIZE7(rule); 3583 if (error) { 3584 free(rule, M_TEMP); 3585 return error; 3586 } 3587 } 3588 error = sooptcopyout(sopt, rule, size); 3589 } 3590 } 3591 free(rule, M_TEMP); 3592 break; 3593 3594 case IP_FW_DEL: 3595 /* 3596 * IP_FW_DEL is used for deleting single rules or sets, 3597 * and (ab)used to atomically manipulate sets. Argument size 3598 * is used to distinguish between the two: 3599 * sizeof(u_int32_t) 3600 * delete single rule or set of rules, 3601 * or reassign rules (or sets) to a different set. 3602 * 2*sizeof(u_int32_t) 3603 * atomic disable/enable sets. 3604 * first u_int32_t contains sets to be disabled, 3605 * second u_int32_t contains sets to be enabled. 3606 */ 3607 error = sooptcopyin(sopt, rulenum, 3608 2*sizeof(u_int32_t), sizeof(u_int32_t)); 3609 if (error) 3610 break; 3611 size = sopt->sopt_valsize; 3612 if (size == sizeof(u_int32_t) && rulenum[0] != 0) { 3613 /* delete or reassign, locking done in del_entry() */ 3614 error = del_entry(chain, rulenum[0]); 3615 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */ 3616 IPFW_UH_WLOCK(chain); 3617 V_set_disable = 3618 (V_set_disable | rulenum[0]) & ~rulenum[1] & 3619 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */ 3620 IPFW_UH_WUNLOCK(chain); 3621 } else 3622 error = EINVAL; 3623 break; 3624 3625 case IP_FW_ZERO: 3626 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */ 3627 rulenum[0] = 0; 3628 if (sopt->sopt_val != 0) { 3629 error = sooptcopyin(sopt, rulenum, 3630 sizeof(u_int32_t), sizeof(u_int32_t)); 3631 if (error) 3632 break; 3633 } 3634 error = zero_entry(chain, rulenum[0], 3635 sopt->sopt_name == IP_FW_RESETLOG); 3636 break; 3637 3638 /*--- TABLE opcodes ---*/ 3639 case IP_FW_TABLE_ADD: 3640 case IP_FW_TABLE_DEL: 3641 { 3642 ipfw_table_entry ent; 3643 struct tentry_info tei; 3644 struct tid_info ti; 3645 struct table_value v; 3646 3647 error = sooptcopyin(sopt, &ent, 3648 sizeof(ent), sizeof(ent)); 3649 if (error) 3650 break; 3651 3652 memset(&tei, 0, sizeof(tei)); 3653 tei.paddr = &ent.addr; 3654 tei.subtype = AF_INET; 3655 tei.masklen = ent.masklen; 3656 ipfw_import_table_value_legacy(ent.value, &v); 3657 tei.pvalue = &v; 3658 memset(&ti, 0, sizeof(ti)); 3659 ti.uidx = ent.tbl; 3660 ti.type = IPFW_TABLE_CIDR; 3661 3662 error = (opt == IP_FW_TABLE_ADD) ? 
3663 add_table_entry(chain, &ti, &tei, 0, 1) : 3664 del_table_entry(chain, &ti, &tei, 0, 1); 3665 } 3666 break; 3667 3668 3669 case IP_FW_TABLE_FLUSH: 3670 { 3671 u_int16_t tbl; 3672 struct tid_info ti; 3673 3674 error = sooptcopyin(sopt, &tbl, 3675 sizeof(tbl), sizeof(tbl)); 3676 if (error) 3677 break; 3678 memset(&ti, 0, sizeof(ti)); 3679 ti.uidx = tbl; 3680 error = flush_table(chain, &ti); 3681 } 3682 break; 3683 3684 case IP_FW_TABLE_GETSIZE: 3685 { 3686 u_int32_t tbl, cnt; 3687 struct tid_info ti; 3688 3689 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl), 3690 sizeof(tbl)))) 3691 break; 3692 memset(&ti, 0, sizeof(ti)); 3693 ti.uidx = tbl; 3694 IPFW_RLOCK(chain); 3695 error = ipfw_count_table(chain, &ti, &cnt); 3696 IPFW_RUNLOCK(chain); 3697 if (error) 3698 break; 3699 error = sooptcopyout(sopt, &cnt, sizeof(cnt)); 3700 } 3701 break; 3702 3703 case IP_FW_TABLE_LIST: 3704 { 3705 ipfw_table *tbl; 3706 struct tid_info ti; 3707 3708 if (sopt->sopt_valsize < sizeof(*tbl)) { 3709 error = EINVAL; 3710 break; 3711 } 3712 size = sopt->sopt_valsize; 3713 tbl = malloc(size, M_TEMP, M_WAITOK); 3714 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl)); 3715 if (error) { 3716 free(tbl, M_TEMP); 3717 break; 3718 } 3719 tbl->size = (size - sizeof(*tbl)) / 3720 sizeof(ipfw_table_entry); 3721 memset(&ti, 0, sizeof(ti)); 3722 ti.uidx = tbl->tbl; 3723 IPFW_RLOCK(chain); 3724 error = ipfw_dump_table_legacy(chain, &ti, tbl); 3725 IPFW_RUNLOCK(chain); 3726 if (error) { 3727 free(tbl, M_TEMP); 3728 break; 3729 } 3730 error = sooptcopyout(sopt, tbl, size); 3731 free(tbl, M_TEMP); 3732 } 3733 break; 3734 3735 /*--- NAT operations are protected by the IPFW_LOCK ---*/ 3736 case IP_FW_NAT_CFG: 3737 if (IPFW_NAT_LOADED) 3738 error = ipfw_nat_cfg_ptr(sopt); 3739 else { 3740 printf("IP_FW_NAT_CFG: %s\n", 3741 "ipfw_nat not present, please load it"); 3742 error = EINVAL; 3743 } 3744 break; 3745 3746 case IP_FW_NAT_DEL: 3747 if (IPFW_NAT_LOADED) 3748 error = ipfw_nat_del_ptr(sopt); 3749 else { 3750 printf("IP_FW_NAT_DEL: %s\n", 3751 "ipfw_nat not present, please load it"); 3752 error = EINVAL; 3753 } 3754 break; 3755 3756 case IP_FW_NAT_GET_CONFIG: 3757 if (IPFW_NAT_LOADED) 3758 error = ipfw_nat_get_cfg_ptr(sopt); 3759 else { 3760 printf("IP_FW_NAT_GET_CFG: %s\n", 3761 "ipfw_nat not present, please load it"); 3762 error = EINVAL; 3763 } 3764 break; 3765 3766 case IP_FW_NAT_GET_LOG: 3767 if (IPFW_NAT_LOADED) 3768 error = ipfw_nat_get_log_ptr(sopt); 3769 else { 3770 printf("IP_FW_NAT_GET_LOG: %s\n", 3771 "ipfw_nat not present, please load it"); 3772 error = EINVAL; 3773 } 3774 break; 3775 3776 default: 3777 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name); 3778 error = EINVAL; 3779 } 3780 3781 return (error); 3782 #undef RULE_MAXSIZE 3783 } 3784 #define RULE_MAXSIZE (256*sizeof(u_int32_t)) 3785 3786 /* Functions to convert rules 7.2 <==> 8.0 */ 3787 static int 3788 convert_rule_to_7(struct ip_fw_rule0 *rule) 3789 { 3790 /* Used to modify original rule */ 3791 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule; 3792 /* copy of original rule, version 8 */ 3793 struct ip_fw_rule0 *tmp; 3794 3795 /* Used to copy commands */ 3796 ipfw_insn *ccmd, *dst; 3797 int ll = 0, ccmdlen = 0; 3798 3799 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 3800 if (tmp == NULL) { 3801 return 1; //XXX error 3802 } 3803 bcopy(rule, tmp, RULE_MAXSIZE); 3804 3805 /* Copy fields */ 3806 //rule7->_pad = tmp->_pad; 3807 rule7->set = tmp->set; 3808 rule7->rulenum = tmp->rulenum; 3809 rule7->cmd_len = tmp->cmd_len; 3810 rule7->act_ofs = 
tmp->act_ofs; 3811 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule; 3812 rule7->cmd_len = tmp->cmd_len; 3813 rule7->pcnt = tmp->pcnt; 3814 rule7->bcnt = tmp->bcnt; 3815 rule7->timestamp = tmp->timestamp; 3816 3817 /* Copy commands */ 3818 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ; 3819 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 3820 ccmdlen = F_LEN(ccmd); 3821 3822 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 3823 3824 if (dst->opcode > O_NAT) 3825 /* O_REASS doesn't exists in 7.2 version, so 3826 * decrement opcode if it is after O_REASS 3827 */ 3828 dst->opcode--; 3829 3830 if (ccmdlen > ll) { 3831 printf("ipfw: opcode %d size truncated\n", 3832 ccmd->opcode); 3833 return EINVAL; 3834 } 3835 } 3836 free(tmp, M_TEMP); 3837 3838 return 0; 3839 } 3840 3841 static int 3842 convert_rule_to_8(struct ip_fw_rule0 *rule) 3843 { 3844 /* Used to modify original rule */ 3845 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule; 3846 3847 /* Used to copy commands */ 3848 ipfw_insn *ccmd, *dst; 3849 int ll = 0, ccmdlen = 0; 3850 3851 /* Copy of original rule */ 3852 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 3853 if (tmp == NULL) { 3854 return 1; //XXX error 3855 } 3856 3857 bcopy(rule7, tmp, RULE_MAXSIZE); 3858 3859 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ; 3860 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) { 3861 ccmdlen = F_LEN(ccmd); 3862 3863 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t)); 3864 3865 if (dst->opcode > O_NAT) 3866 /* O_REASS doesn't exists in 7.2 version, so 3867 * increment opcode if it is after O_REASS 3868 */ 3869 dst->opcode++; 3870 3871 if (ccmdlen > ll) { 3872 printf("ipfw: opcode %d size truncated\n", 3873 ccmd->opcode); 3874 return EINVAL; 3875 } 3876 } 3877 3878 rule->_pad = tmp->_pad; 3879 rule->set = tmp->set; 3880 rule->rulenum = tmp->rulenum; 3881 rule->cmd_len = tmp->cmd_len; 3882 rule->act_ofs = tmp->act_ofs; 3883 rule->next_rule = (struct ip_fw *)tmp->next_rule; 3884 rule->cmd_len = tmp->cmd_len; 3885 rule->id = 0; /* XXX see if is ok = 0 */ 3886 rule->pcnt = tmp->pcnt; 3887 rule->bcnt = tmp->bcnt; 3888 rule->timestamp = tmp->timestamp; 3889 3890 free (tmp, M_TEMP); 3891 return 0; 3892 } 3893 3894 /* 3895 * Named object api 3896 * 3897 */ 3898 3899 void 3900 ipfw_init_srv(struct ip_fw_chain *ch) 3901 { 3902 3903 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT); 3904 ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT, 3905 M_IPFW, M_WAITOK | M_ZERO); 3906 } 3907 3908 void 3909 ipfw_destroy_srv(struct ip_fw_chain *ch) 3910 { 3911 3912 free(ch->srvstate, M_IPFW); 3913 ipfw_objhash_destroy(ch->srvmap); 3914 } 3915 3916 /* 3917 * Allocate new bitmask which can be used to enlarge/shrink 3918 * named instance index. 3919 */ 3920 void 3921 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks) 3922 { 3923 size_t size; 3924 int max_blocks; 3925 u_long *idx_mask; 3926 3927 KASSERT((items % BLOCK_ITEMS) == 0, 3928 ("bitmask size needs to power of 2 and greater or equal to %zu", 3929 BLOCK_ITEMS)); 3930 3931 max_blocks = items / BLOCK_ITEMS; 3932 size = items / 8; 3933 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK); 3934 /* Mark all as free */ 3935 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS); 3936 *idx_mask &= ~(u_long)1; /* Skip index 0 */ 3937 3938 *idx = idx_mask; 3939 *pblocks = max_blocks; 3940 } 3941 3942 /* 3943 * Copy current bitmask index to new one. 
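 * A standalone sketch of the per-set bitmap layout being copied here
 * follows this comment.
 */

/*
 * Illustrative sketch of the layout built by ipfw_objhash_bitmap_alloc()
 * above: one run of @items/EX_BLOCK_ITEMS words per set, a set bit meaning
 * "index free", and index 0 reserved.  EX_MAX_SETS merely stands in for
 * IPFW_MAX_SETS and its value here is a placeholder; the ex_/EX_ names are
 * invented for the example.
 */
#include <stdlib.h>
#include <string.h>

#define	EX_BLOCK_ITEMS	(8 * sizeof(unsigned long))
#define	EX_MAX_SETS	32	/* placeholder for IPFW_MAX_SETS */

static unsigned long *
ex_bitmap_alloc(unsigned int items, unsigned int *pblocks)
{
	unsigned long *mask;
	size_t bytes;

	if (items == 0 || (items % EX_BLOCK_ITEMS) != 0)
		return (NULL);
	bytes = items / 8;			/* per-set mask size */
	mask = malloc(bytes * EX_MAX_SETS);
	if (mask == NULL)
		return (NULL);
	memset(mask, 0xFF, bytes * EX_MAX_SETS);	/* everything free */
	mask[0] &= ~1UL;			/* index 0 is never handed out */
	*pblocks = items / EX_BLOCK_ITEMS;
	return (mask);
}

/*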
3944 */ 3945 void 3946 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks) 3947 { 3948 int old_blocks, new_blocks; 3949 u_long *old_idx, *new_idx; 3950 int i; 3951 3952 old_idx = ni->idx_mask; 3953 old_blocks = ni->max_blocks; 3954 new_idx = *idx; 3955 new_blocks = *blocks; 3956 3957 for (i = 0; i < IPFW_MAX_SETS; i++) { 3958 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i], 3959 old_blocks * sizeof(u_long)); 3960 } 3961 } 3962 3963 /* 3964 * Swaps current @ni index with new one. 3965 */ 3966 void 3967 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks) 3968 { 3969 int old_blocks; 3970 u_long *old_idx; 3971 3972 old_idx = ni->idx_mask; 3973 old_blocks = ni->max_blocks; 3974 3975 ni->idx_mask = *idx; 3976 ni->max_blocks = *blocks; 3977 3978 /* Save old values */ 3979 *idx = old_idx; 3980 *blocks = old_blocks; 3981 } 3982 3983 void 3984 ipfw_objhash_bitmap_free(void *idx, int blocks) 3985 { 3986 3987 free(idx, M_IPFW); 3988 } 3989 3990 /* 3991 * Creates named hash instance. 3992 * Must be called without holding any locks. 3993 * Return pointer to new instance. 3994 */ 3995 struct namedobj_instance * 3996 ipfw_objhash_create(uint32_t items) 3997 { 3998 struct namedobj_instance *ni; 3999 int i; 4000 size_t size; 4001 4002 size = sizeof(struct namedobj_instance) + 4003 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE + 4004 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE; 4005 4006 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO); 4007 ni->nn_size = NAMEDOBJ_HASH_SIZE; 4008 ni->nv_size = NAMEDOBJ_HASH_SIZE; 4009 4010 ni->names = (struct namedobjects_head *)(ni +1); 4011 ni->values = &ni->names[ni->nn_size]; 4012 4013 for (i = 0; i < ni->nn_size; i++) 4014 TAILQ_INIT(&ni->names[i]); 4015 4016 for (i = 0; i < ni->nv_size; i++) 4017 TAILQ_INIT(&ni->values[i]); 4018 4019 /* Set default hashing/comparison functions */ 4020 ni->hash_f = objhash_hash_name; 4021 ni->cmp_f = objhash_cmp_name; 4022 4023 /* Allocate bitmask separately due to possible resize */ 4024 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks); 4025 4026 return (ni); 4027 } 4028 4029 void 4030 ipfw_objhash_destroy(struct namedobj_instance *ni) 4031 { 4032 4033 free(ni->idx_mask, M_IPFW); 4034 free(ni, M_IPFW); 4035 } 4036 4037 void 4038 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f, 4039 objhash_cmp_f *cmp_f) 4040 { 4041 4042 ni->hash_f = hash_f; 4043 ni->cmp_f = cmp_f; 4044 } 4045 4046 static uint32_t 4047 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set) 4048 { 4049 4050 return (fnv_32_str((const char *)name, FNV1_32_INIT)); 4051 } 4052 4053 static int 4054 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set) 4055 { 4056 4057 if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set)) 4058 return (0); 4059 4060 return (1); 4061 } 4062 4063 static uint32_t 4064 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val) 4065 { 4066 uint32_t v; 4067 4068 v = val % (ni->nv_size - 1); 4069 4070 return (v); 4071 } 4072 4073 struct named_object * 4074 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name) 4075 { 4076 struct named_object *no; 4077 uint32_t hash; 4078 4079 hash = ni->hash_f(ni, name, set) % ni->nn_size; 4080 4081 TAILQ_FOREACH(no, &ni->names[hash], nn_next) { 4082 if (ni->cmp_f(no, name, set) == 0) 4083 return (no); 4084 } 4085 4086 return (NULL); 4087 } 4088 4089 /* 4090 * Find named object by @uid. 
4091 * Check @tlvs for valid data inside. 4092 * 4093 * Returns pointer to found TLV or NULL. 4094 */ 4095 ipfw_obj_ntlv * 4096 ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv) 4097 { 4098 ipfw_obj_ntlv *ntlv; 4099 uintptr_t pa, pe; 4100 int l; 4101 4102 pa = (uintptr_t)tlvs; 4103 pe = pa + len; 4104 l = 0; 4105 for (; pa < pe; pa += l) { 4106 ntlv = (ipfw_obj_ntlv *)pa; 4107 l = ntlv->head.length; 4108 4109 if (l != sizeof(*ntlv)) 4110 return (NULL); 4111 4112 if (ntlv->idx != uidx) 4113 continue; 4114 /* 4115 * When userland has specified zero TLV type, do 4116 * not compare it with eltv. In some cases userland 4117 * doesn't know what type should it have. Use only 4118 * uidx and name for search named_object. 4119 */ 4120 if (ntlv->head.type != 0 && 4121 ntlv->head.type != (uint16_t)etlv) 4122 continue; 4123 4124 if (ipfw_check_object_name_generic(ntlv->name) != 0) 4125 return (NULL); 4126 4127 return (ntlv); 4128 } 4129 4130 return (NULL); 4131 } 4132 4133 /* 4134 * Finds object config based on either legacy index 4135 * or name in ntlv. 4136 * Note @ti structure contains unchecked data from userland. 4137 * 4138 * Returns 0 in success and fills in @pno with found config 4139 */ 4140 int 4141 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti, 4142 uint32_t etlv, struct named_object **pno) 4143 { 4144 char *name; 4145 ipfw_obj_ntlv *ntlv; 4146 uint32_t set; 4147 4148 if (ti->tlvs == NULL) 4149 return (EINVAL); 4150 4151 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv); 4152 if (ntlv == NULL) 4153 return (EINVAL); 4154 name = ntlv->name; 4155 4156 /* 4157 * Use set provided by @ti instead of @ntlv one. 4158 * This is needed due to different sets behavior 4159 * controlled by V_fw_tables_sets. 4160 */ 4161 set = ti->set; 4162 *pno = ipfw_objhash_lookup_name(ni, set, name); 4163 if (*pno == NULL) 4164 return (ESRCH); 4165 return (0); 4166 } 4167 4168 /* 4169 * Find named object by name, considering also its TLV type. 
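 * A compact standalone sketch of the underlying hash-bucket lookup follows
 * this comment.
 */

/*
 * Illustrative sketch of the name lookup scheme: hash the name, pick the
 * chain, then compare name and set as objhash_cmp_name() does.  The toy
 * hash below merely stands in for fnv_32_str(); the ex_/EX_ names are
 * invented for the example.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <string.h>

#define	EX_HASH_SIZE	32

struct ex_obj {
	TAILQ_ENTRY(ex_obj)	link;
	const char		*name;
	uint32_t		set;
};
TAILQ_HEAD(ex_objhead, ex_obj);

static uint32_t
ex_hash_name(const char *name)
{
	uint32_t h;

	for (h = 5381; *name != '\0'; name++)
		h = h * 33 + (unsigned char)*name;
	return (h);
}

static struct ex_obj *
ex_lookup(struct ex_objhead *buckets, uint32_t set, const char *name)
{
	struct ex_obj *no;
	uint32_t h;

	h = ex_hash_name(name) % EX_HASH_SIZE;
	TAILQ_FOREACH(no, &buckets[h], link) {
		if (strcmp(no->name, name) == 0 && no->set == set)
			return (no);
	}
	return (NULL);
}

/*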
4170 */ 4171 struct named_object * 4172 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set, 4173 uint32_t type, const char *name) 4174 { 4175 struct named_object *no; 4176 uint32_t hash; 4177 4178 hash = ni->hash_f(ni, name, set) % ni->nn_size; 4179 4180 TAILQ_FOREACH(no, &ni->names[hash], nn_next) { 4181 if (ni->cmp_f(no, name, set) == 0 && 4182 no->etlv == (uint16_t)type) 4183 return (no); 4184 } 4185 4186 return (NULL); 4187 } 4188 4189 struct named_object * 4190 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx) 4191 { 4192 struct named_object *no; 4193 uint32_t hash; 4194 4195 hash = objhash_hash_idx(ni, kidx); 4196 4197 TAILQ_FOREACH(no, &ni->values[hash], nv_next) { 4198 if (no->kidx == kidx) 4199 return (no); 4200 } 4201 4202 return (NULL); 4203 } 4204 4205 int 4206 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a, 4207 struct named_object *b) 4208 { 4209 4210 if ((strcmp(a->name, b->name) == 0) && a->set == b->set) 4211 return (1); 4212 4213 return (0); 4214 } 4215 4216 void 4217 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no) 4218 { 4219 uint32_t hash; 4220 4221 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size; 4222 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next); 4223 4224 hash = objhash_hash_idx(ni, no->kidx); 4225 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next); 4226 4227 ni->count++; 4228 } 4229 4230 void 4231 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no) 4232 { 4233 uint32_t hash; 4234 4235 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size; 4236 TAILQ_REMOVE(&ni->names[hash], no, nn_next); 4237 4238 hash = objhash_hash_idx(ni, no->kidx); 4239 TAILQ_REMOVE(&ni->values[hash], no, nv_next); 4240 4241 ni->count--; 4242 } 4243 4244 uint32_t 4245 ipfw_objhash_count(struct namedobj_instance *ni) 4246 { 4247 4248 return (ni->count); 4249 } 4250 4251 /* 4252 * Runs @func for each found named object. 4253 * It is safe to delete objects from callback 4254 */ 4255 int 4256 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg) 4257 { 4258 struct named_object *no, *no_tmp; 4259 int i, ret; 4260 4261 for (i = 0; i < ni->nn_size; i++) { 4262 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) { 4263 ret = f(ni, no, arg); 4264 if (ret != 0) 4265 return (ret); 4266 } 4267 } 4268 return (0); 4269 } 4270 4271 /* 4272 * Removes index from given set. 4273 * Returns 0 on success. 4274 */ 4275 int 4276 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx) 4277 { 4278 u_long *mask; 4279 int i, v; 4280 4281 i = idx / BLOCK_ITEMS; 4282 v = idx % BLOCK_ITEMS; 4283 4284 if (i >= ni->max_blocks) 4285 return (1); 4286 4287 mask = &ni->idx_mask[i]; 4288 4289 if ((*mask & ((u_long)1 << v)) != 0) 4290 return (1); 4291 4292 /* Mark as free */ 4293 *mask |= (u_long)1 << v; 4294 4295 /* Update free offset */ 4296 if (ni->free_off[0] > i) 4297 ni->free_off[0] = i; 4298 4299 return (0); 4300 } 4301 4302 /* 4303 * Allocate new index in given instance and stores in in @pidx. 4304 * Returns 0 on success. 
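 * A self-contained sketch of this ffsl()-driven allocation, together with
 * the matching free, follows this comment.
 */

/*
 * Illustrative sketch of the bit-scan allocator below and its counterpart
 * ipfw_objhash_free_idx() above: a set bit means "free", ffsl() finds the
 * lowest free index, and freeing simply sets the bit again.  The ex_/EX_
 * names are invented for the example.
 */
#include <strings.h>

#define	EX_BLOCK_ITEMS	(8 * sizeof(unsigned long))

static int
ex_alloc_idx(unsigned long *mask, unsigned int blocks, unsigned int *pidx)
{
	unsigned int i;
	int v;

	for (i = 0; i < blocks; i++) {
		v = ffsl((long)mask[i]);
		if (v == 0)
			continue;
		mask[i] &= ~(1UL << (v - 1));		/* mark busy */
		*pidx = i * EX_BLOCK_ITEMS + (v - 1);
		return (0);
	}
	return (1);					/* map is full */
}

static void
ex_free_idx(unsigned long *mask, unsigned int idx)
{

	mask[idx / EX_BLOCK_ITEMS] |= 1UL << (idx % EX_BLOCK_ITEMS);
}

/*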
4305 */ 4306 int 4307 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx) 4308 { 4309 struct namedobj_instance *ni; 4310 u_long *mask; 4311 int i, off, v; 4312 4313 ni = (struct namedobj_instance *)n; 4314 4315 off = ni->free_off[0]; 4316 mask = &ni->idx_mask[off]; 4317 4318 for (i = off; i < ni->max_blocks; i++, mask++) { 4319 if ((v = ffsl(*mask)) == 0) 4320 continue; 4321 4322 /* Mark as busy */ 4323 *mask &= ~ ((u_long)1 << (v - 1)); 4324 4325 ni->free_off[0] = i; 4326 4327 v = BLOCK_ITEMS * i + v - 1; 4328 4329 *pidx = v; 4330 return (0); 4331 } 4332 4333 return (1); 4334 } 4335 4336 /* end of file */ 4337