1 /*- 2 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 3 * Copyright (c) 2014 Yandex LLC 4 * Copyright (c) 2014 Alexander V. Chernikov 5 * 6 * Supported by: Valeria Paoli 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Control socket and rule management routines for ipfw. 35 * Control is currently implemented via IP_FW3 setsockopt() code. 36 */ 37 38 #include "opt_ipfw.h" 39 #include "opt_inet.h" 40 #ifndef INET 41 #error IPFIREWALL requires INET. 
42 #endif /* INET */ 43 #include "opt_inet6.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/malloc.h> 48 #include <sys/mbuf.h> /* struct m_tag used by nested headers */ 49 #include <sys/kernel.h> 50 #include <sys/lock.h> 51 #include <sys/priv.h> 52 #include <sys/proc.h> 53 #include <sys/rwlock.h> 54 #include <sys/rmlock.h> 55 #include <sys/socket.h> 56 #include <sys/socketvar.h> 57 #include <sys/sysctl.h> 58 #include <sys/syslog.h> 59 #include <sys/fnv_hash.h> 60 #include <net/if.h> 61 #include <net/route.h> 62 #include <net/vnet.h> 63 #include <vm/vm.h> 64 #include <vm/vm_extern.h> 65 66 #include <netinet/in.h> 67 #include <netinet/ip_var.h> /* hooks */ 68 #include <netinet/ip_fw.h> 69 70 #include <netpfil/ipfw/ip_fw_private.h> 71 #include <netpfil/ipfw/ip_fw_table.h> 72 73 #ifdef MAC 74 #include <security/mac/mac_framework.h> 75 #endif 76 77 static int ipfw_ctl(struct sockopt *sopt); 78 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, 79 struct rule_check_info *ci); 80 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size, 81 struct rule_check_info *ci); 82 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 83 struct rule_check_info *ci); 84 85 #define NAMEDOBJ_HASH_SIZE 32 86 87 struct namedobj_instance { 88 struct namedobjects_head *names; 89 struct namedobjects_head *values; 90 uint32_t nn_size; /* names hash size */ 91 uint32_t nv_size; /* number hash size */ 92 u_long *idx_mask; /* used items bitmask */ 93 uint32_t max_blocks; /* number of "long" blocks in bitmask */ 94 uint32_t count; /* number of items */ 95 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */ 96 objhash_hash_f *hash_f; 97 objhash_cmp_f *cmp_f; 98 }; 99 #define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */ 100 101 static uint32_t objhash_hash_name(struct namedobj_instance *ni, void *key, 102 uint32_t kopt); 103 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val); 104 static int objhash_cmp_name(struct named_object *no, void *name, uint32_t set); 105 106 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); 107 108 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 109 struct sockopt_data *sd); 110 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 111 struct sockopt_data *sd); 112 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 113 struct sockopt_data *sd); 114 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 115 struct sockopt_data *sd); 116 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 117 struct sockopt_data *sd); 118 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 119 struct sockopt_data *sd); 120 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 121 struct sockopt_data *sd); 122 123 /* ctl3 handler data */ 124 struct mtx ctl3_lock; 125 #define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF) 126 #define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock) 127 #define CTL3_LOCK() mtx_lock(&ctl3_lock) 128 #define CTL3_UNLOCK() mtx_unlock(&ctl3_lock) 129 130 static struct ipfw_sopt_handler *ctl3_handlers; 131 static size_t ctl3_hsize; 132 static uint64_t ctl3_refct, ctl3_gencnt; 133 #define CTL3_SMALLBUF 4096 /* small page-size write buffer */ 134 #define CTL3_LARGEBUF 16 * 1024 * 1024 /* handle large rulesets */ 135 136 static int ipfw_flush_sopt_data(struct sockopt_data *sd); 137 138 static struct ipfw_sopt_handler 
scodes[] = { 139 { IP_FW_XGET, 0, HDIR_GET, dump_config }, 140 { IP_FW_XADD, 0, HDIR_BOTH, add_rules }, 141 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules }, 142 { IP_FW_XZERO, 0, HDIR_SET, clear_rules }, 143 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules }, 144 { IP_FW_XMOVE, 0, HDIR_SET, move_rules }, 145 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets }, 146 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets }, 147 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets }, 148 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes }, 149 }; 150 151 /* 152 * static variables followed by global ones 153 */ 154 155 #ifndef USERSPACE 156 157 static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone); 158 #define V_ipfw_cntr_zone VNET(ipfw_cntr_zone) 159 160 void 161 ipfw_init_counters() 162 { 163 164 V_ipfw_cntr_zone = uma_zcreate("IPFW counters", 165 sizeof(ip_fw_cntr), NULL, NULL, NULL, NULL, 166 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 167 } 168 169 void 170 ipfw_destroy_counters() 171 { 172 173 uma_zdestroy(V_ipfw_cntr_zone); 174 } 175 176 struct ip_fw * 177 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize) 178 { 179 struct ip_fw *rule; 180 181 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO); 182 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO); 183 184 return (rule); 185 } 186 187 static void 188 free_rule(struct ip_fw *rule) 189 { 190 191 uma_zfree(V_ipfw_cntr_zone, rule->cntr); 192 free(rule, M_IPFW); 193 } 194 #else 195 void 196 ipfw_init_counters() 197 { 198 } 199 200 void 201 ipfw_destroy_counters() 202 { 203 } 204 205 struct ip_fw * 206 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize) 207 { 208 struct ip_fw *rule; 209 210 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO); 211 212 return (rule); 213 } 214 215 static void 216 free_rule(struct ip_fw *rule) 217 { 218 219 free(rule, M_IPFW); 220 } 221 222 #endif 223 224 225 /* 226 * Find the smallest rule >= key, id. 227 * We could use bsearch but it is so simple that we code it directly 228 */ 229 int 230 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id) 231 { 232 int i, lo, hi; 233 struct ip_fw *r; 234 235 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) { 236 i = (lo + hi) / 2; 237 r = chain->map[i]; 238 if (r->rulenum < key) 239 lo = i + 1; /* continue from the next one */ 240 else if (r->rulenum > key) 241 hi = i; /* this might be good */ 242 else if (r->id < id) 243 lo = i + 1; /* continue from the next one */ 244 else /* r->id >= id */ 245 hi = i; /* this might be good */ 246 }; 247 return hi; 248 } 249 250 /* 251 * Builds skipto cache on rule set @map. 252 */ 253 static void 254 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map) 255 { 256 int *smap, rulenum; 257 int i, mi; 258 259 IPFW_UH_WLOCK_ASSERT(chain); 260 261 mi = 0; 262 rulenum = map[mi]->rulenum; 263 smap = chain->idxmap_back; 264 265 if (smap == NULL) 266 return; 267 268 for (i = 0; i < 65536; i++) { 269 smap[i] = mi; 270 /* Use the same rule index until i < rulenum */ 271 if (i != rulenum || i == 65535) 272 continue; 273 /* Find next rule with num > i */ 274 rulenum = map[++mi]->rulenum; 275 while (rulenum == i) 276 rulenum = map[++mi]->rulenum; 277 } 278 } 279 280 /* 281 * Swaps prepared (backup) index with current one. 282 */ 283 static void 284 swap_skipto_cache(struct ip_fw_chain *chain) 285 { 286 int *map; 287 288 IPFW_UH_WLOCK_ASSERT(chain); 289 IPFW_WLOCK_ASSERT(chain); 290 291 map = chain->idxmap; 292 chain->idxmap = chain->idxmap_back; 293 chain->idxmap_back = map; 294 } 295 296 /* 297 * Allocate and initialize skipto cache. 
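 *
 * The cache is a 65536-entry array indexed by rule number: slot @i holds
 * the position in chain->map of the first rule whose number is >= @i, so
 * O_SKIPTO can locate its target without a per-packet binary search.
 * A second (backup) array is kept so update_skipto_cache() can rebuild
 * the index while the current one stays visible; swap_skipto_cache()
 * then switches the two under the run-time write lock.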
298 */ 299 void 300 ipfw_init_skipto_cache(struct ip_fw_chain *chain) 301 { 302 int *idxmap, *idxmap_back; 303 304 idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW, 305 M_WAITOK | M_ZERO); 306 idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW, 307 M_WAITOK | M_ZERO); 308 309 /* 310 * Note we may be called at any time after initialization, 311 * for example, on first skipto rule, so we need to 312 * provide valid chain->idxmap on return 313 */ 314 315 IPFW_UH_WLOCK(chain); 316 if (chain->idxmap != NULL) { 317 IPFW_UH_WUNLOCK(chain); 318 free(idxmap, M_IPFW); 319 free(idxmap_back, M_IPFW); 320 return; 321 } 322 323 /* Set backup pointer first to permit building cache */ 324 chain->idxmap_back = idxmap_back; 325 update_skipto_cache(chain, chain->map); 326 IPFW_WLOCK(chain); 327 /* It is now safe to set chain->idxmap ptr */ 328 chain->idxmap = idxmap; 329 swap_skipto_cache(chain); 330 IPFW_WUNLOCK(chain); 331 IPFW_UH_WUNLOCK(chain); 332 } 333 334 /* 335 * Destroys skipto cache. 336 */ 337 void 338 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain) 339 { 340 341 if (chain->idxmap != NULL) 342 free(chain->idxmap, M_IPFW); 343 if (chain->idxmap != NULL) 344 free(chain->idxmap_back, M_IPFW); 345 } 346 347 348 /* 349 * allocate a new map, returns the chain locked. extra is the number 350 * of entries to add or delete. 351 */ 352 static struct ip_fw ** 353 get_map(struct ip_fw_chain *chain, int extra, int locked) 354 { 355 356 for (;;) { 357 struct ip_fw **map; 358 int i, mflags; 359 360 mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK); 361 362 i = chain->n_rules + extra; 363 map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags); 364 if (map == NULL) { 365 printf("%s: cannot allocate map\n", __FUNCTION__); 366 return NULL; 367 } 368 if (!locked) 369 IPFW_UH_WLOCK(chain); 370 if (i >= chain->n_rules + extra) /* good */ 371 return map; 372 /* otherwise we lost the race, free and retry */ 373 if (!locked) 374 IPFW_UH_WUNLOCK(chain); 375 free(map, M_IPFW); 376 } 377 } 378 379 /* 380 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK 381 */ 382 static struct ip_fw ** 383 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len) 384 { 385 struct ip_fw **old_map; 386 387 IPFW_WLOCK(chain); 388 chain->id++; 389 chain->n_rules = new_len; 390 old_map = chain->map; 391 chain->map = new_map; 392 swap_skipto_cache(chain); 393 IPFW_WUNLOCK(chain); 394 return old_map; 395 } 396 397 398 static void 399 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr) 400 { 401 402 cntr->size = sizeof(*cntr); 403 404 if (krule->cntr != NULL) { 405 cntr->pcnt = counter_u64_fetch(krule->cntr); 406 cntr->bcnt = counter_u64_fetch(krule->cntr + 1); 407 cntr->timestamp = krule->timestamp; 408 } 409 if (cntr->timestamp > 0) 410 cntr->timestamp += boottime.tv_sec; 411 } 412 413 static void 414 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr) 415 { 416 417 if (krule->cntr != NULL) { 418 cntr->pcnt = counter_u64_fetch(krule->cntr); 419 cntr->bcnt = counter_u64_fetch(krule->cntr + 1); 420 cntr->timestamp = krule->timestamp; 421 } 422 if (cntr->timestamp > 0) 423 cntr->timestamp += boottime.tv_sec; 424 } 425 426 /* 427 * Copies rule @urule from v1 userland format (current). 428 * to kernel @krule. 429 * Assume @krule is zeroed. 
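 * Only the rule header fields and the opcode stream are copied here;
 * the per-rule counters are allocated separately by ipfw_alloc_rule().
 * The offset of the rulenum field is recorded in @ci so commit_rules()
 * can write an auto-assigned rule number back to the userland buffer.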
 */
static void
import_rule1(struct rule_check_info *ci)
{
	struct ip_fw_rule *urule;
	struct ip_fw *krule;

	urule = (struct ip_fw_rule *)ci->urule;
	krule = (struct ip_fw *)ci->krule;

	/* copy header */
	krule->act_ofs = urule->act_ofs;
	krule->cmd_len = urule->cmd_len;
	krule->rulenum = urule->rulenum;
	krule->set = urule->set;
	krule->flags = urule->flags;

	/* Save rulenum offset */
	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);

	/* Copy opcodes */
	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
}

/*
 * Export rule into v1 format (Current).
 * Layout:
 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
 *   [ ip_fw_rule ] OR
 *   [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
 * ]
 * Assume @data is zeroed.
 */
static void
export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
{
	struct ip_fw_bcounter *cntr;
	struct ip_fw_rule *urule;
	ipfw_obj_tlv *tlv;

	/* Fill in TLV header */
	tlv = (ipfw_obj_tlv *)data;
	tlv->type = IPFW_TLV_RULE_ENT;
	tlv->length = len;

	if (rcntrs != 0) {
		/* Copy counters */
		cntr = (struct ip_fw_bcounter *)(tlv + 1);
		urule = (struct ip_fw_rule *)(cntr + 1);
		export_cntr1_base(krule, cntr);
	} else
		urule = (struct ip_fw_rule *)(tlv + 1);

	/* copy header */
	urule->act_ofs = krule->act_ofs;
	urule->cmd_len = krule->cmd_len;
	urule->rulenum = krule->rulenum;
	urule->set = krule->set;
	urule->flags = krule->flags;
	urule->id = krule->id;

	/* Copy opcodes */
	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
}

/*
 * Copies rule @urule from FreeBSD8 userland format (v0)
 * to kernel @krule.
 * Assume @krule is zeroed.
 */
static void
import_rule0(struct rule_check_info *ci)
{
	struct ip_fw_rule0 *urule;
	struct ip_fw *krule;
	int cmdlen, l;
	ipfw_insn *cmd;
	ipfw_insn_limit *lcmd;
	ipfw_insn_if *cmdif;

	urule = (struct ip_fw_rule0 *)ci->urule;
	krule = (struct ip_fw *)ci->krule;

	/* copy header */
	krule->act_ofs = urule->act_ofs;
	krule->cmd_len = urule->cmd_len;
	krule->rulenum = urule->rulenum;
	krule->set = urule->set;
	if ((urule->_pad & 1) != 0)
		krule->flags |= IPFW_RULE_NOOPT;

	/* Save rulenum offset */
	ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);

	/* Copy opcodes */
	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));

	/*
	 * Alter opcodes:
	 * 1) convert tablearg value from 65535 to 0
	 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room for targ).
	 * 3) convert table number in iface opcodes to u16
	 */
	l = krule->cmd_len;
	cmd = krule->cmd;
	cmdlen = 0;

	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		switch (cmd->opcode) {
		/* Opcodes supporting tablearg */
		case O_TAG:
		case O_TAGGED:
		case O_PIPE:
		case O_QUEUE:
		case O_DIVERT:
		case O_TEE:
		case O_SKIPTO:
		case O_CALLRETURN:
		case O_NETGRAPH:
		case O_NGTEE:
		case O_NAT:
			if (cmd->arg1 == 65535)
				cmd->arg1 = IP_FW_TARG;
			break;
		case O_SETFIB:
		case O_SETDSCP:
			if (cmd->arg1 == 65535)
				cmd->arg1 = IP_FW_TARG;
			else
				cmd->arg1 |= 0x8000;
			break;
		case O_LIMIT:
			lcmd = (ipfw_insn_limit *)cmd;
			if (lcmd->conn_limit == 65535)
				lcmd->conn_limit = IP_FW_TARG;
			break;
		/* Interface tables */
		case O_XMIT:
		case O_RECV:
		case O_VIA:
			/* Interface table, possibly */
			cmdif = (ipfw_insn_if *)cmd;
			if (cmdif->name[0] != '\1')
				break;

			cmdif->p.kidx = (uint16_t)cmdif->p.glob;
			break;
		}
	}
}

/*
 * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
 */
static void
export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
{
	int cmdlen, l;
	ipfw_insn *cmd;
	ipfw_insn_limit *lcmd;
	ipfw_insn_if *cmdif;

	/* copy header */
	memset(urule, 0, len);
	urule->act_ofs = krule->act_ofs;
	urule->cmd_len = krule->cmd_len;
	urule->rulenum = krule->rulenum;
	urule->set = krule->set;
	if ((krule->flags & IPFW_RULE_NOOPT) != 0)
		urule->_pad |= 1;

	/* Copy opcodes */
	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));

	/* Export counters */
	export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);

	/*
	 * Alter opcodes:
	 * 1) convert tablearg value from 0 to 65535
	 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
	 * 3) convert table number in iface opcodes to int
	 */
	l = urule->cmd_len;
	cmd = urule->cmd;
	cmdlen = 0;

	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		switch (cmd->opcode) {
		/* Opcodes supporting tablearg */
		case O_TAG:
		case O_TAGGED:
		case O_PIPE:
		case O_QUEUE:
		case O_DIVERT:
		case O_TEE:
		case O_SKIPTO:
		case O_CALLRETURN:
		case O_NETGRAPH:
		case O_NGTEE:
		case O_NAT:
			if (cmd->arg1 == IP_FW_TARG)
				cmd->arg1 = 65535;
			break;
		case O_SETFIB:
		case O_SETDSCP:
			if (cmd->arg1 == IP_FW_TARG)
				cmd->arg1 = 65535;
			else
				cmd->arg1 &= ~0x8000;
			break;
		case O_LIMIT:
			lcmd = (ipfw_insn_limit *)cmd;
			if (lcmd->conn_limit == IP_FW_TARG)
				lcmd->conn_limit = 65535;
			break;
		/* Interface tables */
		case O_XMIT:
		case O_RECV:
		case O_VIA:
			/* Interface table, possibly */
			cmdif = (ipfw_insn_if *)cmd;
			if (cmdif->name[0] != '\1')
				break;

			cmdif->p.glob = cmdif->p.kidx;
			break;
		}
	}
}

/*
 * Add new rule(s) to the list possibly creating rule number for each.
 * Update the rule_number in the input struct so the caller knows it as well.
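 * Rules submitted with rulenum == 0 are numbered automatically: the number
 * of the preceding rule plus V_autoinc_step (clamped to the 1..1000 range),
 * as long as the result stays below IPFW_DEFAULT_RULE.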
669 * Must be called without IPFW_UH held 670 */ 671 static int 672 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count) 673 { 674 int error, i, insert_before, tcount; 675 uint16_t rulenum, *pnum; 676 struct rule_check_info *ci; 677 struct ip_fw *krule; 678 struct ip_fw **map; /* the new array of pointers */ 679 680 /* Check if we need to do table remap */ 681 tcount = 0; 682 for (ci = rci, i = 0; i < count; ci++, i++) { 683 if (ci->table_opcodes == 0) 684 continue; 685 686 /* 687 * Rule has some table opcodes. 688 * Reference & allocate needed tables/ 689 */ 690 error = ipfw_rewrite_table_uidx(chain, ci); 691 if (error != 0) { 692 693 /* 694 * rewrite failed, state for current rule 695 * has been reverted. Check if we need to 696 * revert more. 697 */ 698 if (tcount > 0) { 699 700 /* 701 * We have some more table rules 702 * we need to rollback. 703 */ 704 705 IPFW_UH_WLOCK(chain); 706 while (ci != rci) { 707 ci--; 708 if (ci->table_opcodes == 0) 709 continue; 710 ipfw_unref_rule_tables(chain,ci->krule); 711 712 } 713 IPFW_UH_WUNLOCK(chain); 714 715 } 716 717 return (error); 718 } 719 720 tcount++; 721 } 722 723 /* get_map returns with IPFW_UH_WLOCK if successful */ 724 map = get_map(chain, count, 0 /* not locked */); 725 if (map == NULL) { 726 if (tcount > 0) { 727 /* Unbind tables */ 728 IPFW_UH_WLOCK(chain); 729 for (ci = rci, i = 0; i < count; ci++, i++) { 730 if (ci->table_opcodes == 0) 731 continue; 732 733 ipfw_unref_rule_tables(chain, ci->krule); 734 } 735 IPFW_UH_WUNLOCK(chain); 736 } 737 738 return (ENOSPC); 739 } 740 741 if (V_autoinc_step < 1) 742 V_autoinc_step = 1; 743 else if (V_autoinc_step > 1000) 744 V_autoinc_step = 1000; 745 746 /* FIXME: Handle count > 1 */ 747 ci = rci; 748 krule = ci->krule; 749 rulenum = krule->rulenum; 750 751 /* find the insertion point, we will insert before */ 752 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE; 753 i = ipfw_find_rule(chain, insert_before, 0); 754 /* duplicate first part */ 755 if (i > 0) 756 bcopy(chain->map, map, i * sizeof(struct ip_fw *)); 757 map[i] = krule; 758 /* duplicate remaining part, we always have the default rule */ 759 bcopy(chain->map + i, map + i + 1, 760 sizeof(struct ip_fw *) *(chain->n_rules - i)); 761 if (rulenum == 0) { 762 /* Compute rule number and write it back */ 763 rulenum = i > 0 ? map[i-1]->rulenum : 0; 764 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step) 765 rulenum += V_autoinc_step; 766 krule->rulenum = rulenum; 767 /* Save number to userland rule */ 768 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff); 769 *pnum = rulenum; 770 } 771 772 krule->id = chain->id + 1; 773 update_skipto_cache(chain, map); 774 map = swap_map(chain, map, chain->n_rules + 1); 775 chain->static_len += RULEUSIZE0(krule); 776 IPFW_UH_WUNLOCK(chain); 777 if (map) 778 free(map, M_IPFW); 779 return (0); 780 } 781 782 /* 783 * Adds @rule to the list of rules to reap 784 */ 785 void 786 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head, 787 struct ip_fw *rule) 788 { 789 790 IPFW_UH_WLOCK_ASSERT(chain); 791 792 /* Unlink rule from everywhere */ 793 ipfw_unref_rule_tables(chain, rule); 794 795 *((struct ip_fw **)rule) = *head; 796 *head = rule; 797 } 798 799 /* 800 * Reclaim storage associated with a list of rules. This is 801 * typically the list created using remove_rule. 802 * A NULL pointer on input is handled correctly. 
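 *
 * Typical usage (see delete_range() below): unlink doomed rules onto a
 * local list with ipfw_reap_add() while IPFW_UH_WLOCK is held, then free
 * them here once the lock has been dropped:
 *
 *	struct ip_fw *reap = NULL;
 *
 *	IPFW_UH_WLOCK(chain);
 *	...
 *	ipfw_reap_add(chain, &reap, rule);
 *	...
 *	IPFW_UH_WUNLOCK(chain);
 *	ipfw_reap_rules(reap);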
803 */ 804 void 805 ipfw_reap_rules(struct ip_fw *head) 806 { 807 struct ip_fw *rule; 808 809 while ((rule = head) != NULL) { 810 head = *((struct ip_fw **)head); 811 free_rule(rule); 812 } 813 } 814 815 /* 816 * Rules to keep are 817 * (default || reserved || !match_set || !match_number) 818 * where 819 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE) 820 * // the default rule is always protected 821 * 822 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET) 823 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush") 824 * 825 * match_set ::= (cmd == 0 || rule->set == set) 826 * // set number is ignored for cmd == 0 827 * 828 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum) 829 * // number is ignored for cmd == 1 or n == 0 830 * 831 */ 832 int 833 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt) 834 { 835 836 /* Don't match default rule regardless of query */ 837 if (rule->rulenum == IPFW_DEFAULT_RULE) 838 return (0); 839 840 /* Don't match rules in reserved set for flush requests */ 841 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET) 842 return (0); 843 844 /* If we're filtering by set, don't match other sets */ 845 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set) 846 return (0); 847 848 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 && 849 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule)) 850 return (0); 851 852 return (1); 853 } 854 855 /* 856 * Delete rules matching range @rt. 857 * Saves number of deleted rules in @ndel. 858 * 859 * Returns 0 on success. 860 */ 861 static int 862 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel) 863 { 864 struct ip_fw *reap, *rule, **map; 865 int end, start; 866 int i, n, ndyn, ofs; 867 868 reap = NULL; 869 IPFW_UH_WLOCK(chain); /* arbitrate writers */ 870 871 /* 872 * Stage 1: Determine range to inspect. 873 * Range is half-inclusive, e.g [start, end). 874 */ 875 start = 0; 876 end = chain->n_rules - 1; 877 878 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) { 879 start = ipfw_find_rule(chain, rt->start_rule, 0); 880 881 end = ipfw_find_rule(chain, rt->end_rule, 0); 882 if (rt->end_rule != IPFW_DEFAULT_RULE) 883 while (chain->map[end]->rulenum == rt->end_rule) 884 end++; 885 } 886 887 /* Allocate new map of the same size */ 888 map = get_map(chain, 0, 1 /* locked */); 889 if (map == NULL) { 890 IPFW_UH_WUNLOCK(chain); 891 return (ENOMEM); 892 } 893 894 n = 0; 895 ndyn = 0; 896 ofs = start; 897 /* 1. bcopy the initial part of the map */ 898 if (start > 0) 899 bcopy(chain->map, map, start * sizeof(struct ip_fw *)); 900 /* 2. copy active rules between start and end */ 901 for (i = start; i < end; i++) { 902 rule = chain->map[i]; 903 if (ipfw_match_range(rule, rt) == 0) { 904 map[ofs++] = rule; 905 continue; 906 } 907 908 n++; 909 if (ipfw_is_dyn_rule(rule) != 0) 910 ndyn++; 911 } 912 /* 3. copy the final part of the map */ 913 bcopy(chain->map + end, map + ofs, 914 (chain->n_rules - end) * sizeof(struct ip_fw *)); 915 /* 4. recalculate skipto cache */ 916 update_skipto_cache(chain, map); 917 /* 5. swap the maps (under UH_WLOCK + WHLOCK) */ 918 map = swap_map(chain, map, chain->n_rules - n); 919 /* 6. Remove all dynamic states originated by deleted rules */ 920 if (ndyn > 0) 921 ipfw_expire_dyn_rules(chain, rt); 922 /* 7. 
now remove the rules deleted from the old map */
	for (i = start; i < end; i++) {
		rule = map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		chain->static_len -= RULEUSIZE0(rule);
		ipfw_reap_add(chain, &reap, rule);
	}
	IPFW_UH_WUNLOCK(chain);

	ipfw_reap_rules(reap);
	if (map != NULL)
		free(map, M_IPFW);
	*ndel = n;
	return (0);
}

/*
 * Moves rules matching range @rt to the set given in @rt->new_set.
 *
 * Returns 0 on success.
 */
static int
move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK(chain);

	/*
	 * Move rules with matching parameters to a new set.
	 * This one is much more complex. We have to ensure
	 * that all referenced tables (if any) are referenced
	 * by given rule subset only. Otherwise, we can't move
	 * them to new set and have to return error.
	 */
	if (V_fw_tables_sets != 0) {
		if (ipfw_move_tables_sets(chain, rt, rt->new_set) != 0) {
			IPFW_UH_WUNLOCK(chain);
			return (EBUSY);
		}
	}

	/* XXX: We have to do swap holding WLOCK */
	for (i = 0; i < chain->n_rules - 1; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		rule->set = rt->new_set;
	}

	IPFW_UH_WUNLOCK(chain);

	return (0);
}

/*
 * Clear counters for a specific rule.
 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
 * so we only care that rules do not disappear.
 */
static void
clear_counters(struct ip_fw *rule, int log_only)
{
	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);

	if (log_only == 0)
		IPFW_ZERO_RULE_COUNTER(rule);
	if (l->o.opcode == O_LOG)
		l->log_left = l->max_log;
}

/*
 * Flushes rule counters and/or log values on matching range.
 *
 * Returns number of items cleared.
 */
static int
clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
{
	struct ip_fw *rule;
	int num;
	int i;

	num = 0;

	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
	for (i = 0; i < chain->n_rules - 1; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		clear_counters(rule, log_only);
		num++;
	}
	IPFW_UH_WUNLOCK(chain);

	return (num);
}

static int
check_range_tlv(ipfw_range_tlv *rt)
{

	if (rt->head.length != sizeof(*rt))
		return (1);
	if (rt->start_rule > rt->end_rule)
		return (1);
	if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
		return (1);

	return (0);
}

/*
 * Delete rules matching specified parameters
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_range_tlv ]
 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
 *
 * Saves number of deleted rules in ipfw_range_tlv->new_set.
 *
 * Returns 0 on success.
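 *
 * For illustration (a sketch of the relevant ipfw_range_tlv fields only,
 * not a complete request), deleting rules 1000..2000 from set 5 would be
 * encoded as:
 *
 *	rt.flags = IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
 *	rt.start_rule = 1000;
 *	rt.end_rule = 2000;
 *	rt.set = 5;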
1046 */ 1047 static int 1048 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1049 struct sockopt_data *sd) 1050 { 1051 ipfw_range_header *rh; 1052 int error, ndel; 1053 1054 if (sd->valsize != sizeof(*rh)) 1055 return (EINVAL); 1056 1057 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1058 1059 if (check_range_tlv(&rh->range) != 0) 1060 return (EINVAL); 1061 1062 ndel = 0; 1063 if ((error = delete_range(chain, &rh->range, &ndel)) != 0) 1064 return (error); 1065 1066 /* Save number of rules deleted */ 1067 rh->range.new_set = ndel; 1068 return (0); 1069 } 1070 1071 /* 1072 * Move rules/sets matching specified parameters 1073 * Data layout (v0)(current): 1074 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1075 * 1076 * Returns 0 on success. 1077 */ 1078 static int 1079 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1080 struct sockopt_data *sd) 1081 { 1082 ipfw_range_header *rh; 1083 1084 if (sd->valsize != sizeof(*rh)) 1085 return (EINVAL); 1086 1087 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1088 1089 if (check_range_tlv(&rh->range) != 0) 1090 return (EINVAL); 1091 1092 return (move_range(chain, &rh->range)); 1093 } 1094 1095 /* 1096 * Clear rule accounting data matching specified parameters 1097 * Data layout (v0)(current): 1098 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1099 * Reply: [ ipfw_obj_header ipfw_range_tlv ] 1100 * 1101 * Saves number of cleared rules in ipfw_range_tlv->new_set. 1102 * 1103 * Returns 0 on success. 1104 */ 1105 static int 1106 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1107 struct sockopt_data *sd) 1108 { 1109 ipfw_range_header *rh; 1110 int log_only, num; 1111 char *msg; 1112 1113 if (sd->valsize != sizeof(*rh)) 1114 return (EINVAL); 1115 1116 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1117 1118 if (check_range_tlv(&rh->range) != 0) 1119 return (EINVAL); 1120 1121 log_only = (op3->opcode == IP_FW_XRESETLOG); 1122 1123 num = clear_range(chain, &rh->range, log_only); 1124 1125 if (rh->range.flags & IPFW_RCFLAG_ALL) 1126 msg = log_only ? "All logging counts reset" : 1127 "Accounting cleared"; 1128 else 1129 msg = log_only ? "logging count reset" : "cleared"; 1130 1131 if (V_fw_verbose) { 1132 int lev = LOG_SECURITY | LOG_NOTICE; 1133 log(lev, "ipfw: %s.\n", msg); 1134 } 1135 1136 /* Save number of rules cleared */ 1137 rh->range.new_set = num; 1138 return (0); 1139 } 1140 1141 static void 1142 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt) 1143 { 1144 uint32_t v_set; 1145 1146 IPFW_UH_WLOCK_ASSERT(chain); 1147 1148 /* Change enabled/disabled sets mask */ 1149 v_set = (V_set_disable | rt->set) & ~rt->new_set; 1150 v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */ 1151 IPFW_WLOCK(chain); 1152 V_set_disable = v_set; 1153 IPFW_WUNLOCK(chain); 1154 } 1155 1156 static void 1157 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv) 1158 { 1159 struct ip_fw *rule; 1160 int i; 1161 1162 IPFW_UH_WLOCK_ASSERT(chain); 1163 1164 /* Swap or move two sets */ 1165 for (i = 0; i < chain->n_rules - 1; i++) { 1166 rule = chain->map[i]; 1167 if (rule->set == rt->set) 1168 rule->set = rt->new_set; 1169 else if (rule->set == rt->new_set && mv == 0) 1170 rule->set = rt->set; 1171 } 1172 if (V_fw_tables_sets != 0) 1173 ipfw_swap_tables_sets(chain, rt->set, rt->new_set, mv); 1174 } 1175 1176 /* 1177 * Swaps or moves set 1178 * Data layout (v0)(current): 1179 * Request: [ ipfw_obj_header ipfw_range_tlv ] 1180 * 1181 * Returns 0 on success. 
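 *
 * The ipfw_range_tlv fields are interpreted per opcode: IP_FW_SET_SWAP
 * exchanges sets @set and @new_set, IP_FW_SET_MOVE moves rules from @set
 * into @new_set, and IP_FW_SET_ENABLE updates the global V_set_disable
 * mask (bits present in @set are added to it, bits in @new_set are
 * cleared from it, and RESVD_SET is always left enabled).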
1182 */ 1183 static int 1184 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 1185 struct sockopt_data *sd) 1186 { 1187 ipfw_range_header *rh; 1188 1189 if (sd->valsize != sizeof(*rh)) 1190 return (EINVAL); 1191 1192 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize); 1193 1194 if (rh->range.head.length != sizeof(ipfw_range_tlv)) 1195 return (1); 1196 1197 IPFW_UH_WLOCK(chain); 1198 switch (op3->opcode) { 1199 case IP_FW_SET_SWAP: 1200 case IP_FW_SET_MOVE: 1201 swap_sets(chain, &rh->range, op3->opcode == IP_FW_SET_MOVE); 1202 break; 1203 case IP_FW_SET_ENABLE: 1204 enable_sets(chain, &rh->range); 1205 break; 1206 } 1207 IPFW_UH_WUNLOCK(chain); 1208 1209 return (0); 1210 } 1211 1212 /** 1213 * Remove all rules with given number, or do set manipulation. 1214 * Assumes chain != NULL && *chain != NULL. 1215 * 1216 * The argument is an uint32_t. The low 16 bit are the rule or set number; 1217 * the next 8 bits are the new set; the top 8 bits indicate the command: 1218 * 1219 * 0 delete rules numbered "rulenum" 1220 * 1 delete rules in set "rulenum" 1221 * 2 move rules "rulenum" to set "new_set" 1222 * 3 move rules from set "rulenum" to set "new_set" 1223 * 4 swap sets "rulenum" and "new_set" 1224 * 5 delete rules "rulenum" and set "new_set" 1225 */ 1226 static int 1227 del_entry(struct ip_fw_chain *chain, uint32_t arg) 1228 { 1229 uint32_t num; /* rule number or old_set */ 1230 uint8_t cmd, new_set; 1231 int do_del, ndel; 1232 int error = 0; 1233 ipfw_range_tlv rt; 1234 1235 num = arg & 0xffff; 1236 cmd = (arg >> 24) & 0xff; 1237 new_set = (arg >> 16) & 0xff; 1238 1239 if (cmd > 5 || new_set > RESVD_SET) 1240 return EINVAL; 1241 if (cmd == 0 || cmd == 2 || cmd == 5) { 1242 if (num >= IPFW_DEFAULT_RULE) 1243 return EINVAL; 1244 } else { 1245 if (num > RESVD_SET) /* old_set */ 1246 return EINVAL; 1247 } 1248 1249 /* Convert old requests into new representation */ 1250 memset(&rt, 0, sizeof(rt)); 1251 rt.start_rule = num; 1252 rt.end_rule = num; 1253 rt.set = num; 1254 rt.new_set = new_set; 1255 do_del = 0; 1256 1257 switch (cmd) { 1258 case 0: /* delete rules numbered "rulenum" */ 1259 if (num == 0) 1260 rt.flags |= IPFW_RCFLAG_ALL; 1261 else 1262 rt.flags |= IPFW_RCFLAG_RANGE; 1263 do_del = 1; 1264 break; 1265 case 1: /* delete rules in set "rulenum" */ 1266 rt.flags |= IPFW_RCFLAG_SET; 1267 do_del = 1; 1268 break; 1269 case 5: /* delete rules "rulenum" and set "new_set" */ 1270 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET; 1271 rt.set = new_set; 1272 rt.new_set = 0; 1273 do_del = 1; 1274 break; 1275 case 2: /* move rules "rulenum" to set "new_set" */ 1276 rt.flags |= IPFW_RCFLAG_RANGE; 1277 break; 1278 case 3: /* move rules from set "rulenum" to set "new_set" */ 1279 IPFW_UH_WLOCK(chain); 1280 swap_sets(chain, &rt, 1); 1281 IPFW_UH_WUNLOCK(chain); 1282 return (0); 1283 case 4: /* swap sets "rulenum" and "new_set" */ 1284 IPFW_UH_WLOCK(chain); 1285 swap_sets(chain, &rt, 0); 1286 IPFW_UH_WUNLOCK(chain); 1287 return (0); 1288 default: 1289 return (ENOTSUP); 1290 } 1291 1292 if (do_del != 0) { 1293 if ((error = delete_range(chain, &rt, &ndel)) != 0) 1294 return (error); 1295 1296 if (ndel == 0 && (cmd != 1 && num != 0)) 1297 return (EINVAL); 1298 1299 return (0); 1300 } 1301 1302 return (move_range(chain, &rt)); 1303 } 1304 1305 /** 1306 * Reset some or all counters on firewall rules. 1307 * The argument `arg' is an u_int32_t. 
The low 16 bit are the rule number, 1308 * the next 8 bits are the set number, the top 8 bits are the command: 1309 * 0 work with rules from all set's; 1310 * 1 work with rules only from specified set. 1311 * Specified rule number is zero if we want to clear all entries. 1312 * log_only is 1 if we only want to reset logs, zero otherwise. 1313 */ 1314 static int 1315 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only) 1316 { 1317 struct ip_fw *rule; 1318 char *msg; 1319 int i; 1320 1321 uint16_t rulenum = arg & 0xffff; 1322 uint8_t set = (arg >> 16) & 0xff; 1323 uint8_t cmd = (arg >> 24) & 0xff; 1324 1325 if (cmd > 1) 1326 return (EINVAL); 1327 if (cmd == 1 && set > RESVD_SET) 1328 return (EINVAL); 1329 1330 IPFW_UH_RLOCK(chain); 1331 if (rulenum == 0) { 1332 V_norule_counter = 0; 1333 for (i = 0; i < chain->n_rules; i++) { 1334 rule = chain->map[i]; 1335 /* Skip rules not in our set. */ 1336 if (cmd == 1 && rule->set != set) 1337 continue; 1338 clear_counters(rule, log_only); 1339 } 1340 msg = log_only ? "All logging counts reset" : 1341 "Accounting cleared"; 1342 } else { 1343 int cleared = 0; 1344 for (i = 0; i < chain->n_rules; i++) { 1345 rule = chain->map[i]; 1346 if (rule->rulenum == rulenum) { 1347 if (cmd == 0 || rule->set == set) 1348 clear_counters(rule, log_only); 1349 cleared = 1; 1350 } 1351 if (rule->rulenum > rulenum) 1352 break; 1353 } 1354 if (!cleared) { /* we did not find any matching rules */ 1355 IPFW_UH_RUNLOCK(chain); 1356 return (EINVAL); 1357 } 1358 msg = log_only ? "logging count reset" : "cleared"; 1359 } 1360 IPFW_UH_RUNLOCK(chain); 1361 1362 if (V_fw_verbose) { 1363 int lev = LOG_SECURITY | LOG_NOTICE; 1364 1365 if (rulenum) 1366 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg); 1367 else 1368 log(lev, "ipfw: %s.\n", msg); 1369 } 1370 return (0); 1371 } 1372 1373 1374 /* 1375 * Check rule head in FreeBSD11 format 1376 * 1377 */ 1378 static int 1379 check_ipfw_rule1(struct ip_fw_rule *rule, int size, 1380 struct rule_check_info *ci) 1381 { 1382 int l; 1383 1384 if (size < sizeof(*rule)) { 1385 printf("ipfw: rule too short\n"); 1386 return (EINVAL); 1387 } 1388 1389 /* Check for valid cmd_len */ 1390 l = roundup2(RULESIZE(rule), sizeof(uint64_t)); 1391 if (l != size) { 1392 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1393 return (EINVAL); 1394 } 1395 if (rule->act_ofs >= rule->cmd_len) { 1396 printf("ipfw: bogus action offset (%u > %u)\n", 1397 rule->act_ofs, rule->cmd_len - 1); 1398 return (EINVAL); 1399 } 1400 1401 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1402 return (EINVAL); 1403 1404 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1405 } 1406 1407 /* 1408 * Check rule head in FreeBSD8 format 1409 * 1410 */ 1411 static int 1412 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size, 1413 struct rule_check_info *ci) 1414 { 1415 int l; 1416 1417 if (size < sizeof(*rule)) { 1418 printf("ipfw: rule too short\n"); 1419 return (EINVAL); 1420 } 1421 1422 /* Check for valid cmd_len */ 1423 l = sizeof(*rule) + rule->cmd_len * 4 - 4; 1424 if (l != size) { 1425 printf("ipfw: size mismatch (have %d want %d)\n", size, l); 1426 return (EINVAL); 1427 } 1428 if (rule->act_ofs >= rule->cmd_len) { 1429 printf("ipfw: bogus action offset (%u > %u)\n", 1430 rule->act_ofs, rule->cmd_len - 1); 1431 return (EINVAL); 1432 } 1433 1434 if (rule->rulenum > IPFW_DEFAULT_RULE - 1) 1435 return (EINVAL); 1436 1437 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci)); 1438 } 1439 1440 static int 1441 check_ipfw_rule_body(ipfw_insn *cmd, int 
cmd_len, struct rule_check_info *ci) 1442 { 1443 int cmdlen, l; 1444 int have_action; 1445 1446 have_action = 0; 1447 1448 /* 1449 * Now go for the individual checks. Very simple ones, basically only 1450 * instruction sizes. 1451 */ 1452 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) { 1453 cmdlen = F_LEN(cmd); 1454 if (cmdlen > l) { 1455 printf("ipfw: opcode %d size truncated\n", 1456 cmd->opcode); 1457 return EINVAL; 1458 } 1459 switch (cmd->opcode) { 1460 case O_PROBE_STATE: 1461 case O_KEEP_STATE: 1462 case O_PROTO: 1463 case O_IP_SRC_ME: 1464 case O_IP_DST_ME: 1465 case O_LAYER2: 1466 case O_IN: 1467 case O_FRAG: 1468 case O_DIVERTED: 1469 case O_IPOPT: 1470 case O_IPTOS: 1471 case O_IPPRECEDENCE: 1472 case O_IPVER: 1473 case O_SOCKARG: 1474 case O_TCPFLAGS: 1475 case O_TCPOPTS: 1476 case O_ESTAB: 1477 case O_VERREVPATH: 1478 case O_VERSRCREACH: 1479 case O_ANTISPOOF: 1480 case O_IPSEC: 1481 #ifdef INET6 1482 case O_IP6_SRC_ME: 1483 case O_IP6_DST_ME: 1484 case O_EXT_HDR: 1485 case O_IP6: 1486 #endif 1487 case O_IP4: 1488 case O_TAG: 1489 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1490 goto bad_size; 1491 break; 1492 1493 case O_FIB: 1494 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1495 goto bad_size; 1496 if (cmd->arg1 >= rt_numfibs) { 1497 printf("ipfw: invalid fib number %d\n", 1498 cmd->arg1); 1499 return EINVAL; 1500 } 1501 break; 1502 1503 case O_SETFIB: 1504 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1505 goto bad_size; 1506 if ((cmd->arg1 != IP_FW_TARG) && 1507 ((cmd->arg1 & 0x7FFFF) >= rt_numfibs)) { 1508 printf("ipfw: invalid fib number %d\n", 1509 cmd->arg1 & 0x7FFFF); 1510 return EINVAL; 1511 } 1512 goto check_action; 1513 1514 case O_UID: 1515 case O_GID: 1516 case O_JAIL: 1517 case O_IP_SRC: 1518 case O_IP_DST: 1519 case O_TCPSEQ: 1520 case O_TCPACK: 1521 case O_PROB: 1522 case O_ICMPTYPE: 1523 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1524 goto bad_size; 1525 break; 1526 1527 case O_LIMIT: 1528 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) 1529 goto bad_size; 1530 break; 1531 1532 case O_LOG: 1533 if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) 1534 goto bad_size; 1535 1536 ((ipfw_insn_log *)cmd)->log_left = 1537 ((ipfw_insn_log *)cmd)->max_log; 1538 1539 break; 1540 1541 case O_IP_SRC_MASK: 1542 case O_IP_DST_MASK: 1543 /* only odd command lengths */ 1544 if ( !(cmdlen & 1) || cmdlen > 31) 1545 goto bad_size; 1546 break; 1547 1548 case O_IP_SRC_SET: 1549 case O_IP_DST_SET: 1550 if (cmd->arg1 == 0 || cmd->arg1 > 256) { 1551 printf("ipfw: invalid set size %d\n", 1552 cmd->arg1); 1553 return EINVAL; 1554 } 1555 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1556 (cmd->arg1+31)/32 ) 1557 goto bad_size; 1558 break; 1559 1560 case O_IP_SRC_LOOKUP: 1561 case O_IP_DST_LOOKUP: 1562 if (cmd->arg1 >= V_fw_tables_max) { 1563 printf("ipfw: invalid table number %d\n", 1564 cmd->arg1); 1565 return (EINVAL); 1566 } 1567 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1568 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 && 1569 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1570 goto bad_size; 1571 ci->table_opcodes++; 1572 break; 1573 case O_IP_FLOW_LOOKUP: 1574 if (cmd->arg1 >= V_fw_tables_max) { 1575 printf("ipfw: invalid table number %d\n", 1576 cmd->arg1); 1577 return (EINVAL); 1578 } 1579 if (cmdlen != F_INSN_SIZE(ipfw_insn) && 1580 cmdlen != F_INSN_SIZE(ipfw_insn_u32)) 1581 goto bad_size; 1582 ci->table_opcodes++; 1583 break; 1584 case O_MACADDR2: 1585 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) 1586 goto bad_size; 1587 break; 1588 1589 case O_NOP: 1590 case O_IPID: 1591 case O_IPTTL: 1592 case O_IPLEN: 1593 case 
O_TCPDATALEN: 1594 case O_TCPWIN: 1595 case O_TAGGED: 1596 if (cmdlen < 1 || cmdlen > 31) 1597 goto bad_size; 1598 break; 1599 1600 case O_DSCP: 1601 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1) 1602 goto bad_size; 1603 break; 1604 1605 case O_MAC_TYPE: 1606 case O_IP_SRCPORT: 1607 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ 1608 if (cmdlen < 2 || cmdlen > 31) 1609 goto bad_size; 1610 break; 1611 1612 case O_RECV: 1613 case O_XMIT: 1614 case O_VIA: 1615 if (((ipfw_insn_if *)cmd)->name[0] == '\1') 1616 ci->table_opcodes++; 1617 if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) 1618 goto bad_size; 1619 break; 1620 1621 case O_ALTQ: 1622 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq)) 1623 goto bad_size; 1624 break; 1625 1626 case O_PIPE: 1627 case O_QUEUE: 1628 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1629 goto bad_size; 1630 goto check_action; 1631 1632 case O_FORWARD_IP: 1633 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) 1634 goto bad_size; 1635 goto check_action; 1636 #ifdef INET6 1637 case O_FORWARD_IP6: 1638 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6)) 1639 goto bad_size; 1640 goto check_action; 1641 #endif /* INET6 */ 1642 1643 case O_DIVERT: 1644 case O_TEE: 1645 if (ip_divert_ptr == NULL) 1646 return EINVAL; 1647 else 1648 goto check_size; 1649 case O_NETGRAPH: 1650 case O_NGTEE: 1651 if (ng_ipfw_input_p == NULL) 1652 return EINVAL; 1653 else 1654 goto check_size; 1655 case O_NAT: 1656 if (!IPFW_NAT_LOADED) 1657 return EINVAL; 1658 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat)) 1659 goto bad_size; 1660 goto check_action; 1661 case O_FORWARD_MAC: /* XXX not implemented yet */ 1662 case O_CHECK_STATE: 1663 case O_COUNT: 1664 case O_ACCEPT: 1665 case O_DENY: 1666 case O_REJECT: 1667 case O_SETDSCP: 1668 #ifdef INET6 1669 case O_UNREACH6: 1670 #endif 1671 case O_SKIPTO: 1672 case O_REASS: 1673 case O_CALLRETURN: 1674 check_size: 1675 if (cmdlen != F_INSN_SIZE(ipfw_insn)) 1676 goto bad_size; 1677 check_action: 1678 if (have_action) { 1679 printf("ipfw: opcode %d, multiple actions" 1680 " not allowed\n", 1681 cmd->opcode); 1682 return (EINVAL); 1683 } 1684 have_action = 1; 1685 if (l != cmdlen) { 1686 printf("ipfw: opcode %d, action must be" 1687 " last opcode\n", 1688 cmd->opcode); 1689 return (EINVAL); 1690 } 1691 break; 1692 #ifdef INET6 1693 case O_IP6_SRC: 1694 case O_IP6_DST: 1695 if (cmdlen != F_INSN_SIZE(struct in6_addr) + 1696 F_INSN_SIZE(ipfw_insn)) 1697 goto bad_size; 1698 break; 1699 1700 case O_FLOW6ID: 1701 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1702 ((ipfw_insn_u32 *)cmd)->o.arg1) 1703 goto bad_size; 1704 break; 1705 1706 case O_IP6_SRC_MASK: 1707 case O_IP6_DST_MASK: 1708 if ( !(cmdlen & 1) || cmdlen > 127) 1709 goto bad_size; 1710 break; 1711 case O_ICMP6TYPE: 1712 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) ) 1713 goto bad_size; 1714 break; 1715 #endif 1716 1717 default: 1718 switch (cmd->opcode) { 1719 #ifndef INET6 1720 case O_IP6_SRC_ME: 1721 case O_IP6_DST_ME: 1722 case O_EXT_HDR: 1723 case O_IP6: 1724 case O_UNREACH6: 1725 case O_IP6_SRC: 1726 case O_IP6_DST: 1727 case O_FLOW6ID: 1728 case O_IP6_SRC_MASK: 1729 case O_IP6_DST_MASK: 1730 case O_ICMP6TYPE: 1731 printf("ipfw: no IPv6 support in kernel\n"); 1732 return (EPROTONOSUPPORT); 1733 #endif 1734 default: 1735 printf("ipfw: opcode %d, unknown opcode\n", 1736 cmd->opcode); 1737 return (EINVAL); 1738 } 1739 } 1740 } 1741 if (have_action == 0) { 1742 printf("ipfw: missing action\n"); 1743 return (EINVAL); 1744 } 1745 return 0; 1746 1747 bad_size: 1748 printf("ipfw: opcode %d size %d wrong\n", 1749 cmd->opcode, cmdlen); 1750 
return (EINVAL); 1751 } 1752 1753 1754 /* 1755 * Translation of requests for compatibility with FreeBSD 7.2/8. 1756 * a static variable tells us if we have an old client from userland, 1757 * and if necessary we translate requests and responses between the 1758 * two formats. 1759 */ 1760 static int is7 = 0; 1761 1762 struct ip_fw7 { 1763 struct ip_fw7 *next; /* linked list of rules */ 1764 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */ 1765 /* 'next_rule' is used to pass up 'set_disable' status */ 1766 1767 uint16_t act_ofs; /* offset of action in 32-bit units */ 1768 uint16_t cmd_len; /* # of 32-bit words in cmd */ 1769 uint16_t rulenum; /* rule number */ 1770 uint8_t set; /* rule set (0..31) */ 1771 // #define RESVD_SET 31 /* set for default and persistent rules */ 1772 uint8_t _pad; /* padding */ 1773 // uint32_t id; /* rule id, only in v.8 */ 1774 /* These fields are present in all rules. */ 1775 uint64_t pcnt; /* Packet counter */ 1776 uint64_t bcnt; /* Byte counter */ 1777 uint32_t timestamp; /* tv_sec of last match */ 1778 1779 ipfw_insn cmd[1]; /* storage for commands */ 1780 }; 1781 1782 static int convert_rule_to_7(struct ip_fw_rule0 *rule); 1783 static int convert_rule_to_8(struct ip_fw_rule0 *rule); 1784 1785 #ifndef RULESIZE7 1786 #define RULESIZE7(rule) (sizeof(struct ip_fw7) + \ 1787 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4) 1788 #endif 1789 1790 1791 /* 1792 * Copy the static and dynamic rules to the supplied buffer 1793 * and return the amount of space actually used. 1794 * Must be run under IPFW_UH_RLOCK 1795 */ 1796 static size_t 1797 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space) 1798 { 1799 char *bp = buf; 1800 char *ep = bp + space; 1801 struct ip_fw *rule; 1802 struct ip_fw_rule0 *dst; 1803 int error, i, l, warnflag; 1804 time_t boot_seconds; 1805 1806 warnflag = 0; 1807 1808 boot_seconds = boottime.tv_sec; 1809 for (i = 0; i < chain->n_rules; i++) { 1810 rule = chain->map[i]; 1811 1812 if (is7) { 1813 /* Convert rule to FreeBSd 7.2 format */ 1814 l = RULESIZE7(rule); 1815 if (bp + l + sizeof(uint32_t) <= ep) { 1816 bcopy(rule, bp, l + sizeof(uint32_t)); 1817 error = ipfw_rewrite_table_kidx(chain, 1818 (struct ip_fw_rule0 *)bp); 1819 if (error != 0) 1820 return (0); 1821 error = convert_rule_to_7((struct ip_fw_rule0 *) bp); 1822 if (error) 1823 return 0; /*XXX correct? */ 1824 /* 1825 * XXX HACK. Store the disable mask in the "next" 1826 * pointer in a wild attempt to keep the ABI the same. 1827 * Why do we do this on EVERY rule? 1828 */ 1829 bcopy(&V_set_disable, 1830 &(((struct ip_fw7 *)bp)->next_rule), 1831 sizeof(V_set_disable)); 1832 if (((struct ip_fw7 *)bp)->timestamp) 1833 ((struct ip_fw7 *)bp)->timestamp += boot_seconds; 1834 bp += l; 1835 } 1836 continue; /* go to next rule */ 1837 } 1838 1839 l = RULEUSIZE0(rule); 1840 if (bp + l > ep) { /* should not happen */ 1841 printf("overflow dumping static rules\n"); 1842 break; 1843 } 1844 dst = (struct ip_fw_rule0 *)bp; 1845 export_rule0(rule, dst, l); 1846 error = ipfw_rewrite_table_kidx(chain, dst); 1847 1848 /* 1849 * XXX HACK. Store the disable mask in the "next" 1850 * pointer in a wild attempt to keep the ABI the same. 1851 * Why do we do this on EVERY rule? 1852 * 1853 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask 1854 * so we need to fail _after_ saving at least one mask. 
1855 */ 1856 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable)); 1857 if (dst->timestamp) 1858 dst->timestamp += boot_seconds; 1859 bp += l; 1860 1861 if (error != 0) { 1862 if (error == 2) { 1863 /* Non-fatal table rewrite error. */ 1864 warnflag = 1; 1865 continue; 1866 } 1867 printf("Stop on rule %d. Fail to convert table\n", 1868 rule->rulenum); 1869 break; 1870 } 1871 } 1872 if (warnflag != 0) 1873 printf("ipfw: process %s is using legacy interfaces," 1874 " consider rebuilding\n", ""); 1875 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */ 1876 return (bp - (char *)buf); 1877 } 1878 1879 1880 struct dump_args { 1881 uint32_t b; /* start rule */ 1882 uint32_t e; /* end rule */ 1883 uint32_t rcount; /* number of rules */ 1884 uint32_t rsize; /* rules size */ 1885 uint32_t tcount; /* number of tables */ 1886 int rcounters; /* counters */ 1887 }; 1888 1889 /* 1890 * Dumps static rules with table TLVs in buffer @sd. 1891 * 1892 * Returns 0 on success. 1893 */ 1894 static int 1895 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da, 1896 uint32_t *bmask, struct sockopt_data *sd) 1897 { 1898 int error; 1899 int i, l; 1900 uint32_t tcount; 1901 ipfw_obj_ctlv *ctlv; 1902 struct ip_fw *krule; 1903 caddr_t dst; 1904 1905 /* Dump table names first (if any) */ 1906 if (da->tcount > 0) { 1907 /* Header first */ 1908 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 1909 if (ctlv == NULL) 1910 return (ENOMEM); 1911 ctlv->head.type = IPFW_TLV_TBLNAME_LIST; 1912 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) + 1913 sizeof(*ctlv); 1914 ctlv->count = da->tcount; 1915 ctlv->objsize = sizeof(ipfw_obj_ntlv); 1916 } 1917 1918 i = 0; 1919 tcount = da->tcount; 1920 while (tcount > 0) { 1921 if ((bmask[i / 32] & (1 << (i % 32))) == 0) { 1922 i++; 1923 continue; 1924 } 1925 1926 if ((error = ipfw_export_table_ntlv(chain, i, sd)) != 0) 1927 return (error); 1928 1929 i++; 1930 tcount--; 1931 } 1932 1933 /* Dump rules */ 1934 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 1935 if (ctlv == NULL) 1936 return (ENOMEM); 1937 ctlv->head.type = IPFW_TLV_RULE_LIST; 1938 ctlv->head.length = da->rsize + sizeof(*ctlv); 1939 ctlv->count = da->rcount; 1940 1941 for (i = da->b; i < da->e; i++) { 1942 krule = chain->map[i]; 1943 1944 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv); 1945 if (da->rcounters != 0) 1946 l += sizeof(struct ip_fw_bcounter); 1947 dst = (caddr_t)ipfw_get_sopt_space(sd, l); 1948 if (dst == NULL) 1949 return (ENOMEM); 1950 1951 export_rule1(krule, dst, l, da->rcounters); 1952 } 1953 1954 return (0); 1955 } 1956 1957 /* 1958 * Dumps requested objects data 1959 * Data layout (version 0)(current): 1960 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags 1961 * size = ipfw_cfg_lheader.size 1962 * Reply: [ ipfw_cfg_lheader 1963 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional) 1964 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) 1965 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ] 1966 * ] (optional) 1967 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional) 1968 * ] 1969 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize. 1970 * The rest (size, count) are set to zero and needs to be ignored. 1971 * 1972 * Returns 0 on success. 
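 *
 * Since the required size is reported in ipfw_cfg_lheader.size and ENOMEM
 * is returned when the caller's buffer is too small, userland normally
 * retrieves the configuration in two passes.  A rough sketch of the caller
 * side (assuming @s is the usual raw socket used for ipfw sockopts and that
 * the request header embeds the ip_fw3_opheader; error handling omitted):
 *
 *	ipfw_cfg_lheader *hdr;
 *	socklen_t len;
 *
 *	len = sizeof(*hdr);
 *	hdr = calloc(1, len);
 *	hdr->opheader.opcode = IP_FW_XGET;
 *	hdr->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS;
 *	getsockopt(s, IPPROTO_IP, IP_FW3, hdr, &len);	/* sizing pass */
 *	len = hdr->size;
 *	hdr = realloc(hdr, len);
 *	getsockopt(s, IPPROTO_IP, IP_FW3, hdr, &len);	/* actual dump */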
 */
static int
dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_cfg_lheader *hdr;
	struct ip_fw *rule;
	size_t sz, rnum;
	uint32_t hdr_flags;
	int error, i;
	struct dump_args da;
	uint32_t *bmask;

	hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
	if (hdr == NULL)
		return (EINVAL);

	error = 0;
	bmask = NULL;
	/* Allocate needed state */
	if (hdr->flags & IPFW_CFG_GET_STATIC)
		bmask = malloc(IPFW_TABLES_MAX / 8, M_TEMP, M_WAITOK | M_ZERO);

	IPFW_UH_RLOCK(chain);

	/*
	 * STAGE 1: Determine size/count for objects in range.
	 * Prepare used tables bitmask.
	 */
	sz = sizeof(ipfw_cfg_lheader);
	memset(&da, 0, sizeof(da));

	da.b = 0;
	da.e = chain->n_rules;

	if (hdr->end_rule != 0) {
		/* Handle custom range */
		if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
			rnum = IPFW_DEFAULT_RULE;
		da.b = ipfw_find_rule(chain, rnum, 0);
		rnum = hdr->end_rule;
		rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE;
		da.e = ipfw_find_rule(chain, rnum, 0);
	}

	if (hdr->flags & IPFW_CFG_GET_STATIC) {
		for (i = da.b; i < da.e; i++) {
			rule = chain->map[i];
			da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
			da.rcount++;
			da.tcount += ipfw_mark_table_kidx(chain, rule, bmask);
		}
		/* Add counters if requested */
		if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
			da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
			da.rcounters = 1;
		}

		if (da.tcount > 0)
			sz += da.tcount * sizeof(ipfw_obj_ntlv) +
			    sizeof(ipfw_obj_ctlv);
		sz += da.rsize + sizeof(ipfw_obj_ctlv);
	}

	if (hdr->flags & IPFW_CFG_GET_STATES)
		sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) +
		    sizeof(ipfw_obj_ctlv);

	/*
	 * Fill header anyway.
	 * Note we have to save header fields to stable storage:
	 * the buffer inside @sd can be flushed after dumping rules.
	 */
	hdr->size = sz;
	hdr->set_mask = ~V_set_disable;
	hdr_flags = hdr->flags;
	hdr = NULL;

	if (sd->valsize < sz) {
		error = ENOMEM;
		goto cleanup;
	}

	/* STAGE2: Store actual data */
	if (hdr_flags & IPFW_CFG_GET_STATIC) {
		error = dump_static_rules(chain, &da, bmask, sd);
		if (error != 0)
			goto cleanup;
	}

	if (hdr_flags & IPFW_CFG_GET_STATES)
		error = ipfw_dump_states(chain, sd);

cleanup:
	IPFW_UH_RUNLOCK(chain);

	if (bmask != NULL)
		free(bmask, M_TEMP);

	return (error);
}

static int
check_object_name(ipfw_obj_ntlv *ntlv)
{
	int error;

	switch (ntlv->head.type) {
	case IPFW_TLV_TBL_NAME:
		error = ipfw_check_table_name(ntlv->name);
		break;
	default:
		error = ENOTSUP;
	}

	return (error);
}

/*
 * Adds one or more rules to ipfw @chain.
 * Data layout (version 0)(current):
 * Request:
 * [
 *   ip_fw3_opheader
 *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
 *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
 * ]
 * Reply:
 * [
 *   ip_fw3_opheader
 *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
 *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
 * ]
 *
 * Rules in reply are modified to store their actual rule number.
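 * Processing outline: the optional IPFW_TLV_TBLNAME_LIST section is
 * validated first, then every rule in the IPFW_TLV_RULE_LIST section is
 * checked by check_ipfw_rule1(); only if all of them pass are kernel
 * rules allocated, filled in by import_rule1() and linked in by
 * commit_rules().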
2109 * 2110 * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending 2111 * accoring to their idx field and there has to be no duplicates. 2112 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending. 2113 * (*3) Each ip_fw structure needs to be aligned to u64 boundary. 2114 * 2115 * Returns 0 on success. 2116 */ 2117 static int 2118 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3, 2119 struct sockopt_data *sd) 2120 { 2121 ipfw_obj_ctlv *ctlv, *rtlv, *tstate; 2122 ipfw_obj_ntlv *ntlv; 2123 int clen, error, idx; 2124 uint32_t count, read; 2125 struct ip_fw_rule *r; 2126 struct rule_check_info rci, *ci, *cbuf; 2127 int i, rsize; 2128 2129 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize); 2130 ctlv = (ipfw_obj_ctlv *)(op3 + 1); 2131 2132 read = sizeof(ip_fw3_opheader); 2133 rtlv = NULL; 2134 tstate = NULL; 2135 cbuf = NULL; 2136 memset(&rci, 0, sizeof(struct rule_check_info)); 2137 2138 if (read + sizeof(*ctlv) > sd->valsize) 2139 return (EINVAL); 2140 2141 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) { 2142 clen = ctlv->head.length; 2143 /* Check size and alignment */ 2144 if (clen > sd->valsize || clen < sizeof(*ctlv)) 2145 return (EINVAL); 2146 if ((clen % sizeof(uint64_t)) != 0) 2147 return (EINVAL); 2148 2149 /* 2150 * Some table names or other named objects. 2151 * Check for validness. 2152 */ 2153 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv); 2154 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv)) 2155 return (EINVAL); 2156 2157 /* 2158 * Check each TLV. 2159 * Ensure TLVs are sorted ascending and 2160 * there are no duplicates. 2161 */ 2162 idx = -1; 2163 ntlv = (ipfw_obj_ntlv *)(ctlv + 1); 2164 while (count > 0) { 2165 if (ntlv->head.length != sizeof(ipfw_obj_ntlv)) 2166 return (EINVAL); 2167 2168 error = check_object_name(ntlv); 2169 if (error != 0) 2170 return (error); 2171 2172 if (ntlv->idx <= idx) 2173 return (EINVAL); 2174 2175 idx = ntlv->idx; 2176 count--; 2177 ntlv++; 2178 } 2179 2180 tstate = ctlv; 2181 read += ctlv->head.length; 2182 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length); 2183 } 2184 2185 if (read + sizeof(*ctlv) > sd->valsize) 2186 return (EINVAL); 2187 2188 if (ctlv->head.type == IPFW_TLV_RULE_LIST) { 2189 clen = ctlv->head.length; 2190 if (clen + read > sd->valsize || clen < sizeof(*ctlv)) 2191 return (EINVAL); 2192 if ((clen % sizeof(uint64_t)) != 0) 2193 return (EINVAL); 2194 2195 /* 2196 * TODO: Permit adding multiple rules at once 2197 */ 2198 if (ctlv->count != 1) 2199 return (ENOTSUP); 2200 2201 clen -= sizeof(*ctlv); 2202 2203 if (ctlv->count > clen / sizeof(struct ip_fw_rule)) 2204 return (EINVAL); 2205 2206 /* Allocate state for each rule or use stack */ 2207 if (ctlv->count == 1) { 2208 memset(&rci, 0, sizeof(struct rule_check_info)); 2209 cbuf = &rci; 2210 } else 2211 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP, 2212 M_WAITOK | M_ZERO); 2213 ci = cbuf; 2214 2215 /* 2216 * Check each rule for validness. 
2217 	 * Ensure numbered rules are sorted ascending
2218 	 * and properly aligned.
2219 	 */
2220 	idx = 0;
2221 	r = (struct ip_fw_rule *)(ctlv + 1);
2222 	count = 0;
2223 	error = 0;
2224 	while (clen > 0) {
2225 		rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
2226 		if (rsize > clen || ctlv->count <= count) {
2227 			error = EINVAL;
2228 			break;
2229 		}
2230 
2231 		ci->ctlv = tstate;
2232 		error = check_ipfw_rule1(r, rsize, ci);
2233 		if (error != 0)
2234 			break;
2235 
2236 		/* Check sorting */
2237 		if (r->rulenum != 0 && r->rulenum < idx) {
2238 			printf("rulenum %d idx %d\n", r->rulenum, idx);
2239 			error = EINVAL;
2240 			break;
2241 		}
2242 		idx = r->rulenum;
2243 
2244 		ci->urule = (caddr_t)r;
2245 
2246 		rsize = roundup2(rsize, sizeof(uint64_t));
2247 		clen -= rsize;
2248 		r = (struct ip_fw_rule *)((caddr_t)r + rsize);
2249 		count++;
2250 		ci++;
2251 	}
2252 
2253 	if (ctlv->count != count || error != 0) {
2254 		if (cbuf != &rci)
2255 			free(cbuf, M_TEMP);
2256 		return (EINVAL);
2257 	}
2258 
2259 	rtlv = ctlv;
2260 	read += ctlv->head.length;
2261 	ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2262 	}
2263 
2264 	if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
2265 		if (cbuf != NULL && cbuf != &rci)
2266 			free(cbuf, M_TEMP);
2267 		return (EINVAL);
2268 	}
2269 
2270 	/*
2271 	 * Passed rules seem to be valid.
2272 	 * Allocate storage and try to add them to the chain.
2273 	 */
2274 	for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
2275 		clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
2276 		ci->krule = ipfw_alloc_rule(chain, clen);
2277 		import_rule1(ci);
2278 	}
2279 
2280 	if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
2281 		/* Free allocated krules */
2282 		for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
2283 			free(ci->krule, M_IPFW);
2284 	}
2285 
2286 	if (cbuf != NULL && cbuf != &rci)
2287 		free(cbuf, M_TEMP);
2288 
2289 	return (error);
2290 }
2291 
2292 /*
2293  * Lists all sopts currently registered.
2294  * Data layout (v0)(current):
2295  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2296  * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
2297  *
2298  * Returns 0 on success.
2299  */
2300 static int
2301 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2302     struct sockopt_data *sd)
2303 {
2304 	struct _ipfw_obj_lheader *olh;
2305 	ipfw_sopt_info *i;
2306 	struct ipfw_sopt_handler *sh;
2307 	uint32_t count, n, size;
2308 
2309 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
2310 	if (olh == NULL)
2311 		return (EINVAL);
2312 	if (sd->valsize < olh->size)
2313 		return (EINVAL);
2314 
2315 	CTL3_LOCK();
2316 	count = ctl3_hsize;
2317 	size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
2318 
2319 	/* Fill in header regardless of buffer size */
2320 	olh->count = count;
2321 	olh->objsize = sizeof(ipfw_sopt_info);
2322 
2323 	if (size > olh->size) {
2324 		olh->size = size;
2325 		CTL3_UNLOCK();
2326 		return (ENOMEM);
2327 	}
2328 	olh->size = size;
2329 
2330 	for (n = 1; n <= count; n++) {
2331 		i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2332 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
2333 		sh = &ctl3_handlers[n - 1];
2334 		i->opcode = sh->opcode;
2335 		i->version = sh->version;
2336 		i->refcnt = sh->refcnt;
2337 	}
2338 	CTL3_UNLOCK();
2339 
2340 	return (0);
2341 }
2342 
2343 /*
2344  * Compares two sopt handlers (code, version and handler ptr).
2345  * Used as both a qsort() and a bsearch() comparator.
2346  * Does not compare handlers in the latter case.
2347  *
2348  * Returns 0 if a match is found.
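 *
 * find_sh() performs its bsearch() lookups with a zeroed handler pointer,
 * so the handler comparison below is skipped and any registered handler
 * with matching opcode/version compares equal.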
2349  */
2350 static int
2351 compare_sh(const void *_a, const void *_b)
2352 {
2353 	const struct ipfw_sopt_handler *a, *b;
2354 
2355 	a = (const struct ipfw_sopt_handler *)_a;
2356 	b = (const struct ipfw_sopt_handler *)_b;
2357 
2358 	if (a->opcode < b->opcode)
2359 		return (-1);
2360 	else if (a->opcode > b->opcode)
2361 		return (1);
2362 
2363 	if (a->version < b->version)
2364 		return (-1);
2365 	else if (a->version > b->version)
2366 		return (1);
2367 
2368 	/* bsearch helper */
2369 	if (a->handler == NULL)
2370 		return (0);
2371 
2372 	if ((uintptr_t)a->handler < (uintptr_t)b->handler)
2373 		return (-1);
2374 	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
2375 		return (1);
2376 
2377 	return (0);
2378 }
2379 
2380 /*
2381  * Finds sopt handler based on @code and @version.
2382  *
2383  * Returns pointer to handler or NULL.
2384  */
2385 static struct ipfw_sopt_handler *
2386 find_sh(uint16_t code, uint8_t version, void *handler)
2387 {
2388 	struct ipfw_sopt_handler *sh, h;
2389 
2390 	memset(&h, 0, sizeof(h));
2391 	h.opcode = code;
2392 	h.version = version;
2393 	h.handler = handler;
2394 
2395 	sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
2396 	    ctl3_hsize, sizeof(h), compare_sh);
2397 
2398 	return (sh);
2399 }
2400 
2401 static int
2402 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
2403 {
2404 	struct ipfw_sopt_handler *sh;
2405 
2406 	CTL3_LOCK();
2407 	if ((sh = find_sh(opcode, version, NULL)) == NULL) {
2408 		CTL3_UNLOCK();
2409 		printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
2410 		    opcode, version);
2411 		return (EINVAL);
2412 	}
2413 	sh->refcnt++;
2414 	ctl3_refct++;
2415 	/* Copy handler data to requested buffer */
2416 	*psh = *sh;
2417 	CTL3_UNLOCK();
2418 
2419 	return (0);
2420 }
2421 
2422 static void
2423 find_unref_sh(struct ipfw_sopt_handler *psh)
2424 {
2425 	struct ipfw_sopt_handler *sh;
2426 
2427 	CTL3_LOCK();
2428 	sh = find_sh(psh->opcode, psh->version, NULL);
2429 	KASSERT(sh != NULL, ("ctl3 handler disappeared"));
2430 	sh->refcnt--;
2431 	ctl3_refct--;
2432 	CTL3_UNLOCK();
2433 }
2434 
2435 void
2436 ipfw_init_sopt_handler()
2437 {
2438 
2439 	CTL3_LOCK_INIT();
2440 	IPFW_ADD_SOPT_HANDLER(1, scodes);
2441 }
2442 
2443 void
2444 ipfw_destroy_sopt_handler()
2445 {
2446 
2447 	IPFW_DEL_SOPT_HANDLER(1, scodes);
2448 	CTL3_LOCK_DESTROY();
2449 }
2450 
2451 /*
2452  * Adds one or more sockopt handlers to the global array.
2453  * Function may sleep.
2454  */
2455 void
2456 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
2457 {
2458 	size_t sz;
2459 	struct ipfw_sopt_handler *tmp;
2460 
2461 	CTL3_LOCK();
2462 
2463 	for (;;) {
2464 		sz = ctl3_hsize + count;
2465 		CTL3_UNLOCK();
2466 		tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
2467 		CTL3_LOCK();
2468 		if (ctl3_hsize + count <= sz)
2469 			break;
2470 
2471 		/* Retry */
2472 		free(tmp, M_IPFW);
2473 	}
2474 
2475 	/* Merge old & new arrays */
2476 	sz = ctl3_hsize + count;
2477 	memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
2478 	memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
2479 	qsort(tmp, sz, sizeof(*sh), compare_sh);
2480 	/* Switch new and free old */
2481 	if (ctl3_handlers != NULL)
2482 		free(ctl3_handlers, M_IPFW);
2483 	ctl3_handlers = tmp;
2484 	ctl3_hsize = sz;
2485 	ctl3_gencnt++;
2486 
2487 	CTL3_UNLOCK();
2488 }
2489 
2490 /*
2491  * Removes one or more sockopt handlers from the global array.
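 * Handlers that are not currently registered are silently skipped.
 *
 * Always returns 0.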
2492 */ 2493 int 2494 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count) 2495 { 2496 size_t sz; 2497 struct ipfw_sopt_handler *tmp, *h; 2498 int i; 2499 2500 CTL3_LOCK(); 2501 2502 for (i = 0; i < count; i++) { 2503 tmp = &sh[i]; 2504 h = find_sh(tmp->opcode, tmp->version, tmp->handler); 2505 if (h == NULL) 2506 continue; 2507 2508 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h); 2509 memmove(h, h + 1, sz); 2510 ctl3_hsize--; 2511 } 2512 2513 if (ctl3_hsize == 0) { 2514 if (ctl3_handlers != NULL) 2515 free(ctl3_handlers, M_IPFW); 2516 ctl3_handlers = NULL; 2517 } 2518 2519 ctl3_gencnt++; 2520 2521 CTL3_UNLOCK(); 2522 2523 return (0); 2524 } 2525 2526 /* 2527 * Writes data accumulated in @sd to sockopt buffer. 2528 * Zeroes internal @sd buffer. 2529 */ 2530 static int 2531 ipfw_flush_sopt_data(struct sockopt_data *sd) 2532 { 2533 #define RULE_MAXSIZE (512*sizeof(u_int32_t)) 2534 int error; 2535 size_t sz; 2536 2537 if ((sz = sd->koff) == 0) 2538 return (0); 2539 2540 if (sd->sopt->sopt_dir == SOPT_GET) { 2541 error = sooptcopyout(sd->sopt, sd->kbuf, sz); 2542 if (error != 0) 2543 return (error); 2544 } 2545 2546 memset(sd->kbuf, 0, sd->ksize); 2547 sd->ktotal += sd->koff; 2548 sd->koff = 0; 2549 if (sd->ktotal + sd->ksize < sd->valsize) 2550 sd->kavail = sd->ksize; 2551 else 2552 sd->kavail = sd->valsize - sd->ktotal; 2553 2554 /* Update sopt buffer */ 2555 sd->sopt->sopt_valsize = sd->ktotal; 2556 sd->sopt->sopt_val = sd->sopt_val + sd->ktotal; 2557 2558 return (0); 2559 } 2560 2561 /* 2562 * Ensures that @sd buffer has contigious @neeeded number of 2563 * bytes. 2564 * 2565 * Returns pointer to requested space or NULL. 2566 */ 2567 caddr_t 2568 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed) 2569 { 2570 int error; 2571 caddr_t addr; 2572 2573 if (sd->kavail < needed) { 2574 /* 2575 * Flush data and try another time. 2576 */ 2577 error = ipfw_flush_sopt_data(sd); 2578 2579 if (sd->kavail < needed || error != 0) 2580 return (NULL); 2581 } 2582 2583 addr = sd->kbuf + sd->koff; 2584 sd->koff += needed; 2585 sd->kavail -= needed; 2586 return (addr); 2587 } 2588 2589 /* 2590 * Requests @needed contigious bytes from @sd buffer. 2591 * Function is used to notify subsystem that we are 2592 * interesed in first @needed bytes (request header) 2593 * and the rest buffer can be safely zeroed. 2594 * 2595 * Returns pointer to requested space or NULL. 2596 */ 2597 caddr_t 2598 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed) 2599 { 2600 caddr_t addr; 2601 2602 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL) 2603 return (NULL); 2604 2605 if (sd->kavail > 0) 2606 memset(sd->kbuf + sd->koff, 0, sd->kavail); 2607 2608 return (addr); 2609 } 2610 2611 /* 2612 * New sockopt handler. 
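 * Dispatches IP_FW3 requests: checks privileges, looks up and references
 * the handler matching the (opcode, version) pair from the ip_fw3_opheader,
 * sets up a sockopt_data buffer (on-stack, contiguous or sliding-window,
 * depending on the request type and size), runs the handler and flushes
 * any produced data back to the user buffer.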
2613 */ 2614 int 2615 ipfw_ctl3(struct sockopt *sopt) 2616 { 2617 int error, locked; 2618 size_t size, valsize; 2619 struct ip_fw_chain *chain; 2620 char xbuf[256]; 2621 struct sockopt_data sdata; 2622 struct ipfw_sopt_handler h; 2623 ip_fw3_opheader *op3 = NULL; 2624 2625 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW); 2626 if (error != 0) 2627 return (error); 2628 2629 if (sopt->sopt_name != IP_FW3) 2630 return (ipfw_ctl(sopt)); 2631 2632 chain = &V_layer3_chain; 2633 error = 0; 2634 2635 /* Save original valsize before it is altered via sooptcopyin() */ 2636 valsize = sopt->sopt_valsize; 2637 memset(&sdata, 0, sizeof(sdata)); 2638 /* Read op3 header first to determine actual operation */ 2639 op3 = (ip_fw3_opheader *)xbuf; 2640 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3)); 2641 if (error != 0) 2642 return (error); 2643 sopt->sopt_valsize = valsize; 2644 2645 /* 2646 * Find and reference command. 2647 */ 2648 error = find_ref_sh(op3->opcode, op3->version, &h); 2649 if (error != 0) 2650 return (error); 2651 2652 /* 2653 * Disallow modifications in really-really secure mode, but still allow 2654 * the logging counters to be reset. 2655 */ 2656 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) { 2657 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 2658 if (error != 0) { 2659 find_unref_sh(&h); 2660 return (error); 2661 } 2662 } 2663 2664 /* 2665 * Fill in sockopt_data structure that may be useful for 2666 * IP_FW3 get requests. 2667 */ 2668 locked = 0; 2669 if (valsize <= sizeof(xbuf)) { 2670 /* use on-stack buffer */ 2671 sdata.kbuf = xbuf; 2672 sdata.ksize = sizeof(xbuf); 2673 sdata.kavail = valsize; 2674 } else { 2675 2676 /* 2677 * Determine opcode type/buffer size: 2678 * allocate sliding-window buf for data export or 2679 * contigious buffer for special ops. 2680 */ 2681 if ((h.dir & HDIR_SET) != 0) { 2682 /* Set request. Allocate contigous buffer. */ 2683 if (valsize > CTL3_LARGEBUF) { 2684 find_unref_sh(&h); 2685 return (EFBIG); 2686 } 2687 2688 size = valsize; 2689 } else { 2690 /* Get request. Allocate sliding window buffer */ 2691 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF; 2692 2693 if (size < valsize) { 2694 /* We have to wire user buffer */ 2695 error = vslock(sopt->sopt_val, valsize); 2696 if (error != 0) 2697 return (error); 2698 locked = 1; 2699 } 2700 } 2701 2702 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 2703 sdata.ksize = size; 2704 sdata.kavail = size; 2705 } 2706 2707 sdata.sopt = sopt; 2708 sdata.sopt_val = sopt->sopt_val; 2709 sdata.valsize = valsize; 2710 2711 /* 2712 * Copy either all request (if valsize < bsize_max) 2713 * or first bsize_max bytes to guarantee most consumers 2714 * that all necessary data has been copied). 2715 * Anyway, copy not less than sizeof(ip_fw3_opheader). 
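	 * (The copy bound here is sdata.ksize: the full request for set
	 * requests and for requests that fit the on-stack buffer, or
	 * CTL3_SMALLBUF for sliding-window get requests.)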
2716 */ 2717 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize, 2718 sizeof(ip_fw3_opheader))) != 0) 2719 return (error); 2720 op3 = (ip_fw3_opheader *)sdata.kbuf; 2721 2722 /* Finally, run handler */ 2723 error = h.handler(chain, op3, &sdata); 2724 find_unref_sh(&h); 2725 2726 /* Flush state and free buffers */ 2727 if (error == 0) 2728 error = ipfw_flush_sopt_data(&sdata); 2729 else 2730 ipfw_flush_sopt_data(&sdata); 2731 2732 if (locked != 0) 2733 vsunlock(sdata.sopt_val, valsize); 2734 2735 /* Restore original pointer and set number of bytes written */ 2736 sopt->sopt_val = sdata.sopt_val; 2737 sopt->sopt_valsize = sdata.ktotal; 2738 if (sdata.kbuf != xbuf) 2739 free(sdata.kbuf, M_TEMP); 2740 2741 return (error); 2742 } 2743 2744 /** 2745 * {set|get}sockopt parser. 2746 */ 2747 int 2748 ipfw_ctl(struct sockopt *sopt) 2749 { 2750 #define RULE_MAXSIZE (512*sizeof(u_int32_t)) 2751 int error; 2752 size_t size, valsize; 2753 struct ip_fw *buf; 2754 struct ip_fw_rule0 *rule; 2755 struct ip_fw_chain *chain; 2756 u_int32_t rulenum[2]; 2757 uint32_t opt; 2758 struct rule_check_info ci; 2759 IPFW_RLOCK_TRACKER; 2760 2761 chain = &V_layer3_chain; 2762 error = 0; 2763 2764 /* Save original valsize before it is altered via sooptcopyin() */ 2765 valsize = sopt->sopt_valsize; 2766 opt = sopt->sopt_name; 2767 2768 /* 2769 * Disallow modifications in really-really secure mode, but still allow 2770 * the logging counters to be reset. 2771 */ 2772 if (opt == IP_FW_ADD || 2773 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) { 2774 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); 2775 if (error != 0) 2776 return (error); 2777 } 2778 2779 switch (opt) { 2780 case IP_FW_GET: 2781 /* 2782 * pass up a copy of the current rules. Static rules 2783 * come first (the last of which has number IPFW_DEFAULT_RULE), 2784 * followed by a possibly empty list of dynamic rule. 2785 * The last dynamic rule has NULL in the "next" field. 2786 * 2787 * Note that the calculated size is used to bound the 2788 * amount of data returned to the user. The rule set may 2789 * change between calculating the size and returning the 2790 * data in which case we'll just return what fits. 2791 */ 2792 for (;;) { 2793 int len = 0, want; 2794 2795 size = chain->static_len; 2796 size += ipfw_dyn_len(); 2797 if (size >= sopt->sopt_valsize) 2798 break; 2799 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO); 2800 IPFW_UH_RLOCK(chain); 2801 /* check again how much space we need */ 2802 want = chain->static_len + ipfw_dyn_len(); 2803 if (size >= want) 2804 len = ipfw_getrules(chain, buf, size); 2805 IPFW_UH_RUNLOCK(chain); 2806 if (size >= want) 2807 error = sooptcopyout(sopt, buf, len); 2808 free(buf, M_TEMP); 2809 if (size >= want) 2810 break; 2811 } 2812 break; 2813 2814 case IP_FW_FLUSH: 2815 /* locking is done within del_entry() */ 2816 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */ 2817 break; 2818 2819 case IP_FW_ADD: 2820 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK); 2821 error = sooptcopyin(sopt, rule, RULE_MAXSIZE, 2822 sizeof(struct ip_fw7) ); 2823 2824 memset(&ci, 0, sizeof(struct rule_check_info)); 2825 2826 /* 2827 * If the size of commands equals RULESIZE7 then we assume 2828 * a FreeBSD7.2 binary is talking to us (set is7=1). 2829 * is7 is persistent so the next 'ipfw list' command 2830 * will use this format. 2831 * NOTE: If wrong version is guessed (this can happen if 2832 * the first ipfw command is 'ipfw [pipe] list') 2833 * the ipfw binary may crash or loop infinitly... 
2834 */ 2835 size = sopt->sopt_valsize; 2836 if (size == RULESIZE7(rule)) { 2837 is7 = 1; 2838 error = convert_rule_to_8(rule); 2839 if (error) { 2840 free(rule, M_TEMP); 2841 return error; 2842 } 2843 size = RULESIZE(rule); 2844 } else 2845 is7 = 0; 2846 if (error == 0) 2847 error = check_ipfw_rule0(rule, size, &ci); 2848 if (error == 0) { 2849 /* locking is done within add_rule() */ 2850 struct ip_fw *krule; 2851 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule)); 2852 ci.urule = (caddr_t)rule; 2853 ci.krule = krule; 2854 import_rule0(&ci); 2855 error = commit_rules(chain, &ci, 1); 2856 if (!error && sopt->sopt_dir == SOPT_GET) { 2857 if (is7) { 2858 error = convert_rule_to_7(rule); 2859 size = RULESIZE7(rule); 2860 if (error) { 2861 free(rule, M_TEMP); 2862 return error; 2863 } 2864 } 2865 error = sooptcopyout(sopt, rule, size); 2866 } 2867 } 2868 free(rule, M_TEMP); 2869 break; 2870 2871 case IP_FW_DEL: 2872 /* 2873 * IP_FW_DEL is used for deleting single rules or sets, 2874 * and (ab)used to atomically manipulate sets. Argument size 2875 * is used to distinguish between the two: 2876 * sizeof(u_int32_t) 2877 * delete single rule or set of rules, 2878 * or reassign rules (or sets) to a different set. 2879 * 2*sizeof(u_int32_t) 2880 * atomic disable/enable sets. 2881 * first u_int32_t contains sets to be disabled, 2882 * second u_int32_t contains sets to be enabled. 2883 */ 2884 error = sooptcopyin(sopt, rulenum, 2885 2*sizeof(u_int32_t), sizeof(u_int32_t)); 2886 if (error) 2887 break; 2888 size = sopt->sopt_valsize; 2889 if (size == sizeof(u_int32_t) && rulenum[0] != 0) { 2890 /* delete or reassign, locking done in del_entry() */ 2891 error = del_entry(chain, rulenum[0]); 2892 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */ 2893 IPFW_UH_WLOCK(chain); 2894 V_set_disable = 2895 (V_set_disable | rulenum[0]) & ~rulenum[1] & 2896 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */ 2897 IPFW_UH_WUNLOCK(chain); 2898 } else 2899 error = EINVAL; 2900 break; 2901 2902 case IP_FW_ZERO: 2903 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */ 2904 rulenum[0] = 0; 2905 if (sopt->sopt_val != 0) { 2906 error = sooptcopyin(sopt, rulenum, 2907 sizeof(u_int32_t), sizeof(u_int32_t)); 2908 if (error) 2909 break; 2910 } 2911 error = zero_entry(chain, rulenum[0], 2912 sopt->sopt_name == IP_FW_RESETLOG); 2913 break; 2914 2915 /*--- TABLE opcodes ---*/ 2916 case IP_FW_TABLE_ADD: 2917 case IP_FW_TABLE_DEL: 2918 { 2919 ipfw_table_entry ent; 2920 struct tentry_info tei; 2921 struct tid_info ti; 2922 struct table_value v; 2923 2924 error = sooptcopyin(sopt, &ent, 2925 sizeof(ent), sizeof(ent)); 2926 if (error) 2927 break; 2928 2929 memset(&tei, 0, sizeof(tei)); 2930 tei.paddr = &ent.addr; 2931 tei.subtype = AF_INET; 2932 tei.masklen = ent.masklen; 2933 ipfw_import_table_value_legacy(ent.value, &v); 2934 tei.pvalue = &v; 2935 memset(&ti, 0, sizeof(ti)); 2936 ti.uidx = ent.tbl; 2937 ti.type = IPFW_TABLE_CIDR; 2938 2939 error = (opt == IP_FW_TABLE_ADD) ? 
2940 add_table_entry(chain, &ti, &tei, 0, 1) : 2941 del_table_entry(chain, &ti, &tei, 0, 1); 2942 } 2943 break; 2944 2945 2946 case IP_FW_TABLE_FLUSH: 2947 { 2948 u_int16_t tbl; 2949 struct tid_info ti; 2950 2951 error = sooptcopyin(sopt, &tbl, 2952 sizeof(tbl), sizeof(tbl)); 2953 if (error) 2954 break; 2955 memset(&ti, 0, sizeof(ti)); 2956 ti.uidx = tbl; 2957 error = flush_table(chain, &ti); 2958 } 2959 break; 2960 2961 case IP_FW_TABLE_GETSIZE: 2962 { 2963 u_int32_t tbl, cnt; 2964 struct tid_info ti; 2965 2966 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl), 2967 sizeof(tbl)))) 2968 break; 2969 memset(&ti, 0, sizeof(ti)); 2970 ti.uidx = tbl; 2971 IPFW_RLOCK(chain); 2972 error = ipfw_count_table(chain, &ti, &cnt); 2973 IPFW_RUNLOCK(chain); 2974 if (error) 2975 break; 2976 error = sooptcopyout(sopt, &cnt, sizeof(cnt)); 2977 } 2978 break; 2979 2980 case IP_FW_TABLE_LIST: 2981 { 2982 ipfw_table *tbl; 2983 struct tid_info ti; 2984 2985 if (sopt->sopt_valsize < sizeof(*tbl)) { 2986 error = EINVAL; 2987 break; 2988 } 2989 size = sopt->sopt_valsize; 2990 tbl = malloc(size, M_TEMP, M_WAITOK); 2991 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl)); 2992 if (error) { 2993 free(tbl, M_TEMP); 2994 break; 2995 } 2996 tbl->size = (size - sizeof(*tbl)) / 2997 sizeof(ipfw_table_entry); 2998 memset(&ti, 0, sizeof(ti)); 2999 ti.uidx = tbl->tbl; 3000 IPFW_RLOCK(chain); 3001 error = ipfw_dump_table_legacy(chain, &ti, tbl); 3002 IPFW_RUNLOCK(chain); 3003 if (error) { 3004 free(tbl, M_TEMP); 3005 break; 3006 } 3007 error = sooptcopyout(sopt, tbl, size); 3008 free(tbl, M_TEMP); 3009 } 3010 break; 3011 3012 /*--- NAT operations are protected by the IPFW_LOCK ---*/ 3013 case IP_FW_NAT_CFG: 3014 if (IPFW_NAT_LOADED) 3015 error = ipfw_nat_cfg_ptr(sopt); 3016 else { 3017 printf("IP_FW_NAT_CFG: %s\n", 3018 "ipfw_nat not present, please load it"); 3019 error = EINVAL; 3020 } 3021 break; 3022 3023 case IP_FW_NAT_DEL: 3024 if (IPFW_NAT_LOADED) 3025 error = ipfw_nat_del_ptr(sopt); 3026 else { 3027 printf("IP_FW_NAT_DEL: %s\n", 3028 "ipfw_nat not present, please load it"); 3029 error = EINVAL; 3030 } 3031 break; 3032 3033 case IP_FW_NAT_GET_CONFIG: 3034 if (IPFW_NAT_LOADED) 3035 error = ipfw_nat_get_cfg_ptr(sopt); 3036 else { 3037 printf("IP_FW_NAT_GET_CFG: %s\n", 3038 "ipfw_nat not present, please load it"); 3039 error = EINVAL; 3040 } 3041 break; 3042 3043 case IP_FW_NAT_GET_LOG: 3044 if (IPFW_NAT_LOADED) 3045 error = ipfw_nat_get_log_ptr(sopt); 3046 else { 3047 printf("IP_FW_NAT_GET_LOG: %s\n", 3048 "ipfw_nat not present, please load it"); 3049 error = EINVAL; 3050 } 3051 break; 3052 3053 default: 3054 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name); 3055 error = EINVAL; 3056 } 3057 3058 return (error); 3059 #undef RULE_MAXSIZE 3060 } 3061 #define RULE_MAXSIZE (256*sizeof(u_int32_t)) 3062 3063 /* Functions to convert rules 7.2 <==> 8.0 */ 3064 static int 3065 convert_rule_to_7(struct ip_fw_rule0 *rule) 3066 { 3067 /* Used to modify original rule */ 3068 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule; 3069 /* copy of original rule, version 8 */ 3070 struct ip_fw_rule0 *tmp; 3071 3072 /* Used to copy commands */ 3073 ipfw_insn *ccmd, *dst; 3074 int ll = 0, ccmdlen = 0; 3075 3076 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO); 3077 if (tmp == NULL) { 3078 return 1; //XXX error 3079 } 3080 bcopy(rule, tmp, RULE_MAXSIZE); 3081 3082 /* Copy fields */ 3083 //rule7->_pad = tmp->_pad; 3084 rule7->set = tmp->set; 3085 rule7->rulenum = tmp->rulenum; 3086 rule7->cmd_len = tmp->cmd_len; 3087 rule7->act_ofs = 
tmp->act_ofs;
3088 	rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
3089 	rule7->cmd_len = tmp->cmd_len;
3090 	rule7->pcnt = tmp->pcnt;
3091 	rule7->bcnt = tmp->bcnt;
3092 	rule7->timestamp = tmp->timestamp;
3093 
3094 	/* Copy commands */
3095 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
3096 	    ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
3097 		ccmdlen = F_LEN(ccmd);
3098 
3099 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
3100 
3101 		if (dst->opcode > O_NAT)
3102 			/* O_REASS does not exist in the 7.2 version, so
3103 			 * decrement the opcode if it is after O_REASS
3104 			 */
3105 			dst->opcode--;
3106 
3107 		if (ccmdlen > ll) {
3108 			printf("ipfw: opcode %d size truncated\n", ccmd->opcode);
3109 			free(tmp, M_TEMP);
3110 			return (EINVAL);
3111 		}
3112 	}
3113 	free(tmp, M_TEMP);
3114 
3115 	return (0);
3116 }
3117 
3118 static int
3119 convert_rule_to_8(struct ip_fw_rule0 *rule)
3120 {
3121 	/* Used to modify original rule */
3122 	struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
3123 
3124 	/* Used to copy commands */
3125 	ipfw_insn *ccmd, *dst;
3126 	int ll = 0, ccmdlen = 0;
3127 
3128 	/* Copy of original rule */
3129 	struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
3130 	if (tmp == NULL) {
3131 		return 1; //XXX error
3132 	}
3133 
3134 	bcopy(rule7, tmp, RULE_MAXSIZE);
3135 
3136 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
3137 	    ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
3138 		ccmdlen = F_LEN(ccmd);
3139 
3140 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
3141 
3142 		if (dst->opcode > O_NAT)
3143 			/* O_REASS does not exist in the 7.2 version, so
3144 			 * increment the opcode if it is after O_REASS
3145 			 */
3146 			dst->opcode++;
3147 
3148 		if (ccmdlen > ll) {
3149 			printf("ipfw: opcode %d size truncated\n", ccmd->opcode);
3150 			free(tmp, M_TEMP);
3151 			return (EINVAL);
3152 		}
3153 	}
3154 
3155 	rule->_pad = tmp->_pad;
3156 	rule->set = tmp->set;
3157 	rule->rulenum = tmp->rulenum;
3158 	rule->cmd_len = tmp->cmd_len;
3159 	rule->act_ofs = tmp->act_ofs;
3160 	rule->next_rule = (struct ip_fw *)tmp->next_rule;
3161 	rule->cmd_len = tmp->cmd_len;
3162 	rule->id = 0; /* XXX see if is ok = 0 */
3163 	rule->pcnt = tmp->pcnt;
3164 	rule->bcnt = tmp->bcnt;
3165 	rule->timestamp = tmp->timestamp;
3166 
3167 	free(tmp, M_TEMP);
3168 	return (0);
3169 }
3170 
3171 /*
3172  * Named object API
3173  *
3174  */
3175 
3176 /*
3177  * Allocates a new bitmask which can be used to enlarge/shrink
3178  * the named instance index.
3179  */
3180 void
3181 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
3182 {
3183 	size_t size;
3184 	int max_blocks;
3185 	u_long *idx_mask;
3186 
3187 	KASSERT((items % BLOCK_ITEMS) == 0,
3188 	    ("bitmask size needs to be a multiple of %zu",
3189 	    BLOCK_ITEMS));
3190 
3191 	max_blocks = items / BLOCK_ITEMS;
3192 	size = items / 8;
3193 	idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
3194 	/* Mark all as free */
3195 	memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
3196 	*idx_mask &= ~(u_long)1; /* Skip index 0 */
3197 
3198 	*idx = idx_mask;
3199 	*pblocks = max_blocks;
3200 }
3201 
3202 /*
3203  * Copies the current bitmask index into the new one.
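 * Used when the bitmap is resized: for each set, the old block range is
 * copied into the beginning of the corresponding (larger) range in the
 * new bitmap supplied via @idx/@blocks.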
3204 */ 3205 void 3206 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks) 3207 { 3208 int old_blocks, new_blocks; 3209 u_long *old_idx, *new_idx; 3210 int i; 3211 3212 old_idx = ni->idx_mask; 3213 old_blocks = ni->max_blocks; 3214 new_idx = *idx; 3215 new_blocks = *blocks; 3216 3217 for (i = 0; i < IPFW_MAX_SETS; i++) { 3218 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i], 3219 old_blocks * sizeof(u_long)); 3220 } 3221 } 3222 3223 /* 3224 * Swaps current @ni index with new one. 3225 */ 3226 void 3227 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks) 3228 { 3229 int old_blocks; 3230 u_long *old_idx; 3231 3232 old_idx = ni->idx_mask; 3233 old_blocks = ni->max_blocks; 3234 3235 ni->idx_mask = *idx; 3236 ni->max_blocks = *blocks; 3237 3238 /* Save old values */ 3239 *idx = old_idx; 3240 *blocks = old_blocks; 3241 } 3242 3243 void 3244 ipfw_objhash_bitmap_free(void *idx, int blocks) 3245 { 3246 3247 free(idx, M_IPFW); 3248 } 3249 3250 /* 3251 * Creates named hash instance. 3252 * Must be called without holding any locks. 3253 * Return pointer to new instance. 3254 */ 3255 struct namedobj_instance * 3256 ipfw_objhash_create(uint32_t items) 3257 { 3258 struct namedobj_instance *ni; 3259 int i; 3260 size_t size; 3261 3262 size = sizeof(struct namedobj_instance) + 3263 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE + 3264 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE; 3265 3266 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO); 3267 ni->nn_size = NAMEDOBJ_HASH_SIZE; 3268 ni->nv_size = NAMEDOBJ_HASH_SIZE; 3269 3270 ni->names = (struct namedobjects_head *)(ni +1); 3271 ni->values = &ni->names[ni->nn_size]; 3272 3273 for (i = 0; i < ni->nn_size; i++) 3274 TAILQ_INIT(&ni->names[i]); 3275 3276 for (i = 0; i < ni->nv_size; i++) 3277 TAILQ_INIT(&ni->values[i]); 3278 3279 /* Set default hashing/comparison functions */ 3280 ni->hash_f = objhash_hash_name; 3281 ni->cmp_f = objhash_cmp_name; 3282 3283 /* Allocate bitmask separately due to possible resize */ 3284 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks); 3285 3286 return (ni); 3287 } 3288 3289 void 3290 ipfw_objhash_destroy(struct namedobj_instance *ni) 3291 { 3292 3293 free(ni->idx_mask, M_IPFW); 3294 free(ni, M_IPFW); 3295 } 3296 3297 void 3298 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f, 3299 objhash_cmp_f *cmp_f) 3300 { 3301 3302 ni->hash_f = hash_f; 3303 ni->cmp_f = cmp_f; 3304 } 3305 3306 static uint32_t 3307 objhash_hash_name(struct namedobj_instance *ni, void *name, uint32_t set) 3308 { 3309 3310 return (fnv_32_str((char *)name, FNV1_32_INIT)); 3311 } 3312 3313 static int 3314 objhash_cmp_name(struct named_object *no, void *name, uint32_t set) 3315 { 3316 3317 if ((strcmp(no->name, (char *)name) == 0) && (no->set == set)) 3318 return (0); 3319 3320 return (1); 3321 } 3322 3323 static uint32_t 3324 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val) 3325 { 3326 uint32_t v; 3327 3328 v = val % (ni->nv_size - 1); 3329 3330 return (v); 3331 } 3332 3333 struct named_object * 3334 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name) 3335 { 3336 struct named_object *no; 3337 uint32_t hash; 3338 3339 hash = ni->hash_f(ni, name, set) % ni->nn_size; 3340 3341 TAILQ_FOREACH(no, &ni->names[hash], nn_next) { 3342 if (ni->cmp_f(no, name, set) == 0) 3343 return (no); 3344 } 3345 3346 return (NULL); 3347 } 3348 3349 struct named_object * 3350 ipfw_objhash_lookup_kidx(struct namedobj_instance 
*ni, uint16_t kidx) 3351 { 3352 struct named_object *no; 3353 uint32_t hash; 3354 3355 hash = objhash_hash_idx(ni, kidx); 3356 3357 TAILQ_FOREACH(no, &ni->values[hash], nv_next) { 3358 if (no->kidx == kidx) 3359 return (no); 3360 } 3361 3362 return (NULL); 3363 } 3364 3365 int 3366 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a, 3367 struct named_object *b) 3368 { 3369 3370 if ((strcmp(a->name, b->name) == 0) && a->set == b->set) 3371 return (1); 3372 3373 return (0); 3374 } 3375 3376 void 3377 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no) 3378 { 3379 uint32_t hash; 3380 3381 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size; 3382 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next); 3383 3384 hash = objhash_hash_idx(ni, no->kidx); 3385 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next); 3386 3387 ni->count++; 3388 } 3389 3390 void 3391 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no) 3392 { 3393 uint32_t hash; 3394 3395 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size; 3396 TAILQ_REMOVE(&ni->names[hash], no, nn_next); 3397 3398 hash = objhash_hash_idx(ni, no->kidx); 3399 TAILQ_REMOVE(&ni->values[hash], no, nv_next); 3400 3401 ni->count--; 3402 } 3403 3404 uint32_t 3405 ipfw_objhash_count(struct namedobj_instance *ni) 3406 { 3407 3408 return (ni->count); 3409 } 3410 3411 /* 3412 * Runs @func for each found named object. 3413 * It is safe to delete objects from callback 3414 */ 3415 void 3416 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg) 3417 { 3418 struct named_object *no, *no_tmp; 3419 int i; 3420 3421 for (i = 0; i < ni->nn_size; i++) { 3422 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) 3423 f(ni, no, arg); 3424 } 3425 } 3426 3427 /* 3428 * Removes index from given set. 3429 * Returns 0 on success. 3430 */ 3431 int 3432 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx) 3433 { 3434 u_long *mask; 3435 int i, v; 3436 3437 i = idx / BLOCK_ITEMS; 3438 v = idx % BLOCK_ITEMS; 3439 3440 if (i >= ni->max_blocks) 3441 return (1); 3442 3443 mask = &ni->idx_mask[i]; 3444 3445 if ((*mask & ((u_long)1 << v)) != 0) 3446 return (1); 3447 3448 /* Mark as free */ 3449 *mask |= (u_long)1 << v; 3450 3451 /* Update free offset */ 3452 if (ni->free_off[0] > i) 3453 ni->free_off[0] = i; 3454 3455 return (0); 3456 } 3457 3458 /* 3459 * Allocate new index in given instance and stores in in @pidx. 3460 * Returns 0 on success. 3461 */ 3462 int 3463 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx) 3464 { 3465 struct namedobj_instance *ni; 3466 u_long *mask; 3467 int i, off, v; 3468 3469 ni = (struct namedobj_instance *)n; 3470 3471 off = ni->free_off[0]; 3472 mask = &ni->idx_mask[off]; 3473 3474 for (i = off; i < ni->max_blocks; i++, mask++) { 3475 if ((v = ffsl(*mask)) == 0) 3476 continue; 3477 3478 /* Mark as busy */ 3479 *mask &= ~ ((u_long)1 << (v - 1)); 3480 3481 ni->free_off[0] = i; 3482 3483 v = BLOCK_ITEMS * i + v - 1; 3484 3485 *pidx = v; 3486 return (0); 3487 } 3488 3489 return (1); 3490 } 3491 3492 /* end of file */ 3493
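
/*
 * Usage sketch for the named object API above (illustrative only, not
 * compiled): a hypothetical consumer embeds a struct named_object,
 * allocates a kernel index for it and links it into an instance.  The
 * size 1024 and the caller-provided object are assumptions made for
 * this example, not values used elsewhere in this file.
 *
 *	struct namedobj_instance *ni;
 *	struct named_object *no;	(caller-allocated, name/set filled in)
 *	uint16_t kidx;
 *
 *	ni = ipfw_objhash_create(1024);
 *	if (ipfw_objhash_alloc_idx(ni, &kidx) == 0) {
 *		no->kidx = kidx;
 *		ipfw_objhash_add(ni, no);	(hashes by name and by kidx)
 *	}
 *	...
 *	no = ipfw_objhash_lookup_name(ni, set, name);
 *	if (no != NULL) {
 *		ipfw_objhash_del(ni, no);
 *		ipfw_objhash_free_idx(ni, no->kidx);
 *	}
 *	ipfw_objhash_destroy(ni);
 */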