/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
 * Copyright (c) 2014 Yandex LLC
 * Copyright (c) 2014 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Lookup table support for ipfw.
 *
 * This file contains handlers for all generic tables' operations:
 * add/del/flush entries, list/dump tables etc..
 *
 * Table data modification is protected by both UH and runtime lock
 * while reading configuration/data is protected by UH lock.
 *
 * Lookup algorithms for all table types are located in ip_fw_table_algo.c
 */

#include "opt_ipfw.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/queue.h>
#include <net/if.h>	/* ip_fw.h requires IFNAMSIZ */

#include <netinet/in.h>
#include <netinet/ip_var.h>	/* struct ipfw_rule_ref */
#include <netinet/ip_fw.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/ip_fw_table.h>

/*
 * Table has the following `type` concepts:
 *
 * `no.type` represents lookup key type (addr, ifp, uid, etc..)
 * vmask represents bitmask of table values which are present at the moment.
 * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents old
 * single-value-for-all approach.
 */
struct table_config {
	struct named_object	no;
	uint8_t		tflags;		/* type flags */
	uint8_t		locked;		/* 1 if locked from changes */
	uint8_t		linked;		/* 1 if already linked */
	uint8_t		ochanged;	/* used by set swapping */
	uint8_t		vshared;	/* 1 if using shared value array */
	uint8_t		spare[3];
	uint32_t	count;		/* Number of records */
	uint32_t	limit;		/* Max number of records */
	uint32_t	vmask;		/* bitmask with supported values */
	uint32_t	ocount;		/* used by set swapping */
	uint64_t	gencnt;		/* generation count */
	char		tablename[64];	/* table name */
	struct table_algo	*ta;	/* Callbacks for given algo */
	void		*astate;	/* algorithm state */
	struct table_info	ti_copy;	/* data to put to table_info */
	struct namedobj_instance	*vi;
};

static int find_table_err(struct namedobj_instance *ni, struct tid_info *ti,
    struct table_config **tc);
static struct table_config *find_table(struct namedobj_instance *ni,
    struct tid_info *ti);
static struct table_config *alloc_table_config(struct ip_fw_chain *ch,
    struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags);
static void free_table_config(struct namedobj_instance *ni,
    struct table_config *tc);
static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
    char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref);
static void link_table(struct ip_fw_chain *ch, struct table_config *tc);
static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc);
static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc);
#define	OP_ADD	1
#define	OP_DEL	0
static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
    struct sockopt_data *sd);
static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
    ipfw_xtable_info *i);
static int dump_table_tentry(void *e, void *arg);
static int dump_table_xentry(void *e, void *arg);

static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
    struct tid_info *b);

static int check_table_name(const char *name);
static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
    struct table_config *tc, struct table_info *ti, uint32_t count);
static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);

static struct table_algo *find_table_algo(struct tables_config *tableconf,
    struct tid_info *ti, char *name);

static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti);
static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);

#define	CHAIN_TO_NI(chain)	(CHAIN_TO_TCFG(chain)->namehash)
#define	KIDX_TO_TI(ch, k)	(&(((struct table_info *)(ch)->tablestate)[k]))

#define	TA_BUF_SZ	128	/* On-stack buffer for add/delete state */

void
rollback_toperation_state(struct ip_fw_chain *ch, void *object)
{
	struct tables_config *tcfg;
	struct op_state *os;

	tcfg = CHAIN_TO_TCFG(ch);
	TAILQ_FOREACH(os, &tcfg->state_list, next)
		os->func(object, os);
}

void
add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
{
	struct tables_config *tcfg;

	tcfg = CHAIN_TO_TCFG(ch);
	TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
}

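/*
 * Removes operation state @ts from the per-chain list of
 * pending table operations.
 */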
void
del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
{
	struct tables_config *tcfg;

	tcfg = CHAIN_TO_TCFG(ch);
	TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
}

void
tc_ref(struct table_config *tc)
{

	tc->no.refcnt++;
}

void
tc_unref(struct table_config *tc)
{

	tc->no.refcnt--;
}

static struct table_value *
get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
{
	struct table_value *pval;

	pval = (struct table_value *)ch->valuestate;

	return (&pval[kidx]);
}

/*
 * Checks if we're able to insert/update entry @tei into table
 * w.r.t @tc limits.
 * May alter @tei to indicate insertion error / insert
 * options.
 *
 * Returns 0 if operation can be performed.
 */
static int
check_table_limit(struct table_config *tc, struct tentry_info *tei)
{

	if (tc->limit == 0 || tc->count < tc->limit)
		return (0);

	if ((tei->flags & TEI_FLAGS_UPDATE) == 0) {
		/* Notify userland on error cause */
		tei->flags |= TEI_FLAGS_LIMIT;
		return (EFBIG);
	}

	/*
	 * We have UPDATE flag set.
	 * Permit updating record (if found),
	 * but restrict adding new one since we've
	 * already hit the limit.
	 */
	tei->flags |= TEI_FLAGS_DONTADD;

	return (0);
}

/*
 * Convert algorithm callback return code into
 * one of pre-defined states known by userland.
 */
static void
store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num)
{
	int flag;

	flag = 0;

	switch (error) {
	case 0:
		if (op == OP_ADD && num != 0)
			flag = TEI_FLAGS_ADDED;
		if (op == OP_DEL)
			flag = TEI_FLAGS_DELETED;
		break;
	case ENOENT:
		flag = TEI_FLAGS_NOTFOUND;
		break;
	case EEXIST:
		flag = TEI_FLAGS_EXISTS;
		break;
	default:
		flag = TEI_FLAGS_ERROR;
	}

	tei->flags |= flag;
}

/*
 * Creates and references table with default parameters.
 * Saves table config, algo and allocated kidx into @ptc, @pta and
 * @pkidx if non-zero.
 * Used for table auto-creation to support old binaries.
 *
 * Returns 0 on success.
 */
static int
create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
    uint16_t *pkidx)
{
	ipfw_xtable_info xi;
	int error;

	memset(&xi, 0, sizeof(xi));
	/* Set default value mask for legacy clients */
	xi.vmask = IPFW_VTYPE_LEGACY;

	error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1);
	if (error != 0)
		return (error);

	return (0);
}

/*
 * Finds and references an existing table, optionally
 * creating a new one.
 *
 * Saves found table config into @ptc.
 * Note function may drop/acquire UH_WLOCK.
 * Returns 0 if table was found/created and referenced
 * or non-zero return code.
 */
static int
find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint32_t count, int op,
    struct table_config **ptc)
{
	struct namedobj_instance *ni;
	struct table_config *tc;
	uint16_t kidx;
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	ni = CHAIN_TO_NI(ch);
	tc = NULL;
	if ((tc = find_table(ni, ti)) != NULL) {
		/* check table type */
		if (tc->no.subtype != ti->type)
			return (EINVAL);

		if (tc->locked != 0)
			return (EACCES);

		/* Try to exit early on limit hit */
		if (op == OP_ADD && count == 1 &&
		    check_table_limit(tc, tei) != 0)
			return (EFBIG);

		/* Reference and return */
		tc->no.refcnt++;
		*ptc = tc;
		return (0);
	}

	if (op == OP_DEL)
		return (ESRCH);

	/* Compatibility mode: create new table for old clients */
	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
		return (ESRCH);

	IPFW_UH_WUNLOCK(ch);
	error = create_table_compat(ch, ti, &kidx);
	IPFW_UH_WLOCK(ch);

	if (error != 0)
		return (error);

	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));

	/* OK, now we've got referenced table. */
	*ptc = tc;
	return (0);
}

/*
 * Rolls back entries already @added to @tc using state array @ta_buf_m.
 * Assume the following layout:
 * 1) ADD state (ta_buf_m[0] ... ta_buf_m[added - 1]) for handling update cases
 * 2) DEL state (ta_buf_m[count] ... ta_buf_m[count + added - 1])
 *    for storing deleted state
 */
static void
rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
    struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
    uint32_t count, uint32_t added)
{
	struct table_algo *ta;
	struct tentry_info *ptei;
	caddr_t v, vv;
	size_t ta_buf_sz;
	int error __diagused, i;
	uint32_t num;

	IPFW_UH_WLOCK_ASSERT(ch);

	ta = tc->ta;
	ta_buf_sz = ta->ta_buf_size;
	v = ta_buf_m;
	vv = v + count * ta_buf_sz;
	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
		ptei = &tei[i];
		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
			/*
			 * We have old value stored by previous
			 * call in @ptei->value. Do add once again
			 * to restore it.
			 */
			error = ta->add(tc->astate, tinfo, ptei, v, &num);
			KASSERT(error == 0, ("rollback UPDATE fail"));
			KASSERT(num == 0, ("rollback UPDATE fail2"));
			continue;
		}

		error = ta->prepare_del(ch, ptei, vv);
		KASSERT(error == 0, ("pre-rollback INSERT failed"));
		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
		KASSERT(error == 0, ("rollback INSERT failed"));
		tc->count -= num;
	}
}

/*
 * Prepares add/del state for all @count entries in @tei.
 * Uses either stack buffer (@ta_buf) or allocates a new one.
 * Stores pointer to allocated buffer back to @ta_buf.
 *
 * Returns 0 on success.
 */
static int
prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
    struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
{
	caddr_t ta_buf_m, v;
	size_t ta_buf_sz, sz;
	struct tentry_info *ptei;
	int error, i;

	error = 0;
	ta_buf_sz = ta->ta_buf_size;
	if (count == 1) {
		/* Single add/delete, use on-stack buffer */
		memset(*ta_buf, 0, TA_BUF_SZ);
		ta_buf_m = *ta_buf;
	} else {
		/*
		 * Multiple adds/deletes, allocate larger buffer
		 *
		 * Note we need 2xcount buffer for add case:
		 * we have to hold both ADD state
		 * and DELETE state (this may be needed
		 * if we need to rollback all changes)
		 */
		sz = count * ta_buf_sz;
		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
		    M_WAITOK | M_ZERO);
	}

	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta_buf_sz) {
		ptei = &tei[i];
		error = (op == OP_ADD) ?
		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);

		/*
		 * Some syntax error (incorrect mask, or address, or
		 * anything). Return error regardless of atomicity
		 * settings.
		 */
		if (error != 0)
			break;
	}

	*ta_buf = ta_buf_m;
	return (error);
}

/*
 * Flushes allocated state for each @count entries in @tei.
 * Frees @ta_buf_m if differs from stack buffer @ta_buf.
 */
static void
flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
    struct tentry_info *tei, uint32_t count, int rollback,
    caddr_t ta_buf_m, caddr_t ta_buf)
{
	caddr_t v;
	struct tentry_info *ptei;
	size_t ta_buf_sz;
	int i;

	ta_buf_sz = ta->ta_buf_size;

	/* Run cleaning callback anyway */
	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta_buf_sz) {
		ptei = &tei[i];
		ta->flush_entry(ch, ptei, v);
		if (ptei->ptv != NULL) {
			free(ptei->ptv, M_IPFW);
			ptei->ptv = NULL;
		}
	}

	/* Clean up "deleted" state in case of rollback */
	if (rollback != 0) {
		v = ta_buf_m + count * ta_buf_sz;
		for (i = 0; i < count; i++, v += ta_buf_sz)
			ta->flush_entry(ch, &tei[i], v);
	}

	if (ta_buf_m != ta_buf)
		free(ta_buf_m, M_TEMP);
}

static void
rollback_add_entry(void *object, struct op_state *_state)
{
	struct ip_fw_chain *ch __diagused;
	struct tableop_state *ts;

	ts = (struct tableop_state *)_state;

	if (ts->tc != object && ts->ch != object)
		return;

	ch = ts->ch;

	IPFW_UH_WLOCK_ASSERT(ch);

	/* Call specific unlockers */
	rollback_table_values(ts);

	/* Indicate we've called */
	ts->modified = 1;
}

/*
 * Adds/updates one or more entries in table @ti.
 *
 * Function may drop/reacquire UH wlock multiple times due to
 * items alloc, algorithm callbacks (check_space), value linkage
 * (new values, value storage realloc), etc..
 * Other processes like other adds (which may involve storage resize),
 * table swaps (which changes table data and may change algo type),
 * table modify (which may change value mask) may be executed
 * simultaneously so we need to deal with it.
 *
 * The following approach was implemented:
 * we have per-chain linked list, protected with UH lock.
 * add_table_entry prepares special on-stack structure which is passed
 * to its descendants. Users add this structure to this list before unlock.
 * After performing needed operations and acquiring UH lock back, each user
 * checks if structure has changed. If true, it rolls local state back and
 * returns without error to the caller.
 * add_table_entry() on its own checks if structure has changed and restarts
 * its operation from the beginning (goto restart).
 *
 * Functions which are modifying fields of interest (currently
 *   resize_shared_value_storage() and swap_tables() )
 * traverse given list while holding UH lock immediately before
 * performing their operations, calling the function provided by list entry
 * ( currently rollback_add_entry ) which performs rollback for all necessary
 * state and sets appropriate values in structure indicating rollback
 * has happened.
 *
 * Algo interaction:
 * Function references @ti first to ensure table won't
 * disappear or change its type.
 * After that, prepare_add callback is called for each @tei entry.
 * Next, we try to add each entry under UH+WLOCK
 * using add() callback.
 * Finally, we free all state by calling flush_entry callback
 * for each @tei.
 *
 * Returns 0 on success.
 */
int
add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint8_t flags, uint32_t count)
{
	struct table_config *tc;
	struct table_algo *ta;
	uint16_t kidx;
	int error, first_error, i, rollback;
	uint32_t num, numadd;
	struct tentry_info *ptei;
	struct tableop_state ts;
	char ta_buf[TA_BUF_SZ];
	caddr_t ta_buf_m, v;

	memset(&ts, 0, sizeof(ts));
	ta = NULL;
	IPFW_UH_WLOCK(ch);

	/*
	 * Find and reference existing table.
	 */
restart:
	if (ts.modified != 0) {
		IPFW_UH_WUNLOCK(ch);
		flush_batch_buffer(ch, ta, tei, count, rollback,
		    ta_buf_m, ta_buf);
		memset(&ts, 0, sizeof(ts));
		ta = NULL;
		IPFW_UH_WLOCK(ch);
	}

	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	ta = tc->ta;

	/* Fill in tablestate */
	ts.ch = ch;
	ts.opstate.func = rollback_add_entry;
	ts.tc = tc;
	ts.vshared = tc->vshared;
	ts.vmask = tc->vmask;
	ts.ta = ta;
	ts.tei = tei;
	ts.count = count;
	rollback = 0;
	add_toperation_state(ch, &ts);
	IPFW_UH_WUNLOCK(ch);

	/* Allocate memory and prepare record(s) */
	/* Pass stack buffer by default */
	ta_buf_m = ta_buf;
	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);

	IPFW_UH_WLOCK(ch);
	del_toperation_state(ch, &ts);
	/* Drop reference we've used in first search */
	tc->no.refcnt--;

	/* Check prepare_batch_buffer() error */
	if (error != 0)
		goto cleanup;

	/*
	 * Check if table swap has happened.
	 * (so table algo might be changed).
	 * Restart operation to achieve consistent behavior.
	 */
	if (ts.modified != 0)
		goto restart;

	/*
	 * Link all values to shared/per-table value array.
	 *
	 * May release/reacquire UH_WLOCK.
	 */
	error = ipfw_link_table_values(ch, &ts, flags);
	if (error != 0)
		goto cleanup;
	if (ts.modified != 0)
		goto restart;

	/*
	 * Ensure we are able to add all entries without additional
	 * memory allocations. May release/reacquire UH_WLOCK.
	 */
	kidx = tc->no.kidx;
	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
	if (error != 0)
		goto cleanup;
	if (ts.modified != 0)
		goto restart;

	/* We've got valid table in @tc. Let's try to add data */
	kidx = tc->no.kidx;
	ta = tc->ta;
	numadd = 0;
	first_error = 0;

	IPFW_WLOCK(ch);

	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
		ptei = &tei[i];
		num = 0;
		/* check limit before adding */
		if ((error = check_table_limit(tc, ptei)) == 0) {
			/*
			 * It should be safe to insert a record w/o
			 * a properly-linked value if atomicity is
			 * not required.
			 *
			 * If the added item does not have a valid value
			 * index, it would get rejected by ta->add().
			 */
			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
			    ptei, v, &num);
			/* Set status flag to inform userland */
			store_tei_result(ptei, OP_ADD, error, num);
		}
		if (error == 0) {
			/* Update number of records to ease limit checking */
			tc->count += num;
			numadd += num;
			continue;
		}

		if (first_error == 0)
			first_error = error;

		/*
		 * Some error has happened. Check our atomicity
		 * settings: continue if atomicity is not required,
		 * rollback changes otherwise.
		 */
		if ((flags & IPFW_CTF_ATOMIC) == 0)
			continue;

		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
		    tei, ta_buf_m, count, i);

		rollback = 1;
		break;
	}

	IPFW_WUNLOCK(ch);

	ipfw_garbage_table_values(ch, tc, tei, count, rollback);

	/* Permit post-add algorithm grow/rehash. */
	if (numadd != 0)
		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);

	/* Return first error to user, if any */
	error = first_error;

cleanup:
	IPFW_UH_WUNLOCK(ch);

	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);

	return (error);
}

/*
 * Deletes one or more entries in table @ti.
 *
 * Returns 0 on success.
 */
int
del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint8_t flags, uint32_t count)
{
	struct table_config *tc;
	struct table_algo *ta;
	struct tentry_info *ptei;
	uint16_t kidx;
	int error, first_error, i;
	uint32_t num, numdel;
	char ta_buf[TA_BUF_SZ];
	caddr_t ta_buf_m, v;

	/*
	 * Find and reference existing table.
	 */
	IPFW_UH_WLOCK(ch);
	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	ta = tc->ta;
	IPFW_UH_WUNLOCK(ch);

	/* Allocate memory and prepare record(s) */
	/* Pass stack buffer by default */
	ta_buf_m = ta_buf;
	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
	if (error != 0)
		goto cleanup;

	IPFW_UH_WLOCK(ch);

	/* Drop reference we've used in first search */
	tc->no.refcnt--;

	/*
	 * Check if table algo is still the same.
	 * (changed ta may be the result of table swap).
	 */
	if (ta != tc->ta) {
		IPFW_UH_WUNLOCK(ch);
		error = EINVAL;
		goto cleanup;
	}

	kidx = tc->no.kidx;
	numdel = 0;
	first_error = 0;

	IPFW_WLOCK(ch);
	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
		ptei = &tei[i];
		num = 0;
		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
		    &num);
		/* Save state for userland */
		store_tei_result(ptei, OP_DEL, error, num);
		if (error != 0 && first_error == 0)
			first_error = error;
		tc->count -= num;
		numdel += num;
	}
	IPFW_WUNLOCK(ch);

	/* Unlink non-used values */
	ipfw_garbage_table_values(ch, tc, tei, count, 0);

	if (numdel != 0) {
		/* Run post-del hook to permit shrinking */
		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Return first error to user, if any */
	error = first_error;

cleanup:
	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);

	return (error);
}

/*
 * Ensure that table @tc has enough space to add @count entries without
 * need for reallocation.
 *
 * Callbacks order:
 * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
 *
 * 1) alloc_modify (no locks, M_WAITOK) - alloc new state based on @pflags.
 * 2) prepare_modify (UH_WLOCK) - copy old data into new storage
 * 3) modify (UH_WLOCK + WLOCK) - switch pointers
 * 4) flush_modify (UH_WLOCK) - free state, if needed
 *
 * Returns 0 on success.
 */
static int
check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
    struct table_config *tc, struct table_info *ti, uint32_t count)
{
	struct table_algo *ta;
	uint64_t pflags;
	char ta_buf[TA_BUF_SZ];
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	error = 0;
	ta = tc->ta;
	if (ta->need_modify == NULL)
		return (0);

	/* Acquire reference not to lose @tc between locks/unlocks */
	tc->no.refcnt++;

	/*
	 * TODO: think about avoiding race between large add/large delete
	 * operation on algorithm which implements shrinking along with
	 * growing.
	 */
	while (true) {
		pflags = 0;
		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
			error = 0;
			break;
		}

		/* We have to shrink/grow table */
		if (ts != NULL)
			add_toperation_state(ch, ts);
		IPFW_UH_WUNLOCK(ch);

		memset(&ta_buf, 0, sizeof(ta_buf));
		error = ta->prepare_mod(ta_buf, &pflags);

		IPFW_UH_WLOCK(ch);
		if (ts != NULL)
			del_toperation_state(ch, ts);

		if (error != 0)
			break;

		if (ts != NULL && ts->modified != 0) {
			/*
			 * Swap operation has happened
			 * so we're currently operating on other
			 * table data. Stop doing this.
			 */
			ta->flush_mod(ta_buf);
			break;
		}

		/* Check if we still need to alter table */
		ti = KIDX_TO_TI(ch, tc->no.kidx);
		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
			IPFW_UH_WUNLOCK(ch);

			/*
			 * Other thread has already performed resize.
			 * Flush our state and return.
			 */
			ta->flush_mod(ta_buf);
			break;
		}

		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
		if (error == 0) {
			/* Do actual modification */
			IPFW_WLOCK(ch);
			ta->modify(tc->astate, ti, ta_buf, pflags);
			IPFW_WUNLOCK(ch);
		}

		/* Anyway, flush data and retry */
		ta->flush_mod(ta_buf);
	}

	tc->no.refcnt--;
	return (error);
}

/*
 * Adds or deletes record in table.
 * Data layout (v0):
 * Request: [ ip_fw3_opheader ipfw_table_xentry ]
 *
 * Returns 0 on success
 */
static int
manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_table_xentry *xent;
	struct tentry_info tei;
	struct tid_info ti;
	struct table_value v;
	int error, hdrlen, read;

	hdrlen = offsetof(ipfw_table_xentry, k);

	/* Check minimum header size */
	if (sd->valsize < (sizeof(*op3) + hdrlen))
		return (EINVAL);

	read = sizeof(ip_fw3_opheader);

	/* Check if xentry len field is valid */
	xent = (ipfw_table_xentry *)(op3 + 1);
	if (xent->len < hdrlen || xent->len + read > sd->valsize)
		return (EINVAL);

	memset(&tei, 0, sizeof(tei));
	tei.paddr = &xent->k;
	tei.masklen = xent->masklen;
	ipfw_import_table_value_legacy(xent->value, &v);
	tei.pvalue = &v;
	/* Old requests compatibility */
	tei.flags = TEI_FLAGS_COMPAT;
	if (xent->type == IPFW_TABLE_ADDR) {
		if (xent->len - hdrlen == sizeof(in_addr_t))
			tei.subtype = AF_INET;
		else
			tei.subtype = AF_INET6;
	}

	memset(&ti, 0, sizeof(ti));
	ti.uidx = xent->tbl;
	ti.type = xent->type;

	error = (op3->opcode == IP_FW_TABLE_XADD) ?
	    add_table_entry(ch, &ti, &tei, 0, 1) :
	    del_table_entry(ch, &ti, &tei, 0, 1);

	return (error);
}

/*
 * Adds or deletes record in table.
 * Data layout (v1)(current):
 * Request: [ ipfw_obj_header
 *   ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ]
 * ]
 *
 * Returns 0 on success
 */
static int
manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_tentry *tent, *ptent;
	ipfw_obj_ctlv *ctlv;
	ipfw_obj_header *oh;
	struct tentry_info *ptei, tei, *tei_buf;
	struct tid_info ti;
	int error, i, kidx, read;

	/* Check minimum header size */
	if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv)))
		return (EINVAL);

	/* Check if passed data is too long */
	if (sd->valsize != sd->kavail)
		return (EINVAL);

	oh = (ipfw_obj_header *)sd->kbuf;

	/* Basic length checks for TLVs */
	if (oh->ntlv.head.length != sizeof(oh->ntlv))
		return (EINVAL);

	read = sizeof(*oh);

	ctlv = (ipfw_obj_ctlv *)(oh + 1);
	if (ctlv->head.length + read != sd->valsize)
		return (EINVAL);

	read += sizeof(*ctlv);
	tent = (ipfw_obj_tentry *)(ctlv + 1);
	if (ctlv->count * sizeof(*tent) + read != sd->valsize)
		return (EINVAL);

	if (ctlv->count == 0)
		return (0);

	/*
	 * Mark entire buffer as "read".
	 * This instructs the sopt API to write it back
	 * after function return.
	 */
	ipfw_get_sopt_header(sd, sd->valsize);

	/* Perform basic checks for each entry */
	ptent = tent;
	kidx = tent->idx;
	for (i = 0; i < ctlv->count; i++, ptent++) {
		if (ptent->head.length != sizeof(*ptent))
			return (EINVAL);
		if (ptent->idx != kidx)
			return (ENOTSUP);
	}

	/* Convert data into kernel request objects */
	objheader_to_ti(oh, &ti);
	ti.type = oh->ntlv.type;
	ti.uidx = kidx;

	/* Use on-stack buffer for single add/del */
	if (ctlv->count == 1) {
		memset(&tei, 0, sizeof(tei));
		tei_buf = &tei;
	} else
		tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP,
		    M_WAITOK | M_ZERO);

	ptei = tei_buf;
	ptent = tent;
	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
		ptei->paddr = &ptent->k;
		ptei->subtype = ptent->subtype;
		ptei->masklen = ptent->masklen;
		if (ptent->head.flags & IPFW_TF_UPDATE)
			ptei->flags |= TEI_FLAGS_UPDATE;

		ipfw_import_table_value_v1(&ptent->v.value);
		ptei->pvalue = (struct table_value *)&ptent->v.value;
	}

	error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ?
	    add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) :
	    del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count);

	/* Translate result back to userland */
	ptei = tei_buf;
	ptent = tent;
	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
		if (ptei->flags & TEI_FLAGS_ADDED)
			ptent->result = IPFW_TR_ADDED;
		else if (ptei->flags & TEI_FLAGS_DELETED)
			ptent->result = IPFW_TR_DELETED;
		else if (ptei->flags & TEI_FLAGS_UPDATED)
			ptent->result = IPFW_TR_UPDATED;
		else if (ptei->flags & TEI_FLAGS_LIMIT)
			ptent->result = IPFW_TR_LIMIT;
		else if (ptei->flags & TEI_FLAGS_ERROR)
			ptent->result = IPFW_TR_ERROR;
		else if (ptei->flags & TEI_FLAGS_NOTFOUND)
			ptent->result = IPFW_TR_NOTFOUND;
		else if (ptei->flags & TEI_FLAGS_EXISTS)
			ptent->result = IPFW_TR_EXISTS;
		ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value);
	}

	if (tei_buf != &tei)
		free(tei_buf, M_TEMP);

	return (error);
}

/*
 * Looks up an entry in given table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_obj_tentry ]
 * Reply: [ ipfw_obj_header ipfw_obj_tentry ]
 *
 * Returns 0 on success
 */
static int
find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_tentry *tent;
	ipfw_obj_header *oh;
	struct tid_info ti;
	struct table_config *tc;
	struct table_algo *ta;
	struct table_info *kti;
	struct table_value *pval;
	struct namedobj_instance *ni;
	int error;
	size_t sz;

	/* Check minimum header size */
	sz = sizeof(*oh) + sizeof(*tent);
	if (sd->valsize != sz)
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
	tent = (ipfw_obj_tentry *)(oh + 1);

	/* Basic length checks for TLVs */
	if (oh->ntlv.head.length != sizeof(oh->ntlv))
		return (EINVAL);

	objheader_to_ti(oh, &ti);
	ti.type = oh->ntlv.type;
	ti.uidx = tent->idx;

	IPFW_UH_RLOCK(ch);
	ni = CHAIN_TO_NI(ch);

	/*
	 * Find existing table and check its type.
	 */
	ta = NULL;
	if ((tc = find_table(ni, &ti)) == NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (ESRCH);
	}

	/* check table type */
	if (tc->no.subtype != ti.type) {
		IPFW_UH_RUNLOCK(ch);
		return (EINVAL);
	}

	kti = KIDX_TO_TI(ch, tc->no.kidx);
	ta = tc->ta;

	if (ta->find_tentry == NULL)
		return (ENOTSUP);

	error = ta->find_tentry(tc->astate, kti, tent);
	if (error == 0) {
		pval = get_table_value(ch, tc, tent->v.kidx);
		ipfw_export_table_value_v1(pval, &tent->v.value);
	}
	IPFW_UH_RUNLOCK(ch);

	return (error);
}

/*
 * Flushes all entries or destroys given table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ]
 *
 * Returns 0 on success
 */
static int
flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	int error;
	struct _ipfw_obj_header *oh;
	struct tid_info ti;

	if (sd->valsize != sizeof(*oh))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)op3;
	objheader_to_ti(oh, &ti);

	if (op3->opcode == IP_FW_TABLE_XDESTROY)
		error = destroy_table(ch, &ti);
	else if (op3->opcode == IP_FW_TABLE_XFLUSH)
		error = flush_table(ch, &ti);
	else
		return (ENOTSUP);

	return (error);
}

static void
restart_flush(void *object, struct op_state *_state)
{
	struct tableop_state *ts;

	ts = (struct tableop_state *)_state;

	if (ts->tc != object)
		return;

	/* Indicate we've called */
	ts->modified = 1;
}

/*
 * Flushes given table.
 *
 * Function creates new table instance with the same
 * parameters, swaps it with old one and
 * flushes state without holding runtime WLOCK.
 *
 * Returns 0 on success.
 */
int
flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
{
	struct namedobj_instance *ni;
	struct table_config *tc;
	struct table_algo *ta;
	struct table_info ti_old, ti_new, *tablestate;
	void *astate_old, *astate_new;
	char algostate[64], *pstate;
	struct tableop_state ts;
	int error, need_gc;
	uint16_t kidx;
	uint8_t tflags;

	/*
	 * Stage 1: save table algorithm.
	 * Reference found table to ensure it won't disappear.
	 */
	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}
	need_gc = 0;
	astate_new = NULL;
	memset(&ti_new, 0, sizeof(ti_new));
restart:
	/* Set up swap handler */
	memset(&ts, 0, sizeof(ts));
	ts.opstate.func = restart_flush;
	ts.tc = tc;

	ta = tc->ta;
	/* Do not flush readonly tables */
	if ((ta->flags & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}
	/* Save startup algo parameters */
	if (ta->print_config != NULL) {
		ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx),
		    algostate, sizeof(algostate));
		pstate = algostate;
	} else
		pstate = NULL;
	tflags = tc->tflags;
	tc->no.refcnt++;
	add_toperation_state(ch, &ts);
	IPFW_UH_WUNLOCK(ch);

	/*
	 * Stage 1.5: if this is not the first attempt, destroy previous state
	 */
	if (need_gc != 0) {
		ta->destroy(astate_new, &ti_new);
		need_gc = 0;
	}

	/*
	 * Stage 2: allocate new table instance using same algo.
	 */
	memset(&ti_new, 0, sizeof(struct table_info));
	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);

	/*
	 * Stage 3: swap old state pointers with newly-allocated ones.
	 * Decrease refcount.
	 */
	IPFW_UH_WLOCK(ch);
	tc->no.refcnt--;
	del_toperation_state(ch, &ts);

	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}

	/*
	 * Restart operation if table swap has happened:
	 * even if algo may be the same, algo init parameters
	 * may change. Restart operation instead of doing
	 * complex checks.
	 */
	if (ts.modified != 0) {
		/* Delay destroying data since we're holding UH lock */
		need_gc = 1;
		goto restart;
	}

	ni = CHAIN_TO_NI(ch);
	kidx = tc->no.kidx;
	tablestate = (struct table_info *)ch->tablestate;

	IPFW_WLOCK(ch);
	ti_old = tablestate[kidx];
	tablestate[kidx] = ti_new;
	IPFW_WUNLOCK(ch);

	astate_old = tc->astate;
	tc->astate = astate_new;
	tc->ti_copy = ti_new;
	tc->count = 0;

	/* Notify algo on real @ti address */
	if (ta->change_ti != NULL)
		ta->change_ti(tc->astate, &tablestate[kidx]);

	/*
	 * Stage 4: unref values.
	 */
	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
	IPFW_UH_WUNLOCK(ch);

	/*
	 * Stage 5: perform real flush/destroy.
	 */
	ta->destroy(astate_old, &ti_old);

	return (0);
}

/*
 * Swaps two tables.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
 *
 * Returns 0 on success
 */
static int
swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	int error;
	struct _ipfw_obj_header *oh;
	struct tid_info ti_a, ti_b;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)op3;
	ntlv_to_ti(&oh->ntlv, &ti_a);
	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);

	error = swap_tables(ch, &ti_a, &ti_b);

	return (error);
}

/*
 * Swaps two tables of the same type/valtype.
 *
 * Checks if tables are compatible and limits
 * permit swap, then actually performs swap.
 *
 * Each table consists of 2 different parts:
 * config:
 *   @tc (with name, set, kidx) and rule bindings, which is "stable".
 *   number of items
 *   table algo
 * runtime:
 *   runtime data @ti (ch->tablestate)
 *   runtime cache in @tc
 *   algo-specific data (@tc->astate)
 *
 * So we switch:
 *   all runtime data
 *   number of items
 *   table algo
 *
 * After that we call @ti change handler for each table.
 *
 * Note that referencing @tc won't protect tc->ta from change.
 * XXX: Do we need to restrict swap between locked tables?
 * XXX: Do we need to exchange ftype?
 *
 * Returns 0 on success.
 */
static int
swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
    struct tid_info *b)
{
	struct namedobj_instance *ni;
	struct table_config *tc_a, *tc_b;
	struct table_algo *ta;
	struct table_info ti, *tablestate;
	void *astate;
	uint32_t count;

	/*
	 * Stage 1: find both tables and ensure they are of
	 * the same type.
	 */
	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc_a = find_table(ni, a)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}
	if ((tc_b = find_table(ni, b)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* It is very easy to swap between the same table */
	if (tc_a == tc_b) {
		IPFW_UH_WUNLOCK(ch);
		return (0);
	}

	/* Check type and value are the same */
	if (tc_a->no.subtype!=tc_b->no.subtype || tc_a->tflags!=tc_b->tflags) {
		IPFW_UH_WUNLOCK(ch);
		return (EINVAL);
	}

	/* Check limits before swap */
	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
		IPFW_UH_WUNLOCK(ch);
		return (EFBIG);
	}

	/* Check if one of the tables is readonly */
	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}

	/* Notify we're going to swap */
	rollback_toperation_state(ch, tc_a);
	rollback_toperation_state(ch, tc_b);

	/* Everything is fine, prepare to swap */
	tablestate = (struct table_info *)ch->tablestate;
	ti = tablestate[tc_a->no.kidx];
	ta = tc_a->ta;
	astate = tc_a->astate;
	count = tc_a->count;

	IPFW_WLOCK(ch);
	/* a <- b */
	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
	tc_a->ta = tc_b->ta;
	tc_a->astate = tc_b->astate;
	tc_a->count = tc_b->count;
	/* b <- a */
	tablestate[tc_b->no.kidx] = ti;
	tc_b->ta = ta;
	tc_b->astate = astate;
	tc_b->count = count;
	IPFW_WUNLOCK(ch);

	/* Ensure tc.ti copies are in sync */
	tc_a->ti_copy = tablestate[tc_a->no.kidx];
	tc_b->ti_copy = tablestate[tc_b->no.kidx];

	/* Notify both tables on @ti change */
	if (tc_a->ta->change_ti != NULL)
		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
	if (tc_b->ta->change_ti != NULL)
		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);

	IPFW_UH_WUNLOCK(ch);

	return (0);
}

/*
 * Destroys table specified by @ti.
 * Data layout (v0)(current):
 * Request: [ ip_fw3_opheader ]
 *
 * Returns 0 on success
 */
static int
destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
{
	struct namedobj_instance *ni;
	struct table_config *tc;

	IPFW_UH_WLOCK(ch);

	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* Do not permit destroying referenced tables */
	if (tc->no.refcnt > 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EBUSY);
	}

	IPFW_WLOCK(ch);
	unlink_table(ch, tc);
	IPFW_WUNLOCK(ch);

	/* Free obj index */
	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
		printf("Error unlinking kidx %d from table %s\n",
		    tc->no.kidx, tc->tablename);

	/* Unref values used in tables while holding UH lock */
	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
	IPFW_UH_WUNLOCK(ch);

	free_table_config(ni, tc);

	return (0);
}

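/*
 * Rounds up @v to the nearest power of two (e.g. 5 -> 8, 8 -> 8).
 */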
static uint32_t
roundup2p(uint32_t v)
{

	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v++;

	return (v);
}

/*
 * Grow tables index.
 *
 * Returns 0 on success.
 */
int
ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
{
	unsigned int tbl;
	struct namedobj_instance *ni;
	void *new_idx, *old_tablestate, *tablestate;
	struct table_info *ti;
	struct table_config *tc;
	int i, new_blocks;

	/* Check new value for validity */
	if (ntables == 0)
		return (EINVAL);
	if (ntables > IPFW_TABLES_MAX)
		ntables = IPFW_TABLES_MAX;
	/* Align to nearest power of 2 */
	ntables = (unsigned int)roundup2p(ntables);

	/* Allocate new pointers */
	tablestate = malloc(ntables * sizeof(struct table_info),
	    M_IPFW, M_WAITOK | M_ZERO);

	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);

	IPFW_UH_WLOCK(ch);

	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
	ni = CHAIN_TO_NI(ch);

	/* Temporarily restrict decreasing max_tables */
	if (ntables < V_fw_tables_max) {
		/*
		 * FIXME: Check if we really can shrink
		 */
		IPFW_UH_WUNLOCK(ch);
		return (EINVAL);
	}

	/* Copy table info/indices */
	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);

	IPFW_WLOCK(ch);

	/* Change pointers */
	old_tablestate = ch->tablestate;
	ch->tablestate = tablestate;
	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);

	V_fw_tables_max = ntables;

	IPFW_WUNLOCK(ch);

	/* Notify all consumers that their @ti pointer has changed */
	ti = (struct table_info *)ch->tablestate;
	for (i = 0; i < tbl; i++, ti++) {
		if (ti->lookup == NULL)
			continue;
		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
		if (tc == NULL || tc->ta->change_ti == NULL)
			continue;

		tc->ta->change_ti(tc->astate, ti);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Free old pointers */
	free(old_tablestate, M_IPFW);
	ipfw_objhash_bitmap_free(new_idx, new_blocks);

	return (0);
}

/*
 * Lookup table's named object by its @kidx.
 */
struct named_object *
ipfw_objhash_lookup_table_kidx(struct ip_fw_chain *ch, uint16_t kidx)
{

	return (ipfw_objhash_lookup_kidx(CHAIN_TO_NI(ch), kidx));
}

/*
 * Take reference to table specified in @ntlv.
 * On success return its @kidx.
 */
int
ipfw_ref_table(struct ip_fw_chain *ch, ipfw_obj_ntlv *ntlv, uint16_t *kidx)
{
	struct tid_info ti;
	struct table_config *tc;
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	ntlv_to_ti(ntlv, &ti);
	error = find_table_err(CHAIN_TO_NI(ch), &ti, &tc);
	if (error != 0)
		return (error);

	if (tc == NULL)
		return (ESRCH);

	tc_ref(tc);
	*kidx = tc->no.kidx;

	return (0);
}

void
ipfw_unref_table(struct ip_fw_chain *ch, uint16_t kidx)
{

	struct namedobj_instance *ni;
	struct named_object *no;

	IPFW_UH_WLOCK_ASSERT(ch);
	ni = CHAIN_TO_NI(ch);
	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("Table with index %d not found", kidx));
	no->refcnt--;
}

/*
 * Lookup an arbitrary key @paddr of length @plen in table @tbl.
 * Stores found value in @val.
 *
 * Returns 1 if key was found.
 */
int
ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
    void *paddr, uint32_t *val)
{
	struct table_info *ti;

	ti = KIDX_TO_TI(ch, tbl);

	return (ti->lookup(ti, paddr, plen, val));
}

/*
 * Info/List/dump support for tables.
 *
 */

/*
 * High-level 'get' cmds sysctl handlers
 */

/*
 * Lists all tables currently available in kernel.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
 * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ]
 *
 * Returns 0 on success
 */
static int
list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_lheader *olh;
	int error;

	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
	if (olh == NULL)
		return (EINVAL);
	if (sd->valsize < olh->size)
		return (EINVAL);

	IPFW_UH_RLOCK(ch);
	error = export_tables(ch, olh, sd);
	IPFW_UH_RUNLOCK(ch);

	return (error);
}

/*
 * Store table info to buffer provided by @sd.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_xtable_info(empty)]
 * Reply: [ ipfw_obj_header ipfw_xtable_info ]
 *
 * Returns 0 on success.
 */
static int
describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	struct table_config *tc;
	struct tid_info ti;
	size_t sz;

	sz = sizeof(*oh) + sizeof(ipfw_xtable_info);
	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
	if (oh == NULL)
		return (EINVAL);

	objheader_to_ti(oh, &ti);

	IPFW_UH_RLOCK(ch);
	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (ESRCH);
	}

	export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1));
	IPFW_UH_RUNLOCK(ch);

	return (0);
}

/*
 * Modifies existing table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_xtable_info ]
 *
 * Returns 0 on success
 */
static int
modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	ipfw_xtable_info *i;
	char *tname;
	struct tid_info ti;
	struct namedobj_instance *ni;
	struct table_config *tc;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)sd->kbuf;
	i = (ipfw_xtable_info *)(oh + 1);

	/*
	 * Verify user-supplied strings.
	 * Check for null-terminated/zero-length strings.
	 */
	tname = oh->ntlv.name;
	if (check_table_name(tname) != 0)
		return (EINVAL);

	objheader_to_ti(oh, &ti);
	ti.type = i->type;

	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, &ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* Do not support any modifications for readonly tables */
	if ((tc->ta->flags & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}

	if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0)
		tc->limit = i->limit;
	if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0)
		tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0);
	IPFW_UH_WUNLOCK(ch);

	return (0);
}

/*
 * Creates new table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_xtable_info ]
 *
 * Returns 0 on success
 */
static int
create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	ipfw_xtable_info *i;
	char *tname, *aname;
	struct tid_info ti;
	struct namedobj_instance *ni;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)sd->kbuf;
	i = (ipfw_xtable_info *)(oh + 1);

	/*
	 * Verify user-supplied strings.
	 * Check for null-terminated/zero-length strings.
	 */
	tname = oh->ntlv.name;
	aname = i->algoname;
	if (check_table_name(tname) != 0 ||
	    strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname))
		return (EINVAL);

	if (aname[0] == '\0') {
		/* Use default algorithm */
		aname = NULL;
	}

	objheader_to_ti(oh, &ti);
	ti.type = i->type;

	ni = CHAIN_TO_NI(ch);

	IPFW_UH_RLOCK(ch);
	if (find_table(ni, &ti) != NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (EEXIST);
	}
	IPFW_UH_RUNLOCK(ch);

	return (create_table_internal(ch, &ti, aname, i, NULL, 0));
}

/*
 * Creates new table based on @ti and @aname.
 *
 * Assume @aname to be checked and valid.
 * Stores allocated table kidx inside @pkidx (if non-NULL).
 * Reference created table if @compat is non-zero.
 *
 * Returns 0 on success.
 */
static int
create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
    char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat)
{
	struct namedobj_instance *ni;
	struct table_config *tc, *tc_new, *tmp;
	struct table_algo *ta;
	uint16_t kidx;

	ni = CHAIN_TO_NI(ch);

	ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
	if (ta == NULL)
		return (ENOTSUP);

	tc = alloc_table_config(ch, ti, ta, aname, i->tflags);
	if (tc == NULL)
		return (ENOMEM);

	tc->vmask = i->vmask;
	tc->limit = i->limit;
	if (ta->flags & TA_FLAG_READONLY)
		tc->locked = 1;
	else
		tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;

	IPFW_UH_WLOCK(ch);

	/* Check if table has already been created */
	tc_new = find_table(ni, ti);
	if (tc_new != NULL) {
		/*
		 * Compat: do not fail if we're
		 * requesting to create existing table
		 * which has the same type
		 */
		if (compat == 0 || tc_new->no.subtype != tc->no.subtype) {
			IPFW_UH_WUNLOCK(ch);
			free_table_config(ni, tc);
			return (EEXIST);
		}

		/* Exchange tc and tc_new for proper refcounting & freeing */
		tmp = tc;
		tc = tc_new;
		tc_new = tmp;
	} else {
		/* New table */
		if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
			IPFW_UH_WUNLOCK(ch);
			printf("Unable to allocate table index."
			    " Consider increasing net.inet.ip.fw.tables_max");
			free_table_config(ni, tc);
			return (EBUSY);
		}
		tc->no.kidx = kidx;
		tc->no.etlv = IPFW_TLV_TBL_NAME;

		link_table(ch, tc);
	}

	if (compat != 0)
		tc->no.refcnt++;
	if (pkidx != NULL)
		*pkidx = tc->no.kidx;

	IPFW_UH_WUNLOCK(ch);

	if (tc_new != NULL)
		free_table_config(ni, tc_new);

	return (0);
}

static void
ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti)
{

	memset(ti, 0, sizeof(struct tid_info));
	ti->set = ntlv->set;
	ti->uidx = ntlv->idx;
	ti->tlvs = ntlv;
	ti->tlen = ntlv->head.length;
}

static void
objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti)
{

	ntlv_to_ti(&oh->ntlv, ti);
}

struct namedobj_instance *
ipfw_get_table_objhash(struct ip_fw_chain *ch)
{

	return (CHAIN_TO_NI(ch));
}

/*
 * Exports basic table info as name TLV.
 * Used inside dump_static_rules() to provide info
 * about all tables referenced by current ruleset.
 *
 * Returns 0 on success.
 */
int
ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
    struct sockopt_data *sd)
{
	struct namedobj_instance *ni;
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;

	ni = CHAIN_TO_NI(ch);

	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("invalid table kidx passed"));

	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
	if (ntlv == NULL)
		return (ENOMEM);

	ntlv->head.type = IPFW_TLV_TBL_NAME;
	ntlv->head.length = sizeof(*ntlv);
	ntlv->idx = no->kidx;
	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));

	return (0);
}

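/*
 * Context passed to per-entry foreach callbacks while counting or
 * dumping table entries to userland.
 */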
struct dump_args {
	struct ip_fw_chain *ch;
	struct table_info *ti;
	struct table_config *tc;
	struct sockopt_data *sd;
	uint32_t cnt;
	uint16_t uidx;
	int error;
	uint32_t size;
	ipfw_table_entry *ent;
	ta_foreach_f *f;
	void *farg;
	ipfw_obj_tentry tent;
};

static int
count_ext_entries(void *e, void *arg)
{
	struct dump_args *da;

	da = (struct dump_args *)arg;
	da->cnt++;

	return (0);
}

/*
 * Gets number of items from table either using
 * internal counter or calling algo callback for
 * externally-managed tables.
 *
 * Returns number of records.
 */
static uint32_t
table_get_count(struct ip_fw_chain *ch, struct table_config *tc)
{
	struct table_info *ti;
	struct table_algo *ta;
	struct dump_args da;

	ti = KIDX_TO_TI(ch, tc->no.kidx);
	ta = tc->ta;

	/* Use internal counter for self-managed tables */
	if ((ta->flags & TA_FLAG_READONLY) == 0)
		return (tc->count);

	/* Use callback to quickly get number of items */
	if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0)
		return (ta->get_count(tc->astate, ti));

	/* Count number of items ourselves */
	memset(&da, 0, sizeof(da));
	ta->foreach(tc->astate, ti, count_ext_entries, &da);

	return (da.cnt);
}

/*
 * Exports table @tc info into standard ipfw_xtable_info format.
 */
static void
export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
    ipfw_xtable_info *i)
{
	struct table_info *ti;
	struct table_algo *ta;

	i->type = tc->no.subtype;
	i->tflags = tc->tflags;
	i->vmask = tc->vmask;
	i->set = tc->no.set;
	i->kidx = tc->no.kidx;
	i->refcnt = tc->no.refcnt;
	i->count = table_get_count(ch, tc);
	i->limit = tc->limit;
	i->flags |= (tc->locked != 0) ? IPFW_TGFLAGS_LOCKED : 0;
	i->size = i->count * sizeof(ipfw_obj_tentry);
	i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
	strlcpy(i->tablename, tc->tablename, sizeof(i->tablename));
	ti = KIDX_TO_TI(ch, tc->no.kidx);
	ta = tc->ta;
	if (ta->print_config != NULL) {
		/* Use algo function to print table config to string */
		ta->print_config(tc->astate, ti, i->algoname,
		    sizeof(i->algoname));
	} else
		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
	/* Dump algo-specific data, if possible */
	if (ta->dump_tinfo != NULL) {
		ta->dump_tinfo(tc->astate, ti, &i->ta_info);
		i->ta_info.flags |= IPFW_TATFLAGS_DATA;
	}
}

struct dump_table_args {
	struct ip_fw_chain *ch;
	struct sockopt_data *sd;
};

static int
export_table_internal(struct namedobj_instance *ni, struct named_object *no,
    void *arg)
{
	ipfw_xtable_info *i;
	struct dump_table_args *dta;

	dta = (struct dump_table_args *)arg;

	i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i));
	KASSERT(i != NULL, ("previously checked buffer is not enough"));

	export_table_info(dta->ch, (struct table_config *)no, i);
	return (0);
}

/*
 * Export all tables as ipfw_xtable_info structures to
 * storage provided by @sd.
 *
 * If supplied buffer is too small, fills in required size
 * and returns ENOMEM.
 * Returns 0 on success.
 */
static int
export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
    struct sockopt_data *sd)
{
	uint32_t size;
	uint32_t count;
	struct dump_table_args dta;

	count = ipfw_objhash_count(CHAIN_TO_NI(ch));
	size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader);

	/* Fill in header regardless of buffer size */
	olh->count = count;
	olh->objsize = sizeof(ipfw_xtable_info);

	if (size > olh->size) {
		olh->size = size;
		return (ENOMEM);
	}

	olh->size = size;

	dta.ch = ch;
	dta.sd = sd;

	ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta);

	return (0);
}

/*
 * Dumps all table data
 * Data layout (v1)(current):
 * Request: [ ipfw_obj_header ], size = ipfw_xtable_info.size
 * Reply: [ ipfw_obj_header ipfw_xtable_info ipfw_obj_tentry x N ]
 *
 * Returns 0 on success
 */
static int
dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	ipfw_xtable_info *i;
	struct tid_info ti;
	struct table_config *tc;
	struct table_algo *ta;
	struct dump_args da;
	uint32_t sz;

	sz = sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
	if (oh == NULL)
		return (EINVAL);

	i = (ipfw_xtable_info *)(oh + 1);
	objheader_to_ti(oh, &ti);

	IPFW_UH_RLOCK(ch);
	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (ESRCH);
	}
	export_table_info(ch, tc, i);

	if (sd->valsize < i->size) {
		/*
		 * Submitted buffer size is not enough.
		 * We've already filled in @i structure with
		 * relevant table info including size, so we
		 * can return. Buffer will be flushed automatically.
2195 */ 2196 IPFW_UH_RUNLOCK(ch); 2197 return (ENOMEM); 2198 } 2199 2200 /* 2201 * Do the actual dump in eXtended format 2202 */ 2203 memset(&da, 0, sizeof(da)); 2204 da.ch = ch; 2205 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2206 da.tc = tc; 2207 da.sd = sd; 2208 2209 ta = tc->ta; 2210 2211 ta->foreach(tc->astate, da.ti, dump_table_tentry, &da); 2212 IPFW_UH_RUNLOCK(ch); 2213 2214 return (da.error); 2215 } 2216 2217 /* 2218 * Dumps all table data 2219 * Data layout (version 0)(legacy): 2220 * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE() 2221 * Reply: [ ipfw_xtable ipfw_table_xentry x N ] 2222 * 2223 * Returns 0 on success 2224 */ 2225 static int 2226 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2227 struct sockopt_data *sd) 2228 { 2229 ipfw_xtable *xtbl; 2230 struct tid_info ti; 2231 struct table_config *tc; 2232 struct table_algo *ta; 2233 struct dump_args da; 2234 size_t sz, count; 2235 2236 xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable)); 2237 if (xtbl == NULL) 2238 return (EINVAL); 2239 2240 memset(&ti, 0, sizeof(ti)); 2241 ti.uidx = xtbl->tbl; 2242 2243 IPFW_UH_RLOCK(ch); 2244 if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) { 2245 IPFW_UH_RUNLOCK(ch); 2246 return (0); 2247 } 2248 count = table_get_count(ch, tc); 2249 sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable); 2250 2251 xtbl->cnt = count; 2252 xtbl->size = sz; 2253 xtbl->type = tc->no.subtype; 2254 xtbl->tbl = ti.uidx; 2255 2256 if (sd->valsize < sz) { 2257 /* 2258 * Submitted buffer size is not enough. 2259 * We've already filled in @xtbl structure with 2260 * relevant table info including size, so we 2261 * can return. Buffer will be flushed automatically. 2262 */ 2263 IPFW_UH_RUNLOCK(ch); 2264 return (ENOMEM); 2265 } 2266 2267 /* Do the actual dump in eXtended format */ 2268 memset(&da, 0, sizeof(da)); 2269 da.ch = ch; 2270 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2271 da.tc = tc; 2272 da.sd = sd; 2273 2274 ta = tc->ta; 2275 2276 ta->foreach(tc->astate, da.ti, dump_table_xentry, &da); 2277 IPFW_UH_RUNLOCK(ch); 2278 2279 return (0); 2280 } 2281 2282 /* 2283 * Legacy function to retrieve number of items in table.
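 */

/*
 * Editorial sketch, not kernel code: for externally-managed tables,
 * table_get_count() falls back to a foreach() walk with a trivial
 * counting callback (see count_ext_entries() above).  The generic shape
 * of that pattern, with hypothetical names:
 */
#include <stddef.h>

typedef int (*visit_fn)(void *entry, void *arg);
typedef void (*foreach_fn)(void *state, visit_fn f, void *arg);

static int
count_cb(void *entry, void *arg)
{
	size_t *cnt = arg;

	(void)entry;		/* only the number of calls matters */
	(*cnt)++;
	return (0);		/* zero means "keep iterating" */
}

static size_t
count_entries(foreach_fn foreach, void *state)
{
	size_t cnt = 0;

	foreach(state, count_cb, &cnt);
	return (cnt);
}

/*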
2284 */ 2285 static int 2286 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2287 struct sockopt_data *sd) 2288 { 2289 uint32_t *tbl; 2290 struct tid_info ti; 2291 size_t sz; 2292 int error; 2293 2294 sz = sizeof(*op3) + sizeof(uint32_t); 2295 op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz); 2296 if (op3 == NULL) 2297 return (EINVAL); 2298 2299 tbl = (uint32_t *)(op3 + 1); 2300 memset(&ti, 0, sizeof(ti)); 2301 ti.uidx = *tbl; 2302 IPFW_UH_RLOCK(ch); 2303 error = ipfw_count_xtable(ch, &ti, tbl); 2304 IPFW_UH_RUNLOCK(ch); 2305 return (error); 2306 } 2307 2308 /* 2309 * Legacy IP_FW_TABLE_GETSIZE handler 2310 */ 2311 int 2312 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt) 2313 { 2314 struct table_config *tc; 2315 2316 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) 2317 return (ESRCH); 2318 *cnt = table_get_count(ch, tc); 2319 return (0); 2320 } 2321 2322 /* 2323 * Legacy IP_FW_TABLE_XGETSIZE handler 2324 */ 2325 int 2326 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt) 2327 { 2328 struct table_config *tc; 2329 uint32_t count; 2330 2331 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) { 2332 *cnt = 0; 2333 return (0); /* 'table all list' requires success */ 2334 } 2335 2336 count = table_get_count(ch, tc); 2337 *cnt = count * sizeof(ipfw_table_xentry); 2338 if (count > 0) 2339 *cnt += sizeof(ipfw_xtable); 2340 return (0); 2341 } 2342 2343 static int 2344 dump_table_entry(void *e, void *arg) 2345 { 2346 struct dump_args *da; 2347 struct table_config *tc; 2348 struct table_algo *ta; 2349 ipfw_table_entry *ent; 2350 struct table_value *pval; 2351 int error; 2352 2353 da = (struct dump_args *)arg; 2354 2355 tc = da->tc; 2356 ta = tc->ta; 2357 2358 /* Out of memory, returning */ 2359 if (da->cnt == da->size) 2360 return (1); 2361 ent = da->ent++; 2362 ent->tbl = da->uidx; 2363 da->cnt++; 2364 2365 error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent); 2366 if (error != 0) 2367 return (error); 2368 2369 ent->addr = da->tent.k.addr.s_addr; 2370 ent->masklen = da->tent.masklen; 2371 pval = get_table_value(da->ch, da->tc, da->tent.v.kidx); 2372 ent->value = ipfw_export_table_value_legacy(pval); 2373 2374 return (0); 2375 } 2376 2377 /* 2378 * Dumps table in pre-8.1 legacy format. 2379 */ 2380 int 2381 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti, 2382 ipfw_table *tbl) 2383 { 2384 struct table_config *tc; 2385 struct table_algo *ta; 2386 struct dump_args da; 2387 2388 tbl->cnt = 0; 2389 2390 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) 2391 return (0); /* XXX: We should return ESRCH */ 2392 2393 ta = tc->ta; 2394 2395 /* This dump format supports IPv4 only */ 2396 if (tc->no.subtype != IPFW_TABLE_ADDR) 2397 return (0); 2398 2399 memset(&da, 0, sizeof(da)); 2400 da.ch = ch; 2401 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2402 da.tc = tc; 2403 da.ent = &tbl->ent[0]; 2404 da.size = tbl->size; 2405 2406 tbl->cnt = 0; 2407 ta->foreach(tc->astate, da.ti, dump_table_entry, &da); 2408 tbl->cnt = da.cnt; 2409 2410 return (0); 2411 } 2412 2413 /* 2414 * Dumps table entry in eXtended format (v1)(current). 
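 */

/*
 * Editorial sketch, not kernel code: the legacy dump above writes into a
 * fixed-size caller array and guards every write by comparing cnt with
 * size; returning a non-zero value from the callback also lets walkers
 * that honor it stop early.  A reduced version of that cursor pattern,
 * with hypothetical names:
 */
#include <stddef.h>

struct cursor {
	int	*out;		/* caller-provided output array */
	size_t	 size;		/* capacity of the array */
	size_t	 cnt;		/* entries written so far */
};

static int
copy_cb(void *entry, void *arg)
{
	struct cursor *c = arg;

	if (c->cnt == c->size)
		return (1);	/* array is full, request early stop */
	c->out[c->cnt++] = *(int *)entry;
	return (0);
}

/*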
2415 */ 2416 static int 2417 dump_table_tentry(void *e, void *arg) 2418 { 2419 struct dump_args *da; 2420 struct table_config *tc; 2421 struct table_algo *ta; 2422 struct table_value *pval; 2423 ipfw_obj_tentry *tent; 2424 int error; 2425 2426 da = (struct dump_args *)arg; 2427 2428 tc = da->tc; 2429 ta = tc->ta; 2430 2431 tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent)); 2432 /* Out of memory, returning */ 2433 if (tent == NULL) { 2434 da->error = ENOMEM; 2435 return (1); 2436 } 2437 tent->head.length = sizeof(ipfw_obj_tentry); 2438 tent->idx = da->uidx; 2439 2440 error = ta->dump_tentry(tc->astate, da->ti, e, tent); 2441 if (error != 0) 2442 return (error); 2443 2444 pval = get_table_value(da->ch, da->tc, tent->v.kidx); 2445 ipfw_export_table_value_v1(pval, &tent->v.value); 2446 2447 return (0); 2448 } 2449 2450 /* 2451 * Dumps table entry in eXtended format (v0). 2452 */ 2453 static int 2454 dump_table_xentry(void *e, void *arg) 2455 { 2456 struct dump_args *da; 2457 struct table_config *tc; 2458 struct table_algo *ta; 2459 ipfw_table_xentry *xent; 2460 ipfw_obj_tentry *tent; 2461 struct table_value *pval; 2462 int error; 2463 2464 da = (struct dump_args *)arg; 2465 2466 tc = da->tc; 2467 ta = tc->ta; 2468 2469 xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent)); 2470 /* Out of memory, returning */ 2471 if (xent == NULL) 2472 return (1); 2473 xent->len = sizeof(ipfw_table_xentry); 2474 xent->tbl = da->uidx; 2475 2476 memset(&da->tent, 0, sizeof(da->tent)); 2477 tent = &da->tent; 2478 error = ta->dump_tentry(tc->astate, da->ti, e, tent); 2479 if (error != 0) 2480 return (error); 2481 2482 /* Convert current format to previous one */ 2483 xent->masklen = tent->masklen; 2484 pval = get_table_value(da->ch, da->tc, da->tent.v.kidx); 2485 xent->value = ipfw_export_table_value_legacy(pval); 2486 /* Apply some hacks */ 2487 if (tc->no.subtype == IPFW_TABLE_ADDR && tent->subtype == AF_INET) { 2488 xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr; 2489 xent->flags = IPFW_TCF_INET; 2490 } else 2491 memcpy(&xent->k, &tent->k, sizeof(xent->k)); 2492 2493 return (0); 2494 } 2495 2496 /* 2497 * Helper function to export table algo data 2498 * to tentry format before calling user function. 2499 * 2500 * Returns 0 on success. 2501 */ 2502 static int 2503 prepare_table_tentry(void *e, void *arg) 2504 { 2505 struct dump_args *da; 2506 struct table_config *tc; 2507 struct table_algo *ta; 2508 int error; 2509 2510 da = (struct dump_args *)arg; 2511 2512 tc = da->tc; 2513 ta = tc->ta; 2514 2515 error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent); 2516 if (error != 0) 2517 return (error); 2518 2519 da->f(&da->tent, da->farg); 2520 2521 return (0); 2522 } 2523 2524 /* 2525 * Allow external consumers to read table entries in standard format. 
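 */

/*
 * Editorial sketch, not kernel code: the v0 compatibility path in
 * dump_table_xentry() above stores an IPv4 address in the last 32-bit
 * word of the IPv6 key field and marks the entry with a flag.  A
 * standalone illustration of that encoding; the flag value below is a
 * placeholder, not the real IPFW_TCF_INET.
 */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

#define	EXAMPLE_FLAG_INET	0x01	/* placeholder flag value */

struct legacy_key {
	struct in6_addr	addr6;
	uint8_t		flags;
};

static void
encode_v4_as_legacy(struct in_addr v4, struct legacy_key *k)
{

	memset(k, 0, sizeof(*k));
	/* Place the IPv4 address in the last four bytes of the IPv6 field. */
	memcpy(&k->addr6.s6_addr[12], &v4.s_addr, sizeof(v4.s_addr));
	k->flags |= EXAMPLE_FLAG_INET;
}

/*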
2526 */ 2527 int 2528 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx, 2529 ta_foreach_f *f, void *arg) 2530 { 2531 struct namedobj_instance *ni; 2532 struct table_config *tc; 2533 struct table_algo *ta; 2534 struct dump_args da; 2535 2536 ni = CHAIN_TO_NI(ch); 2537 2538 tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx); 2539 if (tc == NULL) 2540 return (ESRCH); 2541 2542 ta = tc->ta; 2543 2544 memset(&da, 0, sizeof(da)); 2545 da.ch = ch; 2546 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2547 da.tc = tc; 2548 da.f = f; 2549 da.farg = arg; 2550 2551 ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da); 2552 2553 return (0); 2554 } 2555 2556 /* 2557 * Table algorithms 2558 */ 2559 2560 /* 2561 * Finds algorithm by index, table type or supplied name. 2562 * 2563 * Returns pointer to algo or NULL. 2564 */ 2565 static struct table_algo * 2566 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name) 2567 { 2568 int i, l; 2569 struct table_algo *ta; 2570 2571 if (ti->type > IPFW_TABLE_MAXTYPE) 2572 return (NULL); 2573 2574 /* Search by index */ 2575 if (ti->atype != 0) { 2576 if (ti->atype > tcfg->algo_count) 2577 return (NULL); 2578 return (tcfg->algo[ti->atype]); 2579 } 2580 2581 if (name == NULL) { 2582 /* Return default algorithm for given type if set */ 2583 return (tcfg->def_algo[ti->type]); 2584 } 2585 2586 /* Search by name */ 2587 /* TODO: better search */ 2588 for (i = 1; i <= tcfg->algo_count; i++) { 2589 ta = tcfg->algo[i]; 2590 2591 /* 2592 * One can supply additional algorithm 2593 * parameters so we compare only the first word 2594 * of supplied name: 2595 * 'addr:chash hsize=32' 2596 * '^^^^^^^^^' 2597 * 2598 */ 2599 l = strlen(ta->name); 2600 if (strncmp(name, ta->name, l) != 0) 2601 continue; 2602 if (name[l] != '\0' && name[l] != ' ') 2603 continue; 2604 /* Check if we're requesting proper table type */ 2605 if (ti->type != 0 && ti->type != ta->type) 2606 return (NULL); 2607 return (ta); 2608 } 2609 2610 return (NULL); 2611 } 2612 2613 /* 2614 * Register new table algo @ta. 2615 * Stores algo id inside @idx. 2616 * 2617 * Returns 0 on success. 2618 */ 2619 int 2620 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size, 2621 int *idx) 2622 { 2623 struct tables_config *tcfg; 2624 struct table_algo *ta_new; 2625 size_t sz; 2626 2627 if (size > sizeof(struct table_algo)) 2628 return (EINVAL); 2629 2630 /* Check for the required on-stack size for add/del */ 2631 sz = roundup2(ta->ta_buf_size, sizeof(void *)); 2632 if (sz > TA_BUF_SZ) 2633 return (EINVAL); 2634 2635 KASSERT(ta->type <= IPFW_TABLE_MAXTYPE,("Increase IPFW_TABLE_MAXTYPE")); 2636 2637 /* Copy algorithm data to stable storage. */ 2638 ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO); 2639 memcpy(ta_new, ta, size); 2640 2641 tcfg = CHAIN_TO_TCFG(ch); 2642 2643 KASSERT(tcfg->algo_count < 255, ("Increase algo array size")); 2644 2645 tcfg->algo[++tcfg->algo_count] = ta_new; 2646 ta_new->idx = tcfg->algo_count; 2647 2648 /* Set algorithm as default one for given type */ 2649 if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 && 2650 tcfg->def_algo[ta_new->type] == NULL) 2651 tcfg->def_algo[ta_new->type] = ta_new; 2652 2653 *idx = ta_new->idx; 2654 2655 return (0); 2656 } 2657 2658 /* 2659 * Unregisters table algo using @idx as id. 2660 * XXX: It is NOT safe to call this function in any place 2661 * other than ipfw instance destroy handler. 
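 */

/*
 * Editorial sketch, not kernel code: find_table_algo() above compares
 * only the first word of the user-supplied string, so trailing
 * algorithm parameters ("addr:chash hsize=32") do not break the lookup.
 * A standalone version of that comparison:
 */
#include <string.h>

static int
algo_name_matches(const char *supplied, const char *algoname)
{
	size_t l;

	l = strlen(algoname);
	if (strncmp(supplied, algoname, l) != 0)
		return (0);
	/* Accept either end of string or a parameter separator. */
	return (supplied[l] == '\0' || supplied[l] == ' ');
}

/* Example: algo_name_matches("addr:chash hsize=32", "addr:chash") == 1. */

/*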
2662 */ 2663 void 2664 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx) 2665 { 2666 struct tables_config *tcfg; 2667 struct table_algo *ta; 2668 2669 tcfg = CHAIN_TO_TCFG(ch); 2670 2671 KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d", 2672 idx, tcfg->algo_count)); 2673 2674 ta = tcfg->algo[idx]; 2675 KASSERT(ta != NULL, ("algo idx %d is NULL", idx)); 2676 2677 if (tcfg->def_algo[ta->type] == ta) 2678 tcfg->def_algo[ta->type] = NULL; 2679 2680 free(ta, M_IPFW); 2681 } 2682 2683 /* 2684 * Lists all table algorithms currently available. 2685 * Data layout (v0)(current): 2686 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 2687 * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ] 2688 * 2689 * Returns 0 on success 2690 */ 2691 static int 2692 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2693 struct sockopt_data *sd) 2694 { 2695 struct _ipfw_obj_lheader *olh; 2696 struct tables_config *tcfg; 2697 ipfw_ta_info *i; 2698 struct table_algo *ta; 2699 uint32_t count, n, size; 2700 2701 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh)); 2702 if (olh == NULL) 2703 return (EINVAL); 2704 if (sd->valsize < olh->size) 2705 return (EINVAL); 2706 2707 IPFW_UH_RLOCK(ch); 2708 tcfg = CHAIN_TO_TCFG(ch); 2709 count = tcfg->algo_count; 2710 size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader); 2711 2712 /* Fill in header regardless of buffer size */ 2713 olh->count = count; 2714 olh->objsize = sizeof(ipfw_ta_info); 2715 2716 if (size > olh->size) { 2717 olh->size = size; 2718 IPFW_UH_RUNLOCK(ch); 2719 return (ENOMEM); 2720 } 2721 olh->size = size; 2722 2723 for (n = 1; n <= count; n++) { 2724 i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 2725 KASSERT(i != NULL, ("previously checked buffer is not enough")); 2726 ta = tcfg->algo[n]; 2727 strlcpy(i->algoname, ta->name, sizeof(i->algoname)); 2728 i->type = ta->type; 2729 i->refcnt = ta->refcnt; 2730 } 2731 2732 IPFW_UH_RUNLOCK(ch); 2733 2734 return (0); 2735 } 2736 2737 static int 2738 classify_srcdst(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2739 { 2740 /* Basic IPv4/IPv6 or u32 lookups */ 2741 *puidx = cmd->arg1; 2742 /* Assume ADDR by default */ 2743 *ptype = IPFW_TABLE_ADDR; 2744 int v; 2745 2746 if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) { 2747 /* 2748 * generic lookup. The key must be 2749 * in 32bit big-endian format.
2750 */ 2751 v = ((ipfw_insn_u32 *)cmd)->d[1]; 2752 switch (v) { 2753 case LOOKUP_DST_IP: 2754 case LOOKUP_SRC_IP: 2755 break; 2756 case LOOKUP_DST_PORT: 2757 case LOOKUP_SRC_PORT: 2758 case LOOKUP_UID: 2759 case LOOKUP_JAIL: 2760 case LOOKUP_DSCP: 2761 case LOOKUP_MARK: 2762 *ptype = IPFW_TABLE_NUMBER; 2763 break; 2764 case LOOKUP_DST_MAC: 2765 case LOOKUP_SRC_MAC: 2766 *ptype = IPFW_TABLE_MAC; 2767 break; 2768 } 2769 } 2770 2771 return (0); 2772 } 2773 2774 static int 2775 classify_via(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2776 { 2777 ipfw_insn_if *cmdif; 2778 2779 /* Interface table, possibly */ 2780 cmdif = (ipfw_insn_if *)cmd; 2781 if (cmdif->name[0] != '\1') 2782 return (1); 2783 2784 *ptype = IPFW_TABLE_INTERFACE; 2785 *puidx = cmdif->p.kidx; 2786 2787 return (0); 2788 } 2789 2790 static int 2791 classify_flow(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2792 { 2793 2794 *puidx = cmd->arg1; 2795 *ptype = IPFW_TABLE_FLOW; 2796 2797 return (0); 2798 } 2799 2800 static int 2801 classify_mac_lookup(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2802 { 2803 *puidx = cmd->arg1; 2804 *ptype = IPFW_TABLE_MAC; 2805 return (0); 2806 } 2807 2808 static void 2809 update_arg1(ipfw_insn *cmd, uint16_t idx) 2810 { 2811 2812 cmd->arg1 = idx; 2813 } 2814 2815 static void 2816 update_via(ipfw_insn *cmd, uint16_t idx) 2817 { 2818 ipfw_insn_if *cmdif; 2819 2820 cmdif = (ipfw_insn_if *)cmd; 2821 cmdif->p.kidx = idx; 2822 } 2823 2824 static int 2825 table_findbyname(struct ip_fw_chain *ch, struct tid_info *ti, 2826 struct named_object **pno) 2827 { 2828 struct table_config *tc; 2829 int error; 2830 2831 IPFW_UH_WLOCK_ASSERT(ch); 2832 2833 error = find_table_err(CHAIN_TO_NI(ch), ti, &tc); 2834 if (error != 0) 2835 return (error); 2836 2837 *pno = &tc->no; 2838 return (0); 2839 } 2840 2841 /* XXX: sets-sets! */ 2842 static struct named_object * 2843 table_findbykidx(struct ip_fw_chain *ch, uint16_t idx) 2844 { 2845 struct namedobj_instance *ni; 2846 struct table_config *tc; 2847 2848 IPFW_UH_WLOCK_ASSERT(ch); 2849 ni = CHAIN_TO_NI(ch); 2850 tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, idx); 2851 KASSERT(tc != NULL, ("Table with index %d not found", idx)); 2852 2853 return (&tc->no); 2854 } 2855 2856 static int 2857 table_manage_sets(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set, 2858 enum ipfw_sets_cmd cmd) 2859 { 2860 2861 switch (cmd) { 2862 case SWAP_ALL: 2863 case TEST_ALL: 2864 case MOVE_ALL: 2865 /* 2866 * Always return success, the real action and decision 2867 * should make table_manage_sets_all(). 2868 */ 2869 return (0); 2870 case TEST_ONE: 2871 case MOVE_ONE: 2872 /* 2873 * NOTE: we need to use ipfw_objhash_del/ipfw_objhash_add 2874 * if set number will be used in hash function. Currently 2875 * we can just use generic handler that replaces set value. 2876 */ 2877 if (V_fw_tables_sets == 0) 2878 return (0); 2879 break; 2880 case COUNT_ONE: 2881 /* 2882 * Return EOPNOTSUPP for COUNT_ONE when per-set sysctl is 2883 * disabled. This allow skip table's opcodes from additional 2884 * checks when specific rules moved to another set. 2885 */ 2886 if (V_fw_tables_sets == 0) 2887 return (EOPNOTSUPP); 2888 } 2889 /* Use generic sets handler when per-set sysctl is enabled. */ 2890 return (ipfw_obj_manage_sets(CHAIN_TO_NI(ch), IPFW_TLV_TBL_NAME, 2891 set, new_set, cmd)); 2892 } 2893 2894 /* 2895 * We register several opcode rewriters for lookup tables. 2896 * All tables opcodes have the same ETLV type, but different subtype. 
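 */

/*
 * Editorial sketch, not kernel code: each rewriter entry pairs an opcode
 * with a classifier callback, and consumers scan the array until a
 * classifier accepts the instruction (compare the loop in
 * ipfw_switch_tables_namespace() below).  A reduced model of that
 * dispatch, with hypothetical types:
 */
#include <stddef.h>
#include <stdint.h>

struct mini_insn {
	uint8_t		opcode;
	uint16_t	arg1;
};

struct mini_rewriter {
	uint8_t	opcode;
	int	(*classifier)(struct mini_insn *cmd, uint16_t *idx);
};

static uint16_t
classify_insn(const struct mini_rewriter *rw, size_t n, struct mini_insn *cmd)
{
	uint16_t idx;
	size_t i;

	for (i = 0, idx = 0; i < n; i++) {
		if (rw[i].opcode != cmd->opcode)
			continue;
		if (rw[i].classifier(cmd, &idx) == 0)
			break;		/* classifier accepted the opcode */
		idx = 0;		/* rejected: keep scanning */
	}
	return (idx);	/* zero means "no table reference found" */
}

/*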
2897 * To avoid invoking sets handler several times for XXX_ALL commands, 2898 * we use separate manage_sets handler. O_RECV has the lowest value, 2899 * so it should be called first. 2900 */ 2901 static int 2902 table_manage_sets_all(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set, 2903 enum ipfw_sets_cmd cmd) 2904 { 2905 2906 switch (cmd) { 2907 case SWAP_ALL: 2908 case TEST_ALL: 2909 /* 2910 * Return success for TEST_ALL, since nothing prevents 2911 * move rules from one set to another. All tables are 2912 * accessible from all sets when per-set tables sysctl 2913 * is disabled. 2914 */ 2915 case MOVE_ALL: 2916 if (V_fw_tables_sets == 0) 2917 return (0); 2918 break; 2919 default: 2920 return (table_manage_sets(ch, set, new_set, cmd)); 2921 } 2922 /* Use generic sets handler when per-set sysctl is enabled. */ 2923 return (ipfw_obj_manage_sets(CHAIN_TO_NI(ch), IPFW_TLV_TBL_NAME, 2924 set, new_set, cmd)); 2925 } 2926 2927 static struct opcode_obj_rewrite opcodes[] = { 2928 { 2929 .opcode = O_IP_SRC_LOOKUP, 2930 .etlv = IPFW_TLV_TBL_NAME, 2931 .classifier = classify_srcdst, 2932 .update = update_arg1, 2933 .find_byname = table_findbyname, 2934 .find_bykidx = table_findbykidx, 2935 .create_object = create_table_compat, 2936 .manage_sets = table_manage_sets, 2937 }, 2938 { 2939 .opcode = O_IP_DST_LOOKUP, 2940 .etlv = IPFW_TLV_TBL_NAME, 2941 .classifier = classify_srcdst, 2942 .update = update_arg1, 2943 .find_byname = table_findbyname, 2944 .find_bykidx = table_findbykidx, 2945 .create_object = create_table_compat, 2946 .manage_sets = table_manage_sets, 2947 }, 2948 { 2949 .opcode = O_IP_FLOW_LOOKUP, 2950 .etlv = IPFW_TLV_TBL_NAME, 2951 .classifier = classify_flow, 2952 .update = update_arg1, 2953 .find_byname = table_findbyname, 2954 .find_bykidx = table_findbykidx, 2955 .create_object = create_table_compat, 2956 .manage_sets = table_manage_sets, 2957 }, 2958 { 2959 .opcode = O_MAC_SRC_LOOKUP, 2960 .etlv = IPFW_TLV_TBL_NAME, 2961 .classifier = classify_mac_lookup, 2962 .update = update_arg1, 2963 .find_byname = table_findbyname, 2964 .find_bykidx = table_findbykidx, 2965 .create_object = create_table_compat, 2966 .manage_sets = table_manage_sets, 2967 }, 2968 { 2969 .opcode = O_MAC_DST_LOOKUP, 2970 .etlv = IPFW_TLV_TBL_NAME, 2971 .classifier = classify_mac_lookup, 2972 .update = update_arg1, 2973 .find_byname = table_findbyname, 2974 .find_bykidx = table_findbykidx, 2975 .create_object = create_table_compat, 2976 .manage_sets = table_manage_sets, 2977 }, 2978 { 2979 .opcode = O_XMIT, 2980 .etlv = IPFW_TLV_TBL_NAME, 2981 .classifier = classify_via, 2982 .update = update_via, 2983 .find_byname = table_findbyname, 2984 .find_bykidx = table_findbykidx, 2985 .create_object = create_table_compat, 2986 .manage_sets = table_manage_sets, 2987 }, 2988 { 2989 .opcode = O_RECV, 2990 .etlv = IPFW_TLV_TBL_NAME, 2991 .classifier = classify_via, 2992 .update = update_via, 2993 .find_byname = table_findbyname, 2994 .find_bykidx = table_findbykidx, 2995 .create_object = create_table_compat, 2996 .manage_sets = table_manage_sets_all, 2997 }, 2998 { 2999 .opcode = O_VIA, 3000 .etlv = IPFW_TLV_TBL_NAME, 3001 .classifier = classify_via, 3002 .update = update_via, 3003 .find_byname = table_findbyname, 3004 .find_bykidx = table_findbykidx, 3005 .create_object = create_table_compat, 3006 .manage_sets = table_manage_sets, 3007 }, 3008 }; 3009 3010 static int 3011 test_sets_cb(struct namedobj_instance *ni __unused, struct named_object *no, 3012 void *arg __unused) 3013 { 3014 3015 /* Check that there aren't any 
tables in not default set */ 3016 if (no->set != 0) 3017 return (EBUSY); 3018 return (0); 3019 } 3020 3021 /* 3022 * Switch between "set 0" and "rule's set" table binding, 3023 * Check all ruleset bindings and permits changing 3024 * IFF each binding has both rule AND table in default set (set 0). 3025 * 3026 * Returns 0 on success. 3027 */ 3028 int 3029 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets) 3030 { 3031 struct opcode_obj_rewrite *rw; 3032 struct namedobj_instance *ni; 3033 struct named_object *no; 3034 struct ip_fw *rule; 3035 ipfw_insn *cmd; 3036 int cmdlen, i, l; 3037 uint16_t kidx; 3038 uint8_t subtype; 3039 3040 IPFW_UH_WLOCK(ch); 3041 3042 if (V_fw_tables_sets == sets) { 3043 IPFW_UH_WUNLOCK(ch); 3044 return (0); 3045 } 3046 ni = CHAIN_TO_NI(ch); 3047 if (sets == 0) { 3048 /* 3049 * Prevent disabling sets support if we have some tables 3050 * in not default sets. 3051 */ 3052 if (ipfw_objhash_foreach_type(ni, test_sets_cb, 3053 NULL, IPFW_TLV_TBL_NAME) != 0) { 3054 IPFW_UH_WUNLOCK(ch); 3055 return (EBUSY); 3056 } 3057 } 3058 /* 3059 * Scan all rules and examine tables opcodes. 3060 */ 3061 for (i = 0; i < ch->n_rules; i++) { 3062 rule = ch->map[i]; 3063 3064 l = rule->cmd_len; 3065 cmd = rule->cmd; 3066 cmdlen = 0; 3067 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 3068 cmdlen = F_LEN(cmd); 3069 /* Check only tables opcodes */ 3070 for (kidx = 0, rw = opcodes; 3071 rw < opcodes + nitems(opcodes); rw++) { 3072 if (rw->opcode != cmd->opcode) 3073 continue; 3074 if (rw->classifier(cmd, &kidx, &subtype) == 0) 3075 break; 3076 } 3077 if (kidx == 0) 3078 continue; 3079 no = ipfw_objhash_lookup_kidx(ni, kidx); 3080 /* Check if both table object and rule has the set 0 */ 3081 if (no->set != 0 || rule->set != 0) { 3082 IPFW_UH_WUNLOCK(ch); 3083 return (EBUSY); 3084 } 3085 } 3086 } 3087 V_fw_tables_sets = sets; 3088 IPFW_UH_WUNLOCK(ch); 3089 return (0); 3090 } 3091 3092 /* 3093 * Checks table name for validity. 3094 * Enforce basic length checks, the rest 3095 * should be done in userland. 3096 * 3097 * Returns 0 if name is considered valid. 3098 */ 3099 static int 3100 check_table_name(const char *name) 3101 { 3102 3103 /* 3104 * TODO: do some more complicated checks 3105 */ 3106 return (ipfw_check_object_name_generic(name)); 3107 } 3108 3109 /* 3110 * Finds table config based on either legacy index 3111 * or name in ntlv. 3112 * Note @ti structure contains unchecked data from userland. 3113 * 3114 * Returns 0 in success and fills in @tc with found config 3115 */ 3116 static int 3117 find_table_err(struct namedobj_instance *ni, struct tid_info *ti, 3118 struct table_config **tc) 3119 { 3120 char *name, bname[16]; 3121 struct named_object *no; 3122 ipfw_obj_ntlv *ntlv; 3123 uint32_t set; 3124 3125 if (ti->tlvs != NULL) { 3126 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 3127 IPFW_TLV_TBL_NAME); 3128 if (ntlv == NULL) 3129 return (EINVAL); 3130 name = ntlv->name; 3131 3132 /* 3133 * Use set provided by @ti instead of @ntlv one. 3134 * This is needed due to different sets behavior 3135 * controlled by V_fw_tables_sets. 3136 */ 3137 set = (V_fw_tables_sets != 0) ? ti->set : 0; 3138 } else { 3139 snprintf(bname, sizeof(bname), "%d", ti->uidx); 3140 name = bname; 3141 set = 0; 3142 } 3143 3144 no = ipfw_objhash_lookup_name(ni, set, name); 3145 *tc = (struct table_config *)no; 3146 3147 return (0); 3148 } 3149 3150 /* 3151 * Finds table config based on either legacy index 3152 * or name in ntlv. 
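 */

/*
 * Editorial sketch, not kernel code: find_table_err() above folds legacy
 * numeric table indices into the same namespace as named tables by
 * rendering the number as a string and forcing set 0.  A standalone
 * illustration of that normalization, with the per-set sysctl reduced to
 * a plain flag:
 */
#include <stdint.h>
#include <stdio.h>

struct table_ref {
	char		name[64];
	uint32_t	set;
};

static void
normalize_table_ref(const char *name, uint32_t uidx, uint32_t set,
    int sets_enabled, struct table_ref *ref)
{

	if (name != NULL) {
		/* Named reference: honor the set only in per-set mode. */
		snprintf(ref->name, sizeof(ref->name), "%s", name);
		ref->set = sets_enabled ? set : 0;
	} else {
		/* Legacy numeric reference: the number becomes the name. */
		snprintf(ref->name, sizeof(ref->name), "%u", uidx);
		ref->set = 0;
	}
}

/*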
3153 * Note @ti structure contains unchecked data from userland. 3154 * 3155 * Returns pointer to table_config or NULL. 3156 */ 3157 static struct table_config * 3158 find_table(struct namedobj_instance *ni, struct tid_info *ti) 3159 { 3160 struct table_config *tc; 3161 3162 if (find_table_err(ni, ti, &tc) != 0) 3163 return (NULL); 3164 3165 return (tc); 3166 } 3167 3168 /* 3169 * Allocate new table config structure using 3170 * specified @algo and @aname. 3171 * 3172 * Returns pointer to config or NULL. 3173 */ 3174 static struct table_config * 3175 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti, 3176 struct table_algo *ta, char *aname, uint8_t tflags) 3177 { 3178 char *name, bname[16]; 3179 struct table_config *tc; 3180 int error; 3181 ipfw_obj_ntlv *ntlv; 3182 uint32_t set; 3183 3184 if (ti->tlvs != NULL) { 3185 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 3186 IPFW_TLV_TBL_NAME); 3187 if (ntlv == NULL) 3188 return (NULL); 3189 name = ntlv->name; 3190 set = (V_fw_tables_sets == 0) ? 0 : ntlv->set; 3191 } else { 3192 /* Compat part: convert number to string representation */ 3193 snprintf(bname, sizeof(bname), "%d", ti->uidx); 3194 name = bname; 3195 set = 0; 3196 } 3197 3198 tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO); 3199 tc->no.name = tc->tablename; 3200 tc->no.subtype = ta->type; 3201 tc->no.set = set; 3202 tc->tflags = tflags; 3203 tc->ta = ta; 3204 strlcpy(tc->tablename, name, sizeof(tc->tablename)); 3205 /* Set "shared" value type by default */ 3206 tc->vshared = 1; 3207 3208 /* Preallocate data structures for new tables */ 3209 error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags); 3210 if (error != 0) { 3211 free(tc, M_IPFW); 3212 return (NULL); 3213 } 3214 3215 return (tc); 3216 } 3217 3218 /* 3219 * Destroys table state and config. 3220 */ 3221 static void 3222 free_table_config(struct namedobj_instance *ni, struct table_config *tc) 3223 { 3224 3225 KASSERT(tc->linked == 0, ("free() on linked config")); 3226 /* UH lock MUST NOT be held */ 3227 3228 /* 3229 * We're using ta without any locking/referencing. 3230 * TODO: fix this if we're going to use unloadable algos. 3231 */ 3232 tc->ta->destroy(tc->astate, &tc->ti_copy); 3233 free(tc, M_IPFW); 3234 } 3235 3236 /* 3237 * Links @tc to @chain table named instance. 3238 * Sets appropriate type/states in @chain table info. 3239 */ 3240 static void 3241 link_table(struct ip_fw_chain *ch, struct table_config *tc) 3242 { 3243 struct namedobj_instance *ni; 3244 struct table_info *ti; 3245 uint16_t kidx; 3246 3247 IPFW_UH_WLOCK_ASSERT(ch); 3248 3249 ni = CHAIN_TO_NI(ch); 3250 kidx = tc->no.kidx; 3251 3252 ipfw_objhash_add(ni, &tc->no); 3253 3254 ti = KIDX_TO_TI(ch, kidx); 3255 *ti = tc->ti_copy; 3256 3257 /* Notify algo on real @ti address */ 3258 if (tc->ta->change_ti != NULL) 3259 tc->ta->change_ti(tc->astate, ti); 3260 3261 tc->linked = 1; 3262 tc->ta->refcnt++; 3263 } 3264 3265 /* 3266 * Unlinks @tc from @chain table named instance. 3267 * Zeroes states in @chain and stores them in @tc. 3268 */ 3269 static void 3270 unlink_table(struct ip_fw_chain *ch, struct table_config *tc) 3271 { 3272 struct namedobj_instance *ni; 3273 struct table_info *ti; 3274 uint16_t kidx; 3275 3276 IPFW_UH_WLOCK_ASSERT(ch); 3277 IPFW_WLOCK_ASSERT(ch); 3278 3279 ni = CHAIN_TO_NI(ch); 3280 kidx = tc->no.kidx; 3281 3282 /* Clear state. 
@ti copy is already saved inside @tc */ 3283 ipfw_objhash_del(ni, &tc->no); 3284 ti = KIDX_TO_TI(ch, kidx); 3285 memset(ti, 0, sizeof(struct table_info)); 3286 tc->linked = 0; 3287 tc->ta->refcnt--; 3288 3289 /* Notify algo on real @ti address */ 3290 if (tc->ta->change_ti != NULL) 3291 tc->ta->change_ti(tc->astate, NULL); 3292 } 3293 3294 static struct ipfw_sopt_handler scodes[] = { 3295 { IP_FW_TABLE_XCREATE, 0, HDIR_SET, create_table }, 3296 { IP_FW_TABLE_XDESTROY, 0, HDIR_SET, flush_table_v0 }, 3297 { IP_FW_TABLE_XFLUSH, 0, HDIR_SET, flush_table_v0 }, 3298 { IP_FW_TABLE_XMODIFY, 0, HDIR_BOTH, modify_table }, 3299 { IP_FW_TABLE_XINFO, 0, HDIR_GET, describe_table }, 3300 { IP_FW_TABLES_XLIST, 0, HDIR_GET, list_tables }, 3301 { IP_FW_TABLE_XLIST, 0, HDIR_GET, dump_table_v0 }, 3302 { IP_FW_TABLE_XLIST, 1, HDIR_GET, dump_table_v1 }, 3303 { IP_FW_TABLE_XADD, 0, HDIR_BOTH, manage_table_ent_v0 }, 3304 { IP_FW_TABLE_XADD, 1, HDIR_BOTH, manage_table_ent_v1 }, 3305 { IP_FW_TABLE_XDEL, 0, HDIR_BOTH, manage_table_ent_v0 }, 3306 { IP_FW_TABLE_XDEL, 1, HDIR_BOTH, manage_table_ent_v1 }, 3307 { IP_FW_TABLE_XFIND, 0, HDIR_GET, find_table_entry }, 3308 { IP_FW_TABLE_XSWAP, 0, HDIR_SET, swap_table }, 3309 { IP_FW_TABLES_ALIST, 0, HDIR_GET, list_table_algo }, 3310 { IP_FW_TABLE_XGETSIZE, 0, HDIR_GET, get_table_size }, 3311 }; 3312 3313 static int 3314 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no, 3315 void *arg) 3316 { 3317 3318 unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no); 3319 if (ipfw_objhash_free_idx(ni, no->kidx) != 0) 3320 printf("Error unlinking kidx %d from table %s\n", 3321 no->kidx, no->name); 3322 free_table_config(ni, (struct table_config *)no); 3323 return (0); 3324 } 3325 3326 /* 3327 * Shuts tables module down. 3328 */ 3329 void 3330 ipfw_destroy_tables(struct ip_fw_chain *ch, int last) 3331 { 3332 3333 IPFW_DEL_SOPT_HANDLER(last, scodes); 3334 IPFW_DEL_OBJ_REWRITER(last, opcodes); 3335 3336 /* Remove all tables from working set */ 3337 IPFW_UH_WLOCK(ch); 3338 IPFW_WLOCK(ch); 3339 ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch); 3340 IPFW_WUNLOCK(ch); 3341 IPFW_UH_WUNLOCK(ch); 3342 3343 /* Free pointers itself */ 3344 free(ch->tablestate, M_IPFW); 3345 3346 ipfw_table_value_destroy(ch, last); 3347 ipfw_table_algo_destroy(ch); 3348 3349 ipfw_objhash_destroy(CHAIN_TO_NI(ch)); 3350 free(CHAIN_TO_TCFG(ch), M_IPFW); 3351 } 3352 3353 /* 3354 * Starts tables module. 3355 */ 3356 int 3357 ipfw_init_tables(struct ip_fw_chain *ch, int first) 3358 { 3359 struct tables_config *tcfg; 3360 3361 /* Allocate pointers */ 3362 ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info), 3363 M_IPFW, M_WAITOK | M_ZERO); 3364 3365 tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO); 3366 tcfg->namehash = ipfw_objhash_create(V_fw_tables_max); 3367 ch->tblcfg = tcfg; 3368 3369 ipfw_table_value_init(ch, first); 3370 ipfw_table_algo_init(ch); 3371 3372 IPFW_ADD_OBJ_REWRITER(first, opcodes); 3373 IPFW_ADD_SOPT_HANDLER(first, scodes); 3374 return (0); 3375 } 3376
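
/*
 * Editorial appendix, not part of the kernel sources: the scodes[] table
 * above registers sockopt handlers per (opcode, version) pair, which is
 * how IP_FW_TABLE_XLIST keeps both the v0 and v1 dump formats available
 * at the same time.  A reduced model of that lookup, with hypothetical
 * types:
 */
#include <stddef.h>
#include <stdint.h>

typedef int (*sopt_handler_fn)(void *request, void *reply);

struct mini_sopt_handler {
	uint16_t	opcode;
	uint8_t		version;
	sopt_handler_fn	handler;
};

static sopt_handler_fn
find_sopt_handler(const struct mini_sopt_handler *tbl, size_t n,
    uint16_t opcode, uint8_t version)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].opcode == opcode && tbl[i].version == version)
			return (tbl[i].handler);
	}
	return (NULL);	/* unknown opcode/version combination */
}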