1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko. 5 * Copyright (c) 2014 Yandex LLC 6 * Copyright (c) 2014 Alexander V. Chernikov 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Lookup table support for ipfw. 35 * 36 * This file contains handlers for all generic tables' operations: 37 * add/del/flush entries, list/dump tables etc.. 38 * 39 * Table data modification is protected by both UH and runtime lock 40 * while reading configuration/data is protected by UH lock. 41 * 42 * Lookup algorithms for all table types are located in ip_fw_table_algo.c 43 */ 44 45 #include "opt_ipfw.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/malloc.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/rwlock.h> 53 #include <sys/rmlock.h> 54 #include <sys/socket.h> 55 #include <sys/socketvar.h> 56 #include <sys/queue.h> 57 #include <net/if.h> /* ip_fw.h requires IFNAMSIZ */ 58 59 #include <netinet/in.h> 60 #include <netinet/ip_var.h> /* struct ipfw_rule_ref */ 61 #include <netinet/ip_fw.h> 62 63 #include <netpfil/ipfw/ip_fw_private.h> 64 #include <netpfil/ipfw/ip_fw_table.h> 65 66 /* 67 * Table has the following `type` concepts: 68 * 69 * `no.type` represents lookup key type (addr, ifp, uid, etc..) 70 * vmask represents bitmask of table values which are present at the moment. 71 * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents old 72 * single-value-for-all approach. 
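 *
 * For example (illustrative only), a table created with
 * vmask = IPFW_VTYPE_SKIPTO | IPFW_VTYPE_FIB announces that only the
 * skipto and fib members of struct table_value are meaningful for its
 * entries, while tables auto-created for old binaries use
 * IPFW_VTYPE_LEGACY and expose a single 32-bit value.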
73 */ 74 struct table_config { 75 struct named_object no; 76 uint8_t tflags; /* type flags */ 77 uint8_t locked; /* 1 if locked from changes */ 78 uint8_t linked; /* 1 if already linked */ 79 uint8_t ochanged; /* used by set swapping */ 80 uint8_t vshared; /* 1 if using shared value array */ 81 uint8_t spare[3]; 82 uint32_t count; /* Number of records */ 83 uint32_t limit; /* Max number of records */ 84 uint32_t vmask; /* bitmask with supported values */ 85 uint32_t ocount; /* used by set swapping */ 86 uint64_t gencnt; /* generation count */ 87 char tablename[64]; /* table name */ 88 struct table_algo *ta; /* Callbacks for given algo */ 89 void *astate; /* algorithm state */ 90 struct table_info ti_copy; /* data to put to table_info */ 91 struct namedobj_instance *vi; 92 }; 93 94 static int find_table_err(struct namedobj_instance *ni, struct tid_info *ti, 95 struct table_config **tc); 96 static struct table_config *find_table(struct namedobj_instance *ni, 97 struct tid_info *ti); 98 static struct table_config *alloc_table_config(struct ip_fw_chain *ch, 99 struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags); 100 static void free_table_config(struct namedobj_instance *ni, 101 struct table_config *tc); 102 static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti, 103 char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref); 104 static void link_table(struct ip_fw_chain *ch, struct table_config *tc); 105 static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc); 106 static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti, 107 struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc); 108 #define OP_ADD 1 109 #define OP_DEL 0 110 static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh, 111 struct sockopt_data *sd); 112 static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc, 113 ipfw_xtable_info *i); 114 static int dump_table_tentry(void *e, void *arg); 115 static int dump_table_xentry(void *e, void *arg); 116 117 static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a, 118 struct tid_info *b); 119 120 static int check_table_name(const char *name); 121 static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts, 122 struct table_config *tc, struct table_info *ti, uint32_t count); 123 static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti); 124 125 static struct table_algo *find_table_algo(struct tables_config *tableconf, 126 struct tid_info *ti, char *name); 127 128 static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti); 129 static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti); 130 131 #define CHAIN_TO_NI(chain) (CHAIN_TO_TCFG(chain)->namehash) 132 #define KIDX_TO_TI(ch, k) (&(((struct table_info *)(ch)->tablestate)[k])) 133 134 #define TA_BUF_SZ 128 /* On-stack buffer for add/delete state */ 135 136 void 137 rollback_toperation_state(struct ip_fw_chain *ch, void *object) 138 { 139 struct tables_config *tcfg; 140 struct op_state *os; 141 142 tcfg = CHAIN_TO_TCFG(ch); 143 TAILQ_FOREACH(os, &tcfg->state_list, next) 144 os->func(object, os); 145 } 146 147 void 148 add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts) 149 { 150 struct tables_config *tcfg; 151 152 tcfg = CHAIN_TO_TCFG(ch); 153 TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next); 154 } 155 156 void 157 del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts) 158 { 159 struct 
tables_config *tcfg; 160 161 tcfg = CHAIN_TO_TCFG(ch); 162 TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next); 163 } 164 165 void 166 tc_ref(struct table_config *tc) 167 { 168 169 tc->no.refcnt++; 170 } 171 172 void 173 tc_unref(struct table_config *tc) 174 { 175 176 tc->no.refcnt--; 177 } 178 179 static struct table_value * 180 get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx) 181 { 182 struct table_value *pval; 183 184 pval = (struct table_value *)ch->valuestate; 185 186 return (&pval[kidx]); 187 } 188 189 /* 190 * Checks if we're able to insert/update entry @tei into table 191 * w.r.t @tc limits. 192 * May alter @tei to indicate insertion error / insert 193 * options. 194 * 195 * Returns 0 if operation can be performed/ 196 */ 197 static int 198 check_table_limit(struct table_config *tc, struct tentry_info *tei) 199 { 200 201 if (tc->limit == 0 || tc->count < tc->limit) 202 return (0); 203 204 if ((tei->flags & TEI_FLAGS_UPDATE) == 0) { 205 /* Notify userland on error cause */ 206 tei->flags |= TEI_FLAGS_LIMIT; 207 return (EFBIG); 208 } 209 210 /* 211 * We have UPDATE flag set. 212 * Permit updating record (if found), 213 * but restrict adding new one since we've 214 * already hit the limit. 215 */ 216 tei->flags |= TEI_FLAGS_DONTADD; 217 218 return (0); 219 } 220 221 /* 222 * Convert algorithm callback return code into 223 * one of pre-defined states known by userland. 224 */ 225 static void 226 store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num) 227 { 228 int flag; 229 230 flag = 0; 231 232 switch (error) { 233 case 0: 234 if (op == OP_ADD && num != 0) 235 flag = TEI_FLAGS_ADDED; 236 if (op == OP_DEL) 237 flag = TEI_FLAGS_DELETED; 238 break; 239 case ENOENT: 240 flag = TEI_FLAGS_NOTFOUND; 241 break; 242 case EEXIST: 243 flag = TEI_FLAGS_EXISTS; 244 break; 245 default: 246 flag = TEI_FLAGS_ERROR; 247 } 248 249 tei->flags |= flag; 250 } 251 252 /* 253 * Creates and references table with default parameters. 254 * Saves table config, algo and allocated kidx info @ptc, @pta and 255 * @pkidx if non-zero. 256 * Used for table auto-creation to support old binaries. 257 * 258 * Returns 0 on success. 259 */ 260 static int 261 create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti, 262 uint16_t *pkidx) 263 { 264 ipfw_xtable_info xi; 265 int error; 266 267 memset(&xi, 0, sizeof(xi)); 268 /* Set default value mask for legacy clients */ 269 xi.vmask = IPFW_VTYPE_LEGACY; 270 271 error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1); 272 if (error != 0) 273 return (error); 274 275 return (0); 276 } 277 278 /* 279 * Find and reference existing table optionally 280 * creating new one. 281 * 282 * Saves found table config into @ptc. 283 * Note function may drop/acquire UH_WLOCK. 284 * Returns 0 if table was found/created and referenced 285 * or non-zero return code. 
 */
static int
find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint32_t count, int op,
    struct table_config **ptc)
{
	struct namedobj_instance *ni;
	struct table_config *tc;
	uint16_t kidx;
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	ni = CHAIN_TO_NI(ch);
	tc = NULL;
	if ((tc = find_table(ni, ti)) != NULL) {
		/* check table type */
		if (tc->no.subtype != ti->type)
			return (EINVAL);

		if (tc->locked != 0)
			return (EACCES);

		/* Try to exit early on limit hit */
		if (op == OP_ADD && count == 1 &&
		    check_table_limit(tc, tei) != 0)
			return (EFBIG);

		/* Reference and return */
		tc->no.refcnt++;
		*ptc = tc;
		return (0);
	}

	if (op == OP_DEL)
		return (ESRCH);

	/* Compatibility mode: create new table for old clients */
	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
		return (ESRCH);

	IPFW_UH_WUNLOCK(ch);
	error = create_table_compat(ch, ti, &kidx);
	IPFW_UH_WLOCK(ch);

	if (error != 0)
		return (error);

	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));

	/* OK, now we've got referenced table. */
	*ptc = tc;
	return (0);
}

/*
 * Rolls back the @added entries already inserted into @tc using the state
 * array @ta_buf_m.
 * Assumes the following layout:
 * 1) ADD state (ta_buf_m[0] ... ta_buf_m[added - 1]) for handling update cases
 * 2) DEL state (ta_buf_m[count] ... ta_buf_m[count + added - 1])
 *    for storing deleted state
 */
static void
rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
    struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
    uint32_t count, uint32_t added)
{
	struct table_algo *ta;
	struct tentry_info *ptei;
	caddr_t v, vv;
	size_t ta_buf_sz;
	int error, i;
	uint32_t num;

	IPFW_UH_WLOCK_ASSERT(ch);

	ta = tc->ta;
	ta_buf_sz = ta->ta_buf_size;
	v = ta_buf_m;
	vv = v + count * ta_buf_sz;
	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
		ptei = &tei[i];
		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
			/*
			 * We have old value stored by previous
			 * call in @ptei->value. Do add once again
			 * to restore it.
			 */
			error = ta->add(tc->astate, tinfo, ptei, v, &num);
			KASSERT(error == 0, ("rollback UPDATE fail"));
			KASSERT(num == 0, ("rollback UPDATE fail2"));
			continue;
		}

		error = ta->prepare_del(ch, ptei, vv);
		KASSERT(error == 0, ("pre-rollback INSERT failed"));
		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
		KASSERT(error == 0, ("rollback INSERT failed"));
		tc->count -= num;
	}
}

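/*
 * Illustrative layout sketch (not part of the original code): for an atomic
 * batch add of @count entries the buffer prepared by prepare_batch_buffer()
 * holds two consecutive regions, so a partially-completed add can be undone
 * using matching DEL state:
 *
 *   ta_buf_m: [ ADD state 0 .. count-1 ][ DEL state 0 .. count-1 ]
 *
 * A hypothetical helper addressing the DEL slot of entry @i could look like:
 *
 *   static __inline caddr_t
 *   tei_del_slot(caddr_t ta_buf_m, size_t ta_buf_sz, uint32_t count, int i)
 *   {
 *
 *           return (ta_buf_m + (count + i) * ta_buf_sz);
 *   }
 */
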
/*
 * Prepares add/del state for all @count entries in @tei.
 * Uses either stack buffer (@ta_buf) or allocates a new one.
 * Stores pointer to allocated buffer back to @ta_buf.
 *
 * Returns 0 on success.
 */
static int
prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
    struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
{
	caddr_t ta_buf_m, v;
	size_t ta_buf_sz, sz;
	struct tentry_info *ptei;
	int error, i;

	error = 0;
	ta_buf_sz = ta->ta_buf_size;
	if (count == 1) {
		/* Single add/delete, use on-stack buffer */
		memset(*ta_buf, 0, TA_BUF_SZ);
		ta_buf_m = *ta_buf;
	} else {
		/*
		 * Multiple adds/deletes, allocate larger buffer
		 *
		 * Note we need a 2xcount buffer for the add case:
		 * we have to hold both the ADD state
		 * and the DELETE state (this may be needed
		 * if we need to rollback all changes)
		 */
		sz = count * ta_buf_sz;
		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
		    M_WAITOK | M_ZERO);
	}

	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta_buf_sz) {
		ptei = &tei[i];
		error = (op == OP_ADD) ?
		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);

		/*
		 * Some syntax error (incorrect mask, or address, or
		 * anything). Return error regardless of atomicity
		 * settings.
		 */
		if (error != 0)
			break;
	}

	*ta_buf = ta_buf_m;
	return (error);
}

/*
 * Flushes allocated state for each of the @count entries in @tei.
 * Frees @ta_buf_m if it differs from stack buffer @ta_buf.
 */
static void
flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
    struct tentry_info *tei, uint32_t count, int rollback,
    caddr_t ta_buf_m, caddr_t ta_buf)
{
	caddr_t v;
	struct tentry_info *ptei;
	size_t ta_buf_sz;
	int i;

	ta_buf_sz = ta->ta_buf_size;

	/* Run cleaning callback anyway */
	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta_buf_sz) {
		ptei = &tei[i];
		ta->flush_entry(ch, ptei, v);
		if (ptei->ptv != NULL) {
			free(ptei->ptv, M_IPFW);
			ptei->ptv = NULL;
		}
	}

	/* Clean up "deleted" state in case of rollback */
	if (rollback != 0) {
		v = ta_buf_m + count * ta_buf_sz;
		for (i = 0; i < count; i++, v += ta_buf_sz)
			ta->flush_entry(ch, &tei[i], v);
	}

	if (ta_buf_m != ta_buf)
		free(ta_buf_m, M_TEMP);
}

static void
rollback_add_entry(void *object, struct op_state *_state)
{
	struct ip_fw_chain *ch;
	struct tableop_state *ts;

	ts = (struct tableop_state *)_state;

	if (ts->tc != object && ts->ch != object)
		return;

	ch = ts->ch;

	IPFW_UH_WLOCK_ASSERT(ch);

	/* Call specific unlockers */
	rollback_table_values(ts);

	/* Indicate we've called */
	ts->modified = 1;
}

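/*
 * Illustrative usage sketch (mirrors what add_table_entry() below does; it is
 * not additional functionality): any operation that has to drop the UH lock
 * registers its on-stack state so that a concurrent swap/resize can
 * invalidate it through the callback stored in @opstate.func:
 *
 *   struct tableop_state ts;
 *
 *   memset(&ts, 0, sizeof(ts));
 *   ts.ch = ch;
 *   ts.tc = tc;
 *   ts.opstate.func = rollback_add_entry;
 *   add_toperation_state(ch, &ts);
 *   IPFW_UH_WUNLOCK(ch);
 *   ... sleepable work: allocations, algo callbacks ...
 *   IPFW_UH_WLOCK(ch);
 *   del_toperation_state(ch, &ts);
 *   if (ts.modified != 0)
 *           ... state was invalidated: roll back and restart ...
 */
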
/*
 * Adds/updates one or more entries in table @ti.
 *
 * Function may drop/reacquire UH wlock multiple times due to
 * items alloc, algorithm callbacks (check_space), value linkage
 * (new values, value storage realloc), etc..
 * Other processes like other adds (which may involve storage resize),
 * table swaps (which change table data and may change algo type),
 * table modify (which may change value mask) may be executed
 * simultaneously so we need to deal with it.
 *
 * The following approach was implemented:
 * we have a per-chain linked list, protected with UH lock.
 * add_table_entry prepares a special on-stack structure which is passed
 * to its descendants. Users add this structure to this list before unlock.
 * After performing needed operations and acquiring UH lock back, each user
 * checks if the structure has changed. If true, it rolls local state back and
 * returns without error to the caller.
 * add_table_entry() on its own checks if the structure has changed and
 * restarts its operation from the beginning (goto restart).
 *
 * Functions which modify fields of interest (currently
 * resize_shared_value_storage() and swap_tables())
 * traverse the given list while holding the UH lock immediately before
 * performing their operations, calling the function provided by the list
 * entry (currently rollback_add_entry) which performs rollback for all
 * necessary state and sets appropriate values in the structure indicating
 * rollback has happened.
 *
 * Algo interaction:
 * Function references @ti first to ensure table won't
 * disappear or change its type.
 * After that, the prepare_add callback is called for each @tei entry.
 * Next, we try to add each entry under UH+WLOCK
 * using the add() callback.
 * Finally, we free all state by calling the flush_entry callback
 * for each @tei.
 *
 * Returns 0 on success.
 */
int
add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint8_t flags, uint32_t count)
{
	struct table_config *tc;
	struct table_algo *ta;
	uint16_t kidx;
	int error, first_error, i, rollback;
	uint32_t num, numadd;
	struct tentry_info *ptei;
	struct tableop_state ts;
	char ta_buf[TA_BUF_SZ];
	caddr_t ta_buf_m, v;

	memset(&ts, 0, sizeof(ts));
	ta = NULL;
	IPFW_UH_WLOCK(ch);

	/*
	 * Find and reference existing table.
	 */
restart:
	if (ts.modified != 0) {
		IPFW_UH_WUNLOCK(ch);
		flush_batch_buffer(ch, ta, tei, count, rollback,
		    ta_buf_m, ta_buf);
		memset(&ts, 0, sizeof(ts));
		ta = NULL;
		IPFW_UH_WLOCK(ch);
	}

	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	ta = tc->ta;

	/* Fill in tablestate */
	ts.ch = ch;
	ts.opstate.func = rollback_add_entry;
	ts.tc = tc;
	ts.vshared = tc->vshared;
	ts.vmask = tc->vmask;
	ts.ta = ta;
	ts.tei = tei;
	ts.count = count;
	rollback = 0;
	add_toperation_state(ch, &ts);
	IPFW_UH_WUNLOCK(ch);

	/* Allocate memory and prepare record(s) */
	/* Pass stack buffer by default */
	ta_buf_m = ta_buf;
	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);

	IPFW_UH_WLOCK(ch);
	del_toperation_state(ch, &ts);
	/* Drop reference we've used in first search */
	tc->no.refcnt--;

	/* Check prepare_batch_buffer() error */
	if (error != 0)
		goto cleanup;

	/*
	 * Check if table swap has happened.
	 * (so table algo might be changed).
	 * Restart operation to achieve consistent behavior.
	 */
	if (ts.modified != 0)
		goto restart;

	/*
	 * Link all values to shared/per-table value array.
	 *
	 * May release/reacquire UH_WLOCK.
	 */
	error = ipfw_link_table_values(ch, &ts, flags);
	if (error != 0)
		goto cleanup;
	if (ts.modified != 0)
		goto restart;

	/*
	 * Ensure we are able to add all entries without additional
	 * memory allocations. May release/reacquire UH_WLOCK.
	 */
	kidx = tc->no.kidx;
	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
	if (error != 0)
		goto cleanup;
	if (ts.modified != 0)
		goto restart;

	/* We've got valid table in @tc. Let's try to add data */
	kidx = tc->no.kidx;
	ta = tc->ta;
	numadd = 0;
	first_error = 0;

	IPFW_WLOCK(ch);

	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
		ptei = &tei[i];
		num = 0;
		/* check limit before adding */
		if ((error = check_table_limit(tc, ptei)) == 0) {
			/*
			 * It should be safe to insert a record w/o
			 * a properly-linked value if atomicity is
			 * not required.
			 *
			 * If the added item does not have a valid value
			 * index, it would get rejected by ta->add().
			 */
			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
			    ptei, v, &num);
			/* Set status flag to inform userland */
			store_tei_result(ptei, OP_ADD, error, num);
		}
		if (error == 0) {
			/* Update number of records to ease limit checking */
			tc->count += num;
			numadd += num;
			continue;
		}

		if (first_error == 0)
			first_error = error;

		/*
		 * Some error has happened. Check our atomicity
		 * settings: continue if atomicity is not required,
		 * rollback changes otherwise.
		 */
		if ((flags & IPFW_CTF_ATOMIC) == 0)
			continue;

		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
		    tei, ta_buf_m, count, i);

		rollback = 1;
		break;
	}

	IPFW_WUNLOCK(ch);

	ipfw_garbage_table_values(ch, tc, tei, count, rollback);

	/* Permit post-add algorithm grow/rehash. */
	if (numadd != 0)
		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);

	/* Return first error to user, if any */
	error = first_error;

cleanup:
	IPFW_UH_WUNLOCK(ch);

	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);

	return (error);
}

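/*
 * Example (a sketch, not part of the original code): adding a single IPv4
 * prefix, 10.0.0.0/8, from kernel context the same way the legacy sockopt
 * handler manage_table_ent_v0() below does.  Names and values are
 * illustrative only.
 *
 *   struct tid_info ti;
 *   struct tentry_info tei;
 *   struct table_value v;
 *   struct in_addr a;
 *   int error;
 *
 *   memset(&ti, 0, sizeof(ti));
 *   ti.uidx = 1;                        table number/name index
 *   ti.type = IPFW_TABLE_ADDR;
 *
 *   memset(&tei, 0, sizeof(tei));
 *   memset(&v, 0, sizeof(v));
 *   a.s_addr = htonl(0x0a000000);       10.0.0.0
 *   tei.paddr = &a;
 *   tei.subtype = AF_INET;
 *   tei.masklen = 8;
 *   tei.pvalue = &v;
 *   tei.flags = TEI_FLAGS_COMPAT;       auto-create table for legacy callers
 *
 *   error = add_table_entry(ch, &ti, &tei, 0, 1);
 */
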
/*
 * Deletes one or more entries in table @ti.
 *
 * Returns 0 on success.
 */
int
del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint8_t flags, uint32_t count)
{
	struct table_config *tc;
	struct table_algo *ta;
	struct tentry_info *ptei;
	uint16_t kidx;
	int error, first_error, i;
	uint32_t num, numdel;
	char ta_buf[TA_BUF_SZ];
	caddr_t ta_buf_m, v;

	/*
	 * Find and reference existing table.
	 */
	IPFW_UH_WLOCK(ch);
	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	ta = tc->ta;
	IPFW_UH_WUNLOCK(ch);

	/* Allocate memory and prepare record(s) */
	/* Pass stack buffer by default */
	ta_buf_m = ta_buf;
	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
	if (error != 0)
		goto cleanup;

	IPFW_UH_WLOCK(ch);

	/* Drop reference we've used in first search */
	tc->no.refcnt--;

	/*
	 * Check if table algo is still the same.
	 * (changed ta may be the result of table swap).
	 */
	if (ta != tc->ta) {
		IPFW_UH_WUNLOCK(ch);
		error = EINVAL;
		goto cleanup;
	}

	kidx = tc->no.kidx;
	numdel = 0;
	first_error = 0;

	IPFW_WLOCK(ch);
	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
		ptei = &tei[i];
		num = 0;
		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
		    &num);
		/* Save state for userland */
		store_tei_result(ptei, OP_DEL, error, num);
		if (error != 0 && first_error == 0)
			first_error = error;
		tc->count -= num;
		numdel += num;
	}
	IPFW_WUNLOCK(ch);

	/* Unlink non-used values */
	ipfw_garbage_table_values(ch, tc, tei, count, 0);

	if (numdel != 0) {
		/* Run post-del hook to permit shrinking */
		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Return first error to user, if any */
	error = first_error;

cleanup:
	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);

	return (error);
}

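/*
 * Note (added for clarity, not in the original): check_table_space() below is
 * used in two modes, as seen in add_table_entry()/del_table_entry() above:
 *
 *   check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
 *       before inserting: reserve room for @count new entries; @ts lets a
 *       concurrent swap/resize invalidate and restart the caller.
 *
 *   check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
 *       after add/del: give the algorithm a chance to grow or shrink
 *       (rehash) with no pending batch and no rollback state.
 */
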
/*
 * Ensures that table @tc has enough space to add @count entries without
 * need for reallocation.
 *
 * Callbacks order:
 * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
 *
 * 1) prepare_mod (no locks, M_WAITOK) - alloc new state based on @pflags.
 * 2) fill_mod (UH_WLOCK) - copy old data into new storage
 * 3) modify (UH_WLOCK + WLOCK) - switch pointers
 * 4) flush_mod (UH_WLOCK) - free state, if needed
 *
 * Returns 0 on success.
 */
static int
check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
    struct table_config *tc, struct table_info *ti, uint32_t count)
{
	struct table_algo *ta;
	uint64_t pflags;
	char ta_buf[TA_BUF_SZ];
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	error = 0;
	ta = tc->ta;
	if (ta->need_modify == NULL)
		return (0);

	/* Acquire reference not to lose @tc between locks/unlocks */
	tc->no.refcnt++;

	/*
	 * TODO: think about avoiding race between large add/large delete
	 * operation on algorithm which implements shrinking along with
	 * growing.
	 */
	while (true) {
		pflags = 0;
		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
			error = 0;
			break;
		}

		/* We have to shrink/grow table */
		if (ts != NULL)
			add_toperation_state(ch, ts);
		IPFW_UH_WUNLOCK(ch);

		memset(&ta_buf, 0, sizeof(ta_buf));
		error = ta->prepare_mod(ta_buf, &pflags);

		IPFW_UH_WLOCK(ch);
		if (ts != NULL)
			del_toperation_state(ch, ts);

		if (error != 0)
			break;

		if (ts != NULL && ts->modified != 0) {
			/*
			 * Swap operation has happened
			 * so we're currently operating on other
			 * table data. Stop doing this.
			 */
			ta->flush_mod(ta_buf);
			break;
		}

		/* Check if we still need to alter table */
		ti = KIDX_TO_TI(ch, tc->no.kidx);
		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
			IPFW_UH_WUNLOCK(ch);

			/*
			 * Other thread has already performed resize.
			 * Flush our state and return.
879 */ 880 ta->flush_mod(ta_buf); 881 break; 882 } 883 884 error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags); 885 if (error == 0) { 886 /* Do actual modification */ 887 IPFW_WLOCK(ch); 888 ta->modify(tc->astate, ti, ta_buf, pflags); 889 IPFW_WUNLOCK(ch); 890 } 891 892 /* Anyway, flush data and retry */ 893 ta->flush_mod(ta_buf); 894 } 895 896 tc->no.refcnt--; 897 return (error); 898 } 899 900 /* 901 * Adds or deletes record in table. 902 * Data layout (v0): 903 * Request: [ ip_fw3_opheader ipfw_table_xentry ] 904 * 905 * Returns 0 on success 906 */ 907 static int 908 manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 909 struct sockopt_data *sd) 910 { 911 ipfw_table_xentry *xent; 912 struct tentry_info tei; 913 struct tid_info ti; 914 struct table_value v; 915 int error, hdrlen, read; 916 917 hdrlen = offsetof(ipfw_table_xentry, k); 918 919 /* Check minimum header size */ 920 if (sd->valsize < (sizeof(*op3) + hdrlen)) 921 return (EINVAL); 922 923 read = sizeof(ip_fw3_opheader); 924 925 /* Check if xentry len field is valid */ 926 xent = (ipfw_table_xentry *)(op3 + 1); 927 if (xent->len < hdrlen || xent->len + read > sd->valsize) 928 return (EINVAL); 929 930 memset(&tei, 0, sizeof(tei)); 931 tei.paddr = &xent->k; 932 tei.masklen = xent->masklen; 933 ipfw_import_table_value_legacy(xent->value, &v); 934 tei.pvalue = &v; 935 /* Old requests compatibility */ 936 tei.flags = TEI_FLAGS_COMPAT; 937 if (xent->type == IPFW_TABLE_ADDR) { 938 if (xent->len - hdrlen == sizeof(in_addr_t)) 939 tei.subtype = AF_INET; 940 else 941 tei.subtype = AF_INET6; 942 } 943 944 memset(&ti, 0, sizeof(ti)); 945 ti.uidx = xent->tbl; 946 ti.type = xent->type; 947 948 error = (op3->opcode == IP_FW_TABLE_XADD) ? 949 add_table_entry(ch, &ti, &tei, 0, 1) : 950 del_table_entry(ch, &ti, &tei, 0, 1); 951 952 return (error); 953 } 954 955 /* 956 * Adds or deletes record in table. 957 * Data layout (v1)(current): 958 * Request: [ ipfw_obj_header 959 * ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ] 960 * ] 961 * 962 * Returns 0 on success 963 */ 964 static int 965 manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 966 struct sockopt_data *sd) 967 { 968 ipfw_obj_tentry *tent, *ptent; 969 ipfw_obj_ctlv *ctlv; 970 ipfw_obj_header *oh; 971 struct tentry_info *ptei, tei, *tei_buf; 972 struct tid_info ti; 973 int error, i, kidx, read; 974 975 /* Check minimum header size */ 976 if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv))) 977 return (EINVAL); 978 979 /* Check if passed data is too long */ 980 if (sd->valsize != sd->kavail) 981 return (EINVAL); 982 983 oh = (ipfw_obj_header *)sd->kbuf; 984 985 /* Basic length checks for TLVs */ 986 if (oh->ntlv.head.length != sizeof(oh->ntlv)) 987 return (EINVAL); 988 989 read = sizeof(*oh); 990 991 ctlv = (ipfw_obj_ctlv *)(oh + 1); 992 if (ctlv->head.length + read != sd->valsize) 993 return (EINVAL); 994 995 read += sizeof(*ctlv); 996 tent = (ipfw_obj_tentry *)(ctlv + 1); 997 if (ctlv->count * sizeof(*tent) + read != sd->valsize) 998 return (EINVAL); 999 1000 if (ctlv->count == 0) 1001 return (0); 1002 1003 /* 1004 * Mark entire buffer as "read". 1005 * This instructs sopt api write it back 1006 * after function return. 
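	 * (The same buffer also carries the reply: the loop below writes each
	 * entry's result code and exported value back in place before the
	 * sopt framework copies the buffer out to userland.)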
1007 */ 1008 ipfw_get_sopt_header(sd, sd->valsize); 1009 1010 /* Perform basic checks for each entry */ 1011 ptent = tent; 1012 kidx = tent->idx; 1013 for (i = 0; i < ctlv->count; i++, ptent++) { 1014 if (ptent->head.length != sizeof(*ptent)) 1015 return (EINVAL); 1016 if (ptent->idx != kidx) 1017 return (ENOTSUP); 1018 } 1019 1020 /* Convert data into kernel request objects */ 1021 objheader_to_ti(oh, &ti); 1022 ti.type = oh->ntlv.type; 1023 ti.uidx = kidx; 1024 1025 /* Use on-stack buffer for single add/del */ 1026 if (ctlv->count == 1) { 1027 memset(&tei, 0, sizeof(tei)); 1028 tei_buf = &tei; 1029 } else 1030 tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP, 1031 M_WAITOK | M_ZERO); 1032 1033 ptei = tei_buf; 1034 ptent = tent; 1035 for (i = 0; i < ctlv->count; i++, ptent++, ptei++) { 1036 ptei->paddr = &ptent->k; 1037 ptei->subtype = ptent->subtype; 1038 ptei->masklen = ptent->masklen; 1039 if (ptent->head.flags & IPFW_TF_UPDATE) 1040 ptei->flags |= TEI_FLAGS_UPDATE; 1041 1042 ipfw_import_table_value_v1(&ptent->v.value); 1043 ptei->pvalue = (struct table_value *)&ptent->v.value; 1044 } 1045 1046 error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ? 1047 add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) : 1048 del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count); 1049 1050 /* Translate result back to userland */ 1051 ptei = tei_buf; 1052 ptent = tent; 1053 for (i = 0; i < ctlv->count; i++, ptent++, ptei++) { 1054 if (ptei->flags & TEI_FLAGS_ADDED) 1055 ptent->result = IPFW_TR_ADDED; 1056 else if (ptei->flags & TEI_FLAGS_DELETED) 1057 ptent->result = IPFW_TR_DELETED; 1058 else if (ptei->flags & TEI_FLAGS_UPDATED) 1059 ptent->result = IPFW_TR_UPDATED; 1060 else if (ptei->flags & TEI_FLAGS_LIMIT) 1061 ptent->result = IPFW_TR_LIMIT; 1062 else if (ptei->flags & TEI_FLAGS_ERROR) 1063 ptent->result = IPFW_TR_ERROR; 1064 else if (ptei->flags & TEI_FLAGS_NOTFOUND) 1065 ptent->result = IPFW_TR_NOTFOUND; 1066 else if (ptei->flags & TEI_FLAGS_EXISTS) 1067 ptent->result = IPFW_TR_EXISTS; 1068 ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value); 1069 } 1070 1071 if (tei_buf != &tei) 1072 free(tei_buf, M_TEMP); 1073 1074 return (error); 1075 } 1076 1077 /* 1078 * Looks up an entry in given table. 1079 * Data layout (v0)(current): 1080 * Request: [ ipfw_obj_header ipfw_obj_tentry ] 1081 * Reply: [ ipfw_obj_header ipfw_obj_tentry ] 1082 * 1083 * Returns 0 on success 1084 */ 1085 static int 1086 find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 1087 struct sockopt_data *sd) 1088 { 1089 ipfw_obj_tentry *tent; 1090 ipfw_obj_header *oh; 1091 struct tid_info ti; 1092 struct table_config *tc; 1093 struct table_algo *ta; 1094 struct table_info *kti; 1095 struct table_value *pval; 1096 struct namedobj_instance *ni; 1097 int error; 1098 size_t sz; 1099 1100 /* Check minimum header size */ 1101 sz = sizeof(*oh) + sizeof(*tent); 1102 if (sd->valsize != sz) 1103 return (EINVAL); 1104 1105 oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz); 1106 tent = (ipfw_obj_tentry *)(oh + 1); 1107 1108 /* Basic length checks for TLVs */ 1109 if (oh->ntlv.head.length != sizeof(oh->ntlv)) 1110 return (EINVAL); 1111 1112 objheader_to_ti(oh, &ti); 1113 ti.type = oh->ntlv.type; 1114 ti.uidx = tent->idx; 1115 1116 IPFW_UH_RLOCK(ch); 1117 ni = CHAIN_TO_NI(ch); 1118 1119 /* 1120 * Find existing table and check its type . 
1121 */ 1122 ta = NULL; 1123 if ((tc = find_table(ni, &ti)) == NULL) { 1124 IPFW_UH_RUNLOCK(ch); 1125 return (ESRCH); 1126 } 1127 1128 /* check table type */ 1129 if (tc->no.subtype != ti.type) { 1130 IPFW_UH_RUNLOCK(ch); 1131 return (EINVAL); 1132 } 1133 1134 kti = KIDX_TO_TI(ch, tc->no.kidx); 1135 ta = tc->ta; 1136 1137 if (ta->find_tentry == NULL) 1138 return (ENOTSUP); 1139 1140 error = ta->find_tentry(tc->astate, kti, tent); 1141 if (error == 0) { 1142 pval = get_table_value(ch, tc, tent->v.kidx); 1143 ipfw_export_table_value_v1(pval, &tent->v.value); 1144 } 1145 IPFW_UH_RUNLOCK(ch); 1146 1147 return (error); 1148 } 1149 1150 /* 1151 * Flushes all entries or destroys given table. 1152 * Data layout (v0)(current): 1153 * Request: [ ipfw_obj_header ] 1154 * 1155 * Returns 0 on success 1156 */ 1157 static int 1158 flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 1159 struct sockopt_data *sd) 1160 { 1161 int error; 1162 struct _ipfw_obj_header *oh; 1163 struct tid_info ti; 1164 1165 if (sd->valsize != sizeof(*oh)) 1166 return (EINVAL); 1167 1168 oh = (struct _ipfw_obj_header *)op3; 1169 objheader_to_ti(oh, &ti); 1170 1171 if (op3->opcode == IP_FW_TABLE_XDESTROY) 1172 error = destroy_table(ch, &ti); 1173 else if (op3->opcode == IP_FW_TABLE_XFLUSH) 1174 error = flush_table(ch, &ti); 1175 else 1176 return (ENOTSUP); 1177 1178 return (error); 1179 } 1180 1181 static void 1182 restart_flush(void *object, struct op_state *_state) 1183 { 1184 struct tableop_state *ts; 1185 1186 ts = (struct tableop_state *)_state; 1187 1188 if (ts->tc != object) 1189 return; 1190 1191 /* Indicate we've called */ 1192 ts->modified = 1; 1193 } 1194 1195 /* 1196 * Flushes given table. 1197 * 1198 * Function create new table instance with the same 1199 * parameters, swaps it with old one and 1200 * flushes state without holding runtime WLOCK. 1201 * 1202 * Returns 0 on success. 1203 */ 1204 int 1205 flush_table(struct ip_fw_chain *ch, struct tid_info *ti) 1206 { 1207 struct namedobj_instance *ni; 1208 struct table_config *tc; 1209 struct table_algo *ta; 1210 struct table_info ti_old, ti_new, *tablestate; 1211 void *astate_old, *astate_new; 1212 char algostate[64], *pstate; 1213 struct tableop_state ts; 1214 int error, need_gc; 1215 uint16_t kidx; 1216 uint8_t tflags; 1217 1218 /* 1219 * Stage 1: save table algorithm. 1220 * Reference found table to ensure it won't disappear. 1221 */ 1222 IPFW_UH_WLOCK(ch); 1223 ni = CHAIN_TO_NI(ch); 1224 if ((tc = find_table(ni, ti)) == NULL) { 1225 IPFW_UH_WUNLOCK(ch); 1226 return (ESRCH); 1227 } 1228 need_gc = 0; 1229 astate_new = NULL; 1230 memset(&ti_new, 0, sizeof(ti_new)); 1231 restart: 1232 /* Set up swap handler */ 1233 memset(&ts, 0, sizeof(ts)); 1234 ts.opstate.func = restart_flush; 1235 ts.tc = tc; 1236 1237 ta = tc->ta; 1238 /* Do not flush readonly tables */ 1239 if ((ta->flags & TA_FLAG_READONLY) != 0) { 1240 IPFW_UH_WUNLOCK(ch); 1241 return (EACCES); 1242 } 1243 /* Save startup algo parameters */ 1244 if (ta->print_config != NULL) { 1245 ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx), 1246 algostate, sizeof(algostate)); 1247 pstate = algostate; 1248 } else 1249 pstate = NULL; 1250 tflags = tc->tflags; 1251 tc->no.refcnt++; 1252 add_toperation_state(ch, &ts); 1253 IPFW_UH_WUNLOCK(ch); 1254 1255 /* 1256 * Stage 1.5: if this is not the first attempt, destroy previous state 1257 */ 1258 if (need_gc != 0) { 1259 ta->destroy(astate_new, &ti_new); 1260 need_gc = 0; 1261 } 1262 1263 /* 1264 * Stage 2: allocate new table instance using same algo. 
	 */
	memset(&ti_new, 0, sizeof(struct table_info));
	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);

	/*
	 * Stage 3: swap old state pointers with newly-allocated ones.
	 * Decrease refcount.
	 */
	IPFW_UH_WLOCK(ch);
	tc->no.refcnt--;
	del_toperation_state(ch, &ts);

	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}

	/*
	 * Restart operation if table swap has happened:
	 * even if algo may be the same, algo init parameters
	 * may change. Restart operation instead of doing
	 * complex checks.
	 */
	if (ts.modified != 0) {
		/* Delay destroying data since we're holding UH lock */
		need_gc = 1;
		goto restart;
	}

	ni = CHAIN_TO_NI(ch);
	kidx = tc->no.kidx;
	tablestate = (struct table_info *)ch->tablestate;

	IPFW_WLOCK(ch);
	ti_old = tablestate[kidx];
	tablestate[kidx] = ti_new;
	IPFW_WUNLOCK(ch);

	astate_old = tc->astate;
	tc->astate = astate_new;
	tc->ti_copy = ti_new;
	tc->count = 0;

	/* Notify algo on real @ti address */
	if (ta->change_ti != NULL)
		ta->change_ti(tc->astate, &tablestate[kidx]);

	/*
	 * Stage 4: unref values.
	 */
	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
	IPFW_UH_WUNLOCK(ch);

	/*
	 * Stage 5: perform real flush/destroy.
	 */
	ta->destroy(astate_old, &ti_old);

	return (0);
}

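/*
 * Illustrative request layout (a sketch of what swap_table() below expects
 * from userland; names and values are hypothetical):
 *
 *   struct {
 *           ipfw_obj_header oh;     oh.ntlv names the first table
 *           ipfw_obj_ntlv   ntlv;   names the second table
 *   } req;
 *
 *   req.oh.ntlv.head.length = sizeof(req.oh.ntlv);
 *   strlcpy(req.oh.ntlv.name, "hosts-old", sizeof(req.oh.ntlv.name));
 *   req.ntlv.head.length = sizeof(req.ntlv);
 *   strlcpy(req.ntlv.name, "hosts-new", sizeof(req.ntlv.name));
 */
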
/*
 * Swaps two tables.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
 *
 * Returns 0 on success
 */
static int
swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	int error;
	struct _ipfw_obj_header *oh;
	struct tid_info ti_a, ti_b;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)op3;
	ntlv_to_ti(&oh->ntlv, &ti_a);
	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);

	error = swap_tables(ch, &ti_a, &ti_b);

	return (error);
}

/*
 * Swaps two tables of the same type/valtype.
 *
 * Checks if tables are compatible and their limits
 * permit the swap, then actually performs the swap.
 *
 * Each table consists of 2 different parts:
 * config:
 *   @tc (with name, set, kidx) and rule bindings, which is "stable".
 *   number of items
 *   table algo
 * runtime:
 *   runtime data @ti (ch->tablestate)
 *   runtime cache in @tc
 *   algo-specific data (@tc->astate)
 *
 * So we switch:
 *  all runtime data
 *  number of items
 *  table algo
 *
 * After that we call @ti change handler for each table.
 *
 * Note that referencing @tc won't protect tc->ta from change.
 * XXX: Do we need to restrict swap between locked tables?
 * XXX: Do we need to exchange ftype?
 *
 * Returns 0 on success.
 */
static int
swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
    struct tid_info *b)
{
	struct namedobj_instance *ni;
	struct table_config *tc_a, *tc_b;
	struct table_algo *ta;
	struct table_info ti, *tablestate;
	void *astate;
	uint32_t count;

	/*
	 * Stage 1: find both tables and ensure they are of
	 * the same type.
	 */
	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc_a = find_table(ni, a)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}
	if ((tc_b = find_table(ni, b)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* It is very easy to swap between the same table */
	if (tc_a == tc_b) {
		IPFW_UH_WUNLOCK(ch);
		return (0);
	}

	/* Check type and value are the same */
	if (tc_a->no.subtype != tc_b->no.subtype ||
	    tc_a->tflags != tc_b->tflags) {
		IPFW_UH_WUNLOCK(ch);
		return (EINVAL);
	}

	/* Check limits before swap */
	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
		IPFW_UH_WUNLOCK(ch);
		return (EFBIG);
	}

	/* Check if one of the tables is readonly */
	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}

	/* Notify we're going to swap */
	rollback_toperation_state(ch, tc_a);
	rollback_toperation_state(ch, tc_b);

	/* Everything is fine, prepare to swap */
	tablestate = (struct table_info *)ch->tablestate;
	ti = tablestate[tc_a->no.kidx];
	ta = tc_a->ta;
	astate = tc_a->astate;
	count = tc_a->count;

	IPFW_WLOCK(ch);
	/* a <- b */
	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
	tc_a->ta = tc_b->ta;
	tc_a->astate = tc_b->astate;
	tc_a->count = tc_b->count;
	/* b <- a */
	tablestate[tc_b->no.kidx] = ti;
	tc_b->ta = ta;
	tc_b->astate = astate;
	tc_b->count = count;
	IPFW_WUNLOCK(ch);

	/* Ensure tc.ti copies are in sync */
	tc_a->ti_copy = tablestate[tc_a->no.kidx];
	tc_b->ti_copy = tablestate[tc_b->no.kidx];

	/* Notify both tables on @ti change */
	if (tc_a->ta->change_ti != NULL)
		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
	if (tc_b->ta->change_ti != NULL)
		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);

	IPFW_UH_WUNLOCK(ch);

	return (0);
}

/*
 * Destroys table specified by @ti.
 * Data layout (v0)(current):
 * Request: [ ip_fw3_opheader ]
 *
 * Returns 0 on success
 */
static int
destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
{
	struct namedobj_instance *ni;
	struct table_config *tc;

	IPFW_UH_WLOCK(ch);

	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* Do not permit destroying referenced tables */
	if (tc->no.refcnt > 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EBUSY);
	}

	IPFW_WLOCK(ch);
	unlink_table(ch, tc);
	IPFW_WUNLOCK(ch);

	/* Free obj index */
	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
		printf("Error unlinking kidx %d from table %s\n",
		    tc->no.kidx, tc->tablename);

	/* Unref values used in tables while holding UH lock */
	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
	IPFW_UH_WUNLOCK(ch);

	free_table_config(ni, tc);

	return (0);
}

static uint32_t
roundup2p(uint32_t v)
{

	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v++;

	return (v);
}

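/*
 * Example (illustrative): roundup2p() above rounds its argument up to the
 * nearest power of two, e.g. roundup2p(1000) == 1024 and
 * roundup2p(4096) == 4096.  ipfw_resize_tables() below relies on it to keep
 * the maximum number of tables a power of 2.
 */
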
/*
 * Grow tables index.
 *
 * Returns 0 on success.
 */
int
ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
{
	unsigned int ntables_old, tbl;
	struct namedobj_instance *ni;
	void *new_idx, *old_tablestate, *tablestate;
	struct table_info *ti;
	struct table_config *tc;
	int i, new_blocks;

	/* Check new value for validity */
	if (ntables == 0)
		return (EINVAL);
	if (ntables > IPFW_TABLES_MAX)
		ntables = IPFW_TABLES_MAX;
	/* Align to nearest power of 2 */
	ntables = (unsigned int)roundup2p(ntables);

	/* Allocate new pointers */
	tablestate = malloc(ntables * sizeof(struct table_info),
	    M_IPFW, M_WAITOK | M_ZERO);

	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);

	IPFW_UH_WLOCK(ch);

	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
	ni = CHAIN_TO_NI(ch);

	/* Temporarily restrict decreasing max_tables */
	if (ntables < V_fw_tables_max) {
		/*
		 * FIXME: Check if we really can shrink
		 */
		IPFW_UH_WUNLOCK(ch);
		return (EINVAL);
	}

	/* Copy table info/indices */
	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);

	IPFW_WLOCK(ch);

	/* Change pointers */
	old_tablestate = ch->tablestate;
	ch->tablestate = tablestate;
	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);

	ntables_old = V_fw_tables_max;
	V_fw_tables_max = ntables;

	IPFW_WUNLOCK(ch);

	/* Notify all consumers that their @ti pointer has changed */
	ti = (struct table_info *)ch->tablestate;
	for (i = 0; i < tbl; i++, ti++) {
		if (ti->lookup == NULL)
			continue;
		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
		if (tc == NULL || tc->ta->change_ti == NULL)
			continue;

		tc->ta->change_ti(tc->astate, ti);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Free old pointers */
	free(old_tablestate, M_IPFW);
	ipfw_objhash_bitmap_free(new_idx, new_blocks);

	return (0);
}

/*
 * Lookup table's named object by its @kidx.
 */
struct named_object *
ipfw_objhash_lookup_table_kidx(struct ip_fw_chain *ch, uint16_t kidx)
{

	return (ipfw_objhash_lookup_kidx(CHAIN_TO_NI(ch), kidx));
}

/*
 * Take reference to table specified in @ntlv.
 * On success return its @kidx.
 */
int
ipfw_ref_table(struct ip_fw_chain *ch, ipfw_obj_ntlv *ntlv, uint16_t *kidx)
{
	struct tid_info ti;
	struct table_config *tc;
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	ntlv_to_ti(ntlv, &ti);
	error = find_table_err(CHAIN_TO_NI(ch), &ti, &tc);
	if (error != 0)
		return (error);

	if (tc == NULL)
		return (ESRCH);

	tc_ref(tc);
	*kidx = tc->no.kidx;

	return (0);
}

void
ipfw_unref_table(struct ip_fw_chain *ch, uint16_t kidx)
{
	struct namedobj_instance *ni;
	struct named_object *no;

	IPFW_UH_WLOCK_ASSERT(ch);
	ni = CHAIN_TO_NI(ch);
	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("Table with index %d not found", kidx));
	no->refcnt--;
}

/*
 * Lookup an arbitrary key @paddr of length @plen in table @tbl.
 * Stores found value in @val.
 *
 * Returns 1 if key was found.
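 *
 * Illustrative call (a sketch; @kidx and the key are hypothetical):
 *
 *   struct in_addr a;
 *   uint32_t val;
 *
 *   if (ipfw_lookup_table(ch, kidx, sizeof(in_addr_t), &a, &val) != 0)
 *           ... entry found, @val holds the value stored for it ...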
1668 */ 1669 int 1670 ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen, 1671 void *paddr, uint32_t *val) 1672 { 1673 struct table_info *ti; 1674 1675 ti = KIDX_TO_TI(ch, tbl); 1676 1677 return (ti->lookup(ti, paddr, plen, val)); 1678 } 1679 1680 /* 1681 * Info/List/dump support for tables. 1682 * 1683 */ 1684 1685 /* 1686 * High-level 'get' cmds sysctl handlers 1687 */ 1688 1689 /* 1690 * Lists all tables currently available in kernel. 1691 * Data layout (v0)(current): 1692 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 1693 * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ] 1694 * 1695 * Returns 0 on success 1696 */ 1697 static int 1698 list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 1699 struct sockopt_data *sd) 1700 { 1701 struct _ipfw_obj_lheader *olh; 1702 int error; 1703 1704 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 1705 if (olh == NULL) 1706 return (EINVAL); 1707 if (sd->valsize < olh->size) 1708 return (EINVAL); 1709 1710 IPFW_UH_RLOCK(ch); 1711 error = export_tables(ch, olh, sd); 1712 IPFW_UH_RUNLOCK(ch); 1713 1714 return (error); 1715 } 1716 1717 /* 1718 * Store table info to buffer provided by @sd. 1719 * Data layout (v0)(current): 1720 * Request: [ ipfw_obj_header ipfw_xtable_info(empty)] 1721 * Reply: [ ipfw_obj_header ipfw_xtable_info ] 1722 * 1723 * Returns 0 on success. 1724 */ 1725 static int 1726 describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 1727 struct sockopt_data *sd) 1728 { 1729 struct _ipfw_obj_header *oh; 1730 struct table_config *tc; 1731 struct tid_info ti; 1732 size_t sz; 1733 1734 sz = sizeof(*oh) + sizeof(ipfw_xtable_info); 1735 oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz); 1736 if (oh == NULL) 1737 return (EINVAL); 1738 1739 objheader_to_ti(oh, &ti); 1740 1741 IPFW_UH_RLOCK(ch); 1742 if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) { 1743 IPFW_UH_RUNLOCK(ch); 1744 return (ESRCH); 1745 } 1746 1747 export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1)); 1748 IPFW_UH_RUNLOCK(ch); 1749 1750 return (0); 1751 } 1752 1753 /* 1754 * Modifies existing table. 1755 * Data layout (v0)(current): 1756 * Request: [ ipfw_obj_header ipfw_xtable_info ] 1757 * 1758 * Returns 0 on success 1759 */ 1760 static int 1761 modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 1762 struct sockopt_data *sd) 1763 { 1764 struct _ipfw_obj_header *oh; 1765 ipfw_xtable_info *i; 1766 char *tname; 1767 struct tid_info ti; 1768 struct namedobj_instance *ni; 1769 struct table_config *tc; 1770 1771 if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info)) 1772 return (EINVAL); 1773 1774 oh = (struct _ipfw_obj_header *)sd->kbuf; 1775 i = (ipfw_xtable_info *)(oh + 1); 1776 1777 /* 1778 * Verify user-supplied strings. 
1779 * Check for null-terminated/zero-length strings/ 1780 */ 1781 tname = oh->ntlv.name; 1782 if (check_table_name(tname) != 0) 1783 return (EINVAL); 1784 1785 objheader_to_ti(oh, &ti); 1786 ti.type = i->type; 1787 1788 IPFW_UH_WLOCK(ch); 1789 ni = CHAIN_TO_NI(ch); 1790 if ((tc = find_table(ni, &ti)) == NULL) { 1791 IPFW_UH_WUNLOCK(ch); 1792 return (ESRCH); 1793 } 1794 1795 /* Do not support any modifications for readonly tables */ 1796 if ((tc->ta->flags & TA_FLAG_READONLY) != 0) { 1797 IPFW_UH_WUNLOCK(ch); 1798 return (EACCES); 1799 } 1800 1801 if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0) 1802 tc->limit = i->limit; 1803 if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0) 1804 tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0); 1805 IPFW_UH_WUNLOCK(ch); 1806 1807 return (0); 1808 } 1809 1810 /* 1811 * Creates new table. 1812 * Data layout (v0)(current): 1813 * Request: [ ipfw_obj_header ipfw_xtable_info ] 1814 * 1815 * Returns 0 on success 1816 */ 1817 static int 1818 create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 1819 struct sockopt_data *sd) 1820 { 1821 struct _ipfw_obj_header *oh; 1822 ipfw_xtable_info *i; 1823 char *tname, *aname; 1824 struct tid_info ti; 1825 struct namedobj_instance *ni; 1826 1827 if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info)) 1828 return (EINVAL); 1829 1830 oh = (struct _ipfw_obj_header *)sd->kbuf; 1831 i = (ipfw_xtable_info *)(oh + 1); 1832 1833 /* 1834 * Verify user-supplied strings. 1835 * Check for null-terminated/zero-length strings/ 1836 */ 1837 tname = oh->ntlv.name; 1838 aname = i->algoname; 1839 if (check_table_name(tname) != 0 || 1840 strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname)) 1841 return (EINVAL); 1842 1843 if (aname[0] == '\0') { 1844 /* Use default algorithm */ 1845 aname = NULL; 1846 } 1847 1848 objheader_to_ti(oh, &ti); 1849 ti.type = i->type; 1850 1851 ni = CHAIN_TO_NI(ch); 1852 1853 IPFW_UH_RLOCK(ch); 1854 if (find_table(ni, &ti) != NULL) { 1855 IPFW_UH_RUNLOCK(ch); 1856 return (EEXIST); 1857 } 1858 IPFW_UH_RUNLOCK(ch); 1859 1860 return (create_table_internal(ch, &ti, aname, i, NULL, 0)); 1861 } 1862 1863 /* 1864 * Creates new table based on @ti and @aname. 1865 * 1866 * Assume @aname to be checked and valid. 1867 * Stores allocated table kidx inside @pkidx (if non-NULL). 1868 * Reference created table if @compat is non-zero. 1869 * 1870 * Returns 0 on success. 
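 *
 * Illustrative call (a sketch of what create_table_compat() above does):
 *
 *   memset(&xi, 0, sizeof(xi));
 *   xi.vmask = IPFW_VTYPE_LEGACY;
 *   error = create_table_internal(ch, ti, NULL, &xi, &kidx, 1);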
1871 */ 1872 static int 1873 create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti, 1874 char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat) 1875 { 1876 struct namedobj_instance *ni; 1877 struct table_config *tc, *tc_new, *tmp; 1878 struct table_algo *ta; 1879 uint16_t kidx; 1880 1881 ni = CHAIN_TO_NI(ch); 1882 1883 ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname); 1884 if (ta == NULL) 1885 return (ENOTSUP); 1886 1887 tc = alloc_table_config(ch, ti, ta, aname, i->tflags); 1888 if (tc == NULL) 1889 return (ENOMEM); 1890 1891 tc->vmask = i->vmask; 1892 tc->limit = i->limit; 1893 if (ta->flags & TA_FLAG_READONLY) 1894 tc->locked = 1; 1895 else 1896 tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0; 1897 1898 IPFW_UH_WLOCK(ch); 1899 1900 /* Check if table has been already created */ 1901 tc_new = find_table(ni, ti); 1902 if (tc_new != NULL) { 1903 /* 1904 * Compat: do not fail if we're 1905 * requesting to create existing table 1906 * which has the same type 1907 */ 1908 if (compat == 0 || tc_new->no.subtype != tc->no.subtype) { 1909 IPFW_UH_WUNLOCK(ch); 1910 free_table_config(ni, tc); 1911 return (EEXIST); 1912 } 1913 1914 /* Exchange tc and tc_new for proper refcounting & freeing */ 1915 tmp = tc; 1916 tc = tc_new; 1917 tc_new = tmp; 1918 } else { 1919 /* New table */ 1920 if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) { 1921 IPFW_UH_WUNLOCK(ch); 1922 printf("Unable to allocate table index." 1923 " Consider increasing net.inet.ip.fw.tables_max"); 1924 free_table_config(ni, tc); 1925 return (EBUSY); 1926 } 1927 tc->no.kidx = kidx; 1928 tc->no.etlv = IPFW_TLV_TBL_NAME; 1929 1930 link_table(ch, tc); 1931 } 1932 1933 if (compat != 0) 1934 tc->no.refcnt++; 1935 if (pkidx != NULL) 1936 *pkidx = tc->no.kidx; 1937 1938 IPFW_UH_WUNLOCK(ch); 1939 1940 if (tc_new != NULL) 1941 free_table_config(ni, tc_new); 1942 1943 return (0); 1944 } 1945 1946 static void 1947 ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti) 1948 { 1949 1950 memset(ti, 0, sizeof(struct tid_info)); 1951 ti->set = ntlv->set; 1952 ti->uidx = ntlv->idx; 1953 ti->tlvs = ntlv; 1954 ti->tlen = ntlv->head.length; 1955 } 1956 1957 static void 1958 objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti) 1959 { 1960 1961 ntlv_to_ti(&oh->ntlv, ti); 1962 } 1963 1964 struct namedobj_instance * 1965 ipfw_get_table_objhash(struct ip_fw_chain *ch) 1966 { 1967 1968 return (CHAIN_TO_NI(ch)); 1969 } 1970 1971 /* 1972 * Exports basic table info as name TLV. 1973 * Used inside dump_static_rules() to provide info 1974 * about all tables referenced by current ruleset. 1975 * 1976 * Returns 0 on success. 
1977 */ 1978 int 1979 ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx, 1980 struct sockopt_data *sd) 1981 { 1982 struct namedobj_instance *ni; 1983 struct named_object *no; 1984 ipfw_obj_ntlv *ntlv; 1985 1986 ni = CHAIN_TO_NI(ch); 1987 1988 no = ipfw_objhash_lookup_kidx(ni, kidx); 1989 KASSERT(no != NULL, ("invalid table kidx passed")); 1990 1991 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv)); 1992 if (ntlv == NULL) 1993 return (ENOMEM); 1994 1995 ntlv->head.type = IPFW_TLV_TBL_NAME; 1996 ntlv->head.length = sizeof(*ntlv); 1997 ntlv->idx = no->kidx; 1998 strlcpy(ntlv->name, no->name, sizeof(ntlv->name)); 1999 2000 return (0); 2001 } 2002 2003 struct dump_args { 2004 struct ip_fw_chain *ch; 2005 struct table_info *ti; 2006 struct table_config *tc; 2007 struct sockopt_data *sd; 2008 uint32_t cnt; 2009 uint16_t uidx; 2010 int error; 2011 uint32_t size; 2012 ipfw_table_entry *ent; 2013 ta_foreach_f *f; 2014 void *farg; 2015 ipfw_obj_tentry tent; 2016 }; 2017 2018 static int 2019 count_ext_entries(void *e, void *arg) 2020 { 2021 struct dump_args *da; 2022 2023 da = (struct dump_args *)arg; 2024 da->cnt++; 2025 2026 return (0); 2027 } 2028 2029 /* 2030 * Gets number of items from table either using 2031 * internal counter or calling algo callback for 2032 * externally-managed tables. 2033 * 2034 * Returns number of records. 2035 */ 2036 static uint32_t 2037 table_get_count(struct ip_fw_chain *ch, struct table_config *tc) 2038 { 2039 struct table_info *ti; 2040 struct table_algo *ta; 2041 struct dump_args da; 2042 2043 ti = KIDX_TO_TI(ch, tc->no.kidx); 2044 ta = tc->ta; 2045 2046 /* Use internal counter for self-managed tables */ 2047 if ((ta->flags & TA_FLAG_READONLY) == 0) 2048 return (tc->count); 2049 2050 /* Use callback to quickly get number of items */ 2051 if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0) 2052 return (ta->get_count(tc->astate, ti)); 2053 2054 /* Count number of iterms ourselves */ 2055 memset(&da, 0, sizeof(da)); 2056 ta->foreach(tc->astate, ti, count_ext_entries, &da); 2057 2058 return (da.cnt); 2059 } 2060 2061 /* 2062 * Exports table @tc info into standard ipfw_xtable_info format. 2063 */ 2064 static void 2065 export_table_info(struct ip_fw_chain *ch, struct table_config *tc, 2066 ipfw_xtable_info *i) 2067 { 2068 struct table_info *ti; 2069 struct table_algo *ta; 2070 2071 i->type = tc->no.subtype; 2072 i->tflags = tc->tflags; 2073 i->vmask = tc->vmask; 2074 i->set = tc->no.set; 2075 i->kidx = tc->no.kidx; 2076 i->refcnt = tc->no.refcnt; 2077 i->count = table_get_count(ch, tc); 2078 i->limit = tc->limit; 2079 i->flags |= (tc->locked != 0) ? 
IPFW_TGFLAGS_LOCKED : 0; 2080 i->size = i->count * sizeof(ipfw_obj_tentry); 2081 i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info); 2082 strlcpy(i->tablename, tc->tablename, sizeof(i->tablename)); 2083 ti = KIDX_TO_TI(ch, tc->no.kidx); 2084 ta = tc->ta; 2085 if (ta->print_config != NULL) { 2086 /* Use algo function to print table config to string */ 2087 ta->print_config(tc->astate, ti, i->algoname, 2088 sizeof(i->algoname)); 2089 } else 2090 strlcpy(i->algoname, ta->name, sizeof(i->algoname)); 2091 /* Dump algo-specific data, if possible */ 2092 if (ta->dump_tinfo != NULL) { 2093 ta->dump_tinfo(tc->astate, ti, &i->ta_info); 2094 i->ta_info.flags |= IPFW_TATFLAGS_DATA; 2095 } 2096 } 2097 2098 struct dump_table_args { 2099 struct ip_fw_chain *ch; 2100 struct sockopt_data *sd; 2101 }; 2102 2103 static int 2104 export_table_internal(struct namedobj_instance *ni, struct named_object *no, 2105 void *arg) 2106 { 2107 ipfw_xtable_info *i; 2108 struct dump_table_args *dta; 2109 2110 dta = (struct dump_table_args *)arg; 2111 2112 i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i)); 2113 KASSERT(i != NULL, ("previously checked buffer is not enough")); 2114 2115 export_table_info(dta->ch, (struct table_config *)no, i); 2116 return (0); 2117 } 2118 2119 /* 2120 * Export all tables as ipfw_xtable_info structures to 2121 * storage provided by @sd. 2122 * 2123 * If supplied buffer is too small, fills in required size 2124 * and returns ENOMEM. 2125 * Returns 0 on success. 2126 */ 2127 static int 2128 export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh, 2129 struct sockopt_data *sd) 2130 { 2131 uint32_t size; 2132 uint32_t count; 2133 struct dump_table_args dta; 2134 2135 count = ipfw_objhash_count(CHAIN_TO_NI(ch)); 2136 size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader); 2137 2138 /* Fill in header regadless of buffer size */ 2139 olh->count = count; 2140 olh->objsize = sizeof(ipfw_xtable_info); 2141 2142 if (size > olh->size) { 2143 olh->size = size; 2144 return (ENOMEM); 2145 } 2146 2147 olh->size = size; 2148 2149 dta.ch = ch; 2150 dta.sd = sd; 2151 2152 ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta); 2153 2154 return (0); 2155 } 2156 2157 /* 2158 * Dumps all table data 2159 * Data layout (v1)(current): 2160 * Request: [ ipfw_obj_header ], size = ipfw_xtable_info.size 2161 * Reply: [ ipfw_obj_header ipfw_xtable_info ipfw_obj_tentry x N ] 2162 * 2163 * Returns 0 on success 2164 */ 2165 static int 2166 dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2167 struct sockopt_data *sd) 2168 { 2169 struct _ipfw_obj_header *oh; 2170 ipfw_xtable_info *i; 2171 struct tid_info ti; 2172 struct table_config *tc; 2173 struct table_algo *ta; 2174 struct dump_args da; 2175 uint32_t sz; 2176 2177 sz = sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info); 2178 oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz); 2179 if (oh == NULL) 2180 return (EINVAL); 2181 2182 i = (ipfw_xtable_info *)(oh + 1); 2183 objheader_to_ti(oh, &ti); 2184 2185 IPFW_UH_RLOCK(ch); 2186 if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) { 2187 IPFW_UH_RUNLOCK(ch); 2188 return (ESRCH); 2189 } 2190 export_table_info(ch, tc, i); 2191 2192 if (sd->valsize < i->size) { 2193 /* 2194 * Submitted buffer size is not enough. 2195 * WE've already filled in @i structure with 2196 * relevant table info including size, so we 2197 * can return. Buffer will be flushed automatically. 
2198 */ 2199 IPFW_UH_RUNLOCK(ch); 2200 return (ENOMEM); 2201 } 2202 2203 /* 2204 * Do the actual dump in eXtended format 2205 */ 2206 memset(&da, 0, sizeof(da)); 2207 da.ch = ch; 2208 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2209 da.tc = tc; 2210 da.sd = sd; 2211 2212 ta = tc->ta; 2213 2214 ta->foreach(tc->astate, da.ti, dump_table_tentry, &da); 2215 IPFW_UH_RUNLOCK(ch); 2216 2217 return (da.error); 2218 } 2219 2220 /* 2221 * Dumps all table data 2222 * Data layout (version 0)(legacy): 2223 * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE() 2224 * Reply: [ ipfw_xtable ipfw_table_xentry x N ] 2225 * 2226 * Returns 0 on success 2227 */ 2228 static int 2229 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2230 struct sockopt_data *sd) 2231 { 2232 ipfw_xtable *xtbl; 2233 struct tid_info ti; 2234 struct table_config *tc; 2235 struct table_algo *ta; 2236 struct dump_args da; 2237 size_t sz, count; 2238 2239 xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable)); 2240 if (xtbl == NULL) 2241 return (EINVAL); 2242 2243 memset(&ti, 0, sizeof(ti)); 2244 ti.uidx = xtbl->tbl; 2245 2246 IPFW_UH_RLOCK(ch); 2247 if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) { 2248 IPFW_UH_RUNLOCK(ch); 2249 return (0); 2250 } 2251 count = table_get_count(ch, tc); 2252 sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable); 2253 2254 xtbl->cnt = count; 2255 xtbl->size = sz; 2256 xtbl->type = tc->no.subtype; 2257 xtbl->tbl = ti.uidx; 2258 2259 if (sd->valsize < sz) { 2260 /* 2261 * The submitted buffer is not big enough. 2262 * We've already filled in the @xtbl structure with 2263 * the relevant table info, including its size, so we 2264 * can return. The buffer will be flushed automatically. 2265 */ 2266 IPFW_UH_RUNLOCK(ch); 2267 return (ENOMEM); 2268 } 2269 2270 /* Do the actual dump in eXtended format */ 2271 memset(&da, 0, sizeof(da)); 2272 da.ch = ch; 2273 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2274 da.tc = tc; 2275 da.sd = sd; 2276 2277 ta = tc->ta; 2278 2279 ta->foreach(tc->astate, da.ti, dump_table_xentry, &da); 2280 IPFW_UH_RUNLOCK(ch); 2281 2282 return (0); 2283 } 2284 2285 /* 2286 * Legacy function to retrieve the number of items in a table.
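 *
 * Sopt layout, for reference:
 *    Request: [ ip_fw3_opheader uint32_t(table uidx) ]
 *    Reply:   [ ip_fw3_opheader uint32_t(size in bytes) ]
 * The returned value is whatever ipfw_count_xtable() computes: room for
 * an ipfw_xtable header plus one ipfw_table_xentry per record.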
2287 */ 2288 static int 2289 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2290 struct sockopt_data *sd) 2291 { 2292 uint32_t *tbl; 2293 struct tid_info ti; 2294 size_t sz; 2295 int error; 2296 2297 sz = sizeof(*op3) + sizeof(uint32_t); 2298 op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz); 2299 if (op3 == NULL) 2300 return (EINVAL); 2301 2302 tbl = (uint32_t *)(op3 + 1); 2303 memset(&ti, 0, sizeof(ti)); 2304 ti.uidx = *tbl; 2305 IPFW_UH_RLOCK(ch); 2306 error = ipfw_count_xtable(ch, &ti, tbl); 2307 IPFW_UH_RUNLOCK(ch); 2308 return (error); 2309 } 2310 2311 /* 2312 * Legacy IP_FW_TABLE_GETSIZE handler 2313 */ 2314 int 2315 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt) 2316 { 2317 struct table_config *tc; 2318 2319 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) 2320 return (ESRCH); 2321 *cnt = table_get_count(ch, tc); 2322 return (0); 2323 } 2324 2325 /* 2326 * Legacy IP_FW_TABLE_XGETSIZE handler 2327 */ 2328 int 2329 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt) 2330 { 2331 struct table_config *tc; 2332 uint32_t count; 2333 2334 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) { 2335 *cnt = 0; 2336 return (0); /* 'table all list' requires success */ 2337 } 2338 2339 count = table_get_count(ch, tc); 2340 *cnt = count * sizeof(ipfw_table_xentry); 2341 if (count > 0) 2342 *cnt += sizeof(ipfw_xtable); 2343 return (0); 2344 } 2345 2346 static int 2347 dump_table_entry(void *e, void *arg) 2348 { 2349 struct dump_args *da; 2350 struct table_config *tc; 2351 struct table_algo *ta; 2352 ipfw_table_entry *ent; 2353 struct table_value *pval; 2354 int error; 2355 2356 da = (struct dump_args *)arg; 2357 2358 tc = da->tc; 2359 ta = tc->ta; 2360 2361 /* Out of memory, returning */ 2362 if (da->cnt == da->size) 2363 return (1); 2364 ent = da->ent++; 2365 ent->tbl = da->uidx; 2366 da->cnt++; 2367 2368 error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent); 2369 if (error != 0) 2370 return (error); 2371 2372 ent->addr = da->tent.k.addr.s_addr; 2373 ent->masklen = da->tent.masklen; 2374 pval = get_table_value(da->ch, da->tc, da->tent.v.kidx); 2375 ent->value = ipfw_export_table_value_legacy(pval); 2376 2377 return (0); 2378 } 2379 2380 /* 2381 * Dumps table in pre-8.1 legacy format. 2382 */ 2383 int 2384 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti, 2385 ipfw_table *tbl) 2386 { 2387 struct table_config *tc; 2388 struct table_algo *ta; 2389 struct dump_args da; 2390 2391 tbl->cnt = 0; 2392 2393 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) 2394 return (0); /* XXX: We should return ESRCH */ 2395 2396 ta = tc->ta; 2397 2398 /* This dump format supports IPv4 only */ 2399 if (tc->no.subtype != IPFW_TABLE_ADDR) 2400 return (0); 2401 2402 memset(&da, 0, sizeof(da)); 2403 da.ch = ch; 2404 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2405 da.tc = tc; 2406 da.ent = &tbl->ent[0]; 2407 da.size = tbl->size; 2408 2409 tbl->cnt = 0; 2410 ta->foreach(tc->astate, da.ti, dump_table_entry, &da); 2411 tbl->cnt = da.cnt; 2412 2413 return (0); 2414 } 2415 2416 /* 2417 * Dumps table entry in eXtended format (v1)(current). 
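 *
 * Each record reserves sizeof(ipfw_obj_tentry) bytes in @sd, fills in
 * head.length and idx, has the algo encode the key via dump_tentry()
 * and finally attaches the value converted by ipfw_export_table_value_v1().
 * Returning non-zero from this callback stops ta->foreach(); running out
 * of buffer space is reported back to the caller through da->error (ENOMEM).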
2418 */ 2419 static int 2420 dump_table_tentry(void *e, void *arg) 2421 { 2422 struct dump_args *da; 2423 struct table_config *tc; 2424 struct table_algo *ta; 2425 struct table_value *pval; 2426 ipfw_obj_tentry *tent; 2427 int error; 2428 2429 da = (struct dump_args *)arg; 2430 2431 tc = da->tc; 2432 ta = tc->ta; 2433 2434 tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent)); 2435 /* Out of memory, returning */ 2436 if (tent == NULL) { 2437 da->error = ENOMEM; 2438 return (1); 2439 } 2440 tent->head.length = sizeof(ipfw_obj_tentry); 2441 tent->idx = da->uidx; 2442 2443 error = ta->dump_tentry(tc->astate, da->ti, e, tent); 2444 if (error != 0) 2445 return (error); 2446 2447 pval = get_table_value(da->ch, da->tc, tent->v.kidx); 2448 ipfw_export_table_value_v1(pval, &tent->v.value); 2449 2450 return (0); 2451 } 2452 2453 /* 2454 * Dumps table entry in eXtended format (v0). 2455 */ 2456 static int 2457 dump_table_xentry(void *e, void *arg) 2458 { 2459 struct dump_args *da; 2460 struct table_config *tc; 2461 struct table_algo *ta; 2462 ipfw_table_xentry *xent; 2463 ipfw_obj_tentry *tent; 2464 struct table_value *pval; 2465 int error; 2466 2467 da = (struct dump_args *)arg; 2468 2469 tc = da->tc; 2470 ta = tc->ta; 2471 2472 xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent)); 2473 /* Out of memory, returning */ 2474 if (xent == NULL) 2475 return (1); 2476 xent->len = sizeof(ipfw_table_xentry); 2477 xent->tbl = da->uidx; 2478 2479 memset(&da->tent, 0, sizeof(da->tent)); 2480 tent = &da->tent; 2481 error = ta->dump_tentry(tc->astate, da->ti, e, tent); 2482 if (error != 0) 2483 return (error); 2484 2485 /* Convert current format to previous one */ 2486 xent->masklen = tent->masklen; 2487 pval = get_table_value(da->ch, da->tc, da->tent.v.kidx); 2488 xent->value = ipfw_export_table_value_legacy(pval); 2489 /* Apply some hacks */ 2490 if (tc->no.subtype == IPFW_TABLE_ADDR && tent->subtype == AF_INET) { 2491 xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr; 2492 xent->flags = IPFW_TCF_INET; 2493 } else 2494 memcpy(&xent->k, &tent->k, sizeof(xent->k)); 2495 2496 return (0); 2497 } 2498 2499 /* 2500 * Helper function to export table algo data 2501 * to tentry format before calling user function. 2502 * 2503 * Returns 0 on success. 2504 */ 2505 static int 2506 prepare_table_tentry(void *e, void *arg) 2507 { 2508 struct dump_args *da; 2509 struct table_config *tc; 2510 struct table_algo *ta; 2511 int error; 2512 2513 da = (struct dump_args *)arg; 2514 2515 tc = da->tc; 2516 ta = tc->ta; 2517 2518 error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent); 2519 if (error != 0) 2520 return (error); 2521 2522 da->f(&da->tent, da->farg); 2523 2524 return (0); 2525 } 2526 2527 /* 2528 * Allow external consumers to read table entries in standard format. 
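 *
 * Minimal usage sketch (hypothetical consumer; the callback name is
 * illustrative only):
 *
 *    static int
 *    my_cb(void *e, void *arg)
 *    {
 *        ipfw_obj_tentry *tent = e;
 *        (inspect tent->k and tent->v.value here)
 *        return (0);
 *    }
 *    ...
 *    error = ipfw_foreach_table_tentry(ch, kidx, my_cb, NULL);
 *
 * The callback receives a pointer to a temporary ipfw_obj_tentry filled
 * by prepare_table_tentry(); no locking is done here, so the caller is
 * presumably expected to hold the UH lock around the iteration.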
2529 */ 2530 int 2531 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx, 2532 ta_foreach_f *f, void *arg) 2533 { 2534 struct namedobj_instance *ni; 2535 struct table_config *tc; 2536 struct table_algo *ta; 2537 struct dump_args da; 2538 2539 ni = CHAIN_TO_NI(ch); 2540 2541 tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx); 2542 if (tc == NULL) 2543 return (ESRCH); 2544 2545 ta = tc->ta; 2546 2547 memset(&da, 0, sizeof(da)); 2548 da.ch = ch; 2549 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2550 da.tc = tc; 2551 da.f = f; 2552 da.farg = arg; 2553 2554 ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da); 2555 2556 return (0); 2557 } 2558 2559 /* 2560 * Table algorithms 2561 */ 2562 2563 /* 2564 * Finds algorithm by index, table type or supplied name. 2565 * 2566 * Returns pointer to algo or NULL. 2567 */ 2568 static struct table_algo * 2569 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name) 2570 { 2571 int i, l; 2572 struct table_algo *ta; 2573 2574 if (ti->type > IPFW_TABLE_MAXTYPE) 2575 return (NULL); 2576 2577 /* Search by index */ 2578 if (ti->atype != 0) { 2579 if (ti->atype > tcfg->algo_count) 2580 return (NULL); 2581 return (tcfg->algo[ti->atype]); 2582 } 2583 2584 if (name == NULL) { 2585 /* Return default algorithm for given type if set */ 2586 return (tcfg->def_algo[ti->type]); 2587 } 2588 2589 /* Search by name */ 2590 /* TODO: better search */ 2591 for (i = 1; i <= tcfg->algo_count; i++) { 2592 ta = tcfg->algo[i]; 2593 2594 /* 2595 * One can supply additional algorithm 2596 * parameters so we compare only the first word 2597 * of supplied name: 2598 * 'addr:chash hsize=32' 2599 * '^^^^^^^^^' 2600 * 2601 */ 2602 l = strlen(ta->name); 2603 if (strncmp(name, ta->name, l) != 0) 2604 continue; 2605 if (name[l] != '\0' && name[l] != ' ') 2606 continue; 2607 /* Check if we're requesting proper table type */ 2608 if (ti->type != 0 && ti->type != ta->type) 2609 return (NULL); 2610 return (ta); 2611 } 2612 2613 return (NULL); 2614 } 2615 2616 /* 2617 * Register new table algo @ta. 2618 * Stores algo id inside @idx. 2619 * 2620 * Returns 0 on success. 2621 */ 2622 int 2623 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size, 2624 int *idx) 2625 { 2626 struct tables_config *tcfg; 2627 struct table_algo *ta_new; 2628 size_t sz; 2629 2630 if (size > sizeof(struct table_algo)) 2631 return (EINVAL); 2632 2633 /* Check for the required on-stack size for add/del */ 2634 sz = roundup2(ta->ta_buf_size, sizeof(void *)); 2635 if (sz > TA_BUF_SZ) 2636 return (EINVAL); 2637 2638 KASSERT(ta->type <= IPFW_TABLE_MAXTYPE,("Increase IPFW_TABLE_MAXTYPE")); 2639 2640 /* Copy algorithm data to stable storage. */ 2641 ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO); 2642 memcpy(ta_new, ta, size); 2643 2644 tcfg = CHAIN_TO_TCFG(ch); 2645 2646 KASSERT(tcfg->algo_count < 255, ("Increase algo array size")); 2647 2648 tcfg->algo[++tcfg->algo_count] = ta_new; 2649 ta_new->idx = tcfg->algo_count; 2650 2651 /* Set algorithm as default one for given type */ 2652 if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 && 2653 tcfg->def_algo[ta_new->type] == NULL) 2654 tcfg->def_algo[ta_new->type] = ta_new; 2655 2656 *idx = ta_new->idx; 2657 2658 return (0); 2659 } 2660 2661 /* 2662 * Unregisters table algo using @idx as id. 2663 * XXX: It is NOT safe to call this function in any place 2664 * other than ipfw instance destroy handler. 
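 *
 * Typical pairing with ipfw_add_table_algo(), as an illustrative sketch
 * ("addr:myhash", struct my_ta_buf and my_algo_idx are hypothetical):
 *
 *    struct table_algo ta = {
 *        .name = "addr:myhash",
 *        .type = IPFW_TABLE_ADDR,
 *        .ta_buf_size = sizeof(struct my_ta_buf),
 *        (init/destroy/add/del/lookup/foreach callbacks here)
 *    };
 *    ipfw_add_table_algo(ch, &ta, sizeof(ta), &my_algo_idx);
 *    ...
 *    ipfw_del_table_algo(ch, my_algo_idx);    (instance destroy path only)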
2665 */ 2666 void 2667 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx) 2668 { 2669 struct tables_config *tcfg; 2670 struct table_algo *ta; 2671 2672 tcfg = CHAIN_TO_TCFG(ch); 2673 2674 KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d", 2675 idx, tcfg->algo_count)); 2676 2677 ta = tcfg->algo[idx]; 2678 KASSERT(ta != NULL, ("algo idx %d is NULL", idx)); 2679 2680 if (tcfg->def_algo[ta->type] == ta) 2681 tcfg->def_algo[ta->type] = NULL; 2682 2683 free(ta, M_IPFW); 2684 } 2685 2686 /* 2687 * Lists all table algorithms currently available. 2688 * Data layout (v0)(current): 2689 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 2690 * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ] 2691 * 2692 * Returns 0 on success 2693 */ 2694 static int 2695 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2696 struct sockopt_data *sd) 2697 { 2698 struct _ipfw_obj_lheader *olh; 2699 struct tables_config *tcfg; 2700 ipfw_ta_info *i; 2701 struct table_algo *ta; 2702 uint32_t count, n, size; 2703 2704 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 2705 if (olh == NULL) 2706 return (EINVAL); 2707 if (sd->valsize < olh->size) 2708 return (EINVAL); 2709 2710 IPFW_UH_RLOCK(ch); 2711 tcfg = CHAIN_TO_TCFG(ch); 2712 count = tcfg->algo_count; 2713 size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader); 2714 2715 /* Fill in header regardless of buffer size */ 2716 olh->count = count; 2717 olh->objsize = sizeof(ipfw_ta_info); 2718 2719 if (size > olh->size) { 2720 olh->size = size; 2721 IPFW_UH_RUNLOCK(ch); 2722 return (ENOMEM); 2723 } 2724 olh->size = size; 2725 2726 for (n = 1; n <= count; n++) { 2727 i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 2728 KASSERT(i != NULL, ("previously checked buffer is not enough")); 2729 ta = tcfg->algo[n]; 2730 strlcpy(i->algoname, ta->name, sizeof(i->algoname)); 2731 i->type = ta->type; 2732 i->refcnt = ta->refcnt; 2733 } 2734 2735 IPFW_UH_RUNLOCK(ch); 2736 2737 return (0); 2738 } 2739 2740 static int 2741 classify_srcdst(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2742 { 2743 /* Basic IPv4/IPv6 or u32 lookups */ 2744 *puidx = cmd->arg1; 2745 /* Assume ADDR by default */ 2746 *ptype = IPFW_TABLE_ADDR; 2747 int v; 2748 2749 if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) { 2750 /* 2751 * Generic lookup. The key must be 2752 * in 32-bit big-endian format.
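 *
 * d[1] selects what the key is taken from (see the switch below):
 * 0/1 - IPv4 src/dst address, 2/3 - src/dst port, 4 - uid/gid,
 * 5 - jid, 6 - dscp.  Everything except 0/1 is classified as a
 * NUMBER table.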
2753 */ 2754 v = ((ipfw_insn_u32 *)cmd)->d[1]; 2755 switch (v) { 2756 case 0: 2757 case 1: 2758 /* IPv4 src/dst */ 2759 break; 2760 case 2: 2761 case 3: 2762 /* src/dst port */ 2763 *ptype = IPFW_TABLE_NUMBER; 2764 break; 2765 case 4: 2766 /* uid/gid */ 2767 *ptype = IPFW_TABLE_NUMBER; 2768 break; 2769 case 5: 2770 /* jid */ 2771 *ptype = IPFW_TABLE_NUMBER; 2772 break; 2773 case 6: 2774 /* dscp */ 2775 *ptype = IPFW_TABLE_NUMBER; 2776 break; 2777 } 2778 2779 return (0); 2780 } 2781 2782 2783 static int 2784 classify_via(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2785 { 2786 ipfw_insn_if *cmdif; 2787 2788 /* Interface table, possibly */ 2789 cmdif = (ipfw_insn_if *)cmd; 2790 if (cmdif->name[0] != '\1') 2791 return (1); 2792 2793 *ptype = IPFW_TABLE_INTERFACE; 2794 *puidx = cmdif->p.kidx; 2795 2796 return (0); 2797 } 2798 2799 static int 2800 classify_flow(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2801 { 2802 2803 *puidx = cmd->arg1; 2804 *ptype = IPFW_TABLE_FLOW; 2805 2806 return (0); 2807 } 2808 2809 static void 2810 update_arg1(ipfw_insn *cmd, uint16_t idx) 2811 { 2812 2813 cmd->arg1 = idx; 2814 } 2815 2816 static void 2817 update_via(ipfw_insn *cmd, uint16_t idx) 2818 { 2819 ipfw_insn_if *cmdif; 2820 2821 cmdif = (ipfw_insn_if *)cmd; 2822 cmdif->p.kidx = idx; 2823 } 2824 2825 static int 2826 table_findbyname(struct ip_fw_chain *ch, struct tid_info *ti, 2827 struct named_object **pno) 2828 { 2829 struct table_config *tc; 2830 int error; 2831 2832 IPFW_UH_WLOCK_ASSERT(ch); 2833 2834 error = find_table_err(CHAIN_TO_NI(ch), ti, &tc); 2835 if (error != 0) 2836 return (error); 2837 2838 *pno = &tc->no; 2839 return (0); 2840 } 2841 2842 /* XXX: sets-sets! */ 2843 static struct named_object * 2844 table_findbykidx(struct ip_fw_chain *ch, uint16_t idx) 2845 { 2846 struct namedobj_instance *ni; 2847 struct table_config *tc; 2848 2849 IPFW_UH_WLOCK_ASSERT(ch); 2850 ni = CHAIN_TO_NI(ch); 2851 tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, idx); 2852 KASSERT(tc != NULL, ("Table with index %d not found", idx)); 2853 2854 return (&tc->no); 2855 } 2856 2857 static int 2858 table_manage_sets(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set, 2859 enum ipfw_sets_cmd cmd) 2860 { 2861 2862 switch (cmd) { 2863 case SWAP_ALL: 2864 case TEST_ALL: 2865 case MOVE_ALL: 2866 /* 2867 * Always return success; the real action and decision 2868 * are made by table_manage_sets_all(). 2869 */ 2870 return (0); 2871 case TEST_ONE: 2872 case MOVE_ONE: 2873 /* 2874 * NOTE: we need to use ipfw_objhash_del/ipfw_objhash_add 2875 * if the set number is ever used in the hash function. Currently 2876 * we can just use the generic handler that replaces the set value. 2877 */ 2878 if (V_fw_tables_sets == 0) 2879 return (0); 2880 break; 2881 case COUNT_ONE: 2882 /* 2883 * Return EOPNOTSUPP for COUNT_ONE when the per-set sysctl is 2884 * disabled. This allows table opcodes to be skipped from additional 2885 * checks when specific rules are moved to another set. 2886 */ 2887 if (V_fw_tables_sets == 0) 2888 return (EOPNOTSUPP); 2889 } 2890 /* Use generic sets handler when per-set sysctl is enabled. */ 2891 return (ipfw_obj_manage_sets(CHAIN_TO_NI(ch), IPFW_TLV_TBL_NAME, 2892 set, new_set, cmd)); 2893 } 2894 2895 /* 2896 * We register several opcode rewriters for lookup tables. 2897 * All table opcodes have the same ETLV type but different subtypes. 2898 * To avoid invoking the sets handler several times for XXX_ALL commands, 2899 * we use a separate manage_sets handler. O_RECV has the lowest value, 2900 * so it is called first.
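 *
 * Concretely: every entry in opcodes[] below shares IPFW_TLV_TBL_NAME,
 * and only the O_RECV entry points at table_manage_sets_all(), so a
 * SWAP_ALL/TEST_ALL/MOVE_ALL request is acted upon exactly once while
 * the other entries simply return success from table_manage_sets().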
2901 */ 2902 static int 2903 table_manage_sets_all(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set, 2904 enum ipfw_sets_cmd cmd) 2905 { 2906 2907 switch (cmd) { 2908 case SWAP_ALL: 2909 case TEST_ALL: 2910 /* 2911 * Return success for TEST_ALL, since nothing prevents 2912 * rules from being moved from one set to another: all tables are 2913 * accessible from all sets when the per-set tables sysctl 2914 * is disabled. 2915 */ 2916 case MOVE_ALL: 2917 if (V_fw_tables_sets == 0) 2918 return (0); 2919 break; 2920 default: 2921 return (table_manage_sets(ch, set, new_set, cmd)); 2922 } 2923 /* Use generic sets handler when per-set sysctl is enabled. */ 2924 return (ipfw_obj_manage_sets(CHAIN_TO_NI(ch), IPFW_TLV_TBL_NAME, 2925 set, new_set, cmd)); 2926 } 2927 2928 static struct opcode_obj_rewrite opcodes[] = { 2929 { 2930 .opcode = O_IP_SRC_LOOKUP, 2931 .etlv = IPFW_TLV_TBL_NAME, 2932 .classifier = classify_srcdst, 2933 .update = update_arg1, 2934 .find_byname = table_findbyname, 2935 .find_bykidx = table_findbykidx, 2936 .create_object = create_table_compat, 2937 .manage_sets = table_manage_sets, 2938 }, 2939 { 2940 .opcode = O_IP_DST_LOOKUP, 2941 .etlv = IPFW_TLV_TBL_NAME, 2942 .classifier = classify_srcdst, 2943 .update = update_arg1, 2944 .find_byname = table_findbyname, 2945 .find_bykidx = table_findbykidx, 2946 .create_object = create_table_compat, 2947 .manage_sets = table_manage_sets, 2948 }, 2949 { 2950 .opcode = O_IP_FLOW_LOOKUP, 2951 .etlv = IPFW_TLV_TBL_NAME, 2952 .classifier = classify_flow, 2953 .update = update_arg1, 2954 .find_byname = table_findbyname, 2955 .find_bykidx = table_findbykidx, 2956 .create_object = create_table_compat, 2957 .manage_sets = table_manage_sets, 2958 }, 2959 { 2960 .opcode = O_XMIT, 2961 .etlv = IPFW_TLV_TBL_NAME, 2962 .classifier = classify_via, 2963 .update = update_via, 2964 .find_byname = table_findbyname, 2965 .find_bykidx = table_findbykidx, 2966 .create_object = create_table_compat, 2967 .manage_sets = table_manage_sets, 2968 }, 2969 { 2970 .opcode = O_RECV, 2971 .etlv = IPFW_TLV_TBL_NAME, 2972 .classifier = classify_via, 2973 .update = update_via, 2974 .find_byname = table_findbyname, 2975 .find_bykidx = table_findbykidx, 2976 .create_object = create_table_compat, 2977 .manage_sets = table_manage_sets_all, 2978 }, 2979 { 2980 .opcode = O_VIA, 2981 .etlv = IPFW_TLV_TBL_NAME, 2982 .classifier = classify_via, 2983 .update = update_via, 2984 .find_byname = table_findbyname, 2985 .find_bykidx = table_findbykidx, 2986 .create_object = create_table_compat, 2987 .manage_sets = table_manage_sets, 2988 }, 2989 }; 2990 2991 static int 2992 test_sets_cb(struct namedobj_instance *ni __unused, struct named_object *no, 2993 void *arg __unused) 2994 { 2995 2996 /* Check that there are no tables outside the default set */ 2997 if (no->set != 0) 2998 return (EBUSY); 2999 return (0); 3000 } 3001 3002 /* 3003 * Switch between "set 0" and "rule's set" table binding. 3004 * Check all ruleset bindings and permit the change 3005 * IFF each binding has both its rule AND table in the default set (set 0). 3006 * 3007 * Returns 0 on success.
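 *
 * This is normally driven by the per-set tables sysctl (assumed to be
 * net.inet.ip.fw.tables_sets; the kernel side only sees the resulting
 * V_fw_tables_sets value):
 *
 *    sysctl net.inet.ip.fw.tables_sets=1    tables are per-set
 *    sysctl net.inet.ip.fw.tables_sets=0    all tables live in set 0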
3008 */ 3009 int 3010 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets) 3011 { 3012 struct opcode_obj_rewrite *rw; 3013 struct namedobj_instance *ni; 3014 struct named_object *no; 3015 struct ip_fw *rule; 3016 ipfw_insn *cmd; 3017 int cmdlen, i, l; 3018 uint16_t kidx; 3019 uint8_t subtype; 3020 3021 IPFW_UH_WLOCK(ch); 3022 3023 if (V_fw_tables_sets == sets) { 3024 IPFW_UH_WUNLOCK(ch); 3025 return (0); 3026 } 3027 ni = CHAIN_TO_NI(ch); 3028 if (sets == 0) { 3029 /* 3030 * Prevent disabling sets support if there are tables 3031 * in non-default sets. 3032 */ 3033 if (ipfw_objhash_foreach_type(ni, test_sets_cb, 3034 NULL, IPFW_TLV_TBL_NAME) != 0) { 3035 IPFW_UH_WUNLOCK(ch); 3036 return (EBUSY); 3037 } 3038 } 3039 /* 3040 * Scan all rules and examine table opcodes. 3041 */ 3042 for (i = 0; i < ch->n_rules; i++) { 3043 rule = ch->map[i]; 3044 3045 l = rule->cmd_len; 3046 cmd = rule->cmd; 3047 cmdlen = 0; 3048 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 3049 cmdlen = F_LEN(cmd); 3050 /* Check only table opcodes */ 3051 for (kidx = 0, rw = opcodes; 3052 rw < opcodes + nitems(opcodes); rw++) { 3053 if (rw->opcode != cmd->opcode) 3054 continue; 3055 if (rw->classifier(cmd, &kidx, &subtype) == 0) 3056 break; 3057 } 3058 if (kidx == 0) 3059 continue; 3060 no = ipfw_objhash_lookup_kidx(ni, kidx); 3061 /* Check that both the table object and the rule are in set 0 */ 3062 if (no->set != 0 || rule->set != 0) { 3063 IPFW_UH_WUNLOCK(ch); 3064 return (EBUSY); 3065 } 3066 } 3067 } 3068 V_fw_tables_sets = sets; 3069 IPFW_UH_WUNLOCK(ch); 3070 return (0); 3071 } 3072 3073 /* 3074 * Checks table name for validity. 3075 * Enforces basic length checks; the rest 3076 * should be done in userland. 3077 * 3078 * Returns 0 if name is considered valid. 3079 */ 3080 static int 3081 check_table_name(const char *name) 3082 { 3083 3084 /* 3085 * TODO: do some more complicated checks 3086 */ 3087 return (ipfw_check_object_name_generic(name)); 3088 } 3089 3090 /* 3091 * Finds table config based on either legacy index 3092 * or name in ntlv. 3093 * Note @ti structure contains unchecked data from userland. 3094 * 3095 * Returns 0 on success and fills in @tc with the found config. 3096 */ 3097 static int 3098 find_table_err(struct namedobj_instance *ni, struct tid_info *ti, 3099 struct table_config **tc) 3100 { 3101 char *name, bname[16]; 3102 struct named_object *no; 3103 ipfw_obj_ntlv *ntlv; 3104 uint32_t set; 3105 3106 if (ti->tlvs != NULL) { 3107 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 3108 IPFW_TLV_TBL_NAME); 3109 if (ntlv == NULL) 3110 return (EINVAL); 3111 name = ntlv->name; 3112 3113 /* 3114 * Use the set provided by @ti instead of the @ntlv one. 3115 * This is needed due to the different sets behavior 3116 * controlled by V_fw_tables_sets. 3117 */ 3118 set = (V_fw_tables_sets != 0) ? ti->set : 0; 3119 } else { 3120 snprintf(bname, sizeof(bname), "%d", ti->uidx); 3121 name = bname; 3122 set = 0; 3123 } 3124 3125 no = ipfw_objhash_lookup_name(ni, set, name); 3126 *tc = (struct table_config *)no; 3127 3128 return (0); 3129 } 3130 3131 /* 3132 * Finds table config based on either legacy index 3133 * or name in ntlv. 3134 * Note @ti structure contains unchecked data from userland. 3135 * 3136 * Returns pointer to table_config or NULL.
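 *
 * For legacy (numbered) requests without an ntlv the number is simply
 * converted to its string form, so an old-style "table 5" reference and
 * a named table called "5" resolve to the same object in set 0.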
3137 */ 3138 static struct table_config * 3139 find_table(struct namedobj_instance *ni, struct tid_info *ti) 3140 { 3141 struct table_config *tc; 3142 3143 if (find_table_err(ni, ti, &tc) != 0) 3144 return (NULL); 3145 3146 return (tc); 3147 } 3148 3149 /* 3150 * Allocate new table config structure using 3151 * specified @algo and @aname. 3152 * 3153 * Returns pointer to config or NULL. 3154 */ 3155 static struct table_config * 3156 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti, 3157 struct table_algo *ta, char *aname, uint8_t tflags) 3158 { 3159 char *name, bname[16]; 3160 struct table_config *tc; 3161 int error; 3162 ipfw_obj_ntlv *ntlv; 3163 uint32_t set; 3164 3165 if (ti->tlvs != NULL) { 3166 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 3167 IPFW_TLV_TBL_NAME); 3168 if (ntlv == NULL) 3169 return (NULL); 3170 name = ntlv->name; 3171 set = (V_fw_tables_sets == 0) ? 0 : ntlv->set; 3172 } else { 3173 /* Compat part: convert number to string representation */ 3174 snprintf(bname, sizeof(bname), "%d", ti->uidx); 3175 name = bname; 3176 set = 0; 3177 } 3178 3179 tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO); 3180 tc->no.name = tc->tablename; 3181 tc->no.subtype = ta->type; 3182 tc->no.set = set; 3183 tc->tflags = tflags; 3184 tc->ta = ta; 3185 strlcpy(tc->tablename, name, sizeof(tc->tablename)); 3186 /* Set "shared" value type by default */ 3187 tc->vshared = 1; 3188 3189 /* Preallocate data structures for new tables */ 3190 error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags); 3191 if (error != 0) { 3192 free(tc, M_IPFW); 3193 return (NULL); 3194 } 3195 3196 return (tc); 3197 } 3198 3199 /* 3200 * Destroys table state and config. 3201 */ 3202 static void 3203 free_table_config(struct namedobj_instance *ni, struct table_config *tc) 3204 { 3205 3206 KASSERT(tc->linked == 0, ("free() on linked config")); 3207 /* UH lock MUST NOT be held */ 3208 3209 /* 3210 * We're using ta without any locking/referencing. 3211 * TODO: fix this if we're going to use unloadable algos. 3212 */ 3213 tc->ta->destroy(tc->astate, &tc->ti_copy); 3214 free(tc, M_IPFW); 3215 } 3216 3217 /* 3218 * Links @tc to @chain table named instance. 3219 * Sets appropriate type/states in @chain table info. 3220 */ 3221 static void 3222 link_table(struct ip_fw_chain *ch, struct table_config *tc) 3223 { 3224 struct namedobj_instance *ni; 3225 struct table_info *ti; 3226 uint16_t kidx; 3227 3228 IPFW_UH_WLOCK_ASSERT(ch); 3229 3230 ni = CHAIN_TO_NI(ch); 3231 kidx = tc->no.kidx; 3232 3233 ipfw_objhash_add(ni, &tc->no); 3234 3235 ti = KIDX_TO_TI(ch, kidx); 3236 *ti = tc->ti_copy; 3237 3238 /* Notify algo on real @ti address */ 3239 if (tc->ta->change_ti != NULL) 3240 tc->ta->change_ti(tc->astate, ti); 3241 3242 tc->linked = 1; 3243 tc->ta->refcnt++; 3244 } 3245 3246 /* 3247 * Unlinks @tc from @chain table named instance. 3248 * Zeroes states in @chain and stores them in @tc. 3249 */ 3250 static void 3251 unlink_table(struct ip_fw_chain *ch, struct table_config *tc) 3252 { 3253 struct namedobj_instance *ni; 3254 struct table_info *ti; 3255 uint16_t kidx; 3256 3257 IPFW_UH_WLOCK_ASSERT(ch); 3258 IPFW_WLOCK_ASSERT(ch); 3259 3260 ni = CHAIN_TO_NI(ch); 3261 kidx = tc->no.kidx; 3262 3263 /* Clear state. 
@ti copy is already saved inside @tc */ 3264 ipfw_objhash_del(ni, &tc->no); 3265 ti = KIDX_TO_TI(ch, kidx); 3266 memset(ti, 0, sizeof(struct table_info)); 3267 tc->linked = 0; 3268 tc->ta->refcnt--; 3269 3270 /* Notify algo on real @ti address */ 3271 if (tc->ta->change_ti != NULL) 3272 tc->ta->change_ti(tc->astate, NULL); 3273 } 3274 3275 static struct ipfw_sopt_handler scodes[] = { 3276 { IP_FW_TABLE_XCREATE, 0, HDIR_SET, create_table }, 3277 { IP_FW_TABLE_XDESTROY, 0, HDIR_SET, flush_table_v0 }, 3278 { IP_FW_TABLE_XFLUSH, 0, HDIR_SET, flush_table_v0 }, 3279 { IP_FW_TABLE_XMODIFY, 0, HDIR_BOTH, modify_table }, 3280 { IP_FW_TABLE_XINFO, 0, HDIR_GET, describe_table }, 3281 { IP_FW_TABLES_XLIST, 0, HDIR_GET, list_tables }, 3282 { IP_FW_TABLE_XLIST, 0, HDIR_GET, dump_table_v0 }, 3283 { IP_FW_TABLE_XLIST, 1, HDIR_GET, dump_table_v1 }, 3284 { IP_FW_TABLE_XADD, 0, HDIR_BOTH, manage_table_ent_v0 }, 3285 { IP_FW_TABLE_XADD, 1, HDIR_BOTH, manage_table_ent_v1 }, 3286 { IP_FW_TABLE_XDEL, 0, HDIR_BOTH, manage_table_ent_v0 }, 3287 { IP_FW_TABLE_XDEL, 1, HDIR_BOTH, manage_table_ent_v1 }, 3288 { IP_FW_TABLE_XFIND, 0, HDIR_GET, find_table_entry }, 3289 { IP_FW_TABLE_XSWAP, 0, HDIR_SET, swap_table }, 3290 { IP_FW_TABLES_ALIST, 0, HDIR_GET, list_table_algo }, 3291 { IP_FW_TABLE_XGETSIZE, 0, HDIR_GET, get_table_size }, 3292 }; 3293 3294 static int 3295 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no, 3296 void *arg) 3297 { 3298 3299 unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no); 3300 if (ipfw_objhash_free_idx(ni, no->kidx) != 0) 3301 printf("Error unlinking kidx %d from table %s\n", 3302 no->kidx, no->name); 3303 free_table_config(ni, (struct table_config *)no); 3304 return (0); 3305 } 3306 3307 /* 3308 * Shuts the tables module down. 3309 */ 3310 void 3311 ipfw_destroy_tables(struct ip_fw_chain *ch, int last) 3312 { 3313 3314 IPFW_DEL_SOPT_HANDLER(last, scodes); 3315 IPFW_DEL_OBJ_REWRITER(last, opcodes); 3316 3317 /* Remove all tables from working set */ 3318 IPFW_UH_WLOCK(ch); 3319 IPFW_WLOCK(ch); 3320 ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch); 3321 IPFW_WUNLOCK(ch); 3322 IPFW_UH_WUNLOCK(ch); 3323 3324 /* Free the table state array itself */ 3325 free(ch->tablestate, M_IPFW); 3326 3327 ipfw_table_value_destroy(ch, last); 3328 ipfw_table_algo_destroy(ch); 3329 3330 ipfw_objhash_destroy(CHAIN_TO_NI(ch)); 3331 free(CHAIN_TO_TCFG(ch), M_IPFW); 3332 } 3333 3334 /* 3335 * Starts the tables module. 3336 */ 3337 int 3338 ipfw_init_tables(struct ip_fw_chain *ch, int first) 3339 { 3340 struct tables_config *tcfg; 3341 3342 /* Allocate the table state array */ 3343 ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info), 3344 M_IPFW, M_WAITOK | M_ZERO); 3345 3346 tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO); 3347 tcfg->namehash = ipfw_objhash_create(V_fw_tables_max); 3348 ch->tblcfg = tcfg; 3349 3350 ipfw_table_value_init(ch, first); 3351 ipfw_table_algo_init(ch); 3352 3353 IPFW_ADD_OBJ_REWRITER(first, opcodes); 3354 IPFW_ADD_SOPT_HANDLER(first, scodes); 3355 return (0); 3356 } 3357
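
/*
 * Init/teardown ordering, for reference (a summary of the two functions
 * above, not additional functionality):
 *
 *    ipfw_init_tables(ch, first)
 *        allocate ch->tablestate (V_fw_tables_max table_info slots),
 *        create the name hash, init the value and algo subsystems,
 *        register opcode rewriters and sockopt handlers (first only)
 *
 *    ipfw_destroy_tables(ch, last)
 *        deregister sockopt handlers and rewriters (last only),
 *        unlink and free every table under the UH and runtime locks,
 *        free ch->tablestate, value/algo state and the name hash
 */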