/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
 * Copyright (c) 2014 Yandex LLC
 * Copyright (c) 2014 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Lookup table support for ipfw.
 *
 * This file contains handlers for all generic tables' operations:
 * add/del/flush entries, list/dump tables etc..
 *
 * Table data modification is protected by both UH and runtime lock
 * while reading configuration/data is protected by UH lock.
 *
 * Lookup algorithms for all table types are located in ip_fw_table_algo.c
 */

#include "opt_ipfw.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/queue.h>
#include <net/if.h>	/* ip_fw.h requires IFNAMSIZ */

#include <netinet/in.h>
#include <netinet/ip_var.h>	/* struct ipfw_rule_ref */
#include <netinet/ip_fw.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/ip_fw_table.h>

/*
 * Table has the following `type` concepts:
 *
 * `no.type` represents lookup key type (addr, ifp, uid, etc..)
 * vmask represents bitmask of table values which are present at the moment.
 * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents old
 * single-value-for-all approach.
 */
struct table_config {
	struct named_object	no;
	uint8_t		tflags;		/* type flags */
	uint8_t		locked;		/* 1 if locked from changes */
	uint8_t		linked;		/* 1 if already linked */
	uint8_t		ochanged;	/* used by set swapping */
	uint8_t		vshared;	/* 1 if using shared value array */
	uint8_t		spare[3];
	uint32_t	count;		/* Number of records */
	uint32_t	limit;		/* Max number of records */
	uint32_t	vmask;		/* bitmask with supported values */
	uint32_t	ocount;		/* used by set swapping */
	uint64_t	gencnt;		/* generation count */
	char		tablename[64];	/* table name */
	struct table_algo	*ta;	/* Callbacks for given algo */
	void		*astate;	/* algorithm state */
	struct table_info	ti_copy;	/* data to put to table_info */
	struct namedobj_instance	*vi;
};

static int find_table_err(struct namedobj_instance *ni, struct tid_info *ti,
    struct table_config **tc);
static struct table_config *find_table(struct namedobj_instance *ni,
    struct tid_info *ti);
static struct table_config *alloc_table_config(struct ip_fw_chain *ch,
    struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags);
static void free_table_config(struct namedobj_instance *ni,
    struct table_config *tc);
static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
    char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref);
static void link_table(struct ip_fw_chain *ch, struct table_config *tc);
static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc);
static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc);
#define	OP_ADD	1
#define	OP_DEL	0
static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
    struct sockopt_data *sd);
static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
    ipfw_xtable_info *i);
static int dump_table_tentry(void *e, void *arg);
static int dump_table_xentry(void *e, void *arg);

static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
    struct tid_info *b);

static int check_table_name(const char *name);
static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
    struct table_config *tc, struct table_info *ti, uint32_t count);
static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);

static struct table_algo *find_table_algo(struct tables_config *tableconf,
    struct tid_info *ti, char *name);

static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti);
static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);

#define	CHAIN_TO_NI(chain)	(CHAIN_TO_TCFG(chain)->namehash)
#define	KIDX_TO_TI(ch, k)	(&(((struct table_info *)(ch)->tablestate)[k]))

#define	TA_BUF_SZ	128	/* On-stack buffer for add/delete state */

void
rollback_toperation_state(struct ip_fw_chain *ch, void *object)
{
	struct tables_config *tcfg;
	struct op_state *os;

	tcfg = CHAIN_TO_TCFG(ch);
	TAILQ_FOREACH(os, &tcfg->state_list, next)
		os->func(object, os);
}

void
add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
{
	struct tables_config *tcfg;

	tcfg = CHAIN_TO_TCFG(ch);
	TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
}

void
del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
{
	struct tables_config *tcfg;

	tcfg = CHAIN_TO_TCFG(ch);
	TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
}

void
tc_ref(struct table_config *tc)
{

	tc->no.refcnt++;
}

void
tc_unref(struct table_config *tc)
{

	tc->no.refcnt--;
}

static struct table_value *
get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
{
	struct table_value *pval;

	pval = (struct table_value *)ch->valuestate;

	return (&pval[kidx]);
}

/*
 * Checks if we're able to insert/update entry @tei into table
 * w.r.t @tc limits.
 * May alter @tei to indicate insertion error / insert
 * options.
 *
 * Returns 0 if operation can be performed.
 */
static int
check_table_limit(struct table_config *tc, struct tentry_info *tei)
{

	if (tc->limit == 0 || tc->count < tc->limit)
		return (0);

	if ((tei->flags & TEI_FLAGS_UPDATE) == 0) {
		/* Notify userland on error cause */
		tei->flags |= TEI_FLAGS_LIMIT;
		return (EFBIG);
	}

	/*
	 * We have UPDATE flag set.
	 * Permit updating record (if found),
	 * but restrict adding new one since we've
	 * already hit the limit.
	 */
	tei->flags |= TEI_FLAGS_DONTADD;

	return (0);
}

/*
 * Convert algorithm callback return code into
 * one of pre-defined states known by userland.
 */
static void
store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num)
{
	int flag;

	flag = 0;

	switch (error) {
	case 0:
		if (op == OP_ADD && num != 0)
			flag = TEI_FLAGS_ADDED;
		if (op == OP_DEL)
			flag = TEI_FLAGS_DELETED;
		break;
	case ENOENT:
		flag = TEI_FLAGS_NOTFOUND;
		break;
	case EEXIST:
		flag = TEI_FLAGS_EXISTS;
		break;
	default:
		flag = TEI_FLAGS_ERROR;
	}

	tei->flags |= flag;
}

/*
 * Creates and references table with default parameters.
 * Saves table config, algo and allocated kidx into @ptc, @pta and
 * @pkidx if non-zero.
 * Used for table auto-creation to support old binaries.
 *
 * Returns 0 on success.
 */
static int
create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
    uint16_t *pkidx)
{
	ipfw_xtable_info xi;
	int error;

	memset(&xi, 0, sizeof(xi));
	/* Set default value mask for legacy clients */
	xi.vmask = IPFW_VTYPE_LEGACY;

	error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1);
	if (error != 0)
		return (error);

	return (0);
}

/*
 * Find and reference existing table optionally
 * creating new one.
 *
 * Saves found table config into @ptc.
 * Note function may drop/acquire UH_WLOCK.
 * Returns 0 if table was found/created and referenced
 * or non-zero return code.
 */
static int
find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint32_t count, int op,
    struct table_config **ptc)
{
	struct namedobj_instance *ni;
	struct table_config *tc;
	uint16_t kidx;
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	ni = CHAIN_TO_NI(ch);
	tc = NULL;
	if ((tc = find_table(ni, ti)) != NULL) {
		/* check table type */
		if (tc->no.subtype != ti->type)
			return (EINVAL);

		if (tc->locked != 0)
			return (EACCES);

		/* Try to exit early on limit hit */
		if (op == OP_ADD && count == 1 &&
		    check_table_limit(tc, tei) != 0)
			return (EFBIG);

		/* Reference and return */
		tc->no.refcnt++;
		*ptc = tc;
		return (0);
	}

	if (op == OP_DEL)
		return (ESRCH);

	/* Compatibility mode: create new table for old clients */
	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
		return (ESRCH);

	IPFW_UH_WUNLOCK(ch);
	error = create_table_compat(ch, ti, &kidx);
	IPFW_UH_WLOCK(ch);

	if (error != 0)
		return (error);

	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));

	/* OK, now we've got referenced table. */
	*ptc = tc;
	return (0);
}

/*
 * Rolls back the @added entries already inserted into @tc using state
 * array @ta_buf_m.
 * Assume the following layout:
 * 1) ADD state (ta_buf_m[0] ... ta_buf_m[added - 1]) for handling update cases
 * 2) DEL state (ta_buf_m[count] ... ta_buf_m[count + added - 1])
 *   for storing deleted state
 */
static void
rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
    struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
    uint32_t count, uint32_t added)
{
	struct table_algo *ta;
	struct tentry_info *ptei;
	caddr_t v, vv;
	size_t ta_buf_sz;
	int error __diagused, i;
	uint32_t num;

	IPFW_UH_WLOCK_ASSERT(ch);

	ta = tc->ta;
	ta_buf_sz = ta->ta_buf_size;
	v = ta_buf_m;
	vv = v + count * ta_buf_sz;
	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
		ptei = &tei[i];
		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
			/*
			 * We have old value stored by previous
			 * call in @ptei->value. Do add once again
			 * to restore it.
			 */
			error = ta->add(tc->astate, tinfo, ptei, v, &num);
			KASSERT(error == 0, ("rollback UPDATE fail"));
			KASSERT(num == 0, ("rollback UPDATE fail2"));
			continue;
		}

		error = ta->prepare_del(ch, ptei, vv);
		KASSERT(error == 0, ("pre-rollback INSERT failed"));
		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
		KASSERT(error == 0, ("rollback INSERT failed"));
		tc->count -= num;
	}
}
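
/*
 * Illustrative sketch (comment only, derived from the code in this file)
 * of the batch state buffer shared by rollback_added_entries() above and
 * prepare_batch_buffer() below.  For a batched add of @count entries the
 * buffer holds, in units of ta->ta_buf_size:
 *
 *   [0 .. count - 1]        ADD state, one slot per @tei entry
 *   [count .. 2*count - 1]  scratch DEL state, used only while rolling
 *                           back the first @added entries
 *
 * A single-entry operation reuses the caller's on-stack TA_BUF_SZ buffer
 * instead of allocating from M_TEMP.
 */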

/*
 * Prepares add/del state for all @count entries in @tei.
 * Uses either stack buffer (@ta_buf) or allocates a new one.
 * Stores pointer to allocated buffer back to @ta_buf.
 *
 * Returns 0 on success.
 */
static int
prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
    struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
{
	caddr_t ta_buf_m, v;
	size_t ta_buf_sz, sz;
	struct tentry_info *ptei;
	int error, i;

	error = 0;
	ta_buf_sz = ta->ta_buf_size;
	if (count == 1) {
		/* Single add/delete, use on-stack buffer */
		memset(*ta_buf, 0, TA_BUF_SZ);
		ta_buf_m = *ta_buf;
	} else {
		/*
		 * Multiple adds/deletes, allocate larger buffer
		 *
		 * Note we need 2xcount buffer for add case:
		 * we have to hold both ADD state
		 * and DELETE state (this may be needed
		 * if we need to rollback all changes)
		 */
		sz = count * ta_buf_sz;
		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
		    M_WAITOK | M_ZERO);
	}

	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta_buf_sz) {
		ptei = &tei[i];
		error = (op == OP_ADD) ?
		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);

		/*
		 * Some syntax error (incorrect mask, or address, or
		 * anything). Return error regardless of atomicity
		 * settings.
		 */
		if (error != 0)
			break;
	}

	*ta_buf = ta_buf_m;
	return (error);
}

/*
 * Flushes allocated state for all @count entries in @tei.
 * Frees @ta_buf_m if it differs from stack buffer @ta_buf.
 */
static void
flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
    struct tentry_info *tei, uint32_t count, int rollback,
    caddr_t ta_buf_m, caddr_t ta_buf)
{
	caddr_t v;
	struct tentry_info *ptei;
	size_t ta_buf_sz;
	int i;

	ta_buf_sz = ta->ta_buf_size;

	/* Run cleaning callback anyway */
	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta_buf_sz) {
		ptei = &tei[i];
		ta->flush_entry(ch, ptei, v);
		if (ptei->ptv != NULL) {
			free(ptei->ptv, M_IPFW);
			ptei->ptv = NULL;
		}
	}

	/* Clean up "deleted" state in case of rollback */
	if (rollback != 0) {
		v = ta_buf_m + count * ta_buf_sz;
		for (i = 0; i < count; i++, v += ta_buf_sz)
			ta->flush_entry(ch, &tei[i], v);
	}

	if (ta_buf_m != ta_buf)
		free(ta_buf_m, M_TEMP);
}

static void
rollback_add_entry(void *object, struct op_state *_state)
{
	struct ip_fw_chain *ch __diagused;
	struct tableop_state *ts;

	ts = (struct tableop_state *)_state;

	if (ts->tc != object && ts->ch != object)
		return;

	ch = ts->ch;

	IPFW_UH_WLOCK_ASSERT(ch);

	/* Call specified unlockers */
	rollback_table_values(ts);

	/* Indicate we've called */
	ts->modified = 1;
}

/*
 * Adds/updates one or more entries in table @ti.
 *
 * Function may drop/reacquire UH wlock multiple times due to
 * items alloc, algorithm callbacks (check_space), value linkage
 * (new values, value storage realloc), etc..
 * Other processes like other adds (which may involve storage resize),
 * table swaps (which change table data and may change algo type),
 * table modify (which may change value mask) may be executed
 * simultaneously so we need to deal with it.
 *
 * The following approach was implemented:
 * we have a per-chain linked list, protected with UH lock.
 * add_table_entry prepares a special on-stack structure which is passed
 * to its descendants. Users add this structure to this list before unlock.
 * After performing needed operations and acquiring UH lock back, each user
 * checks if the structure has changed. If true, it rolls local state back
 * and returns without error to the caller.
 * add_table_entry() on its own checks if the structure has changed and
 * restarts its operation from the beginning (goto restart).
 *
 * Functions which modify fields of interest (currently
 * resize_shared_value_storage() and swap_tables() )
 * traverse the given list while holding UH lock immediately before
 * performing their operations, calling the function provided by the list
 * entry ( currently rollback_add_entry ) which performs rollback for all
 * necessary state and sets appropriate values in the structure indicating
 * rollback has happened.
 *
 * Algo interaction:
 * Function references @ti first to ensure table won't
 * disappear or change its type.
 * After that, prepare_add callback is called for each @tei entry.
 * Next, we try to add each entry under UH+WLOCK
 * using add() callback.
 * Finally, we free all state by calling flush_entry callback
 * for each @tei.
 *
 * Returns 0 on success.
 */
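
/*
 * Illustrative sketch (comment only) of the op_state protocol described
 * above, as followed by add_table_entry() below and by other writers that
 * must drop the UH lock mid-operation:
 *
 *	add_toperation_state(ch, &ts);		(UH_WLOCK held)
 *	IPFW_UH_WUNLOCK(ch);
 *	... sleepable work: malloc, prepare_* callbacks ...
 *	IPFW_UH_WLOCK(ch);
 *	del_toperation_state(ch, &ts);
 *	if (ts.modified != 0)
 *		roll local state back and restart (or return);
 *
 * Concurrent modifiers (e.g. swap_tables()) call rollback_toperation_state(),
 * which invokes each registered ts.opstate.func (rollback_add_entry() here);
 * that handler sets ts.modified for operations touching the same table.
 */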
int
add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint8_t flags, uint32_t count)
{
	struct table_config *tc;
	struct table_algo *ta;
	uint16_t kidx;
	int error, first_error, i, rollback;
	uint32_t num, numadd;
	struct tentry_info *ptei;
	struct tableop_state ts;
	char ta_buf[TA_BUF_SZ];
	caddr_t ta_buf_m, v;

	memset(&ts, 0, sizeof(ts));
	ta = NULL;
	IPFW_UH_WLOCK(ch);

	/*
	 * Find and reference existing table.
	 */
restart:
	if (ts.modified != 0) {
		IPFW_UH_WUNLOCK(ch);
		flush_batch_buffer(ch, ta, tei, count, rollback,
		    ta_buf_m, ta_buf);
		memset(&ts, 0, sizeof(ts));
		ta = NULL;
		IPFW_UH_WLOCK(ch);
	}

	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	ta = tc->ta;

	/* Fill in tablestate */
	ts.ch = ch;
	ts.opstate.func = rollback_add_entry;
	ts.tc = tc;
	ts.vshared = tc->vshared;
	ts.vmask = tc->vmask;
	ts.ta = ta;
	ts.tei = tei;
	ts.count = count;
	rollback = 0;
	add_toperation_state(ch, &ts);
	IPFW_UH_WUNLOCK(ch);

	/* Allocate memory and prepare record(s) */
	/* Pass stack buffer by default */
	ta_buf_m = ta_buf;
	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);

	IPFW_UH_WLOCK(ch);
	del_toperation_state(ch, &ts);
	/* Drop reference we've used in first search */
	tc->no.refcnt--;

	/* Check prepare_batch_buffer() error */
	if (error != 0)
		goto cleanup;

	/*
	 * Check if table swap has happened.
	 * (so table algo might be changed).
	 * Restart operation to achieve consistent behavior.
	 */
	if (ts.modified != 0)
		goto restart;

	/*
	 * Link all values to shared/per-table value array.
	 *
	 * May release/reacquire UH_WLOCK.
	 */
	error = ipfw_link_table_values(ch, &ts, flags);
	if (error != 0)
		goto cleanup;
	if (ts.modified != 0)
		goto restart;

	/*
	 * Ensure we are able to add all entries without additional
	 * memory allocations. May release/reacquire UH_WLOCK.
	 */
	kidx = tc->no.kidx;
	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
	if (error != 0)
		goto cleanup;
	if (ts.modified != 0)
		goto restart;

	/* We've got valid table in @tc. Let's try to add data */
	kidx = tc->no.kidx;
	ta = tc->ta;
	numadd = 0;
	first_error = 0;

	IPFW_WLOCK(ch);

	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
		ptei = &tei[i];
		num = 0;
		/* check limit before adding */
		if ((error = check_table_limit(tc, ptei)) == 0) {
			/*
			 * It should be safe to insert a record w/o
			 * a properly-linked value if atomicity is
			 * not required.
			 *
			 * If the added item does not have a valid value
			 * index, it would get rejected by ta->add().
			 */
			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
			    ptei, v, &num);
			/* Set status flag to inform userland */
			store_tei_result(ptei, OP_ADD, error, num);
		}
		if (error == 0) {
			/* Update number of records to ease limit checking */
			tc->count += num;
			numadd += num;
			continue;
		}

		if (first_error == 0)
			first_error = error;

		/*
		 * Some error has happened. Check our atomicity
		 * settings: continue if atomicity is not required,
		 * rollback changes otherwise.
		 */
		if ((flags & IPFW_CTF_ATOMIC) == 0)
			continue;

		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
		    tei, ta_buf_m, count, i);

		rollback = 1;
		break;
	}

	IPFW_WUNLOCK(ch);

	ipfw_garbage_table_values(ch, tc, tei, count, rollback);

	/* Permit post-add algorithm grow/rehash. */
	if (numadd != 0)
		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);

	/* Return first error to user, if any */
	error = first_error;

cleanup:
	IPFW_UH_WUNLOCK(ch);

	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);

	return (error);
}
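
#if 0
/*
 * Illustrative sketch only, never compiled: how a kernel-side caller could
 * add a single IPv4 host entry to table number 1, mirroring what
 * manage_table_ent_v0() below does for the legacy sockopt path.  The
 * function name and constants are made up for the example.
 */
static int
example_add_v4_entry(struct ip_fw_chain *ch, struct in_addr addr)
{
	struct tentry_info tei;
	struct tid_info ti;
	struct table_value v;

	memset(&tei, 0, sizeof(tei));
	tei.paddr = &addr;		/* lookup key */
	tei.masklen = 32;		/* host entry */
	tei.subtype = AF_INET;
	ipfw_import_table_value_legacy(0, &v);
	tei.pvalue = &v;
	tei.flags = TEI_FLAGS_COMPAT;	/* auto-create table if missing */

	memset(&ti, 0, sizeof(ti));
	ti.uidx = 1;			/* table number */
	ti.type = IPFW_TABLE_ADDR;

	return (add_table_entry(ch, &ti, &tei, 0, 1));
}
#endif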

/*
 * Deletes one or more entries in table @ti.
 *
 * Returns 0 on success.
 */
int
del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
    struct tentry_info *tei, uint8_t flags, uint32_t count)
{
	struct table_config *tc;
	struct table_algo *ta;
	struct tentry_info *ptei;
	uint16_t kidx;
	int error, first_error, i;
	uint32_t num, numdel;
	char ta_buf[TA_BUF_SZ];
	caddr_t ta_buf_m, v;

	/*
	 * Find and reference existing table.
	 */
	IPFW_UH_WLOCK(ch);
	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	ta = tc->ta;
	IPFW_UH_WUNLOCK(ch);

	/* Allocate memory and prepare record(s) */
	/* Pass stack buffer by default */
	ta_buf_m = ta_buf;
	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
	if (error != 0)
		goto cleanup;

	IPFW_UH_WLOCK(ch);

	/* Drop reference we've used in first search */
	tc->no.refcnt--;

	/*
	 * Check if table algo is still the same.
	 * (changed ta may be the result of table swap).
	 */
	if (ta != tc->ta) {
		IPFW_UH_WUNLOCK(ch);
		error = EINVAL;
		goto cleanup;
	}

	kidx = tc->no.kidx;
	numdel = 0;
	first_error = 0;

	IPFW_WLOCK(ch);
	v = ta_buf_m;
	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
		ptei = &tei[i];
		num = 0;
		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
		    &num);
		/* Save state for userland */
		store_tei_result(ptei, OP_DEL, error, num);
		if (error != 0 && first_error == 0)
			first_error = error;
		tc->count -= num;
		numdel += num;
	}
	IPFW_WUNLOCK(ch);

	/* Unlink non-used values */
	ipfw_garbage_table_values(ch, tc, tei, count, 0);

	if (numdel != 0) {
		/* Run post-del hook to permit shrinking */
		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Return first error to user, if any */
	error = first_error;

cleanup:
	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);

	return (error);
}

/*
 * Ensure that table @tc has enough space to add @count entries without
 * need for reallocation.
 *
 * Callbacks order:
 * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
 *
 * 1) alloc_modify (no locks, M_WAITOK) - alloc new state based on @pflags.
 * 2) prepare_modify (UH_WLOCK) - copy old data into new storage
 * 3) modify (UH_WLOCK + WLOCK) - switch pointers
 * 4) flush_modify (UH_WLOCK) - free state, if needed
 *
 * Returns 0 on success.
 */
static int
check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
    struct table_config *tc, struct table_info *ti, uint32_t count)
{
	struct table_algo *ta;
	uint64_t pflags;
	char ta_buf[TA_BUF_SZ];
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	error = 0;
	ta = tc->ta;
	if (ta->need_modify == NULL)
		return (0);

	/* Acquire reference not to lose @tc between locks/unlocks */
	tc->no.refcnt++;

	/*
	 * TODO: think about avoiding race between large add/large delete
	 * operation on algorithm which implements shrinking along with
	 * growing.
	 */
	while (true) {
		pflags = 0;
		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
			error = 0;
			break;
		}

		/* We have to shrink/grow table */
		if (ts != NULL)
			add_toperation_state(ch, ts);
		IPFW_UH_WUNLOCK(ch);

		memset(&ta_buf, 0, sizeof(ta_buf));
		error = ta->prepare_mod(ta_buf, &pflags);

		IPFW_UH_WLOCK(ch);
		if (ts != NULL)
			del_toperation_state(ch, ts);

		if (error != 0)
			break;

		if (ts != NULL && ts->modified != 0) {
			/*
			 * Swap operation has happened
			 * so we're currently operating on other
			 * table data. Stop doing this.
			 */
			ta->flush_mod(ta_buf);
			break;
		}

		/* Check if we still need to alter table */
		ti = KIDX_TO_TI(ch, tc->no.kidx);
		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
			/*
			 * Other thread has already performed resize.
			 * Flush our state and return.
			 */
			ta->flush_mod(ta_buf);
			break;
		}

		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
		if (error == 0) {
			/* Do actual modification */
			IPFW_WLOCK(ch);
			ta->modify(tc->astate, ti, ta_buf, pflags);
			IPFW_WUNLOCK(ch);
		}

		/* Anyway, flush data and retry */
		ta->flush_mod(ta_buf);
	}

	tc->no.refcnt--;
	return (error);
}

/*
 * Adds or deletes record in table.
 * Data layout (v0):
 * Request: [ ip_fw3_opheader ipfw_table_xentry ]
 *
 * Returns 0 on success
 */
static int
manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_table_xentry *xent;
	struct tentry_info tei;
	struct tid_info ti;
	struct table_value v;
	int error, hdrlen, read;

	hdrlen = offsetof(ipfw_table_xentry, k);

	/* Check minimum header size */
	if (sd->valsize < (sizeof(*op3) + hdrlen))
		return (EINVAL);

	read = sizeof(ip_fw3_opheader);

	/* Check if xentry len field is valid */
	xent = (ipfw_table_xentry *)(op3 + 1);
	if (xent->len < hdrlen || xent->len + read > sd->valsize)
		return (EINVAL);

	memset(&tei, 0, sizeof(tei));
	tei.paddr = &xent->k;
	tei.masklen = xent->masklen;
	ipfw_import_table_value_legacy(xent->value, &v);
	tei.pvalue = &v;
	/* Old requests compatibility */
	tei.flags = TEI_FLAGS_COMPAT;
	if (xent->type == IPFW_TABLE_ADDR) {
		if (xent->len - hdrlen == sizeof(in_addr_t))
			tei.subtype = AF_INET;
		else
			tei.subtype = AF_INET6;
	}

	memset(&ti, 0, sizeof(ti));
	ti.uidx = xent->tbl;
	ti.type = xent->type;

	error = (op3->opcode == IP_FW_TABLE_XADD) ?
	    add_table_entry(ch, &ti, &tei, 0, 1) :
	    del_table_entry(ch, &ti, &tei, 0, 1);

	return (error);
}

/*
 * Adds or deletes record in table.
 * Data layout (v1)(current):
 * Request: [ ipfw_obj_header
 *   ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ]
 * ]
 *
 * Returns 0 on success
 */
static int
manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_tentry *tent, *ptent;
	ipfw_obj_ctlv *ctlv;
	ipfw_obj_header *oh;
	struct tentry_info *ptei, tei, *tei_buf;
	struct tid_info ti;
	int error, i, kidx, read;

	/* Check minimum header size */
	if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv)))
		return (EINVAL);

	/* Check if passed data is too long */
	if (sd->valsize != sd->kavail)
		return (EINVAL);

	oh = (ipfw_obj_header *)sd->kbuf;

	/* Basic length checks for TLVs */
	if (oh->ntlv.head.length != sizeof(oh->ntlv))
		return (EINVAL);

	read = sizeof(*oh);

	ctlv = (ipfw_obj_ctlv *)(oh + 1);
	if (ctlv->head.length + read != sd->valsize)
		return (EINVAL);

	read += sizeof(*ctlv);
	tent = (ipfw_obj_tentry *)(ctlv + 1);
	if (ctlv->count * sizeof(*tent) + read != sd->valsize)
		return (EINVAL);

	if (ctlv->count == 0)
		return (0);

	/*
	 * Mark entire buffer as "read".
	 * This instructs the sopt API to write it back
	 * after function return.
	 */
	ipfw_get_sopt_header(sd, sd->valsize);

	/* Perform basic checks for each entry */
	ptent = tent;
	kidx = tent->idx;
	for (i = 0; i < ctlv->count; i++, ptent++) {
		if (ptent->head.length != sizeof(*ptent))
			return (EINVAL);
		if (ptent->idx != kidx)
			return (ENOTSUP);
	}

	/* Convert data into kernel request objects */
	objheader_to_ti(oh, &ti);
	ti.type = oh->ntlv.type;
	ti.uidx = kidx;

	/* Use on-stack buffer for single add/del */
	if (ctlv->count == 1) {
		memset(&tei, 0, sizeof(tei));
		tei_buf = &tei;
	} else
		tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP,
		    M_WAITOK | M_ZERO);

	ptei = tei_buf;
	ptent = tent;
	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
		ptei->paddr = &ptent->k;
		ptei->subtype = ptent->subtype;
		ptei->masklen = ptent->masklen;
		if (ptent->head.flags & IPFW_TF_UPDATE)
			ptei->flags |= TEI_FLAGS_UPDATE;

		ipfw_import_table_value_v1(&ptent->v.value);
		ptei->pvalue = (struct table_value *)&ptent->v.value;
	}

	error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ?
	    add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) :
	    del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count);

	/* Translate result back to userland */
	ptei = tei_buf;
	ptent = tent;
	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
		if (ptei->flags & TEI_FLAGS_ADDED)
			ptent->result = IPFW_TR_ADDED;
		else if (ptei->flags & TEI_FLAGS_DELETED)
			ptent->result = IPFW_TR_DELETED;
		else if (ptei->flags & TEI_FLAGS_UPDATED)
			ptent->result = IPFW_TR_UPDATED;
		else if (ptei->flags & TEI_FLAGS_LIMIT)
			ptent->result = IPFW_TR_LIMIT;
		else if (ptei->flags & TEI_FLAGS_ERROR)
			ptent->result = IPFW_TR_ERROR;
		else if (ptei->flags & TEI_FLAGS_NOTFOUND)
			ptent->result = IPFW_TR_NOTFOUND;
		else if (ptei->flags & TEI_FLAGS_EXISTS)
			ptent->result = IPFW_TR_EXISTS;
		ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value);
	}

	if (tei_buf != &tei)
		free(tei_buf, M_TEMP);

	return (error);
}
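
/*
 * Illustrative sketch (comment only) of the v1 add/delete request consumed
 * by manage_table_ent_v1() above, for a batch of N entries:
 *
 *	ipfw_obj_header oh;	oh.ntlv names the table, oh.ntlv.type is the
 *				key type (e.g. IPFW_TABLE_ADDR)
 *	ipfw_obj_ctlv ctlv;	ctlv.count = N, ctlv.head.length =
 *				sizeof(ctlv) + N * sizeof(ipfw_obj_tentry)
 *	ipfw_obj_tentry tent[N]; every tent[i].idx must reference the same
 *				table; tent[i].k holds the key and
 *				tent[i].v.value the value
 *
 * sd->valsize must match the total size exactly; the whole buffer is
 * written back, so each tent[i].result carries the per-entry outcome.
 */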

/*
 * Looks up an entry in given table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_obj_tentry ]
 * Reply: [ ipfw_obj_header ipfw_obj_tentry ]
 *
 * Returns 0 on success
 */
static int
find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_tentry *tent;
	ipfw_obj_header *oh;
	struct tid_info ti;
	struct table_config *tc;
	struct table_algo *ta;
	struct table_info *kti;
	struct table_value *pval;
	struct namedobj_instance *ni;
	int error;
	size_t sz;

	/* Check minimum header size */
	sz = sizeof(*oh) + sizeof(*tent);
	if (sd->valsize != sz)
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
	tent = (ipfw_obj_tentry *)(oh + 1);

	/* Basic length checks for TLVs */
	if (oh->ntlv.head.length != sizeof(oh->ntlv))
		return (EINVAL);

	objheader_to_ti(oh, &ti);
	ti.type = oh->ntlv.type;
	ti.uidx = tent->idx;

	IPFW_UH_RLOCK(ch);
	ni = CHAIN_TO_NI(ch);

	/*
	 * Find existing table and check its type.
	 */
	ta = NULL;
	if ((tc = find_table(ni, &ti)) == NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (ESRCH);
	}

	/* check table type */
	if (tc->no.subtype != ti.type) {
		IPFW_UH_RUNLOCK(ch);
		return (EINVAL);
	}

	kti = KIDX_TO_TI(ch, tc->no.kidx);
	ta = tc->ta;

	if (ta->find_tentry == NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (ENOTSUP);
	}

	error = ta->find_tentry(tc->astate, kti, tent);
	if (error == 0) {
		pval = get_table_value(ch, tc, tent->v.kidx);
		ipfw_export_table_value_v1(pval, &tent->v.value);
	}
	IPFW_UH_RUNLOCK(ch);

	return (error);
}

/*
 * Flushes all entries or destroys given table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ]
 *
 * Returns 0 on success
 */
static int
flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	int error;
	struct _ipfw_obj_header *oh;
	struct tid_info ti;

	if (sd->valsize != sizeof(*oh))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)op3;
	objheader_to_ti(oh, &ti);

	if (op3->opcode == IP_FW_TABLE_XDESTROY)
		error = destroy_table(ch, &ti);
	else if (op3->opcode == IP_FW_TABLE_XFLUSH)
		error = flush_table(ch, &ti);
	else
		return (ENOTSUP);

	return (error);
}

static void
restart_flush(void *object, struct op_state *_state)
{
	struct tableop_state *ts;

	ts = (struct tableop_state *)_state;

	if (ts->tc != object)
		return;

	/* Indicate we've called */
	ts->modified = 1;
}

/*
 * Flushes given table.
 *
 * Function creates a new table instance with the same
 * parameters, swaps it with the old one and
 * flushes the old state without holding runtime WLOCK.
 *
 * Returns 0 on success.
 */
int
flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
{
	struct namedobj_instance *ni;
	struct table_config *tc;
	struct table_algo *ta;
	struct table_info ti_old, ti_new, *tablestate;
	void *astate_old, *astate_new;
	char algostate[64], *pstate;
	struct tableop_state ts;
	int error, need_gc;
	uint16_t kidx;
	uint8_t tflags;

	/*
	 * Stage 1: save table algorithm.
	 * Reference found table to ensure it won't disappear.
	 */
	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}
	need_gc = 0;
	astate_new = NULL;
	memset(&ti_new, 0, sizeof(ti_new));
restart:
	/* Set up swap handler */
	memset(&ts, 0, sizeof(ts));
	ts.opstate.func = restart_flush;
	ts.tc = tc;

	ta = tc->ta;
	/* Do not flush readonly tables */
	if ((ta->flags & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}
	/* Save startup algo parameters */
	if (ta->print_config != NULL) {
		ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx),
		    algostate, sizeof(algostate));
		pstate = algostate;
	} else
		pstate = NULL;
	tflags = tc->tflags;
	tc->no.refcnt++;
	add_toperation_state(ch, &ts);
	IPFW_UH_WUNLOCK(ch);

	/*
	 * Stage 1.5: if this is not the first attempt, destroy previous state
	 */
	if (need_gc != 0) {
		ta->destroy(astate_new, &ti_new);
		need_gc = 0;
	}

	/*
	 * Stage 2: allocate new table instance using same algo.
	 */
	memset(&ti_new, 0, sizeof(struct table_info));
	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);

	/*
	 * Stage 3: swap old state pointers with newly-allocated ones.
	 * Decrease refcount.
	 */
	IPFW_UH_WLOCK(ch);
	tc->no.refcnt--;
	del_toperation_state(ch, &ts);

	if (error != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}

	/*
	 * Restart operation if table swap has happened:
	 * even if algo may be the same, algo init parameters
	 * may change. Restart operation instead of doing
	 * complex checks.
	 */
	if (ts.modified != 0) {
		/* Delay destroying data since we're holding UH lock */
		need_gc = 1;
		goto restart;
	}

	ni = CHAIN_TO_NI(ch);
	kidx = tc->no.kidx;
	tablestate = (struct table_info *)ch->tablestate;

	IPFW_WLOCK(ch);
	ti_old = tablestate[kidx];
	tablestate[kidx] = ti_new;
	IPFW_WUNLOCK(ch);

	astate_old = tc->astate;
	tc->astate = astate_new;
	tc->ti_copy = ti_new;
	tc->count = 0;

	/* Notify algo on real @ti address */
	if (ta->change_ti != NULL)
		ta->change_ti(tc->astate, &tablestate[kidx]);

	/*
	 * Stage 4: unref values.
	 */
	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
	IPFW_UH_WUNLOCK(ch);

	/*
	 * Stage 5: perform real flush/destroy.
	 */
	ta->destroy(astate_old, &ti_old);

	return (0);
}

/*
 * Swaps two tables.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
 *
 * Returns 0 on success
 */
static int
swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	int error;
	struct _ipfw_obj_header *oh;
	struct tid_info ti_a, ti_b;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)op3;
	ntlv_to_ti(&oh->ntlv, &ti_a);
	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);

	error = swap_tables(ch, &ti_a, &ti_b);

	return (error);
}

/*
 * Swaps two tables of the same type/valtype.
 *
 * Checks if tables are compatible and limits
 * permit swap, then actually performs swap.
 *
 * Each table consists of 2 different parts:
 * config:
 *   @tc (with name, set, kidx) and rule bindings, which is "stable".
 *   number of items
 *   table algo
 * runtime:
 *   runtime data @ti (ch->tablestate)
 *   runtime cache in @tc
 *   algo-specific data (@tc->astate)
 *
 * So we switch:
 *  all runtime data
 *  number of items
 *  table algo
 *
 * After that we call @ti change handler for each table.
 *
 * Note that referencing @tc won't protect tc->ta from change.
 * XXX: Do we need to restrict swap between locked tables?
 * XXX: Do we need to exchange ftype?
 *
 * Returns 0 on success.
 */
static int
swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
    struct tid_info *b)
{
	struct namedobj_instance *ni;
	struct table_config *tc_a, *tc_b;
	struct table_algo *ta;
	struct table_info ti, *tablestate;
	void *astate;
	uint32_t count;

	/*
	 * Stage 1: find both tables and ensure they are of
	 * the same type.
	 */
	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc_a = find_table(ni, a)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}
	if ((tc_b = find_table(ni, b)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* It is very easy to swap between the same table */
	if (tc_a == tc_b) {
		IPFW_UH_WUNLOCK(ch);
		return (0);
	}

	/* Check type and value are the same */
	if (tc_a->no.subtype!=tc_b->no.subtype || tc_a->tflags!=tc_b->tflags) {
		IPFW_UH_WUNLOCK(ch);
		return (EINVAL);
	}

	/* Check limits before swap */
	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
		IPFW_UH_WUNLOCK(ch);
		return (EFBIG);
	}

	/* Check if one of the tables is readonly */
	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}

	/* Notify we're going to swap */
	rollback_toperation_state(ch, tc_a);
	rollback_toperation_state(ch, tc_b);

	/* Everything is fine, prepare to swap */
	tablestate = (struct table_info *)ch->tablestate;
	ti = tablestate[tc_a->no.kidx];
	ta = tc_a->ta;
	astate = tc_a->astate;
	count = tc_a->count;

	IPFW_WLOCK(ch);
	/* a <- b */
	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
	tc_a->ta = tc_b->ta;
	tc_a->astate = tc_b->astate;
	tc_a->count = tc_b->count;
	/* b <- a */
	tablestate[tc_b->no.kidx] = ti;
	tc_b->ta = ta;
	tc_b->astate = astate;
	tc_b->count = count;
	IPFW_WUNLOCK(ch);

	/* Ensure tc.ti copies are in sync */
	tc_a->ti_copy = tablestate[tc_a->no.kidx];
	tc_b->ti_copy = tablestate[tc_b->no.kidx];

	/* Notify both tables on @ti change */
	if (tc_a->ta->change_ti != NULL)
		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
	if (tc_b->ta->change_ti != NULL)
		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);

	IPFW_UH_WUNLOCK(ch);

	return (0);
}
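
/*
 * Illustrative note (an assumed usage pattern, not code from this file):
 * swap_tables() is what enables atomic table replacement from userland:
 *
 *	1. create a spare table of the same type/tflags;
 *	2. batch-add the new dataset into the spare table;
 *	3. swap the spare with the production table;
 *	4. flush or destroy the spare, which now holds the old entries.
 *
 * Lookups never see a half-populated table because only the pointer
 * exchange performed under IPFW_WLOCK above is visible to the runtime path.
 */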

/*
 * Destroys table specified by @ti.
 * Data layout (v0)(current):
 * Request: [ ip_fw3_opheader ]
 *
 * Returns 0 on success
 */
static int
destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
{
	struct namedobj_instance *ni;
	struct table_config *tc;

	IPFW_UH_WLOCK(ch);

	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* Do not permit destroying referenced tables */
	if (tc->no.refcnt > 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EBUSY);
	}

	IPFW_WLOCK(ch);
	unlink_table(ch, tc);
	IPFW_WUNLOCK(ch);

	/* Free obj index */
	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
		printf("Error unlinking kidx %d from table %s\n",
		    tc->no.kidx, tc->tablename);

	/* Unref values used in tables while holding UH lock */
	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
	IPFW_UH_WUNLOCK(ch);

	free_table_config(ni, tc);

	return (0);
}

/*
 * Grow tables index.
 *
 * Returns 0 on success.
 */
int
ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
{
	unsigned int tbl;
	struct namedobj_instance *ni;
	void *new_idx, *old_tablestate, *tablestate;
	struct table_info *ti;
	struct table_config *tc;
	int i, new_blocks;

	/* Check new value for validity */
	if (ntables == 0)
		return (EINVAL);
	if (ntables > IPFW_TABLES_MAX)
		ntables = IPFW_TABLES_MAX;
	/* Align to nearest power of 2 */
	ntables = roundup_pow_of_two(ntables);

	/* Allocate new pointers */
	tablestate = malloc(ntables * sizeof(struct table_info),
	    M_IPFW, M_WAITOK | M_ZERO);

	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);

	IPFW_UH_WLOCK(ch);

	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
	ni = CHAIN_TO_NI(ch);

	/* Temporarily restrict decreasing max_tables */
	if (ntables < V_fw_tables_max) {
		/*
		 * FIXME: Check if we really can shrink
		 */
		IPFW_UH_WUNLOCK(ch);
		return (EINVAL);
	}

	/* Copy table info/indices */
	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);

	IPFW_WLOCK(ch);

	/* Change pointers */
	old_tablestate = ch->tablestate;
	ch->tablestate = tablestate;
	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);

	V_fw_tables_max = ntables;

	IPFW_WUNLOCK(ch);

	/* Notify all consumers that their @ti pointer has changed */
	ti = (struct table_info *)ch->tablestate;
	for (i = 0; i < tbl; i++, ti++) {
		if (ti->lookup == NULL)
			continue;
		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
		if (tc == NULL || tc->ta->change_ti == NULL)
			continue;

		tc->ta->change_ti(tc->astate, ti);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Free old pointers */
	free(old_tablestate, M_IPFW);
	ipfw_objhash_bitmap_free(new_idx, new_blocks);

	return (0);
}

/*
 * Lookup table's named object by its @kidx.
 */
struct named_object *
ipfw_objhash_lookup_table_kidx(struct ip_fw_chain *ch, uint16_t kidx)
{

	return (ipfw_objhash_lookup_kidx(CHAIN_TO_NI(ch), kidx));
}

/*
 * Take reference to table specified in @ntlv.
 * On success return its @kidx.
 */
int
ipfw_ref_table(struct ip_fw_chain *ch, ipfw_obj_ntlv *ntlv, uint16_t *kidx)
{
	struct tid_info ti;
	struct table_config *tc;
	int error;

	IPFW_UH_WLOCK_ASSERT(ch);

	ntlv_to_ti(ntlv, &ti);
	error = find_table_err(CHAIN_TO_NI(ch), &ti, &tc);
	if (error != 0)
		return (error);

	if (tc == NULL)
		return (ESRCH);

	tc_ref(tc);
	*kidx = tc->no.kidx;

	return (0);
}

void
ipfw_unref_table(struct ip_fw_chain *ch, uint16_t kidx)
{
	struct namedobj_instance *ni;
	struct named_object *no;

	IPFW_UH_WLOCK_ASSERT(ch);
	ni = CHAIN_TO_NI(ch);
	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("Table with index %d not found", kidx));
	no->refcnt--;
}

/*
 * Lookup an arbitrary key @paddr of length @plen in table @tbl.
 * Stores found value in @val.
 *
 * Returns 1 if key was found.
 */
int
ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
    void *paddr, uint32_t *val)
{
	struct table_info *ti;

	ti = KIDX_TO_TI(ch, tbl);

	return (ti->lookup(ti, paddr, plen, val));
}
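
/*
 * Illustrative sketch (comment only), assuming an IPv4 addr-type table at
 * kernel index @kidx; for addr tables the key length tells the algorithm
 * whether the key is an IPv4 or an IPv6 address:
 *
 *	struct in_addr a;
 *	uint32_t v;
 *
 *	a.s_addr = htonl(INADDR_LOOPBACK);
 *	if (ipfw_lookup_table(ch, kidx, sizeof(in_addr_t), &a, &v) != 0)
 *		... @v holds the value found for the matching entry ...
 */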

/*
 * Info/List/dump support for tables.
 *
 */

/*
 * High-level 'get' cmds sysctl handlers
 */

/*
 * Lists all tables currently available in kernel.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
 * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ]
 *
 * Returns 0 on success
 */
static int
list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_lheader *olh;
	int error;

	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
	if (olh == NULL)
		return (EINVAL);
	if (sd->valsize < olh->size)
		return (EINVAL);

	IPFW_UH_RLOCK(ch);
	error = export_tables(ch, olh, sd);
	IPFW_UH_RUNLOCK(ch);

	return (error);
}

/*
 * Store table info to buffer provided by @sd.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_xtable_info(empty) ]
 * Reply: [ ipfw_obj_header ipfw_xtable_info ]
 *
 * Returns 0 on success.
 */
static int
describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	struct table_config *tc;
	struct tid_info ti;
	size_t sz;

	sz = sizeof(*oh) + sizeof(ipfw_xtable_info);
	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
	if (oh == NULL)
		return (EINVAL);

	objheader_to_ti(oh, &ti);

	IPFW_UH_RLOCK(ch);
	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (ESRCH);
	}

	export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1));
	IPFW_UH_RUNLOCK(ch);

	return (0);
}

/*
 * Modifies existing table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_xtable_info ]
 *
 * Returns 0 on success
 */
static int
modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	ipfw_xtable_info *i;
	char *tname;
	struct tid_info ti;
	struct namedobj_instance *ni;
	struct table_config *tc;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)sd->kbuf;
	i = (ipfw_xtable_info *)(oh + 1);

	/*
	 * Verify user-supplied strings.
	 * Check for null-terminated/zero-length strings.
	 */
	tname = oh->ntlv.name;
	if (check_table_name(tname) != 0)
		return (EINVAL);

	objheader_to_ti(oh, &ti);
	ti.type = i->type;

	IPFW_UH_WLOCK(ch);
	ni = CHAIN_TO_NI(ch);
	if ((tc = find_table(ni, &ti)) == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	/* Do not support any modifications for readonly tables */
	if ((tc->ta->flags & TA_FLAG_READONLY) != 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EACCES);
	}

	if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0)
		tc->limit = i->limit;
	if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0)
		tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0);
	IPFW_UH_WUNLOCK(ch);

	return (0);
}

/*
 * Creates new table.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_xtable_info ]
 *
 * Returns 0 on success
 */
static int
create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_header *oh;
	ipfw_xtable_info *i;
	char *tname, *aname;
	struct tid_info ti;
	struct namedobj_instance *ni;

	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
		return (EINVAL);

	oh = (struct _ipfw_obj_header *)sd->kbuf;
	i = (ipfw_xtable_info *)(oh + 1);

	/*
	 * Verify user-supplied strings.
	 * Check for null-terminated/zero-length strings.
	 */
	tname = oh->ntlv.name;
	aname = i->algoname;
	if (check_table_name(tname) != 0 ||
	    strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname))
		return (EINVAL);

	if (aname[0] == '\0') {
		/* Use default algorithm */
		aname = NULL;
	}

	objheader_to_ti(oh, &ti);
	ti.type = i->type;

	ni = CHAIN_TO_NI(ch);

	IPFW_UH_RLOCK(ch);
	if (find_table(ni, &ti) != NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (EEXIST);
	}
	IPFW_UH_RUNLOCK(ch);

	return (create_table_internal(ch, &ti, aname, i, NULL, 0));
}
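
/*
 * Illustrative sketch (comment only) of a create_table() request for an
 * address table limited to 1000 entries using the default algorithm.
 * The table name and numbers are made up for the example; only fields
 * handled by create_table() and create_table_internal() are shown:
 *
 *	ipfw_obj_header oh;	strlcpy(oh.ntlv.name, "badhosts", ...);
 *				oh.ntlv.set = 0;
 *	ipfw_xtable_info xi;	xi.type = IPFW_TABLE_ADDR;
 *				xi.vmask = IPFW_VTYPE_LEGACY;
 *				xi.limit = 1000;
 *				xi.algoname[0] = '\0';  (use default algo)
 *
 * The same layout, with xi.mflags set, is accepted by modify_table() to
 * change the limit or the locked flag of an existing table.
 */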

/*
 * Creates new table based on @ti and @aname.
 *
 * Assume @aname to be checked and valid.
 * Stores allocated table kidx inside @pkidx (if non-NULL).
 * Reference created table if @compat is non-zero.
 *
 * Returns 0 on success.
 */
static int
create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
    char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat)
{
	struct namedobj_instance *ni;
	struct table_config *tc, *tc_new, *tmp;
	struct table_algo *ta;
	uint16_t kidx;

	ni = CHAIN_TO_NI(ch);

	ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
	if (ta == NULL)
		return (ENOTSUP);

	tc = alloc_table_config(ch, ti, ta, aname, i->tflags);
	if (tc == NULL)
		return (ENOMEM);

	tc->vmask = i->vmask;
	tc->limit = i->limit;
	if (ta->flags & TA_FLAG_READONLY)
		tc->locked = 1;
	else
		tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;

	IPFW_UH_WLOCK(ch);

	/* Check if table has already been created */
	tc_new = find_table(ni, ti);
	if (tc_new != NULL) {
		/*
		 * Compat: do not fail if we're
		 * requesting to create existing table
		 * which has the same type
		 */
		if (compat == 0 || tc_new->no.subtype != tc->no.subtype) {
			IPFW_UH_WUNLOCK(ch);
			free_table_config(ni, tc);
			return (EEXIST);
		}

		/* Exchange tc and tc_new for proper refcounting & freeing */
		tmp = tc;
		tc = tc_new;
		tc_new = tmp;
	} else {
		/* New table */
		if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
			IPFW_UH_WUNLOCK(ch);
			printf("Unable to allocate table index."
			    " Consider increasing net.inet.ip.fw.tables_max");
			free_table_config(ni, tc);
			return (EBUSY);
		}
		tc->no.kidx = kidx;
		tc->no.etlv = IPFW_TLV_TBL_NAME;

		link_table(ch, tc);
	}

	if (compat != 0)
		tc->no.refcnt++;
	if (pkidx != NULL)
		*pkidx = tc->no.kidx;

	IPFW_UH_WUNLOCK(ch);

	if (tc_new != NULL)
		free_table_config(ni, tc_new);

	return (0);
}

static void
ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti)
{

	memset(ti, 0, sizeof(struct tid_info));
	ti->set = ntlv->set;
	ti->uidx = ntlv->idx;
	ti->tlvs = ntlv;
	ti->tlen = ntlv->head.length;
}

static void
objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti)
{

	ntlv_to_ti(&oh->ntlv, ti);
}

struct namedobj_instance *
ipfw_get_table_objhash(struct ip_fw_chain *ch)
{

	return (CHAIN_TO_NI(ch));
}

/*
 * Exports basic table info as name TLV.
 * Used inside dump_static_rules() to provide info
 * about all tables referenced by current ruleset.
 *
 * Returns 0 on success.
 */
int
ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
    struct sockopt_data *sd)
{
	struct namedobj_instance *ni;
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;

	ni = CHAIN_TO_NI(ch);

	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("invalid table kidx passed"));

	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
	if (ntlv == NULL)
		return (ENOMEM);

	ntlv->head.type = IPFW_TLV_TBL_NAME;
	ntlv->head.length = sizeof(*ntlv);
	ntlv->idx = no->kidx;
	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));

	return (0);
}

struct dump_args {
	struct ip_fw_chain *ch;
	struct table_info *ti;
	struct table_config *tc;
	struct sockopt_data *sd;
	uint32_t cnt;
	uint16_t uidx;
	int error;
	uint32_t size;
	ipfw_table_entry *ent;
	ta_foreach_f *f;
	void *farg;
	ipfw_obj_tentry tent;
};

static int
count_ext_entries(void *e, void *arg)
{
	struct dump_args *da;

	da = (struct dump_args *)arg;
	da->cnt++;

	return (0);
}

/*
 * Gets number of items from table either using
 * internal counter or calling algo callback for
 * externally-managed tables.
 *
 * Returns number of records.
 */
static uint32_t
table_get_count(struct ip_fw_chain *ch, struct table_config *tc)
{
	struct table_info *ti;
	struct table_algo *ta;
	struct dump_args da;

	ti = KIDX_TO_TI(ch, tc->no.kidx);
	ta = tc->ta;

	/* Use internal counter for self-managed tables */
	if ((ta->flags & TA_FLAG_READONLY) == 0)
		return (tc->count);

	/* Use callback to quickly get number of items */
	if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0)
		return (ta->get_count(tc->astate, ti));

	/* Count number of items ourselves */
	memset(&da, 0, sizeof(da));
	ta->foreach(tc->astate, ti, count_ext_entries, &da);

	return (da.cnt);
}

/*
 * Exports table @tc info into standard ipfw_xtable_info format.
 */
static void
export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
    ipfw_xtable_info *i)
{
	struct table_info *ti;
	struct table_algo *ta;

	i->type = tc->no.subtype;
	i->tflags = tc->tflags;
	i->vmask = tc->vmask;
	i->set = tc->no.set;
	i->kidx = tc->no.kidx;
	i->refcnt = tc->no.refcnt;
	i->count = table_get_count(ch, tc);
	i->limit = tc->limit;
	i->flags |= (tc->locked != 0) ?
	    IPFW_TGFLAGS_LOCKED : 0;
	i->size = i->count * sizeof(ipfw_obj_tentry);
	i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
	strlcpy(i->tablename, tc->tablename, sizeof(i->tablename));
	ti = KIDX_TO_TI(ch, tc->no.kidx);
	ta = tc->ta;
	if (ta->print_config != NULL) {
		/* Use algo function to print table config to string */
		ta->print_config(tc->astate, ti, i->algoname,
		    sizeof(i->algoname));
	} else
		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
	/* Dump algo-specific data, if possible */
	if (ta->dump_tinfo != NULL) {
		ta->dump_tinfo(tc->astate, ti, &i->ta_info);
		i->ta_info.flags |= IPFW_TATFLAGS_DATA;
	}
}

struct dump_table_args {
	struct ip_fw_chain *ch;
	struct sockopt_data *sd;
};

static int
export_table_internal(struct namedobj_instance *ni, struct named_object *no,
    void *arg)
{
	ipfw_xtable_info *i;
	struct dump_table_args *dta;

	dta = (struct dump_table_args *)arg;

	i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i));
	KASSERT(i != NULL, ("previously checked buffer is not enough"));

	export_table_info(dta->ch, (struct table_config *)no, i);
	return (0);
}

/*
 * Export all tables as ipfw_xtable_info structures to
 * storage provided by @sd.
 *
 * If supplied buffer is too small, fills in required size
 * and returns ENOMEM.
 * Returns 0 on success.
 */
static int
export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
    struct sockopt_data *sd)
{
	uint32_t size;
	uint32_t count;
	struct dump_table_args dta;

	count = ipfw_objhash_count(CHAIN_TO_NI(ch));
	size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader);

	/* Fill in header regardless of buffer size */
	olh->count = count;
	olh->objsize = sizeof(ipfw_xtable_info);

	if (size > olh->size) {
		olh->size = size;
		return (ENOMEM);
	}

	olh->size = size;

	dta.ch = ch;
	dta.sd = sd;

	ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta);

	return (0);
}
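
/*
 * Illustrative note (an assumed userland pattern, not code from this file)
 * on the size negotiation implemented by export_tables() above:
 *
 *	1. issue the list request handled by list_tables() with
 *	   olh.size = sizeof(olh);
 *	2. on ENOMEM the kernel has filled olh.count, olh.objsize and
 *	   olh.size with the space currently required;
 *	3. grow the buffer to olh.size and retry; since tables may have been
 *	   added in between, loop until the call succeeds.
 */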
2180 */ 2181 IPFW_UH_RUNLOCK(ch); 2182 return (ENOMEM); 2183 } 2184 2185 /* 2186 * Do the actual dump in eXtended format 2187 */ 2188 memset(&da, 0, sizeof(da)); 2189 da.ch = ch; 2190 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2191 da.tc = tc; 2192 da.sd = sd; 2193 2194 ta = tc->ta; 2195 2196 ta->foreach(tc->astate, da.ti, dump_table_tentry, &da); 2197 IPFW_UH_RUNLOCK(ch); 2198 2199 return (da.error); 2200 } 2201 2202 /* 2203 * Dumps all table data 2204 * Data layout (version 0)(legacy): 2205 * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE() 2206 * Reply: [ ipfw_xtable ipfw_table_xentry x N ] 2207 * 2208 * Returns 0 on success 2209 */ 2210 static int 2211 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2212 struct sockopt_data *sd) 2213 { 2214 ipfw_xtable *xtbl; 2215 struct tid_info ti; 2216 struct table_config *tc; 2217 struct table_algo *ta; 2218 struct dump_args da; 2219 size_t sz, count; 2220 2221 xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable)); 2222 if (xtbl == NULL) 2223 return (EINVAL); 2224 2225 memset(&ti, 0, sizeof(ti)); 2226 ti.uidx = xtbl->tbl; 2227 2228 IPFW_UH_RLOCK(ch); 2229 if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) { 2230 IPFW_UH_RUNLOCK(ch); 2231 return (0); 2232 } 2233 count = table_get_count(ch, tc); 2234 sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable); 2235 2236 xtbl->cnt = count; 2237 xtbl->size = sz; 2238 xtbl->type = tc->no.subtype; 2239 xtbl->tbl = ti.uidx; 2240 2241 if (sd->valsize < sz) { 2242 /* 2243 * Submitted buffer size is not enough. 2244 * We've already filled in the @xtbl structure with 2245 * relevant table info including size, so we 2246 * can return. Buffer will be flushed automatically. 2247 */ 2248 IPFW_UH_RUNLOCK(ch); 2249 return (ENOMEM); 2250 } 2251 2252 /* Do the actual dump in eXtended format */ 2253 memset(&da, 0, sizeof(da)); 2254 da.ch = ch; 2255 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2256 da.tc = tc; 2257 da.sd = sd; 2258 2259 ta = tc->ta; 2260 2261 ta->foreach(tc->astate, da.ti, dump_table_xentry, &da); 2262 IPFW_UH_RUNLOCK(ch); 2263 2264 return (0); 2265 } 2266 2267 /* 2268 * Legacy function to retrieve the number of items in a table.
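 * Data layout (v0)(legacy):
 * Request: [ ip_fw3_opheader uint32_t(table number) ]
 * Reply: [ ip_fw3_opheader uint32_t(dump size in bytes) ]
 *
 * The value written back is the buffer size needed for a
 * subsequent legacy IP_FW_TABLE_XLIST dump, as computed by
 * ipfw_count_xtable().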
2269 */ 2270 static int 2271 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2272 struct sockopt_data *sd) 2273 { 2274 uint32_t *tbl; 2275 struct tid_info ti; 2276 size_t sz; 2277 int error; 2278 2279 sz = sizeof(*op3) + sizeof(uint32_t); 2280 op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz); 2281 if (op3 == NULL) 2282 return (EINVAL); 2283 2284 tbl = (uint32_t *)(op3 + 1); 2285 memset(&ti, 0, sizeof(ti)); 2286 ti.uidx = *tbl; 2287 IPFW_UH_RLOCK(ch); 2288 error = ipfw_count_xtable(ch, &ti, tbl); 2289 IPFW_UH_RUNLOCK(ch); 2290 return (error); 2291 } 2292 2293 /* 2294 * Legacy IP_FW_TABLE_GETSIZE handler 2295 */ 2296 int 2297 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt) 2298 { 2299 struct table_config *tc; 2300 2301 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) 2302 return (ESRCH); 2303 *cnt = table_get_count(ch, tc); 2304 return (0); 2305 } 2306 2307 /* 2308 * Legacy IP_FW_TABLE_XGETSIZE handler 2309 */ 2310 int 2311 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt) 2312 { 2313 struct table_config *tc; 2314 uint32_t count; 2315 2316 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) { 2317 *cnt = 0; 2318 return (0); /* 'table all list' requires success */ 2319 } 2320 2321 count = table_get_count(ch, tc); 2322 *cnt = count * sizeof(ipfw_table_xentry); 2323 if (count > 0) 2324 *cnt += sizeof(ipfw_xtable); 2325 return (0); 2326 } 2327 2328 static int 2329 dump_table_entry(void *e, void *arg) 2330 { 2331 struct dump_args *da; 2332 struct table_config *tc; 2333 struct table_algo *ta; 2334 ipfw_table_entry *ent; 2335 struct table_value *pval; 2336 int error; 2337 2338 da = (struct dump_args *)arg; 2339 2340 tc = da->tc; 2341 ta = tc->ta; 2342 2343 /* Out of memory, returning */ 2344 if (da->cnt == da->size) 2345 return (1); 2346 ent = da->ent++; 2347 ent->tbl = da->uidx; 2348 da->cnt++; 2349 2350 error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent); 2351 if (error != 0) 2352 return (error); 2353 2354 ent->addr = da->tent.k.addr.s_addr; 2355 ent->masklen = da->tent.masklen; 2356 pval = get_table_value(da->ch, da->tc, da->tent.v.kidx); 2357 ent->value = ipfw_export_table_value_legacy(pval); 2358 2359 return (0); 2360 } 2361 2362 /* 2363 * Dumps table in pre-8.1 legacy format. 2364 */ 2365 int 2366 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti, 2367 ipfw_table *tbl) 2368 { 2369 struct table_config *tc; 2370 struct table_algo *ta; 2371 struct dump_args da; 2372 2373 tbl->cnt = 0; 2374 2375 if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) 2376 return (0); /* XXX: We should return ESRCH */ 2377 2378 ta = tc->ta; 2379 2380 /* This dump format supports IPv4 only */ 2381 if (tc->no.subtype != IPFW_TABLE_ADDR) 2382 return (0); 2383 2384 memset(&da, 0, sizeof(da)); 2385 da.ch = ch; 2386 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2387 da.tc = tc; 2388 da.ent = &tbl->ent[0]; 2389 da.size = tbl->size; 2390 2391 tbl->cnt = 0; 2392 ta->foreach(tc->astate, da.ti, dump_table_entry, &da); 2393 tbl->cnt = da.cnt; 2394 2395 return (0); 2396 } 2397 2398 /* 2399 * Dumps table entry in eXtended format (v1)(current). 
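 *
 * This is the per-entry callback handed to ta->foreach() by
 * dump_table_v1(): it reserves one ipfw_obj_tentry in the sockopt
 * buffer, lets the algo fill in the key via dump_tentry() and then
 * attaches the value with ipfw_export_table_value_v1(). If the
 * buffer is exhausted it records ENOMEM in da->error and returns
 * non-zero so the walk can stop early; dump_table_v1() then
 * returns da->error to the caller.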
2400 */ 2401 static int 2402 dump_table_tentry(void *e, void *arg) 2403 { 2404 struct dump_args *da; 2405 struct table_config *tc; 2406 struct table_algo *ta; 2407 struct table_value *pval; 2408 ipfw_obj_tentry *tent; 2409 int error; 2410 2411 da = (struct dump_args *)arg; 2412 2413 tc = da->tc; 2414 ta = tc->ta; 2415 2416 tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent)); 2417 /* Out of memory, returning */ 2418 if (tent == NULL) { 2419 da->error = ENOMEM; 2420 return (1); 2421 } 2422 tent->head.length = sizeof(ipfw_obj_tentry); 2423 tent->idx = da->uidx; 2424 2425 error = ta->dump_tentry(tc->astate, da->ti, e, tent); 2426 if (error != 0) 2427 return (error); 2428 2429 pval = get_table_value(da->ch, da->tc, tent->v.kidx); 2430 ipfw_export_table_value_v1(pval, &tent->v.value); 2431 2432 return (0); 2433 } 2434 2435 /* 2436 * Dumps table entry in eXtended format (v0). 2437 */ 2438 static int 2439 dump_table_xentry(void *e, void *arg) 2440 { 2441 struct dump_args *da; 2442 struct table_config *tc; 2443 struct table_algo *ta; 2444 ipfw_table_xentry *xent; 2445 ipfw_obj_tentry *tent; 2446 struct table_value *pval; 2447 int error; 2448 2449 da = (struct dump_args *)arg; 2450 2451 tc = da->tc; 2452 ta = tc->ta; 2453 2454 xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent)); 2455 /* Out of memory, returning */ 2456 if (xent == NULL) 2457 return (1); 2458 xent->len = sizeof(ipfw_table_xentry); 2459 xent->tbl = da->uidx; 2460 2461 memset(&da->tent, 0, sizeof(da->tent)); 2462 tent = &da->tent; 2463 error = ta->dump_tentry(tc->astate, da->ti, e, tent); 2464 if (error != 0) 2465 return (error); 2466 2467 /* Convert current format to previous one */ 2468 xent->masklen = tent->masklen; 2469 pval = get_table_value(da->ch, da->tc, da->tent.v.kidx); 2470 xent->value = ipfw_export_table_value_legacy(pval); 2471 /* Apply some hacks */ 2472 if (tc->no.subtype == IPFW_TABLE_ADDR && tent->subtype == AF_INET) { 2473 xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr; 2474 xent->flags = IPFW_TCF_INET; 2475 } else 2476 memcpy(&xent->k, &tent->k, sizeof(xent->k)); 2477 2478 return (0); 2479 } 2480 2481 /* 2482 * Helper function to export table algo data 2483 * to tentry format before calling user function. 2484 * 2485 * Returns 0 on success. 2486 */ 2487 static int 2488 prepare_table_tentry(void *e, void *arg) 2489 { 2490 struct dump_args *da; 2491 struct table_config *tc; 2492 struct table_algo *ta; 2493 int error; 2494 2495 da = (struct dump_args *)arg; 2496 2497 tc = da->tc; 2498 ta = tc->ta; 2499 2500 error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent); 2501 if (error != 0) 2502 return (error); 2503 2504 da->f(&da->tent, da->farg); 2505 2506 return (0); 2507 } 2508 2509 /* 2510 * Allow external consumers to read table entries in standard format. 
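 *
 * The callback is invoked once per entry with a pointer to a
 * filled-in ipfw_obj_tentry as its first argument. A minimal
 * consumer sketch (the callback name is purely illustrative):
 *
 *	static int
 *	count_entries(void *tent, void *arg)
 *	{
 *
 *		(*(uint32_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint32_t n = 0;
 *	error = ipfw_foreach_table_tentry(ch, kidx, count_entries, &n);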
2511 */ 2512 int 2513 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx, 2514 ta_foreach_f *f, void *arg) 2515 { 2516 struct namedobj_instance *ni; 2517 struct table_config *tc; 2518 struct table_algo *ta; 2519 struct dump_args da; 2520 2521 ni = CHAIN_TO_NI(ch); 2522 2523 tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx); 2524 if (tc == NULL) 2525 return (ESRCH); 2526 2527 ta = tc->ta; 2528 2529 memset(&da, 0, sizeof(da)); 2530 da.ch = ch; 2531 da.ti = KIDX_TO_TI(ch, tc->no.kidx); 2532 da.tc = tc; 2533 da.f = f; 2534 da.farg = arg; 2535 2536 ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da); 2537 2538 return (0); 2539 } 2540 2541 /* 2542 * Table algorithms 2543 */ 2544 2545 /* 2546 * Finds algorithm by index, table type or supplied name. 2547 * 2548 * Returns pointer to algo or NULL. 2549 */ 2550 static struct table_algo * 2551 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name) 2552 { 2553 int i, l; 2554 struct table_algo *ta; 2555 2556 if (ti->type > IPFW_TABLE_MAXTYPE) 2557 return (NULL); 2558 2559 /* Search by index */ 2560 if (ti->atype != 0) { 2561 if (ti->atype > tcfg->algo_count) 2562 return (NULL); 2563 return (tcfg->algo[ti->atype]); 2564 } 2565 2566 if (name == NULL) { 2567 /* Return default algorithm for given type if set */ 2568 return (tcfg->def_algo[ti->type]); 2569 } 2570 2571 /* Search by name */ 2572 /* TODO: better search */ 2573 for (i = 1; i <= tcfg->algo_count; i++) { 2574 ta = tcfg->algo[i]; 2575 2576 /* 2577 * One can supply additional algorithm 2578 * parameters so we compare only the first word 2579 * of supplied name: 2580 * 'addr:chash hsize=32' 2581 * '^^^^^^^^^' 2582 * 2583 */ 2584 l = strlen(ta->name); 2585 if (strncmp(name, ta->name, l) != 0) 2586 continue; 2587 if (name[l] != '\0' && name[l] != ' ') 2588 continue; 2589 /* Check if we're requesting proper table type */ 2590 if (ti->type != 0 && ti->type != ta->type) 2591 return (NULL); 2592 return (ta); 2593 } 2594 2595 return (NULL); 2596 } 2597 2598 /* 2599 * Register new table algo @ta. 2600 * Stores algo id inside @idx. 2601 * 2602 * Returns 0 on success. 2603 */ 2604 int 2605 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size, 2606 int *idx) 2607 { 2608 struct tables_config *tcfg; 2609 struct table_algo *ta_new; 2610 size_t sz; 2611 2612 if (size > sizeof(struct table_algo)) 2613 return (EINVAL); 2614 2615 /* Check for the required on-stack size for add/del */ 2616 sz = roundup2(ta->ta_buf_size, sizeof(void *)); 2617 if (sz > TA_BUF_SZ) 2618 return (EINVAL); 2619 2620 KASSERT(ta->type <= IPFW_TABLE_MAXTYPE,("Increase IPFW_TABLE_MAXTYPE")); 2621 2622 /* Copy algorithm data to stable storage. */ 2623 ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO); 2624 memcpy(ta_new, ta, size); 2625 2626 tcfg = CHAIN_TO_TCFG(ch); 2627 2628 KASSERT(tcfg->algo_count < 255, ("Increase algo array size")); 2629 2630 tcfg->algo[++tcfg->algo_count] = ta_new; 2631 ta_new->idx = tcfg->algo_count; 2632 2633 /* Set algorithm as default one for given type */ 2634 if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 && 2635 tcfg->def_algo[ta_new->type] == NULL) 2636 tcfg->def_algo[ta_new->type] = ta_new; 2637 2638 *idx = ta_new->idx; 2639 2640 return (0); 2641 } 2642 2643 /* 2644 * Unregisters table algo using @idx as id. 2645 * XXX: It is NOT safe to call this function in any place 2646 * other than ipfw instance destroy handler. 
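 *
 * @idx is the index previously returned by ipfw_add_table_algo().
 * An algorithm module would typically pair the two calls roughly
 * like this (illustrative sketch, initializers abridged; the
 * ta_buf_example structure is hypothetical):
 *
 *	static struct table_algo addr_example = {
 *		.name		= "addr:example",
 *		.type		= IPFW_TABLE_ADDR,
 *		.ta_buf_size	= sizeof(struct ta_buf_example),
 *		...
 *	};
 *	static int example_algo_idx;
 *
 *	ipfw_add_table_algo(ch, &addr_example, sizeof(addr_example),
 *	    &example_algo_idx);
 *	...
 *	ipfw_del_table_algo(ch, example_algo_idx);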
2647 */ 2648 void 2649 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx) 2650 { 2651 struct tables_config *tcfg; 2652 struct table_algo *ta; 2653 2654 tcfg = CHAIN_TO_TCFG(ch); 2655 2656 KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d", 2657 idx, tcfg->algo_count)); 2658 2659 ta = tcfg->algo[idx]; 2660 KASSERT(ta != NULL, ("algo idx %d is NULL", idx)); 2661 2662 if (tcfg->def_algo[ta->type] == ta) 2663 tcfg->def_algo[ta->type] = NULL; 2664 2665 free(ta, M_IPFW); 2666 } 2667 2668 /* 2669 * Lists all table algorithms currently available. 2670 * Data layout (v0)(current): 2671 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size 2672 * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ] 2673 * 2674 * Returns 0 on success 2675 */ 2676 static int 2677 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3, 2678 struct sockopt_data *sd) 2679 { 2680 struct _ipfw_obj_lheader *olh; 2681 struct tables_config *tcfg; 2682 ipfw_ta_info *i; 2683 struct table_algo *ta; 2684 uint32_t count, n, size; 2685 2686 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh)); 2687 if (olh == NULL) 2688 return (EINVAL); 2689 if (sd->valsize < olh->size) 2690 return (EINVAL); 2691 2692 IPFW_UH_RLOCK(ch); 2693 tcfg = CHAIN_TO_TCFG(ch); 2694 count = tcfg->algo_count; 2695 size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader); 2696 2697 /* Fill in header regardless of buffer size */ 2698 olh->count = count; 2699 olh->objsize = sizeof(ipfw_ta_info); 2700 2701 if (size > olh->size) { 2702 olh->size = size; 2703 IPFW_UH_RUNLOCK(ch); 2704 return (ENOMEM); 2705 } 2706 olh->size = size; 2707 2708 for (n = 1; n <= count; n++) { 2709 i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i)); 2710 KASSERT(i != NULL, ("previously checked buffer is not enough")); 2711 ta = tcfg->algo[n]; 2712 strlcpy(i->algoname, ta->name, sizeof(i->algoname)); 2713 i->type = ta->type; 2714 i->refcnt = ta->refcnt; 2715 } 2716 2717 IPFW_UH_RUNLOCK(ch); 2718 2719 return (0); 2720 } 2721 2722 static int 2723 classify_srcdst(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2724 { 2725 /* Basic IPv4/IPv6 or u32 lookups */ 2726 *puidx = cmd->arg1; 2727 /* Assume ADDR by default */ 2728 *ptype = IPFW_TABLE_ADDR; 2729 int v; 2730 2731 if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) { 2732 /* 2733 * generic lookup. The key must be 2734 * in 32bit big-endian format.
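 * d[1] of the instruction selects the lookup key source
 * (LOOKUP_DST_IP, LOOKUP_UID, LOOKUP_DSCP, ...), which in turn
 * dictates the table type we report below: address keys keep the
 * IPFW_TABLE_ADDR default, numeric keys map to IPFW_TABLE_NUMBER
 * and MAC keys to IPFW_TABLE_MAC.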
2735 */ 2736 v = ((ipfw_insn_u32 *)cmd)->d[1]; 2737 switch (v) { 2738 case LOOKUP_DST_IP: 2739 case LOOKUP_SRC_IP: 2740 break; 2741 case LOOKUP_DST_PORT: 2742 case LOOKUP_SRC_PORT: 2743 case LOOKUP_UID: 2744 case LOOKUP_JAIL: 2745 case LOOKUP_DSCP: 2746 case LOOKUP_MARK: 2747 *ptype = IPFW_TABLE_NUMBER; 2748 break; 2749 case LOOKUP_DST_MAC: 2750 case LOOKUP_SRC_MAC: 2751 *ptype = IPFW_TABLE_MAC; 2752 break; 2753 } 2754 } 2755 2756 return (0); 2757 } 2758 2759 static int 2760 classify_via(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2761 { 2762 ipfw_insn_if *cmdif; 2763 2764 /* Interface table, possibly */ 2765 cmdif = (ipfw_insn_if *)cmd; 2766 if (cmdif->name[0] != '\1') 2767 return (1); 2768 2769 *ptype = IPFW_TABLE_INTERFACE; 2770 *puidx = cmdif->p.kidx; 2771 2772 return (0); 2773 } 2774 2775 static int 2776 classify_flow(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2777 { 2778 2779 *puidx = cmd->arg1; 2780 *ptype = IPFW_TABLE_FLOW; 2781 2782 return (0); 2783 } 2784 2785 static int 2786 classify_mac_lookup(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 2787 { 2788 *puidx = cmd->arg1; 2789 *ptype = IPFW_TABLE_MAC; 2790 return (0); 2791 } 2792 2793 static void 2794 update_arg1(ipfw_insn *cmd, uint16_t idx) 2795 { 2796 2797 cmd->arg1 = idx; 2798 } 2799 2800 static void 2801 update_via(ipfw_insn *cmd, uint16_t idx) 2802 { 2803 ipfw_insn_if *cmdif; 2804 2805 cmdif = (ipfw_insn_if *)cmd; 2806 cmdif->p.kidx = idx; 2807 } 2808 2809 static int 2810 table_findbyname(struct ip_fw_chain *ch, struct tid_info *ti, 2811 struct named_object **pno) 2812 { 2813 struct table_config *tc; 2814 int error; 2815 2816 IPFW_UH_WLOCK_ASSERT(ch); 2817 2818 error = find_table_err(CHAIN_TO_NI(ch), ti, &tc); 2819 if (error != 0) 2820 return (error); 2821 2822 *pno = &tc->no; 2823 return (0); 2824 } 2825 2826 /* XXX: sets-sets! */ 2827 static struct named_object * 2828 table_findbykidx(struct ip_fw_chain *ch, uint16_t idx) 2829 { 2830 struct namedobj_instance *ni; 2831 struct table_config *tc; 2832 2833 IPFW_UH_WLOCK_ASSERT(ch); 2834 ni = CHAIN_TO_NI(ch); 2835 tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, idx); 2836 KASSERT(tc != NULL, ("Table with index %d not found", idx)); 2837 2838 return (&tc->no); 2839 } 2840 2841 static int 2842 table_manage_sets(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set, 2843 enum ipfw_sets_cmd cmd) 2844 { 2845 2846 switch (cmd) { 2847 case SWAP_ALL: 2848 case TEST_ALL: 2849 case MOVE_ALL: 2850 /* 2851 * Always return success; the real action and decision 2852 * are made by table_manage_sets_all(). 2853 */ 2854 return (0); 2855 case TEST_ONE: 2856 case MOVE_ONE: 2857 /* 2858 * NOTE: we would need to use ipfw_objhash_del/ipfw_objhash_add 2859 * if the set number were used in the hash function. Currently 2860 * we can just use the generic handler that replaces the set value. 2861 */ 2862 if (V_fw_tables_sets == 0) 2863 return (0); 2864 break; 2865 case COUNT_ONE: 2866 /* 2867 * Return EOPNOTSUPP for COUNT_ONE when the per-set sysctl is 2868 * disabled. This allows table opcodes to be skipped from additional 2869 * checks when specific rules are moved to another set. 2870 */ 2871 if (V_fw_tables_sets == 0) 2872 return (EOPNOTSUPP); 2873 } 2874 /* Use generic sets handler when per-set sysctl is enabled. */ 2875 return (ipfw_obj_manage_sets(CHAIN_TO_NI(ch), IPFW_TLV_TBL_NAME, 2876 set, new_set, cmd)); 2877 } 2878 2879 /* 2880 * We register several opcode rewriters for lookup tables. 2881 * All table opcodes have the same ETLV type, but different subtypes.
2882 * To avoid invoking the sets handler several times for XXX_ALL commands, 2883 * we use a separate manage_sets handler. O_RECV has the lowest value, 2884 * so it is called first. 2885 */ 2886 static int 2887 table_manage_sets_all(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set, 2888 enum ipfw_sets_cmd cmd) 2889 { 2890 2891 switch (cmd) { 2892 case SWAP_ALL: 2893 case TEST_ALL: 2894 /* 2895 * Return success for TEST_ALL, since nothing prevents 2896 * moving rules from one set to another. All tables are 2897 * accessible from all sets when per-set tables sysctl 2898 * is disabled. 2899 */ 2900 case MOVE_ALL: 2901 if (V_fw_tables_sets == 0) 2902 return (0); 2903 break; 2904 default: 2905 return (table_manage_sets(ch, set, new_set, cmd)); 2906 } 2907 /* Use generic sets handler when per-set sysctl is enabled. */ 2908 return (ipfw_obj_manage_sets(CHAIN_TO_NI(ch), IPFW_TLV_TBL_NAME, 2909 set, new_set, cmd)); 2910 } 2911 2912 static struct opcode_obj_rewrite opcodes[] = { 2913 { 2914 .opcode = O_IP_SRC_LOOKUP, 2915 .etlv = IPFW_TLV_TBL_NAME, 2916 .classifier = classify_srcdst, 2917 .update = update_arg1, 2918 .find_byname = table_findbyname, 2919 .find_bykidx = table_findbykidx, 2920 .create_object = create_table_compat, 2921 .manage_sets = table_manage_sets, 2922 }, 2923 { 2924 .opcode = O_IP_DST_LOOKUP, 2925 .etlv = IPFW_TLV_TBL_NAME, 2926 .classifier = classify_srcdst, 2927 .update = update_arg1, 2928 .find_byname = table_findbyname, 2929 .find_bykidx = table_findbykidx, 2930 .create_object = create_table_compat, 2931 .manage_sets = table_manage_sets, 2932 }, 2933 { 2934 .opcode = O_IP_FLOW_LOOKUP, 2935 .etlv = IPFW_TLV_TBL_NAME, 2936 .classifier = classify_flow, 2937 .update = update_arg1, 2938 .find_byname = table_findbyname, 2939 .find_bykidx = table_findbykidx, 2940 .create_object = create_table_compat, 2941 .manage_sets = table_manage_sets, 2942 }, 2943 { 2944 .opcode = O_MAC_SRC_LOOKUP, 2945 .etlv = IPFW_TLV_TBL_NAME, 2946 .classifier = classify_mac_lookup, 2947 .update = update_arg1, 2948 .find_byname = table_findbyname, 2949 .find_bykidx = table_findbykidx, 2950 .create_object = create_table_compat, 2951 .manage_sets = table_manage_sets, 2952 }, 2953 { 2954 .opcode = O_MAC_DST_LOOKUP, 2955 .etlv = IPFW_TLV_TBL_NAME, 2956 .classifier = classify_mac_lookup, 2957 .update = update_arg1, 2958 .find_byname = table_findbyname, 2959 .find_bykidx = table_findbykidx, 2960 .create_object = create_table_compat, 2961 .manage_sets = table_manage_sets, 2962 }, 2963 { 2964 .opcode = O_XMIT, 2965 .etlv = IPFW_TLV_TBL_NAME, 2966 .classifier = classify_via, 2967 .update = update_via, 2968 .find_byname = table_findbyname, 2969 .find_bykidx = table_findbykidx, 2970 .create_object = create_table_compat, 2971 .manage_sets = table_manage_sets, 2972 }, 2973 { 2974 .opcode = O_RECV, 2975 .etlv = IPFW_TLV_TBL_NAME, 2976 .classifier = classify_via, 2977 .update = update_via, 2978 .find_byname = table_findbyname, 2979 .find_bykidx = table_findbykidx, 2980 .create_object = create_table_compat, 2981 .manage_sets = table_manage_sets_all, 2982 }, 2983 { 2984 .opcode = O_VIA, 2985 .etlv = IPFW_TLV_TBL_NAME, 2986 .classifier = classify_via, 2987 .update = update_via, 2988 .find_byname = table_findbyname, 2989 .find_bykidx = table_findbykidx, 2990 .create_object = create_table_compat, 2991 .manage_sets = table_manage_sets, 2992 }, 2993 }; 2994 2995 static int 2996 test_sets_cb(struct namedobj_instance *ni __unused, struct named_object *no, 2997 void *arg __unused) 2998 { 2999 3000 /* Check that there aren't any
tables in a non-default set */ 3001 if (no->set != 0) 3002 return (EBUSY); 3003 return (0); 3004 } 3005 3006 /* 3007 * Switch between "set 0" and "rule's set" table binding. 3008 * Check all ruleset bindings and permit changing 3009 * IFF each binding has both its rule AND table in the default set (set 0). 3010 * 3011 * Returns 0 on success. 3012 */ 3013 int 3014 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets) 3015 { 3016 struct opcode_obj_rewrite *rw; 3017 struct namedobj_instance *ni; 3018 struct named_object *no; 3019 struct ip_fw *rule; 3020 ipfw_insn *cmd; 3021 int cmdlen, i, l; 3022 uint16_t kidx; 3023 uint8_t subtype; 3024 3025 IPFW_UH_WLOCK(ch); 3026 3027 if (V_fw_tables_sets == sets) { 3028 IPFW_UH_WUNLOCK(ch); 3029 return (0); 3030 } 3031 ni = CHAIN_TO_NI(ch); 3032 if (sets == 0) { 3033 /* 3034 * Prevent disabling sets support if we have some tables 3035 * in non-default sets. 3036 */ 3037 if (ipfw_objhash_foreach_type(ni, test_sets_cb, 3038 NULL, IPFW_TLV_TBL_NAME) != 0) { 3039 IPFW_UH_WUNLOCK(ch); 3040 return (EBUSY); 3041 } 3042 } 3043 /* 3044 * Scan all rules and examine table opcodes. 3045 */ 3046 for (i = 0; i < ch->n_rules; i++) { 3047 rule = ch->map[i]; 3048 3049 l = rule->cmd_len; 3050 cmd = rule->cmd; 3051 cmdlen = 0; 3052 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 3053 cmdlen = F_LEN(cmd); 3054 /* Check only table opcodes */ 3055 for (kidx = 0, rw = opcodes; 3056 rw < opcodes + nitems(opcodes); rw++) { 3057 if (rw->opcode != cmd->opcode) 3058 continue; 3059 if (rw->classifier(cmd, &kidx, &subtype) == 0) 3060 break; 3061 } 3062 if (kidx == 0) 3063 continue; 3064 no = ipfw_objhash_lookup_kidx(ni, kidx); 3065 /* Check if both the table object and the rule have set 0 */ 3066 if (no->set != 0 || rule->set != 0) { 3067 IPFW_UH_WUNLOCK(ch); 3068 return (EBUSY); 3069 } 3070 } 3071 } 3072 V_fw_tables_sets = sets; 3073 IPFW_UH_WUNLOCK(ch); 3074 return (0); 3075 } 3076 3077 /* 3078 * Checks table name for validity. 3079 * Enforce basic length checks; the rest 3080 * should be done in userland. 3081 * 3082 * Returns 0 if name is considered valid. 3083 */ 3084 static int 3085 check_table_name(const char *name) 3086 { 3087 3088 /* 3089 * TODO: do some more complicated checks 3090 */ 3091 return (ipfw_check_object_name_generic(name)); 3092 } 3093 3094 /* 3095 * Finds table config based on either legacy index 3096 * or name in ntlv. 3097 * Note @ti structure contains unchecked data from userland. 3098 * 3099 * Returns 0 on success and fills in @tc with the found config 3100 */ 3101 static int 3102 find_table_err(struct namedobj_instance *ni, struct tid_info *ti, 3103 struct table_config **tc) 3104 { 3105 char *name, bname[16]; 3106 struct named_object *no; 3107 ipfw_obj_ntlv *ntlv; 3108 uint32_t set; 3109 3110 if (ti->tlvs != NULL) { 3111 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 3112 IPFW_TLV_TBL_NAME); 3113 if (ntlv == NULL) 3114 return (EINVAL); 3115 name = ntlv->name; 3116 3117 /* 3118 * Use set provided by @ti instead of @ntlv one. 3119 * This is needed due to different sets behavior 3120 * controlled by V_fw_tables_sets. 3121 */ 3122 set = (V_fw_tables_sets != 0) ? ti->set : 0; 3123 } else { 3124 snprintf(bname, sizeof(bname), "%d", ti->uidx); 3125 name = bname; 3126 set = 0; 3127 } 3128 3129 no = ipfw_objhash_lookup_name(ni, set, name); 3130 *tc = (struct table_config *)no; 3131 3132 return (0); 3133 } 3134 3135 /* 3136 * Finds table config based on either legacy index 3137 * or name in ntlv.
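 * Legacy callers reference tables by number only; such an index is
 * converted to its decimal string form and always looked up in set 0.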
3138 * Note @ti structure contains unchecked data from userland. 3139 * 3140 * Returns pointer to table_config or NULL. 3141 */ 3142 static struct table_config * 3143 find_table(struct namedobj_instance *ni, struct tid_info *ti) 3144 { 3145 struct table_config *tc; 3146 3147 if (find_table_err(ni, ti, &tc) != 0) 3148 return (NULL); 3149 3150 return (tc); 3151 } 3152 3153 /* 3154 * Allocate new table config structure using 3155 * specified @algo and @aname. 3156 * 3157 * Returns pointer to config or NULL. 3158 */ 3159 static struct table_config * 3160 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti, 3161 struct table_algo *ta, char *aname, uint8_t tflags) 3162 { 3163 char *name, bname[16]; 3164 struct table_config *tc; 3165 int error; 3166 ipfw_obj_ntlv *ntlv; 3167 uint32_t set; 3168 3169 if (ti->tlvs != NULL) { 3170 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 3171 IPFW_TLV_TBL_NAME); 3172 if (ntlv == NULL) 3173 return (NULL); 3174 name = ntlv->name; 3175 set = (V_fw_tables_sets == 0) ? 0 : ntlv->set; 3176 } else { 3177 /* Compat part: convert number to string representation */ 3178 snprintf(bname, sizeof(bname), "%d", ti->uidx); 3179 name = bname; 3180 set = 0; 3181 } 3182 3183 tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO); 3184 tc->no.name = tc->tablename; 3185 tc->no.subtype = ta->type; 3186 tc->no.set = set; 3187 tc->tflags = tflags; 3188 tc->ta = ta; 3189 strlcpy(tc->tablename, name, sizeof(tc->tablename)); 3190 /* Set "shared" value type by default */ 3191 tc->vshared = 1; 3192 3193 /* Preallocate data structures for new tables */ 3194 error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags); 3195 if (error != 0) { 3196 free(tc, M_IPFW); 3197 return (NULL); 3198 } 3199 3200 return (tc); 3201 } 3202 3203 /* 3204 * Destroys table state and config. 3205 */ 3206 static void 3207 free_table_config(struct namedobj_instance *ni, struct table_config *tc) 3208 { 3209 3210 KASSERT(tc->linked == 0, ("free() on linked config")); 3211 /* UH lock MUST NOT be held */ 3212 3213 /* 3214 * We're using ta without any locking/referencing. 3215 * TODO: fix this if we're going to use unloadable algos. 3216 */ 3217 tc->ta->destroy(tc->astate, &tc->ti_copy); 3218 free(tc, M_IPFW); 3219 } 3220 3221 /* 3222 * Links @tc to @chain table named instance. 3223 * Sets appropriate type/states in @chain table info. 3224 */ 3225 static void 3226 link_table(struct ip_fw_chain *ch, struct table_config *tc) 3227 { 3228 struct namedobj_instance *ni; 3229 struct table_info *ti; 3230 uint16_t kidx; 3231 3232 IPFW_UH_WLOCK_ASSERT(ch); 3233 3234 ni = CHAIN_TO_NI(ch); 3235 kidx = tc->no.kidx; 3236 3237 ipfw_objhash_add(ni, &tc->no); 3238 3239 ti = KIDX_TO_TI(ch, kidx); 3240 *ti = tc->ti_copy; 3241 3242 /* Notify algo on real @ti address */ 3243 if (tc->ta->change_ti != NULL) 3244 tc->ta->change_ti(tc->astate, ti); 3245 3246 tc->linked = 1; 3247 tc->ta->refcnt++; 3248 } 3249 3250 /* 3251 * Unlinks @tc from @chain table named instance. 3252 * Zeroes states in @chain and stores them in @tc. 3253 */ 3254 static void 3255 unlink_table(struct ip_fw_chain *ch, struct table_config *tc) 3256 { 3257 struct namedobj_instance *ni; 3258 struct table_info *ti; 3259 uint16_t kidx; 3260 3261 IPFW_UH_WLOCK_ASSERT(ch); 3262 IPFW_WLOCK_ASSERT(ch); 3263 3264 ni = CHAIN_TO_NI(ch); 3265 kidx = tc->no.kidx; 3266 3267 /* Clear state. 
@ti copy is already saved inside @tc */ 3268 ipfw_objhash_del(ni, &tc->no); 3269 ti = KIDX_TO_TI(ch, kidx); 3270 memset(ti, 0, sizeof(struct table_info)); 3271 tc->linked = 0; 3272 tc->ta->refcnt--; 3273 3274 /* Notify algo on real @ti address */ 3275 if (tc->ta->change_ti != NULL) 3276 tc->ta->change_ti(tc->astate, NULL); 3277 } 3278 3279 static struct ipfw_sopt_handler scodes[] = { 3280 { IP_FW_TABLE_XCREATE, 0, HDIR_SET, create_table }, 3281 { IP_FW_TABLE_XDESTROY, 0, HDIR_SET, flush_table_v0 }, 3282 { IP_FW_TABLE_XFLUSH, 0, HDIR_SET, flush_table_v0 }, 3283 { IP_FW_TABLE_XMODIFY, 0, HDIR_BOTH, modify_table }, 3284 { IP_FW_TABLE_XINFO, 0, HDIR_GET, describe_table }, 3285 { IP_FW_TABLES_XLIST, 0, HDIR_GET, list_tables }, 3286 { IP_FW_TABLE_XLIST, 0, HDIR_GET, dump_table_v0 }, 3287 { IP_FW_TABLE_XLIST, 1, HDIR_GET, dump_table_v1 }, 3288 { IP_FW_TABLE_XADD, 0, HDIR_BOTH, manage_table_ent_v0 }, 3289 { IP_FW_TABLE_XADD, 1, HDIR_BOTH, manage_table_ent_v1 }, 3290 { IP_FW_TABLE_XDEL, 0, HDIR_BOTH, manage_table_ent_v0 }, 3291 { IP_FW_TABLE_XDEL, 1, HDIR_BOTH, manage_table_ent_v1 }, 3292 { IP_FW_TABLE_XFIND, 0, HDIR_GET, find_table_entry }, 3293 { IP_FW_TABLE_XSWAP, 0, HDIR_SET, swap_table }, 3294 { IP_FW_TABLES_ALIST, 0, HDIR_GET, list_table_algo }, 3295 { IP_FW_TABLE_XGETSIZE, 0, HDIR_GET, get_table_size }, 3296 }; 3297 3298 static int 3299 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no, 3300 void *arg) 3301 { 3302 3303 unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no); 3304 if (ipfw_objhash_free_idx(ni, no->kidx) != 0) 3305 printf("Error unlinking kidx %d from table %s\n", 3306 no->kidx, no->name); 3307 free_table_config(ni, (struct table_config *)no); 3308 return (0); 3309 } 3310 3311 /* 3312 * Shuts tables module down. 3313 */ 3314 void 3315 ipfw_destroy_tables(struct ip_fw_chain *ch, int last) 3316 { 3317 3318 IPFW_DEL_SOPT_HANDLER(last, scodes); 3319 IPFW_DEL_OBJ_REWRITER(last, opcodes); 3320 3321 /* Remove all tables from working set */ 3322 IPFW_UH_WLOCK(ch); 3323 IPFW_WLOCK(ch); 3324 ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch); 3325 IPFW_WUNLOCK(ch); 3326 IPFW_UH_WUNLOCK(ch); 3327 3328 /* Free the table state array itself */ 3329 free(ch->tablestate, M_IPFW); 3330 3331 ipfw_table_value_destroy(ch, last); 3332 ipfw_table_algo_destroy(ch); 3333 3334 ipfw_objhash_destroy(CHAIN_TO_NI(ch)); 3335 free(CHAIN_TO_TCFG(ch), M_IPFW); 3336 } 3337 3338 /* 3339 * Starts tables module. 3340 */ 3341 int 3342 ipfw_init_tables(struct ip_fw_chain *ch, int first) 3343 { 3344 struct tables_config *tcfg; 3345 3346 /* Allocate pointers */ 3347 ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info), 3348 M_IPFW, M_WAITOK | M_ZERO); 3349 3350 tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO); 3351 tcfg->namehash = ipfw_objhash_create(V_fw_tables_max); 3352 ch->tblcfg = tcfg; 3353 3354 ipfw_table_value_init(ch, first); 3355 ipfw_table_algo_init(ch); 3356 3357 IPFW_ADD_OBJ_REWRITER(first, opcodes); 3358 IPFW_ADD_SOPT_HANDLER(first, scodes); 3359 return (0); 3360 } 3361
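/*
 * Usage sketch (illustrative only): the per-VNET attach/detach code
 * is expected to bracket everything else in this file with
 *
 *	ipfw_init_tables(chain, first);
 *	...
 *	ipfw_destroy_tables(chain, last);
 *
 * where @first/@last indicate whether this is the first/last ipfw
 * instance, so that the global sopt handlers (scodes[]) and opcode
 * rewriters (opcodes[]) are registered and removed exactly once.
 */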