xref: /freebsd/sys/netpfil/ipfw/ip_fw_table.c (revision 7d8f797b725e3efc0a4256554654780df83c456c)
1 /*-
2  * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
3  * Copyright (c) 2014 Yandex LLC
4  * Copyright (c) 2014 Alexander V. Chernikov
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * Lookup table support for ipfw.
33  *
34  * This file contains handlers for all generic tables' operations:
35  * add/del/flush entries, list/dump tables, etc.
36  *
37  * Table data modification is protected by both UH and runtime locks,
38  * while reading configuration/data is protected by the UH lock.
39  *
40  * Lookup algorithms for all table types are located in ip_fw_table_algo.c
41  */
42 
43 #include "opt_ipfw.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/rwlock.h>
51 #include <sys/rmlock.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/queue.h>
55 #include <net/if.h>	/* ip_fw.h requires IFNAMSIZ */
56 
57 #include <netinet/in.h>
58 #include <netinet/ip_var.h>	/* struct ipfw_rule_ref */
59 #include <netinet/ip_fw.h>
60 
61 #include <netpfil/ipfw/ip_fw_private.h>
62 #include <netpfil/ipfw/ip_fw_table.h>
63 
64 /*
65  * Table has the following `type` concepts:
66  *
67  * `no.type` represents the lookup key type (addr, ifp, uid, etc.)
68  * `vmask` represents a bitmask of the table values which are present at the moment.
69  * The special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) value represents the old
70  * single-value-for-all approach.
71  */
72 struct table_config {
73 	struct named_object	no;
74 	uint8_t		tflags;		/* type flags */
75 	uint8_t		locked;		/* 1 if locked from changes */
76 	uint8_t		linked;		/* 1 if already linked */
77 	uint8_t		ochanged;	/* used by set swapping */
78 	uint8_t		vshared;	/* 1 if using shared value array */
79 	uint8_t		spare[3];
80 	uint32_t	count;		/* Number of records */
81 	uint32_t	limit;		/* Max number of records */
82 	uint32_t	vmask;		/* bitmask with supported values */
83 	uint32_t	ocount;		/* used by set swapping */
84 	uint64_t	gencnt;		/* generation count */
85 	char		tablename[64];	/* table name */
86 	struct table_algo	*ta;	/* Callbacks for given algo */
87 	void		*astate;	/* algorithm state */
88 	struct table_info	ti_copy;	/* data to put to table_info */
89 	struct namedobj_instance	*vi;
90 };
91 
92 static int find_table_err(struct namedobj_instance *ni, struct tid_info *ti,
93     struct table_config **tc);
94 static struct table_config *find_table(struct namedobj_instance *ni,
95     struct tid_info *ti);
96 static struct table_config *alloc_table_config(struct ip_fw_chain *ch,
97     struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags);
98 static void free_table_config(struct namedobj_instance *ni,
99     struct table_config *tc);
100 static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
101     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref);
102 static void link_table(struct ip_fw_chain *ch, struct table_config *tc);
103 static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc);
104 static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
105     struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc);
106 #define	OP_ADD	1
107 #define	OP_DEL	0
108 static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
109     struct sockopt_data *sd);
110 static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
111     ipfw_xtable_info *i);
112 static int dump_table_tentry(void *e, void *arg);
113 static int dump_table_xentry(void *e, void *arg);
114 
115 static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
116     struct tid_info *b);
117 
118 static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
119     struct table_config *tc, struct table_info *ti, uint32_t count);
120 static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);
121 
122 static struct table_algo *find_table_algo(struct tables_config *tableconf,
123     struct tid_info *ti, char *name);
124 
125 static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti);
126 static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);
127 
128 #define	CHAIN_TO_NI(chain)	(CHAIN_TO_TCFG(chain)->namehash)
129 #define	KIDX_TO_TI(ch, k)	(&(((struct table_info *)(ch)->tablestate)[k]))
130 
131 #define	TA_BUF_SZ	128	/* On-stack buffer for add/delete state */
132 
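/*
 * Invokes the rollback callback of every in-flight table operation
 * registered on @ch, passing @object; each callback decides whether
 * it is affected (see rollback_add_entry() and restart_flush()).
 * Called by writers such as swap_tables() right before they modify
 * shared state.
 */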
133 void
134 rollback_toperation_state(struct ip_fw_chain *ch, void *object)
135 {
136 	struct tables_config *tcfg;
137 	struct op_state *os;
138 
139 	tcfg = CHAIN_TO_TCFG(ch);
140 	TAILQ_FOREACH(os, &tcfg->state_list, next)
141 		os->func(object, os);
142 }
143 
144 void
145 add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
146 {
147 	struct tables_config *tcfg;
148 
149 	tcfg = CHAIN_TO_TCFG(ch);
150 	TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
151 }
152 
153 void
154 del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
155 {
156 	struct tables_config *tcfg;
157 
158 	tcfg = CHAIN_TO_TCFG(ch);
159 	TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
160 }
161 
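/*
 * Reference counting helpers for table configs.
 */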
162 void
163 tc_ref(struct table_config *tc)
164 {
165 
166 	tc->no.refcnt++;
167 }
168 
169 void
170 tc_unref(struct table_config *tc)
171 {
172 
173 	tc->no.refcnt--;
174 }
175 
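/*
 * Returns a pointer to the table value slot @kidx
 * in the chain-wide value state array.
 */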
176 static struct table_value *
177 get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
178 {
179 	struct table_value *pval;
180 
181 	pval = (struct table_value *)ch->valuestate;
182 
183 	return (&pval[kidx]);
184 }
185 
186 
187 /*
188  * Checks if we're able to insert/update entry @tei into table
189  * w.r.t @tc limits.
190  * May alter @tei to indicate insertion error / insert
191  * options.
192  *
193  * Returns 0 if the operation can be performed.
194  */
195 static int
196 check_table_limit(struct table_config *tc, struct tentry_info *tei)
197 {
198 
199 	if (tc->limit == 0 || tc->count < tc->limit)
200 		return (0);
201 
202 	if ((tei->flags & TEI_FLAGS_UPDATE) == 0) {
203 		/* Notify userland on error cause */
204 		tei->flags |= TEI_FLAGS_LIMIT;
205 		return (EFBIG);
206 	}
207 
208 	/*
209 	 * We have UPDATE flag set.
210 	 * Permit updating record (if found),
211 	 * but restrict adding new one since we've
212 	 * already hit the limit.
213 	 */
214 	tei->flags |= TEI_FLAGS_DONTADD;
215 
216 	return (0);
217 }
218 
219 /*
220  * Convert algorithm callback return code into
221  * one of pre-defined states known by userland.
222  */
223 static void
224 store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num)
225 {
226 	int flag;
227 
228 	flag = 0;
229 
230 	switch (error) {
231 	case 0:
232 		if (op == OP_ADD && num != 0)
233 			flag = TEI_FLAGS_ADDED;
234 		if (op == OP_DEL)
235 			flag = TEI_FLAGS_DELETED;
236 		break;
237 	case ENOENT:
238 		flag = TEI_FLAGS_NOTFOUND;
239 		break;
240 	case EEXIST:
241 		flag = TEI_FLAGS_EXISTS;
242 		break;
243 	default:
244 		flag = TEI_FLAGS_ERROR;
245 	}
246 
247 	tei->flags |= flag;
248 }
249 
250 /*
251  * Creates and references table with default parameters.
252  * Saves table config, algo and allocated kidx into @ptc, @pta and
253  * @pkidx if non-zero.
254  * Used for table auto-creation to support old binaries.
255  *
256  * Returns 0 on success.
257  */
258 static int
259 create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
260     uint16_t *pkidx)
261 {
262 	ipfw_xtable_info xi;
263 	int error;
264 
265 	memset(&xi, 0, sizeof(xi));
266 	/* Set default value mask for legacy clients */
267 	xi.vmask = IPFW_VTYPE_LEGACY;
268 
269 	error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1);
270 	if (error != 0)
271 		return (error);
272 
273 	return (0);
274 }
275 
276 /*
277  * Finds and references an existing table, optionally
278  * creating a new one.
279  *
280  * Saves the found table config into @ptc.
281  * Note the function may drop/reacquire the UH_WLOCK.
282  * Returns 0 if the table was found/created and referenced,
283  * or a non-zero error code otherwise.
284  */
285 static int
286 find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
287     struct tentry_info *tei, uint32_t count, int op,
288     struct table_config **ptc)
289 {
290 	struct namedobj_instance *ni;
291 	struct table_config *tc;
292 	uint16_t kidx;
293 	int error;
294 
295 	IPFW_UH_WLOCK_ASSERT(ch);
296 
297 	ni = CHAIN_TO_NI(ch);
298 	tc = NULL;
299 	if ((tc = find_table(ni, ti)) != NULL) {
300 		/* check table type */
301 		if (tc->no.subtype != ti->type)
302 			return (EINVAL);
303 
304 		if (tc->locked != 0)
305 			return (EACCES);
306 
307 		/* Try to exit early on limit hit */
308 		if (op == OP_ADD && count == 1 &&
309 		    check_table_limit(tc, tei) != 0)
310 			return (EFBIG);
311 
312 		/* Reference and return */
313 		tc->no.refcnt++;
314 		*ptc = tc;
315 		return (0);
316 	}
317 
318 	if (op == OP_DEL)
319 		return (ESRCH);
320 
321 	/* Compatibility mode: create new table for old clients */
322 	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
323 		return (ESRCH);
324 
325 	IPFW_UH_WUNLOCK(ch);
326 	error = create_table_compat(ch, ti, &kidx);
327 	IPFW_UH_WLOCK(ch);
328 
329 	if (error != 0)
330 		return (error);
331 
332 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
333 	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));
334 
335 	/* OK, now we've got referenced table. */
336 	*ptc = tc;
337 	return (0);
338 }
339 
340 /*
341  * Rolls back the @added entries already inserted into @tc using state array @ta_buf_m.
342  * Assumes the following layout:
343  * 1) ADD state (ta_buf_m[0] ... ta_buf_m[added - 1]) for handling update cases
344  * 2) DEL state (ta_buf_m[count] ... ta_buf_m[count + added - 1])
345  *   for storing deleted state
346  */
347 static void
348 rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
349     struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
350     uint32_t count, uint32_t added)
351 {
352 	struct table_algo *ta;
353 	struct tentry_info *ptei;
354 	caddr_t v, vv;
355 	size_t ta_buf_sz;
356 	int error, i;
357 	uint32_t num;
358 
359 	IPFW_UH_WLOCK_ASSERT(ch);
360 
361 	ta = tc->ta;
362 	ta_buf_sz = ta->ta_buf_size;
363 	v = ta_buf_m;
364 	vv = v + count * ta_buf_sz;
365 	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
366 		ptei = &tei[i];
367 		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
368 
369 			/*
370 			 * We have old value stored by previous
371 			 * call in @ptei->value. Do add once again
372 			 * to restore it.
373 			 */
374 			error = ta->add(tc->astate, tinfo, ptei, v, &num);
375 			KASSERT(error == 0, ("rollback UPDATE fail"));
376 			KASSERT(num == 0, ("rollback UPDATE fail2"));
377 			continue;
378 		}
379 
380 		error = ta->prepare_del(ch, ptei, vv);
381 		KASSERT(error == 0, ("pre-rollback INSERT failed"));
382 		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
383 		KASSERT(error == 0, ("rollback INSERT failed"));
384 		tc->count -= num;
385 	}
386 }
387 
388 /*
389  * Prepares add/del state for all @count entries in @tei.
390  * Uses either stack buffer (@ta_buf) or allocates a new one.
391  * Stores pointer to allocated buffer back to @ta_buf.
392  *
393  * Returns 0 on success.
394  */
395 static int
396 prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
397     struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
398 {
399 	caddr_t ta_buf_m, v;
400 	size_t ta_buf_sz, sz;
401 	struct tentry_info *ptei;
402 	int error, i;
403 
404 	error = 0;
405 	ta_buf_sz = ta->ta_buf_size;
406 	if (count == 1) {
407 		/* Single add/delete, use on-stack buffer */
408 		memset(*ta_buf, 0, TA_BUF_SZ);
409 		ta_buf_m = *ta_buf;
410 	} else {
411 
412 		/*
413 		 * Multiple adds/deletes, allocate larger buffer
414 		 *
415 		 * Note we need a 2 x count buffer for the add case:
416 		 * we have to hold both the ADD state
417 		 * and the DELETE state (this may be needed
418 		 * if we need to roll back all changes)
419 		 */
420 		sz = count * ta_buf_sz;
421 		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
422 		    M_WAITOK | M_ZERO);
423 	}
424 
425 	v = ta_buf_m;
426 	for (i = 0; i < count; i++, v += ta_buf_sz) {
427 		ptei = &tei[i];
428 		error = (op == OP_ADD) ?
429 		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);
430 
431 		/*
432 		 * Some syntax error (incorrect mask, or address, or
433 		 * anything). Return error regardless of atomicity
434 		 * settings.
435 		 */
436 		if (error != 0)
437 			break;
438 	}
439 
440 	*ta_buf = ta_buf_m;
441 	return (error);
442 }
443 
444 /*
445  * Flushes allocated state for each @count entries in @tei.
446  * Frees @ta_buf_m if differs from stack buffer @ta_buf.
447  */
448 static void
449 flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
450     struct tentry_info *tei, uint32_t count, int rollback,
451     caddr_t ta_buf_m, caddr_t ta_buf)
452 {
453 	caddr_t v;
454 	struct tentry_info *ptei;
455 	size_t ta_buf_sz;
456 	int i;
457 
458 	ta_buf_sz = ta->ta_buf_size;
459 
460 	/* Run cleaning callback anyway */
461 	v = ta_buf_m;
462 	for (i = 0; i < count; i++, v += ta_buf_sz) {
463 		ptei = &tei[i];
464 		ta->flush_entry(ch, ptei, v);
465 		if (ptei->ptv != NULL) {
466 			free(ptei->ptv, M_IPFW);
467 			ptei->ptv = NULL;
468 		}
469 	}
470 
471 	/* Clean up "deleted" state in case of rollback */
472 	if (rollback != 0) {
473 		v = ta_buf_m + count * ta_buf_sz;
474 		for (i = 0; i < count; i++, v += ta_buf_sz)
475 			ta->flush_entry(ch, &tei[i], v);
476 	}
477 
478 	if (ta_buf_m != ta_buf)
479 		free(ta_buf_m, M_TEMP);
480 }
481 
482 
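/*
 * Rollback handler for in-flight add_table_entry() operations.
 * Invoked via rollback_toperation_state() when @object (the table
 * being modified or the whole chain) is about to be changed by
 * another writer: unrolls the value linkage done so far and marks
 * the operation state as modified so the initiator restarts.
 */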
483 static void
484 rollback_add_entry(void *object, struct op_state *_state)
485 {
486 	struct ip_fw_chain *ch;
487 	struct tableop_state *ts;
488 
489 	ts = (struct tableop_state *)_state;
490 
491 	if (ts->tc != object && ts->ch != object)
492 		return;
493 
494 	ch = ts->ch;
495 
496 	IPFW_UH_WLOCK_ASSERT(ch);
497 
498 	/* Call specified unlockers */
499 	rollback_table_values(ts);
500 
501 	/* Indicate we've called */
502 	ts->modified = 1;
503 }
504 
505 /*
506  * Adds/updates one or more entries in table @ti.
507  *
508  * Function may drop/reacquire UH wlock multiple times due to
509  * items alloc, algorithm callbacks (check_space), value linkage
510  * (new values, value storage realloc), etc.
511  * Other processes like other adds (which may involve storage resize),
512  * table swaps (which change table data and may change the algo type),
513  * or table modify (which may change the value mask) may be executed
514  * simultaneously, so we need to deal with it.
515  *
516  * The following approach was implemented:
517  * we have per-chain linked list, protected with UH lock.
518  * add_table_entry prepares a special on-stack structure which is passed
519  * to its descendants. Users add this structure to this list before unlocking.
520  * After performing needed operations and acquiring UH lock back, each user
521  * checks if structure has changed. If true, it rolls local state back and
522  * returns without error to the caller.
523  * add_table_entry() on its own checks if structure has changed and restarts
524  * its operation from the beginning (goto restart).
525  *
526  * Functions which modify the fields of interest (currently
527  *   resize_shared_value_storage() and swap_tables() )
528  * traverse the given list while holding the UH lock immediately before
529  * performing their operations, calling the function provided by the list entry
530  * ( currently rollback_add_entry ) which performs a rollback for all necessary
531  * state and sets appropriate values in the structure indicating a rollback
532  * has happened.
533  *
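 * A minimal sketch of this pattern as used below (illustrative only;
 * the real code does more work between the lock drops):
 *
 *	add_toperation_state(ch, &ts);
 *	IPFW_UH_WUNLOCK(ch);
 *	... allocate / prepare state without holding locks ...
 *	IPFW_UH_WLOCK(ch);
 *	del_toperation_state(ch, &ts);
 *	if (ts.modified != 0)
 *		goto restart;	(our state was rolled back meanwhile)
 *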
534  * Algo interaction:
535  * Function references @ti first to ensure table won't
536  * disappear or change its type.
537  * After that, prepare_add callback is called for each @tei entry.
538  * Next, we try to add each entry under UH+WLOCK
539  * using add() callback.
540  * Finally, we free all state by calling flush_entry callback
541  * for each @tei.
542  *
543  * Returns 0 on success.
544  */
545 int
546 add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
547     struct tentry_info *tei, uint8_t flags, uint32_t count)
548 {
549 	struct table_config *tc;
550 	struct table_algo *ta;
551 	uint16_t kidx;
552 	int error, first_error, i, rollback;
553 	uint32_t num, numadd;
554 	struct tentry_info *ptei;
555 	struct tableop_state ts;
556 	char ta_buf[TA_BUF_SZ];
557 	caddr_t ta_buf_m, v;
558 
559 	memset(&ts, 0, sizeof(ts));
560 	ta = NULL;
561 	IPFW_UH_WLOCK(ch);
562 
563 	/*
564 	 * Find and reference existing table.
565 	 */
566 restart:
567 	if (ts.modified != 0) {
568 		IPFW_UH_WUNLOCK(ch);
569 		flush_batch_buffer(ch, ta, tei, count, rollback,
570 		    ta_buf_m, ta_buf);
571 		memset(&ts, 0, sizeof(ts));
572 		ta = NULL;
573 		IPFW_UH_WLOCK(ch);
574 	}
575 
576 	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
577 	if (error != 0) {
578 		IPFW_UH_WUNLOCK(ch);
579 		return (error);
580 	}
581 	ta = tc->ta;
582 
583 	/* Fill in tablestate */
584 	ts.ch = ch;
585 	ts.opstate.func = rollback_add_entry;
586 	ts.tc = tc;
587 	ts.vshared = tc->vshared;
588 	ts.vmask = tc->vmask;
589 	ts.ta = ta;
590 	ts.tei = tei;
591 	ts.count = count;
592 	rollback = 0;
593 	add_toperation_state(ch, &ts);
594 	IPFW_UH_WUNLOCK(ch);
595 
596 	/* Allocate memory and prepare record(s) */
597 	/* Pass stack buffer by default */
598 	ta_buf_m = ta_buf;
599 	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);
600 
601 	IPFW_UH_WLOCK(ch);
602 	del_toperation_state(ch, &ts);
603 	/* Drop reference we've used in first search */
604 	tc->no.refcnt--;
605 
606 	/* Check prepare_batch_buffer() error */
607 	if (error != 0)
608 		goto cleanup;
609 
610 	/*
611 	 * Check if table swap has happened.
612 	 * (so table algo might be changed).
613 	 * Restart operation to achieve consistent behavior.
614 	 */
615 	if (ts.modified != 0)
616 		goto restart;
617 
618 	/*
619 	 * Link all values to the shared/per-table value array.
620 	 *
621 	 * May release/reacquire UH_WLOCK.
622 	 */
623 	error = ipfw_link_table_values(ch, &ts);
624 	if (error != 0)
625 		goto cleanup;
626 	if (ts.modified != 0)
627 		goto restart;
628 
629 	/*
630 	 * Ensure we are able to add all entries without additional
631 	 * memory allocations. May release/reacquire UH_WLOCK.
632 	 */
633 	kidx = tc->no.kidx;
634 	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
635 	if (error != 0)
636 		goto cleanup;
637 	if (ts.modified != 0)
638 		goto restart;
639 
640 	/* We've got valid table in @tc. Let's try to add data */
641 	kidx = tc->no.kidx;
642 	ta = tc->ta;
643 	numadd = 0;
644 	first_error = 0;
645 
646 	IPFW_WLOCK(ch);
647 
648 	v = ta_buf_m;
649 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
650 		ptei = &tei[i];
651 		num = 0;
652 		/* check limit before adding */
653 		if ((error = check_table_limit(tc, ptei)) == 0) {
654 			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
655 			    ptei, v, &num);
656 			/* Set status flag to inform userland */
657 			store_tei_result(ptei, OP_ADD, error, num);
658 		}
659 		if (error == 0) {
660 			/* Update number of records to ease limit checking */
661 			tc->count += num;
662 			numadd += num;
663 			continue;
664 		}
665 
666 		if (first_error == 0)
667 			first_error = error;
668 
669 		/*
670 		 * Some error has happened. Check our atomicity
671 		 * settings: continue if atomicity is not required,
672 		 * rollback changes otherwise.
673 		 */
674 		if ((flags & IPFW_CTF_ATOMIC) == 0)
675 			continue;
676 
677 		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
678 		    tei, ta_buf_m, count, i);
679 
680 		rollback = 1;
681 		break;
682 	}
683 
684 	IPFW_WUNLOCK(ch);
685 
686 	ipfw_garbage_table_values(ch, tc, tei, count, rollback);
687 
688 	/* Permit post-add algorithm grow/rehash. */
689 	if (numadd != 0)
690 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
691 
692 	/* Return first error to user, if any */
693 	error = first_error;
694 
695 cleanup:
696 	IPFW_UH_WUNLOCK(ch);
697 
698 	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);
699 
700 	return (error);
701 }
702 
703 /*
704  * Deletes one or more entries in table @ti.
705  *
706  * Returns 0 on success.
707  */
708 int
709 del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
710     struct tentry_info *tei, uint8_t flags, uint32_t count)
711 {
712 	struct table_config *tc;
713 	struct table_algo *ta;
714 	struct tentry_info *ptei;
715 	uint16_t kidx;
716 	int error, first_error, i;
717 	uint32_t num, numdel;
718 	char ta_buf[TA_BUF_SZ];
719 	caddr_t ta_buf_m, v;
720 
721 	/*
722 	 * Find and reference existing table.
723 	 */
724 	IPFW_UH_WLOCK(ch);
725 	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
726 	if (error != 0) {
727 		IPFW_UH_WUNLOCK(ch);
728 		return (error);
729 	}
730 	ta = tc->ta;
731 	IPFW_UH_WUNLOCK(ch);
732 
733 	/* Allocate memory and prepare record(s) */
734 	/* Pass stack buffer by default */
735 	ta_buf_m = ta_buf;
736 	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
737 	if (error != 0)
738 		goto cleanup;
739 
740 	IPFW_UH_WLOCK(ch);
741 
742 	/* Drop reference we've used in first search */
743 	tc->no.refcnt--;
744 
745 	/*
746 	 * Check if table algo is still the same.
747 	 * (changed ta may be the result of table swap).
748 	 */
749 	if (ta != tc->ta) {
750 		IPFW_UH_WUNLOCK(ch);
751 		error = EINVAL;
752 		goto cleanup;
753 	}
754 
755 	kidx = tc->no.kidx;
756 	numdel = 0;
757 	first_error = 0;
758 
759 	IPFW_WLOCK(ch);
760 	v = ta_buf_m;
761 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
762 		ptei = &tei[i];
763 		num = 0;
764 		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
765 		    &num);
766 		/* Save state for userland */
767 		store_tei_result(ptei, OP_DEL, error, num);
768 		if (error != 0 && first_error == 0)
769 			first_error = error;
770 		tc->count -= num;
771 		numdel += num;
772 	}
773 	IPFW_WUNLOCK(ch);
774 
775 	/* Unlink non-used values */
776 	ipfw_garbage_table_values(ch, tc, tei, count, 0);
777 
778 	if (numdel != 0) {
779 		/* Run post-del hook to permit shrinking */
780 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
781 	}
782 
783 	IPFW_UH_WUNLOCK(ch);
784 
785 	/* Return first error to user, if any */
786 	error = first_error;
787 
788 cleanup:
789 	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);
790 
791 	return (error);
792 }
793 
794 /*
795  * Ensure that table @tc has enough space to add @count entries without
796  * need for reallocation.
797  *
798  * Callbacks order:
799  * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
800  *
801  * 1) alloc_modify (no locks, M_WAITOK) - alloc new state based on @pflags.
802  * 2) prepare_modify (UH_WLOCK) - copy old data into new storage
803  * 3) modify (UH_WLOCK + WLOCK) - switch pointers
804  * 4) flush_modify (UH_WLOCK) - free state, if needed
805  *
806  * Returns 0 on success.
807  */
808 static int
809 check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
810     struct table_config *tc, struct table_info *ti, uint32_t count)
811 {
812 	struct table_algo *ta;
813 	uint64_t pflags;
814 	char ta_buf[TA_BUF_SZ];
815 	int error;
816 
817 	IPFW_UH_WLOCK_ASSERT(ch);
818 
819 	error = 0;
820 	ta = tc->ta;
821 	if (ta->need_modify == NULL)
822 		return (0);
823 
824 	/* Acquire a reference so as not to lose @tc between locks/unlocks */
825 	tc->no.refcnt++;
826 
827 	/*
828 	 * TODO: think about avoiding race between large add/large delete
829 	 * operation on algorithm which implements shrinking along with
830 	 * growing.
831 	 */
832 	while (true) {
833 		pflags = 0;
834 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
835 			error = 0;
836 			break;
837 		}
838 
839 		/* We have to shrink/grow table */
840 		if (ts != NULL)
841 			add_toperation_state(ch, ts);
842 		IPFW_UH_WUNLOCK(ch);
843 
844 		memset(&ta_buf, 0, sizeof(ta_buf));
845 		error = ta->prepare_mod(ta_buf, &pflags);
846 
847 		IPFW_UH_WLOCK(ch);
848 		if (ts != NULL)
849 			del_toperation_state(ch, ts);
850 
851 		if (error != 0)
852 			break;
853 
854 		if (ts != NULL && ts->modified != 0) {
855 
856 			/*
857 			 * Swap operation has happened
858 			 * so we're currently operating on other
859 			 * table data. Stop doing this.
860 			 */
861 			ta->flush_mod(ta_buf);
862 			break;
863 		}
864 
865 		/* Check if we still need to alter table */
866 		ti = KIDX_TO_TI(ch, tc->no.kidx);
867 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
868 			IPFW_UH_WUNLOCK(ch);
869 
870 			/*
871 			 * Other thread has already performed resize.
872 			 * Flush our state and return.
873 			 */
874 			ta->flush_mod(ta_buf);
875 			break;
876 		}
877 
878 		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
879 		if (error == 0) {
880 			/* Do actual modification */
881 			IPFW_WLOCK(ch);
882 			ta->modify(tc->astate, ti, ta_buf, pflags);
883 			IPFW_WUNLOCK(ch);
884 		}
885 
886 		/* Anyway, flush data and retry */
887 		ta->flush_mod(ta_buf);
888 	}
889 
890 	tc->no.refcnt--;
891 	return (error);
892 }
893 
894 /*
895  * Adds or deletes record in table.
896  * Data layout (v0):
897  * Request: [ ip_fw3_opheader ipfw_table_xentry ]
898  *
899  * Returns 0 on success
900  */
901 static int
902 manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
903     struct sockopt_data *sd)
904 {
905 	ipfw_table_xentry *xent;
906 	struct tentry_info tei;
907 	struct tid_info ti;
908 	struct table_value v;
909 	int error, hdrlen, read;
910 
911 	hdrlen = offsetof(ipfw_table_xentry, k);
912 
913 	/* Check minimum header size */
914 	if (sd->valsize < (sizeof(*op3) + hdrlen))
915 		return (EINVAL);
916 
917 	read = sizeof(ip_fw3_opheader);
918 
919 	/* Check if xentry len field is valid */
920 	xent = (ipfw_table_xentry *)(op3 + 1);
921 	if (xent->len < hdrlen || xent->len + read > sd->valsize)
922 		return (EINVAL);
923 
924 	memset(&tei, 0, sizeof(tei));
925 	tei.paddr = &xent->k;
926 	tei.masklen = xent->masklen;
927 	ipfw_import_table_value_legacy(xent->value, &v);
928 	tei.pvalue = &v;
929 	/* Old requests compatibility */
930 	tei.flags = TEI_FLAGS_COMPAT;
931 	if (xent->type == IPFW_TABLE_ADDR) {
932 		if (xent->len - hdrlen == sizeof(in_addr_t))
933 			tei.subtype = AF_INET;
934 		else
935 			tei.subtype = AF_INET6;
936 	}
937 
938 	memset(&ti, 0, sizeof(ti));
939 	ti.uidx = xent->tbl;
940 	ti.type = xent->type;
941 
942 	error = (op3->opcode == IP_FW_TABLE_XADD) ?
943 	    add_table_entry(ch, &ti, &tei, 0, 1) :
944 	    del_table_entry(ch, &ti, &tei, 0, 1);
945 
946 	return (error);
947 }
948 
949 /*
950  * Adds or deletes record in table.
951  * Data layout (v1)(current):
952  * Request: [ ipfw_obj_header
953  *   ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ]
954  * ]
955  *
956  * Returns 0 on success
957  */
958 static int
959 manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
960     struct sockopt_data *sd)
961 {
962 	ipfw_obj_tentry *tent, *ptent;
963 	ipfw_obj_ctlv *ctlv;
964 	ipfw_obj_header *oh;
965 	struct tentry_info *ptei, tei, *tei_buf;
966 	struct tid_info ti;
967 	int error, i, kidx, read;
968 
969 	/* Check minimum header size */
970 	if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv)))
971 		return (EINVAL);
972 
973 	/* Check if passed data is too long */
974 	if (sd->valsize != sd->kavail)
975 		return (EINVAL);
976 
977 	oh = (ipfw_obj_header *)sd->kbuf;
978 
979 	/* Basic length checks for TLVs */
980 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
981 		return (EINVAL);
982 
983 	read = sizeof(*oh);
984 
985 	ctlv = (ipfw_obj_ctlv *)(oh + 1);
986 	if (ctlv->head.length + read != sd->valsize)
987 		return (EINVAL);
988 
989 	read += sizeof(*ctlv);
990 	tent = (ipfw_obj_tentry *)(ctlv + 1);
991 	if (ctlv->count * sizeof(*tent) + read != sd->valsize)
992 		return (EINVAL);
993 
994 	if (ctlv->count == 0)
995 		return (0);
996 
997 	/*
998 	 * Mark entire buffer as "read".
999 	 * This instructs the sopt API to write it back
1000 	 * after the function returns.
1001 	 */
1002 	ipfw_get_sopt_header(sd, sd->valsize);
1003 
1004 	/* Perform basic checks for each entry */
1005 	ptent = tent;
1006 	kidx = tent->idx;
1007 	for (i = 0; i < ctlv->count; i++, ptent++) {
1008 		if (ptent->head.length != sizeof(*ptent))
1009 			return (EINVAL);
1010 		if (ptent->idx != kidx)
1011 			return (ENOTSUP);
1012 	}
1013 
1014 	/* Convert data into kernel request objects */
1015 	objheader_to_ti(oh, &ti);
1016 	ti.type = oh->ntlv.type;
1017 	ti.uidx = kidx;
1018 
1019 	/* Use on-stack buffer for single add/del */
1020 	if (ctlv->count == 1) {
1021 		memset(&tei, 0, sizeof(tei));
1022 		tei_buf = &tei;
1023 	} else
1024 		tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP,
1025 		    M_WAITOK | M_ZERO);
1026 
1027 	ptei = tei_buf;
1028 	ptent = tent;
1029 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1030 		ptei->paddr = &ptent->k;
1031 		ptei->subtype = ptent->subtype;
1032 		ptei->masklen = ptent->masklen;
1033 		if (ptent->head.flags & IPFW_TF_UPDATE)
1034 			ptei->flags |= TEI_FLAGS_UPDATE;
1035 
1036 		ipfw_import_table_value_v1(&ptent->v.value);
1037 		ptei->pvalue = (struct table_value *)&ptent->v.value;
1038 	}
1039 
1040 	error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ?
1041 	    add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) :
1042 	    del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count);
1043 
1044 	/* Translate result back to userland */
1045 	ptei = tei_buf;
1046 	ptent = tent;
1047 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1048 		if (ptei->flags & TEI_FLAGS_ADDED)
1049 			ptent->result = IPFW_TR_ADDED;
1050 		else if (ptei->flags & TEI_FLAGS_DELETED)
1051 			ptent->result = IPFW_TR_DELETED;
1052 		else if (ptei->flags & TEI_FLAGS_UPDATED)
1053 			ptent->result = IPFW_TR_UPDATED;
1054 		else if (ptei->flags & TEI_FLAGS_LIMIT)
1055 			ptent->result = IPFW_TR_LIMIT;
1056 		else if (ptei->flags & TEI_FLAGS_ERROR)
1057 			ptent->result = IPFW_TR_ERROR;
1058 		else if (ptei->flags & TEI_FLAGS_NOTFOUND)
1059 			ptent->result = IPFW_TR_NOTFOUND;
1060 		else if (ptei->flags & TEI_FLAGS_EXISTS)
1061 			ptent->result = IPFW_TR_EXISTS;
1062 		ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value);
1063 	}
1064 
1065 	if (tei_buf != &tei)
1066 		free(tei_buf, M_TEMP);
1067 
1068 	return (error);
1069 }
1070 
1071 /*
1072  * Looks up an entry in given table.
1073  * Data layout (v0)(current):
1074  * Request: [ ipfw_obj_header ipfw_obj_tentry ]
1075  * Reply: [ ipfw_obj_header ipfw_obj_tentry ]
1076  *
1077  * Returns 0 on success
1078  */
1079 static int
1080 find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1081     struct sockopt_data *sd)
1082 {
1083 	ipfw_obj_tentry *tent;
1084 	ipfw_obj_header *oh;
1085 	struct tid_info ti;
1086 	struct table_config *tc;
1087 	struct table_algo *ta;
1088 	struct table_info *kti;
1089 	struct namedobj_instance *ni;
1090 	int error;
1091 	size_t sz;
1092 
1093 	/* Check minimum header size */
1094 	sz = sizeof(*oh) + sizeof(*tent);
1095 	if (sd->valsize != sz)
1096 		return (EINVAL);
1097 
1098 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1099 	tent = (ipfw_obj_tentry *)(oh + 1);
1100 
1101 	/* Basic length checks for TLVs */
1102 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
1103 		return (EINVAL);
1104 
1105 	objheader_to_ti(oh, &ti);
1106 	ti.type = oh->ntlv.type;
1107 	ti.uidx = tent->idx;
1108 
1109 	IPFW_UH_RLOCK(ch);
1110 	ni = CHAIN_TO_NI(ch);
1111 
1112 	/*
1113 	 * Find existing table and check its type.
1114 	 */
1115 	ta = NULL;
1116 	if ((tc = find_table(ni, &ti)) == NULL) {
1117 		IPFW_UH_RUNLOCK(ch);
1118 		return (ESRCH);
1119 	}
1120 
1121 	/* check table type */
1122 	if (tc->no.subtype != ti.type) {
1123 		IPFW_UH_RUNLOCK(ch);
1124 		return (EINVAL);
1125 	}
1126 
1127 	kti = KIDX_TO_TI(ch, tc->no.kidx);
1128 	ta = tc->ta;
1129 
1130 	if (ta->find_tentry == NULL)
1131 		return (ENOTSUP);
1132 
1133 	error = ta->find_tentry(tc->astate, kti, tent);
1134 
1135 	IPFW_UH_RUNLOCK(ch);
1136 
1137 	return (error);
1138 }
1139 
1140 /*
1141  * Flushes all entries or destroys given table.
1142  * Data layout (v0)(current):
1143  * Request: [ ipfw_obj_header ]
1144  *
1145  * Returns 0 on success
1146  */
1147 static int
1148 flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1149     struct sockopt_data *sd)
1150 {
1151 	int error;
1152 	struct _ipfw_obj_header *oh;
1153 	struct tid_info ti;
1154 
1155 	if (sd->valsize != sizeof(*oh))
1156 		return (EINVAL);
1157 
1158 	oh = (struct _ipfw_obj_header *)op3;
1159 	objheader_to_ti(oh, &ti);
1160 
1161 	if (op3->opcode == IP_FW_TABLE_XDESTROY)
1162 		error = destroy_table(ch, &ti);
1163 	else if (op3->opcode == IP_FW_TABLE_XFLUSH)
1164 		error = flush_table(ch, &ti);
1165 	else
1166 		return (ENOTSUP);
1167 
1168 	return (error);
1169 }
1170 
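/*
 * Operation-state callback for flush_table(): if the table being
 * flushed (@object) is touched by a concurrent swap, mark the flush
 * state as modified so flush_table() restarts.
 */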
1171 static void
1172 restart_flush(void *object, struct op_state *_state)
1173 {
1174 	struct tableop_state *ts;
1175 
1176 	ts = (struct tableop_state *)_state;
1177 
1178 	if (ts->tc != object)
1179 		return;
1180 
1181 	/* Indicate we've called */
1182 	ts->modified = 1;
1183 }
1184 
1185 /*
1186  * Flushes given table.
1187  *
1188  * The function creates a new table instance with the same
1189  * parameters, swaps it with the old one and
1190  * flushes its state without holding the runtime WLOCK.
1191  *
1192  * Returns 0 on success.
1193  */
1194 int
1195 flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
1196 {
1197 	struct namedobj_instance *ni;
1198 	struct table_config *tc;
1199 	struct table_algo *ta;
1200 	struct table_info ti_old, ti_new, *tablestate;
1201 	void *astate_old, *astate_new;
1202 	char algostate[64], *pstate;
1203 	struct tableop_state ts;
1204 	int error, need_gc;
1205 	uint16_t kidx;
1206 	uint8_t tflags;
1207 
1208 	/*
1209 	 * Stage 1: save table algorithm.
1210 	 * Reference found table to ensure it won't disappear.
1211 	 */
1212 	IPFW_UH_WLOCK(ch);
1213 	ni = CHAIN_TO_NI(ch);
1214 	if ((tc = find_table(ni, ti)) == NULL) {
1215 		IPFW_UH_WUNLOCK(ch);
1216 		return (ESRCH);
1217 	}
1218 	need_gc = 0;
1219 	astate_new = NULL;
1220 	memset(&ti_new, 0, sizeof(ti_new));
1221 restart:
1222 	/* Set up swap handler */
1223 	memset(&ts, 0, sizeof(ts));
1224 	ts.opstate.func = restart_flush;
1225 	ts.tc = tc;
1226 
1227 	ta = tc->ta;
1228 	/* Do not flush readonly tables */
1229 	if ((ta->flags & TA_FLAG_READONLY) != 0) {
1230 		IPFW_UH_WUNLOCK(ch);
1231 		return (EACCES);
1232 	}
1233 	/* Save startup algo parameters */
1234 	if (ta->print_config != NULL) {
1235 		ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx),
1236 		    algostate, sizeof(algostate));
1237 		pstate = algostate;
1238 	} else
1239 		pstate = NULL;
1240 	tflags = tc->tflags;
1241 	tc->no.refcnt++;
1242 	add_toperation_state(ch, &ts);
1243 	IPFW_UH_WUNLOCK(ch);
1244 
1245 	/*
1246 	 * Stage 1.5: if this is not the first attempt, destroy previous state
1247 	 */
1248 	if (need_gc != 0) {
1249 		ta->destroy(astate_new, &ti_new);
1250 		need_gc = 0;
1251 	}
1252 
1253 	/*
1254 	 * Stage 2: allocate new table instance using same algo.
1255 	 */
1256 	memset(&ti_new, 0, sizeof(struct table_info));
1257 	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);
1258 
1259 	/*
1260 	 * Stage 3: swap old state pointers with newly-allocated ones.
1261 	 * Decrease refcount.
1262 	 */
1263 	IPFW_UH_WLOCK(ch);
1264 	tc->no.refcnt--;
1265 	del_toperation_state(ch, &ts);
1266 
1267 	if (error != 0) {
1268 		IPFW_UH_WUNLOCK(ch);
1269 		return (error);
1270 	}
1271 
1272 	/*
1273 	 * Restart operation if table swap has happened:
1274 	 * even if algo may be the same, algo init parameters
1275 	 * may change. Restart operation instead of doing
1276 	 * complex checks.
1277 	 */
1278 	if (ts.modified != 0) {
1279 		/* Delay destroying data since we're holding UH lock */
1280 		need_gc = 1;
1281 		goto restart;
1282 	}
1283 
1284 	ni = CHAIN_TO_NI(ch);
1285 	kidx = tc->no.kidx;
1286 	tablestate = (struct table_info *)ch->tablestate;
1287 
1288 	IPFW_WLOCK(ch);
1289 	ti_old = tablestate[kidx];
1290 	tablestate[kidx] = ti_new;
1291 	IPFW_WUNLOCK(ch);
1292 
1293 	astate_old = tc->astate;
1294 	tc->astate = astate_new;
1295 	tc->ti_copy = ti_new;
1296 	tc->count = 0;
1297 
1298 	/* Notify algo on real @ti address */
1299 	if (ta->change_ti != NULL)
1300 		ta->change_ti(tc->astate, &tablestate[kidx]);
1301 
1302 	/*
1303 	 * Stage 4: unref values.
1304 	 */
1305 	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
1306 	IPFW_UH_WUNLOCK(ch);
1307 
1308 	/*
1309 	 * Stage 5: perform real flush/destroy.
1310 	 */
1311 	ta->destroy(astate_old, &ti_old);
1312 
1313 	return (0);
1314 }
1315 
1316 /*
1317  * Swaps two tables.
1318  * Data layout (v0)(current):
1319  * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
1320  *
1321  * Returns 0 on success
1322  */
1323 static int
1324 swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1325     struct sockopt_data *sd)
1326 {
1327 	int error;
1328 	struct _ipfw_obj_header *oh;
1329 	struct tid_info ti_a, ti_b;
1330 
1331 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
1332 		return (EINVAL);
1333 
1334 	oh = (struct _ipfw_obj_header *)op3;
1335 	ntlv_to_ti(&oh->ntlv, &ti_a);
1336 	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);
1337 
1338 	error = swap_tables(ch, &ti_a, &ti_b);
1339 
1340 	return (error);
1341 }
1342 
1343 /*
1344  * Swaps two tables of the same type/valtype.
1345  *
1346  * Checks if the tables are compatible and their limits
1347  * permit the swap, then actually performs the swap.
1348  *
1349  * Each table consists of 2 different parts:
1350  * config:
1351  *   @tc (with name, set, kidx) and rule bindings, which is "stable".
1352  *   number of items
1353  *   table algo
1354  * runtime:
1355  *   runtime data @ti (ch->tablestate)
1356  *   runtime cache in @tc
1357  *   algo-specific data (@tc->astate)
1358  *
1359  * So we switch:
1360  *  all runtime data
1361  *   number of items
1362  *   table algo
1363  *
1364  * After that we call @ti change handler for each table.
1365  *
1366  * Note that referencing @tc won't protect tc->ta from change.
1367  * XXX: Do we need to restrict swap between locked tables?
1368  * XXX: Do we need to exchange ftype?
1369  *
1370  * Returns 0 on success.
1371  */
1372 static int
1373 swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
1374     struct tid_info *b)
1375 {
1376 	struct namedobj_instance *ni;
1377 	struct table_config *tc_a, *tc_b;
1378 	struct table_algo *ta;
1379 	struct table_info ti, *tablestate;
1380 	void *astate;
1381 	uint32_t count;
1382 
1383 	/*
1384 	 * Stage 1: find both tables and ensure they are of
1385 	 * the same type.
1386 	 */
1387 	IPFW_UH_WLOCK(ch);
1388 	ni = CHAIN_TO_NI(ch);
1389 	if ((tc_a = find_table(ni, a)) == NULL) {
1390 		IPFW_UH_WUNLOCK(ch);
1391 		return (ESRCH);
1392 	}
1393 	if ((tc_b = find_table(ni, b)) == NULL) {
1394 		IPFW_UH_WUNLOCK(ch);
1395 		return (ESRCH);
1396 	}
1397 
1398 	/* Swapping a table with itself is trivially a no-op */
1399 	if (tc_a == tc_b) {
1400 		IPFW_UH_WUNLOCK(ch);
1401 		return (0);
1402 	}
1403 
1404 	/* Check type and value are the same */
1405 	if (tc_a->no.subtype!=tc_b->no.subtype || tc_a->tflags!=tc_b->tflags) {
1406 		IPFW_UH_WUNLOCK(ch);
1407 		return (EINVAL);
1408 	}
1409 
1410 	/* Check limits before swap */
1411 	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
1412 	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
1413 		IPFW_UH_WUNLOCK(ch);
1414 		return (EFBIG);
1415 	}
1416 
1417 	/* Check if one of the tables is readonly */
1418 	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
1419 		IPFW_UH_WUNLOCK(ch);
1420 		return (EACCES);
1421 	}
1422 
1423 	/* Notify we're going to swap */
1424 	rollback_toperation_state(ch, tc_a);
1425 	rollback_toperation_state(ch, tc_b);
1426 
1427 	/* Everything is fine, prepare to swap */
1428 	tablestate = (struct table_info *)ch->tablestate;
1429 	ti = tablestate[tc_a->no.kidx];
1430 	ta = tc_a->ta;
1431 	astate = tc_a->astate;
1432 	count = tc_a->count;
1433 
1434 	IPFW_WLOCK(ch);
1435 	/* a <- b */
1436 	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
1437 	tc_a->ta = tc_b->ta;
1438 	tc_a->astate = tc_b->astate;
1439 	tc_a->count = tc_b->count;
1440 	/* b <- a */
1441 	tablestate[tc_b->no.kidx] = ti;
1442 	tc_b->ta = ta;
1443 	tc_b->astate = astate;
1444 	tc_b->count = count;
1445 	IPFW_WUNLOCK(ch);
1446 
1447 	/* Ensure tc.ti copies are in sync */
1448 	tc_a->ti_copy = tablestate[tc_a->no.kidx];
1449 	tc_b->ti_copy = tablestate[tc_b->no.kidx];
1450 
1451 	/* Notify both tables on @ti change */
1452 	if (tc_a->ta->change_ti != NULL)
1453 		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
1454 	if (tc_b->ta->change_ti != NULL)
1455 		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);
1456 
1457 	IPFW_UH_WUNLOCK(ch);
1458 
1459 	return (0);
1460 }
1461 
1462 /*
1463  * Destroys table specified by @ti.
1464  * Data layout (v0)(current):
1465  * Request: [ ip_fw3_opheader ]
1466  *
1467  * Returns 0 on success
1468  */
1469 static int
1470 destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
1471 {
1472 	struct namedobj_instance *ni;
1473 	struct table_config *tc;
1474 
1475 	IPFW_UH_WLOCK(ch);
1476 
1477 	ni = CHAIN_TO_NI(ch);
1478 	if ((tc = find_table(ni, ti)) == NULL) {
1479 		IPFW_UH_WUNLOCK(ch);
1480 		return (ESRCH);
1481 	}
1482 
1483 	/* Do not permit destroying referenced tables */
1484 	if (tc->no.refcnt > 0) {
1485 		IPFW_UH_WUNLOCK(ch);
1486 		return (EBUSY);
1487 	}
1488 
1489 	IPFW_WLOCK(ch);
1490 	unlink_table(ch, tc);
1491 	IPFW_WUNLOCK(ch);
1492 
1493 	/* Free obj index */
1494 	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
1495 		printf("Error unlinking kidx %d from table %s\n",
1496 		    tc->no.kidx, tc->tablename);
1497 
1498 	/* Unref values used in tables while holding UH lock */
1499 	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
1500 	IPFW_UH_WUNLOCK(ch);
1501 
1502 	free_table_config(ni, tc);
1503 
1504 	return (0);
1505 }
1506 
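/*
 * Rounds @v up to the next power of 2
 * (returns @v unchanged if it is already a power of 2).
 */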
1507 static uint32_t
1508 roundup2p(uint32_t v)
1509 {
1510 
1511 	v--;
1512 	v |= v >> 1;
1513 	v |= v >> 2;
1514 	v |= v >> 4;
1515 	v |= v >> 8;
1516 	v |= v >> 16;
1517 	v++;
1518 
1519 	return (v);
1520 }
1521 
1522 /*
1523  * Grow tables index.
1524  *
1525  * Returns 0 on success.
1526  */
1527 int
1528 ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
1529 {
1530 	unsigned int ntables_old, tbl;
1531 	struct namedobj_instance *ni;
1532 	void *new_idx, *old_tablestate, *tablestate;
1533 	struct table_info *ti;
1534 	struct table_config *tc;
1535 	int i, new_blocks;
1536 
1537 	/* Check new value for validity */
1538 	if (ntables == 0)
1539 		return (EINVAL);
1540 	if (ntables > IPFW_TABLES_MAX)
1541 		ntables = IPFW_TABLES_MAX;
1542 	/* Align to the nearest power of 2 */
1543 	ntables = (unsigned int)roundup2p(ntables);
1544 
1545 	/* Allocate new pointers */
1546 	tablestate = malloc(ntables * sizeof(struct table_info),
1547 	    M_IPFW, M_WAITOK | M_ZERO);
1548 
1549 	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);
1550 
1551 	IPFW_UH_WLOCK(ch);
1552 
1553 	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
1554 	ni = CHAIN_TO_NI(ch);
1555 
1556 	/* Temporarily restrict decreasing max_tables */
1557 	if (ntables < V_fw_tables_max) {
1558 
1559 		/*
1560 		 * FIXME: Check if we really can shrink
1561 		 */
1562 		IPFW_UH_WUNLOCK(ch);
1563 		return (EINVAL);
1564 	}
1565 
1566 	/* Copy table info/indices */
1567 	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
1568 	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
1569 
1570 	IPFW_WLOCK(ch);
1571 
1572 	/* Change pointers */
1573 	old_tablestate = ch->tablestate;
1574 	ch->tablestate = tablestate;
1575 	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);
1576 
1577 	ntables_old = V_fw_tables_max;
1578 	V_fw_tables_max = ntables;
1579 
1580 	IPFW_WUNLOCK(ch);
1581 
1582 	/* Notify all consumers that their @ti pointer has changed */
1583 	ti = (struct table_info *)ch->tablestate;
1584 	for (i = 0; i < tbl; i++, ti++) {
1585 		if (ti->lookup == NULL)
1586 			continue;
1587 		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
1588 		if (tc == NULL || tc->ta->change_ti == NULL)
1589 			continue;
1590 
1591 		tc->ta->change_ti(tc->astate, ti);
1592 	}
1593 
1594 	IPFW_UH_WUNLOCK(ch);
1595 
1596 	/* Free old pointers */
1597 	free(old_tablestate, M_IPFW);
1598 	ipfw_objhash_bitmap_free(new_idx, new_blocks);
1599 
1600 	return (0);
1601 }
1602 
1603 /*
1604  * Switches between "set 0" and "rule's set" table binding.
1605  * Checks all ruleset bindings and permits changing
1606  * IFF each binding has both the rule AND the table in the default set (set 0).
1607  *
1608  * Returns 0 on success.
1609  */
1610 int
1611 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets)
1612 {
1613 	struct namedobj_instance *ni;
1614 	struct named_object *no;
1615 	struct ip_fw *rule;
1616 	ipfw_insn *cmd;
1617 	int cmdlen, i, l;
1618 	uint16_t kidx;
1619 
1620 	IPFW_UH_WLOCK(ch);
1621 
1622 	if (V_fw_tables_sets == sets) {
1623 		IPFW_UH_WUNLOCK(ch);
1624 		return (0);
1625 	}
1626 
1627 	ni = CHAIN_TO_NI(ch);
1628 
1629 	/*
1630 	 * Scan all rules and examine tables opcodes.
1631 	 */
1632 	for (i = 0; i < ch->n_rules; i++) {
1633 		rule = ch->map[i];
1634 
1635 		l = rule->cmd_len;
1636 		cmd = rule->cmd;
1637 		cmdlen = 0;
1638 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1639 			cmdlen = F_LEN(cmd);
1640 
1641 			if (classify_opcode_kidx(cmd, &kidx) != 0)
1642 				continue;
1643 
1644 			no = ipfw_objhash_lookup_kidx(ni, kidx);
1645 
1646 			/* Check if both the table object and the rule are in set 0 */
1647 			if (no->set != 0 || rule->set != 0) {
1648 				IPFW_UH_WUNLOCK(ch);
1649 				return (EBUSY);
1650 			}
1651 
1652 		}
1653 	}
1654 	V_fw_tables_sets = sets;
1655 
1656 	IPFW_UH_WUNLOCK(ch);
1657 
1658 	return (0);
1659 }
1660 
1661 /*
1662  * Lookup an IP @addr in table @tbl.
1663  * Stores found value in @val.
1664  *
1665  * Returns 1 if @addr was found.
1666  */
1667 int
1668 ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
1669     uint32_t *val)
1670 {
1671 	struct table_info *ti;
1672 
1673 	ti = KIDX_TO_TI(ch, tbl);
1674 
1675 	return (ti->lookup(ti, &addr, sizeof(in_addr_t), val));
1676 }
1677 
1678 /*
1679  * Lookup an arbitrary key @paddr of length @plen in table @tbl.
1680  * Stores found value in @val.
1681  *
1682  * Returns 1 if key was found.
1683  */
1684 int
1685 ipfw_lookup_table_extended(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
1686     void *paddr, uint32_t *val)
1687 {
1688 	struct table_info *ti;
1689 
1690 	ti = KIDX_TO_TI(ch, tbl);
1691 
1692 	return (ti->lookup(ti, paddr, plen, val));
1693 }
1694 
1695 /*
1696  * Info/List/dump support for tables.
1697  *
1698  */
1699 
1700 /*
1701  * High-level 'get' cmds sysctl handlers
1702  */
1703 
1704 /*
1705  * Lists all tables currently available in kernel.
1706  * Data layout (v0)(current):
1707  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
1708  * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ]
1709  *
1710  * Returns 0 on success
1711  */
1712 static int
1713 list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1714     struct sockopt_data *sd)
1715 {
1716 	struct _ipfw_obj_lheader *olh;
1717 	int error;
1718 
1719 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
1720 	if (olh == NULL)
1721 		return (EINVAL);
1722 	if (sd->valsize < olh->size)
1723 		return (EINVAL);
1724 
1725 	IPFW_UH_RLOCK(ch);
1726 	error = export_tables(ch, olh, sd);
1727 	IPFW_UH_RUNLOCK(ch);
1728 
1729 	return (error);
1730 }
1731 
1732 /*
1733  * Store table info to buffer provided by @sd.
1734  * Data layout (v0)(current):
1735  * Request: [ ipfw_obj_header ipfw_xtable_info(empty)]
1736  * Reply: [ ipfw_obj_header ipfw_xtable_info ]
1737  *
1738  * Returns 0 on success.
1739  */
1740 static int
1741 describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1742     struct sockopt_data *sd)
1743 {
1744 	struct _ipfw_obj_header *oh;
1745 	struct table_config *tc;
1746 	struct tid_info ti;
1747 	size_t sz;
1748 
1749 	sz = sizeof(*oh) + sizeof(ipfw_xtable_info);
1750 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1751 	if (oh == NULL)
1752 		return (EINVAL);
1753 
1754 	objheader_to_ti(oh, &ti);
1755 
1756 	IPFW_UH_RLOCK(ch);
1757 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
1758 		IPFW_UH_RUNLOCK(ch);
1759 		return (ESRCH);
1760 	}
1761 
1762 	export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1));
1763 	IPFW_UH_RUNLOCK(ch);
1764 
1765 	return (0);
1766 }
1767 
1768 /*
1769  * Modifies existing table.
1770  * Data layout (v0)(current):
1771  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1772  *
1773  * Returns 0 on success
1774  */
1775 static int
1776 modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1777     struct sockopt_data *sd)
1778 {
1779 	struct _ipfw_obj_header *oh;
1780 	ipfw_xtable_info *i;
1781 	char *tname;
1782 	struct tid_info ti;
1783 	struct namedobj_instance *ni;
1784 	struct table_config *tc;
1785 
1786 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1787 		return (EINVAL);
1788 
1789 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1790 	i = (ipfw_xtable_info *)(oh + 1);
1791 
1792 	/*
1793 	 * Verify user-supplied strings.
1794 	 * Check for null-terminated/zero-length strings.
1795 	 */
1796 	tname = oh->ntlv.name;
1797 	if (ipfw_check_table_name(tname) != 0)
1798 		return (EINVAL);
1799 
1800 	objheader_to_ti(oh, &ti);
1801 	ti.type = i->type;
1802 
1803 	IPFW_UH_WLOCK(ch);
1804 	ni = CHAIN_TO_NI(ch);
1805 	if ((tc = find_table(ni, &ti)) == NULL) {
1806 		IPFW_UH_WUNLOCK(ch);
1807 		return (ESRCH);
1808 	}
1809 
1810 	/* Do not support any modifications for readonly tables */
1811 	if ((tc->ta->flags & TA_FLAG_READONLY) != 0) {
1812 		IPFW_UH_WUNLOCK(ch);
1813 		return (EACCES);
1814 	}
1815 
1816 	if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0)
1817 		tc->limit = i->limit;
1818 	if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0)
1819 		tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0);
1820 	IPFW_UH_WUNLOCK(ch);
1821 
1822 	return (0);
1823 }
1824 
1825 /*
1826  * Creates new table.
1827  * Data layout (v0)(current):
1828  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1829  *
1830  * Returns 0 on success
1831  */
1832 static int
1833 create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1834     struct sockopt_data *sd)
1835 {
1836 	struct _ipfw_obj_header *oh;
1837 	ipfw_xtable_info *i;
1838 	char *tname, *aname;
1839 	struct tid_info ti;
1840 	struct namedobj_instance *ni;
1841 
1842 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1843 		return (EINVAL);
1844 
1845 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1846 	i = (ipfw_xtable_info *)(oh + 1);
1847 
1848 	/*
1849 	 * Verify user-supplied strings.
1850 	 * Check for null-terminated/zero-length strings.
1851 	 */
1852 	tname = oh->ntlv.name;
1853 	aname = i->algoname;
1854 	if (ipfw_check_table_name(tname) != 0 ||
1855 	    strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname))
1856 		return (EINVAL);
1857 
1858 	if (aname[0] == '\0') {
1859 		/* Use default algorithm */
1860 		aname = NULL;
1861 	}
1862 
1863 	objheader_to_ti(oh, &ti);
1864 	ti.type = i->type;
1865 
1866 	ni = CHAIN_TO_NI(ch);
1867 
1868 	IPFW_UH_RLOCK(ch);
1869 	if (find_table(ni, &ti) != NULL) {
1870 		IPFW_UH_RUNLOCK(ch);
1871 		return (EEXIST);
1872 	}
1873 	IPFW_UH_RUNLOCK(ch);
1874 
1875 	return (create_table_internal(ch, &ti, aname, i, NULL, 0));
1876 }
1877 
1878 /*
1879  * Creates new table based on @ti and @aname.
1880  *
1881  * Relies on table name checking inside find_name_tlv()
1882  * Assume @aname to be checked and valid.
1883  * Stores allocated table kidx inside @pkidx (if non-NULL).
1884  * Reference created table if @compat is non-zero.
1885  *
1886  * Returns 0 on success.
1887  */
1888 static int
1889 create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
1890     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat)
1891 {
1892 	struct namedobj_instance *ni;
1893 	struct table_config *tc, *tc_new, *tmp;
1894 	struct table_algo *ta;
1895 	uint16_t kidx;
1896 
1897 	ni = CHAIN_TO_NI(ch);
1898 
1899 	ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
1900 	if (ta == NULL)
1901 		return (ENOTSUP);
1902 
1903 	tc = alloc_table_config(ch, ti, ta, aname, i->tflags);
1904 	if (tc == NULL)
1905 		return (ENOMEM);
1906 
1907 	tc->vmask = i->vmask;
1908 	tc->limit = i->limit;
1909 	if (ta->flags & TA_FLAG_READONLY)
1910 		tc->locked = 1;
1911 	else
1912 		tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;
1913 
1914 	IPFW_UH_WLOCK(ch);
1915 
1916 	/* Check if table has been already created */
1917 	tc_new = find_table(ni, ti);
1918 	if (tc_new != NULL) {
1919 
1920 		/*
1921 		 * Compat: do not fail if we're
1922 		 * requesting to create existing table
1923 		 * which has the same type
1924 		 */
1925 		if (compat == 0 || tc_new->no.subtype != tc->no.subtype) {
1926 			IPFW_UH_WUNLOCK(ch);
1927 			free_table_config(ni, tc);
1928 			return (EEXIST);
1929 		}
1930 
1931 		/* Exchange tc and tc_new for proper refcounting & freeing */
1932 		tmp = tc;
1933 		tc = tc_new;
1934 		tc_new = tmp;
1935 	} else {
1936 		/* New table */
1937 		if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
1938 			IPFW_UH_WUNLOCK(ch);
1939 			printf("Unable to allocate table index."
1940 			    " Consider increasing net.inet.ip.fw.tables_max\n");
1941 			free_table_config(ni, tc);
1942 			return (EBUSY);
1943 		}
1944 		tc->no.kidx = kidx;
1945 		tc->no.etlv = IPFW_TLV_TBL_NAME;
1946 
1947 		IPFW_WLOCK(ch);
1948 		link_table(ch, tc);
1949 		IPFW_WUNLOCK(ch);
1950 	}
1951 
1952 	if (compat != 0)
1953 		tc->no.refcnt++;
1954 	if (pkidx != NULL)
1955 		*pkidx = tc->no.kidx;
1956 
1957 	IPFW_UH_WUNLOCK(ch);
1958 
1959 	if (tc_new != NULL)
1960 		free_table_config(ni, tc_new);
1961 
1962 	return (0);
1963 }
1964 
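/*
 * Converts a userland name TLV @ntlv / object header @oh into the
 * kernel-side table lookup request @ti (struct tid_info).
 */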
1965 static void
1966 ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti)
1967 {
1968 
1969 	memset(ti, 0, sizeof(struct tid_info));
1970 	ti->set = ntlv->set;
1971 	ti->uidx = ntlv->idx;
1972 	ti->tlvs = ntlv;
1973 	ti->tlen = ntlv->head.length;
1974 }
1975 
1976 static void
1977 objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti)
1978 {
1979 
1980 	ntlv_to_ti(&oh->ntlv, ti);
1981 }
1982 
1983 struct namedobj_instance *
1984 ipfw_get_table_objhash(struct ip_fw_chain *ch)
1985 {
1986 
1987 	return (CHAIN_TO_NI(ch));
1988 }
1989 
1990 /*
1991  * Exports basic table info as name TLV.
1992  * Used inside dump_static_rules() to provide info
1993  * about all tables referenced by current ruleset.
1994  *
1995  * Returns 0 on success.
1996  */
1997 int
1998 ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
1999     struct sockopt_data *sd)
2000 {
2001 	struct namedobj_instance *ni;
2002 	struct named_object *no;
2003 	ipfw_obj_ntlv *ntlv;
2004 
2005 	ni = CHAIN_TO_NI(ch);
2006 
2007 	no = ipfw_objhash_lookup_kidx(ni, kidx);
2008 	KASSERT(no != NULL, ("invalid table kidx passed"));
2009 
2010 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2011 	if (ntlv == NULL)
2012 		return (ENOMEM);
2013 
2014 	ntlv->head.type = IPFW_TLV_TBL_NAME;
2015 	ntlv->head.length = sizeof(*ntlv);
2016 	ntlv->idx = no->kidx;
2017 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
2018 
2019 	return (0);
2020 }
2021 
2022 struct dump_args {
2023 	struct ip_fw_chain *ch;
2024 	struct table_info *ti;
2025 	struct table_config *tc;
2026 	struct sockopt_data *sd;
2027 	uint32_t cnt;
2028 	uint16_t uidx;
2029 	int error;
2030 	uint32_t size;
2031 	ipfw_table_entry *ent;
2032 	ta_foreach_f *f;
2033 	void *farg;
2034 	ipfw_obj_tentry tent;
2035 };
2036 
2037 static int
2038 count_ext_entries(void *e, void *arg)
2039 {
2040 	struct dump_args *da;
2041 
2042 	da = (struct dump_args *)arg;
2043 	da->cnt++;
2044 
2045 	return (0);
2046 }
2047 
2048 /*
2049  * Gets the number of items in the table, either using the
2050  * internal counter or by calling the algo callback for
2051  * externally-managed tables.
2052  *
2053  * Returns number of records.
2054  */
2055 static uint32_t
2056 table_get_count(struct ip_fw_chain *ch, struct table_config *tc)
2057 {
2058 	struct table_info *ti;
2059 	struct table_algo *ta;
2060 	struct dump_args da;
2061 
2062 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2063 	ta = tc->ta;
2064 
2065 	/* Use internal counter for self-managed tables */
2066 	if ((ta->flags & TA_FLAG_READONLY) == 0)
2067 		return (tc->count);
2068 
2069 	/* Use callback to quickly get number of items */
2070 	if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0)
2071 		return (ta->get_count(tc->astate, ti));
2072 
2073 	/* Count the number of items ourselves */
2074 	memset(&da, 0, sizeof(da));
2075 	ta->foreach(tc->astate, ti, count_ext_entries, &da);
2076 
2077 	return (da.cnt);
2078 }
2079 
2080 /*
2081  * Exports table @tc info into standard ipfw_xtable_info format.
2082  */
2083 static void
2084 export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
2085     ipfw_xtable_info *i)
2086 {
2087 	struct table_info *ti;
2088 	struct table_algo *ta;
2089 
2090 	i->type = tc->no.subtype;
2091 	i->tflags = tc->tflags;
2092 	i->vmask = tc->vmask;
2093 	i->set = tc->no.set;
2094 	i->kidx = tc->no.kidx;
2095 	i->refcnt = tc->no.refcnt;
2096 	i->count = table_get_count(ch, tc);
2097 	i->limit = tc->limit;
2098 	i->flags |= (tc->locked != 0) ? IPFW_TGFLAGS_LOCKED : 0;
2099 	i->size = i->count * sizeof(ipfw_obj_tentry);
2100 	i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2101 	strlcpy(i->tablename, tc->tablename, sizeof(i->tablename));
2102 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2103 	ta = tc->ta;
2104 	if (ta->print_config != NULL) {
2105 		/* Use algo function to print table config to string */
2106 		ta->print_config(tc->astate, ti, i->algoname,
2107 		    sizeof(i->algoname));
2108 	} else
2109 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2110 	/* Dump algo-specific data, if possible */
2111 	if (ta->dump_tinfo != NULL) {
2112 		ta->dump_tinfo(tc->astate, ti, &i->ta_info);
2113 		i->ta_info.flags |= IPFW_TATFLAGS_DATA;
2114 	}
2115 }
2116 
2117 struct dump_table_args {
2118 	struct ip_fw_chain *ch;
2119 	struct sockopt_data *sd;
2120 };
2121 
2122 static void
2123 export_table_internal(struct namedobj_instance *ni, struct named_object *no,
2124     void *arg)
2125 {
2126 	ipfw_xtable_info *i;
2127 	struct dump_table_args *dta;
2128 
2129 	dta = (struct dump_table_args *)arg;
2130 
2131 	i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i));
2132 	KASSERT(i != 0, ("previously checked buffer is not enough"));
2133 	KASSERT(i != NULL, ("previously checked buffer is not enough"));
2134 	export_table_info(dta->ch, (struct table_config *)no, i);
2135 }
2136 
2137 /*
2138  * Export all tables as ipfw_xtable_info structures to
2139  * storage provided by @sd.
2140  *
2141  * If supplied buffer is too small, fills in required size
2142  * and returns ENOMEM.
2143  * Returns 0 on success.
2144  */
2145 static int
2146 export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
2147     struct sockopt_data *sd)
2148 {
2149 	uint32_t size;
2150 	uint32_t count;
2151 	struct dump_table_args dta;
2152 
2153 	count = ipfw_objhash_count(CHAIN_TO_NI(ch));
2154 	size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader);
2155 
2156 	/* Fill in header regardless of buffer size */
2157 	olh->count = count;
2158 	olh->objsize = sizeof(ipfw_xtable_info);
2159 
2160 	if (size > olh->size) {
2161 		olh->size = size;
2162 		return (ENOMEM);
2163 	}
2164 
2165 	olh->size = size;
2166 
2167 	dta.ch = ch;
2168 	dta.sd = sd;
2169 
2170 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta);
2171 
2172 	return (0);
2173 }
2174 
2175 /*
2176  * Dumps all table data
2177  * Data layout (v1)(current):
2178  * Request: [ ipfw_obj_header ], size = ipfw_xtable_info.size
2179  * Reply: [ ipfw_obj_header ipfw_xtable_info ipfw_obj_tentry x N ]
2180  *
2181  * Returns 0 on success
2182  */
2183 static int
2184 dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2185     struct sockopt_data *sd)
2186 {
2187 	struct _ipfw_obj_header *oh;
2188 	ipfw_xtable_info *i;
2189 	struct tid_info ti;
2190 	struct table_config *tc;
2191 	struct table_algo *ta;
2192 	struct dump_args da;
2193 	uint32_t sz;
2194 
2195 	sz = sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2196 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
2197 	if (oh == NULL)
2198 		return (EINVAL);
2199 
2200 	i = (ipfw_xtable_info *)(oh + 1);
2201 	objheader_to_ti(oh, &ti);
2202 
2203 	IPFW_UH_RLOCK(ch);
2204 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2205 		IPFW_UH_RUNLOCK(ch);
2206 		return (ESRCH);
2207 	}
2208 	export_table_info(ch, tc, i);
2209 
2210 	if (sd->valsize < i->size) {
2211 
2212 		/*
2213 		 * Submitted buffer size is not enough.
2214 		 * We've already filled in the @i structure with
2215 		 * relevant table info, including size, so we
2216 		 * can return. The buffer will be flushed automatically.
2217 		 */
2218 		IPFW_UH_RUNLOCK(ch);
2219 		return (ENOMEM);
2220 	}
2221 
2222 	/*
2223 	 * Do the actual dump in eXtended format
2224 	 */
2225 	memset(&da, 0, sizeof(da));
2226 	da.ch = ch;
2227 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2228 	da.tc = tc;
2229 	da.sd = sd;
2230 
2231 	ta = tc->ta;
2232 
2233 	ta->foreach(tc->astate, da.ti, dump_table_tentry, &da);
2234 	IPFW_UH_RUNLOCK(ch);
2235 
2236 	return (da.error);
2237 }
2238 
2239 /*
2240  * Dumps all table data
2241  * Data layout (version 0)(legacy):
2242  * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE()
2243  * Reply: [ ipfw_xtable ipfw_table_xentry x N ]
2244  *
2245  * Returns 0 on success
2246  */
2247 static int
2248 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2249     struct sockopt_data *sd)
2250 {
2251 	ipfw_xtable *xtbl;
2252 	struct tid_info ti;
2253 	struct table_config *tc;
2254 	struct table_algo *ta;
2255 	struct dump_args da;
2256 	size_t sz, count;
2257 
2258 	xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable));
2259 	if (xtbl == NULL)
2260 		return (EINVAL);
2261 
2262 	memset(&ti, 0, sizeof(ti));
2263 	ti.uidx = xtbl->tbl;
2264 
2265 	IPFW_UH_RLOCK(ch);
2266 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2267 		IPFW_UH_RUNLOCK(ch);
2268 		return (0);
2269 	}
2270 	count = table_get_count(ch, tc);
2271 	sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable);
2272 
2273 	xtbl->cnt = count;
2274 	xtbl->size = sz;
2275 	xtbl->type = tc->no.subtype;
2276 	xtbl->tbl = ti.uidx;
2277 
2278 	if (sd->valsize < sz) {
2279 
2280 		/*
2281 		 * Submitted buffer size is not enough.
2282 		 * We've already filled in the @xtbl structure with
2283 		 * relevant table info, including size, so we
2284 		 * can return. The buffer will be flushed automatically.
2285 		 */
2286 		IPFW_UH_RUNLOCK(ch);
2287 		return (ENOMEM);
2288 	}
2289 
2290 	/* Do the actual dump in eXtended format */
2291 	memset(&da, 0, sizeof(da));
2292 	da.ch = ch;
2293 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2294 	da.tc = tc;
2295 	da.sd = sd;
2296 
2297 	ta = tc->ta;
2298 
2299 	ta->foreach(tc->astate, da.ti, dump_table_xentry, &da);
2300 	IPFW_UH_RUNLOCK(ch);
2301 
2302 	return (0);
2303 }
2304 
2305 /*
2306  * Legacy function to retrieve number of items in table.
2307  */
2308 static int
2309 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2310     struct sockopt_data *sd)
2311 {
2312 	uint32_t *tbl;
2313 	struct tid_info ti;
2314 	size_t sz;
2315 	int error;
2316 
2317 	sz = sizeof(*op3) + sizeof(uint32_t);
2318 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz);
2319 	if (op3 == NULL)
2320 		return (EINVAL);
2321 
2322 	tbl = (uint32_t *)(op3 + 1);
2323 	memset(&ti, 0, sizeof(ti));
2324 	ti.uidx = *tbl;
2325 	IPFW_UH_RLOCK(ch);
2326 	error = ipfw_count_xtable(ch, &ti, tbl);
2327 	IPFW_UH_RUNLOCK(ch);
2328 	return (error);
2329 }
2330 
2331 /*
2332  * Legacy IP_FW_TABLE_GETSIZE handler
2333  */
2334 int
2335 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2336 {
2337 	struct table_config *tc;
2338 
2339 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2340 		return (ESRCH);
2341 	*cnt = table_get_count(ch, tc);
2342 	return (0);
2343 }
2344 
2345 /*
2346  * Legacy IP_FW_TABLE_XGETSIZE handler
2347  */
2348 int
2349 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2350 {
2351 	struct table_config *tc;
2352 	uint32_t count;
2353 
2354 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) {
2355 		*cnt = 0;
2356 		return (0); /* 'table all list' requires success */
2357 	}
2358 
2359 	count = table_get_count(ch, tc);
2360 	*cnt = count * sizeof(ipfw_table_xentry);
2361 	if (count > 0)
2362 		*cnt += sizeof(ipfw_xtable);
2363 	return (0);
2364 }
2365 
2366 static int
2367 dump_table_entry(void *e, void *arg)
2368 {
2369 	struct dump_args *da;
2370 	struct table_config *tc;
2371 	struct table_algo *ta;
2372 	ipfw_table_entry *ent;
2373 	struct table_value *pval;
2374 	int error;
2375 
2376 	da = (struct dump_args *)arg;
2377 
2378 	tc = da->tc;
2379 	ta = tc->ta;
2380 
2381 	/* Out of space in the output buffer, stop the dump */
2382 	if (da->cnt == da->size)
2383 		return (1);
2384 	ent = da->ent++;
2385 	ent->tbl = da->uidx;
2386 	da->cnt++;
2387 
2388 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2389 	if (error != 0)
2390 		return (error);
2391 
2392 	ent->addr = da->tent.k.addr.s_addr;
2393 	ent->masklen = da->tent.masklen;
2394 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2395 	ent->value = ipfw_export_table_value_legacy(pval);
2396 
2397 	return (0);
2398 }
2399 
2400 /*
2401  * Dumps table in pre-8.1 legacy format.
2402  */
2403 int
2404 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti,
2405     ipfw_table *tbl)
2406 {
2407 	struct table_config *tc;
2408 	struct table_algo *ta;
2409 	struct dump_args da;
2410 
2411 	tbl->cnt = 0;
2412 
2413 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2414 		return (0);	/* XXX: We should return ESRCH */
2415 
2416 	ta = tc->ta;
2417 
2418 	/* This dump format supports IPv4 only */
2419 	if (tc->no.subtype != IPFW_TABLE_ADDR)
2420 		return (0);
2421 
2422 	memset(&da, 0, sizeof(da));
2423 	da.ch = ch;
2424 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2425 	da.tc = tc;
2426 	da.ent = &tbl->ent[0];
2427 	da.size = tbl->size;
2428 
2429 	tbl->cnt = 0;
2430 	ta->foreach(tc->astate, da.ti, dump_table_entry, &da);
2431 	tbl->cnt = da.cnt;
2432 
2433 	return (0);
2434 }
2435 
2436 /*
2437  * Dumps table entry in eXtended format (v1)(current).
2438  */
2439 static int
2440 dump_table_tentry(void *e, void *arg)
2441 {
2442 	struct dump_args *da;
2443 	struct table_config *tc;
2444 	struct table_algo *ta;
2445 	struct table_value *pval;
2446 	ipfw_obj_tentry *tent;
2447 	int error;
2448 
2449 	da = (struct dump_args *)arg;
2450 
2451 	tc = da->tc;
2452 	ta = tc->ta;
2453 
2454 	tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent));
2455 	/* Out of space in the sockopt buffer, stop and report ENOMEM */
2456 	if (tent == NULL) {
2457 		da->error = ENOMEM;
2458 		return (1);
2459 	}
2460 	tent->head.length = sizeof(ipfw_obj_tentry);
2461 	tent->idx = da->uidx;
2462 
2463 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2464 	if (error != 0)
2465 		return (error);
2466 
2467 	pval = get_table_value(da->ch, da->tc, tent->v.kidx);
2468 	ipfw_export_table_value_v1(pval, &tent->v.value);
2469 
2470 	return (0);
2471 }
2472 
2473 /*
2474  * Dumps table entry in eXtended format (v0).
2475  */
2476 static int
2477 dump_table_xentry(void *e, void *arg)
2478 {
2479 	struct dump_args *da;
2480 	struct table_config *tc;
2481 	struct table_algo *ta;
2482 	ipfw_table_xentry *xent;
2483 	ipfw_obj_tentry *tent;
2484 	struct table_value *pval;
2485 	int error;
2486 
2487 	da = (struct dump_args *)arg;
2488 
2489 	tc = da->tc;
2490 	ta = tc->ta;
2491 
2492 	xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent));
2493 	/* Out of space in the sockopt buffer, stop the dump */
2494 	if (xent == NULL)
2495 		return (1);
2496 	xent->len = sizeof(ipfw_table_xentry);
2497 	xent->tbl = da->uidx;
2498 
2499 	memset(&da->tent, 0, sizeof(da->tent));
2500 	tent = &da->tent;
2501 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2502 	if (error != 0)
2503 		return (error);
2504 
2505 	/* Convert current format to previous one */
2506 	xent->masklen = tent->masklen;
2507 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2508 	xent->value = ipfw_export_table_value_legacy(pval);
2509 	/* Apply some hacks */
2510 	if (tc->no.subtype == IPFW_TABLE_ADDR && tent->subtype == AF_INET) {
2511 		xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr;
2512 		xent->flags = IPFW_TCF_INET;
2513 	} else
2514 		memcpy(&xent->k, &tent->k, sizeof(xent->k));
2515 
2516 	return (0);
2517 }
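
/*
 * Example (illustrative sketch, not part of the build): how a legacy
 * (v0) consumer is expected to undo the conversion above and recover
 * the IPv4 address stored in the last word of the addr6 key.  The
 * "example_" name is hypothetical.
 */
#if 0
static struct in_addr
example_xentry_inet_addr(const ipfw_table_xentry *xent)
{
	struct in_addr a;

	if ((xent->flags & IPFW_TCF_INET) != 0)
		a.s_addr = xent->k.addr6.s6_addr32[3];
	else
		a.s_addr = INADDR_ANY;
	return (a);
}
#endif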
2518 
2519 /*
2520  * Helper function to export table algo data
2521  * to tentry format before calling user function.
2522  *
2523  * Returns 0 on success.
2524  */
2525 static int
2526 prepare_table_tentry(void *e, void *arg)
2527 {
2528 	struct dump_args *da;
2529 	struct table_config *tc;
2530 	struct table_algo *ta;
2531 	int error;
2532 
2533 	da = (struct dump_args *)arg;
2534 
2535 	tc = da->tc;
2536 	ta = tc->ta;
2537 
2538 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2539 	if (error != 0)
2540 		return (error);
2541 
2542 	da->f(&da->tent, da->farg);
2543 
2544 	return (0);
2545 }
2546 
2547 /*
2548  * Allow external consumers to read table entries in standard format.
2549  */
2550 int
2551 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx,
2552     ta_foreach_f *f, void *arg)
2553 {
2554 	struct namedobj_instance *ni;
2555 	struct table_config *tc;
2556 	struct table_algo *ta;
2557 	struct dump_args da;
2558 
2559 	ni = CHAIN_TO_NI(ch);
2560 
2561 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
2562 	if (tc == NULL)
2563 		return (ESRCH);
2564 
2565 	ta = tc->ta;
2566 
2567 	memset(&da, 0, sizeof(da));
2568 	da.ch = ch;
2569 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2570 	da.tc = tc;
2571 	da.f = f;
2572 	da.farg = arg;
2573 
2574 	ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da);
2575 
2576 	return (0);
2577 }
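
/*
 * Example (illustrative sketch, not part of the build): a hypothetical
 * external consumer counting IPv4 prefixes in table @kidx.  Each entry
 * is passed to the callback as an ipfw_obj_tentry; the caller is
 * assumed to hold the UH lock so the table cannot go away underneath.
 */
#if 0
static int
example_count_inet_cb(void *e, void *arg)
{
	ipfw_obj_tentry *tent;
	uint32_t *cnt;

	tent = (ipfw_obj_tentry *)e;
	cnt = (uint32_t *)arg;
	if (tent->subtype == AF_INET)
		(*cnt)++;
	return (0);
}

static uint32_t
example_count_table_inet(struct ip_fw_chain *ch, uint16_t kidx)
{
	uint32_t cnt;

	cnt = 0;
	ipfw_foreach_table_tentry(ch, kidx, example_count_inet_cb, &cnt);
	return (cnt);
}
#endif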
2578 
2579 /*
2580  * Table algorithms
2581  */
2582 
2583 /*
2584  * Finds algorithm by index, table type or supplied name.
2585  *
2586  * Returns pointer to algo or NULL.
2587  */
2588 static struct table_algo *
2589 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name)
2590 {
2591 	int i, l;
2592 	struct table_algo *ta;
2593 
2594 	if (ti->type > IPFW_TABLE_MAXTYPE)
2595 		return (NULL);
2596 
2597 	/* Search by index */
2598 	if (ti->atype != 0) {
2599 		if (ti->atype > tcfg->algo_count)
2600 			return (NULL);
2601 		return (tcfg->algo[ti->atype]);
2602 	}
2603 
2604 	if (name == NULL) {
2605 		/* Return default algorithm for given type if set */
2606 		return (tcfg->def_algo[ti->type]);
2607 	}
2608 
2609 	/* Search by name */
2610 	/* TODO: better search */
2611 	for (i = 1; i <= tcfg->algo_count; i++) {
2612 		ta = tcfg->algo[i];
2613 
2614 		/*
2615 		 * One can supply additional algorithm
2616 		 * parameters, so we compare only the first word
2617 		 * of the supplied name:
2618 		 * 'addr:chash hsize=32'
2619 		 * '^^^^^^^^^'
2621 		 */
2622 		l = strlen(ta->name);
2623 		if (strncmp(name, ta->name, l) != 0)
2624 			continue;
2625 		if (name[l] != '\0' && name[l] != ' ')
2626 			continue;
2627 		/* Check if we're requesting proper table type */
2628 		if (ti->type != 0 && ti->type != ta->type)
2629 			return (NULL);
2630 		return (ta);
2631 	}
2632 
2633 	return (NULL);
2634 }
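
/*
 * Example (illustrative sketch, not part of the build): name-based
 * resolution compares only the first word, so a request that carries
 * algorithm parameters still resolves; the trailing options are left
 * for the algorithm's own init code to parse.  The "example_" name is
 * hypothetical.
 */
#if 0
static struct table_algo *
example_resolve_algo(struct ip_fw_chain *ch)
{
	struct tid_info ti;

	memset(&ti, 0, sizeof(ti));
	ti.type = IPFW_TABLE_ADDR;

	/* Matches the "addr:chash" algorithm despite the options */
	return (find_table_algo(CHAIN_TO_TCFG(ch), &ti,
	    "addr:chash hsize=32"));
}
#endif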
2635 
2636 /*
2637  * Register new table algo @ta.
2638  * Stores algo id inside @idx.
2639  *
2640  * Returns 0 on success.
2641  */
2642 int
2643 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size,
2644     int *idx)
2645 {
2646 	struct tables_config *tcfg;
2647 	struct table_algo *ta_new;
2648 	size_t sz;
2649 
2650 	if (size > sizeof(struct table_algo))
2651 		return (EINVAL);
2652 
2653 	/* Check for the required on-stack size for add/del */
2654 	sz = roundup2(ta->ta_buf_size, sizeof(void *));
2655 	if (sz > TA_BUF_SZ)
2656 		return (EINVAL);
2657 
2658 	KASSERT(ta->type <= IPFW_TABLE_MAXTYPE,("Increase IPFW_TABLE_MAXTYPE"));
2659 
2660 	/* Copy algorithm data to stable storage. */
2661 	ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO);
2662 	memcpy(ta_new, ta, size);
2663 
2664 	tcfg = CHAIN_TO_TCFG(ch);
2665 
2666 	KASSERT(tcfg->algo_count < 255, ("Increase algo array size"));
2667 
2668 	tcfg->algo[++tcfg->algo_count] = ta_new;
2669 	ta_new->idx = tcfg->algo_count;
2670 
2671 	/* Set algorithm as default one for given type */
2672 	if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 &&
2673 	    tcfg->def_algo[ta_new->type] == NULL)
2674 		tcfg->def_algo[ta_new->type] = ta_new;
2675 
2676 	*idx = ta_new->idx;
2677 
2678 	return (0);
2679 }
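
/*
 * Example (illustrative sketch, not part of the build): minimal
 * registration of a new algorithm.  The callback set is abridged; a
 * real algorithm must provide every callback this file relies on
 * (init/destroy/add/del/foreach/dump_tentry/...).  All "example_"
 * names are hypothetical.
 */
#if 0
static int example_algo_idx;

static struct table_algo example_algo = {
	.name		= "addr:example",
	.type		= IPFW_TABLE_ADDR,
	.ta_buf_size	= sizeof(void *),
	/* .init = ..., .destroy = ..., .add = ..., .del = ..., etc. */
};

static void
example_register_algo(struct ip_fw_chain *ch)
{

	ipfw_add_table_algo(ch, &example_algo, sizeof(example_algo),
	    &example_algo_idx);
}
#endif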
2680 
2681 /*
2682  * Unregisters table algo using @idx as id.
2683  * XXX: It is NOT safe to call this function in any place
2684  * other than ipfw instance destroy handler.
2685  */
2686 void
2687 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx)
2688 {
2689 	struct tables_config *tcfg;
2690 	struct table_algo *ta;
2691 
2692 	tcfg = CHAIN_TO_TCFG(ch);
2693 
2694 	KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d",
2695 	    idx, tcfg->algo_count));
2696 
2697 	ta = tcfg->algo[idx];
2698 	KASSERT(ta != NULL, ("algo idx %d is NULL", idx));
2699 
2700 	if (tcfg->def_algo[ta->type] == ta)
2701 		tcfg->def_algo[ta->type] = NULL;
2702 
2703 	free(ta, M_IPFW);
2704 }
2705 
2706 /*
2707  * Lists all table algorithms currently available.
2708  * Data layout (v0)(current):
2709  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2710  * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ]
2711  *
2712  * Returns 0 on success
2713  */
2714 static int
2715 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2716     struct sockopt_data *sd)
2717 {
2718 	struct _ipfw_obj_lheader *olh;
2719 	struct tables_config *tcfg;
2720 	ipfw_ta_info *i;
2721 	struct table_algo *ta;
2722 	uint32_t count, n, size;
2723 
2724 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
2725 	if (olh == NULL)
2726 		return (EINVAL);
2727 	if (sd->valsize < olh->size)
2728 		return (EINVAL);
2729 
2730 	IPFW_UH_RLOCK(ch);
2731 	tcfg = CHAIN_TO_TCFG(ch);
2732 	count = tcfg->algo_count;
2733 	size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader);
2734 
2735 	/* Fill in header regardless of buffer size */
2736 	olh->count = count;
2737 	olh->objsize = sizeof(ipfw_ta_info);
2738 
2739 	if (size > olh->size) {
2740 		olh->size = size;
2741 		IPFW_UH_RUNLOCK(ch);
2742 		return (ENOMEM);
2743 	}
2744 	olh->size = size;
2745 
2746 	for (n = 1; n <= count; n++) {
2747 		i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2748 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
2749 		ta = tcfg->algo[n];
2750 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2751 		i->type = ta->type;
2752 		i->refcnt = ta->refcnt;
2753 	}
2754 
2755 	IPFW_UH_RUNLOCK(ch);
2756 
2757 	return (0);
2758 }
2759 
2760 static int
2761 classify_srcdst(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
2762 {
2763 	int v;
2764 	/* Basic IPv4/IPv6 or u32 lookups */
2765 	*puidx = cmd->arg1;
2766 	/* Assume ADDR by default */
2767 	*ptype = IPFW_TABLE_ADDR;
2768 
2769 	if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) {
2770 		/*
2771 		 * generic lookup. The key must be
2772 		 * in 32bit big-endian format.
2773 		 */
2774 		v = ((ipfw_insn_u32 *)cmd)->d[1];
2775 		switch (v) {
2776 		case 0:
2777 		case 1:
2778 			/* IPv4 src/dst */
2779 			break;
2780 		case 2:
2781 		case 3:
2782 			/* src/dst port */
2783 			*ptype = IPFW_TABLE_NUMBER;
2784 			break;
2785 		case 4:
2786 			/* uid/gid */
2787 			*ptype = IPFW_TABLE_NUMBER;
2788 			break;
2789 		case 5:
2790 			/* jid */
2791 			*ptype = IPFW_TABLE_NUMBER;
2792 			break;
2793 		case 6:
2794 			/* dscp */
2795 			*ptype = IPFW_TABLE_NUMBER;
2796 			break;
2797 		}
2798 	}
2799 
2800 	return (0);
2801 }
2802 
2803 static int
2804 classify_via(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
2805 {
2806 	ipfw_insn_if *cmdif;
2807 
2808 	/* Interface table, possibly */
2809 	cmdif = (ipfw_insn_if *)cmd;
2810 	if (cmdif->name[0] != '\1')
2811 		return (1);
2812 
2813 	*ptype = IPFW_TABLE_INTERFACE;
2814 	*puidx = cmdif->p.kidx;
2815 
2816 	return (0);
2817 }
2818 
2819 static int
2820 classify_flow(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
2821 {
2822 
2823 	*puidx = cmd->arg1;
2824 	*ptype = IPFW_TABLE_FLOW;
2825 
2826 	return (0);
2827 }
2828 
2829 static void
2830 update_arg1(ipfw_insn *cmd, uint16_t idx)
2831 {
2832 
2833 	cmd->arg1 = idx;
2834 }
2835 
2836 static void
2837 update_via(ipfw_insn *cmd, uint16_t idx)
2838 {
2839 	ipfw_insn_if *cmdif;
2840 
2841 	cmdif = (ipfw_insn_if *)cmd;
2842 	cmdif->p.kidx = idx;
2843 }
2844 
2845 static int
2846 table_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
2847     struct named_object **pno)
2848 {
2849 	struct table_config *tc;
2850 	int error;
2851 
2852 	IPFW_UH_WLOCK_ASSERT(ch);
2853 
2854 	error = find_table_err(CHAIN_TO_NI(ch), ti, &tc);
2855 	if (error != 0)
2856 		return (error);
2857 
2858 	*pno = &tc->no;
2859 	return (0);
2860 }
2861 
2862 /* XXX: sets-sets! */
2863 static struct named_object *
2864 table_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
2865 {
2866 	struct namedobj_instance *ni;
2867 	struct table_config *tc;
2868 
2869 	IPFW_UH_WLOCK_ASSERT(ch);
2870 	ni = CHAIN_TO_NI(ch);
2871 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, idx);
2872 	KASSERT(tc != NULL, ("Table with index %d not found", idx));
2873 
2874 	return (&tc->no);
2875 }
2876 
2877 static struct opcode_obj_rewrite opcodes[] = {
2878 	{
2879 		O_IP_SRC_LOOKUP, IPFW_TLV_TBL_NAME,
2880 		classify_srcdst, update_arg1,
2881 		table_findbyname, table_findbykidx, create_table_compat
2882 	},
2883 	{
2884 		O_IP_DST_LOOKUP, IPFW_TLV_TBL_NAME,
2885 		classify_srcdst, update_arg1,
2886 		table_findbyname, table_findbykidx, create_table_compat
2887 	},
2888 	{
2889 		O_IP_FLOW_LOOKUP, IPFW_TLV_TBL_NAME,
2890 		classify_flow, update_arg1,
2891 		table_findbyname, table_findbykidx, create_table_compat
2892 	},
2893 	{
2894 		O_XMIT, IPFW_TLV_TBL_NAME,
2895 		classify_via, update_via,
2896 		table_findbyname, table_findbykidx, create_table_compat
2897 	},
2898 	{
2899 		O_RECV, IPFW_TLV_TBL_NAME,
2900 		classify_via, update_via,
2901 		table_findbyname, table_findbykidx, create_table_compat
2902 	},
2903 	{
2904 		O_VIA, IPFW_TLV_TBL_NAME,
2905 		classify_via, update_via,
2906 		table_findbyname, table_findbykidx, create_table_compat
2907 	},
2908 };
2909 
2910 
2911 /*
2912  * Checks table name for validity.
2913  * Enforces basic length checks; the rest
2914  * should be done in userland.
2915  *
2916  * Returns 0 if name is considered valid.
2917  */
2918 int
2919 ipfw_check_table_name(char *name)
2920 {
2921 	int nsize;
2922 	ipfw_obj_ntlv *ntlv = NULL;
2923 
2924 	nsize = sizeof(ntlv->name);
2925 
2926 	if (strnlen(name, nsize) == nsize)
2927 		return (EINVAL);
2928 
2929 	if (name[0] == '\0')
2930 		return (EINVAL);
2931 
2932 	/*
2933 	 * TODO: do some more complicated checks
2934 	 */
2935 
2936 	return (0);
2937 }
2938 
2939 /*
2940  * Finds the tablename TLV by @uidx.
2941  * Check @tlvs for valid data inside.
2942  *
2943  * Returns pointer to found TLV or NULL.
2944  */
2945 static ipfw_obj_ntlv *
2946 find_name_tlv(void *tlvs, int len, uint16_t uidx)
2947 {
2948 	ipfw_obj_ntlv *ntlv;
2949 	uintptr_t pa, pe;
2950 	int l;
2951 
2952 	pa = (uintptr_t)tlvs;
2953 	pe = pa + len;
2954 	l = 0;
2955 	for (; pa < pe; pa += l) {
2956 		ntlv = (ipfw_obj_ntlv *)pa;
2957 		l = ntlv->head.length;
2958 
2959 		if (l != sizeof(*ntlv))
2960 			return (NULL);
2961 
2962 		if (ntlv->head.type != IPFW_TLV_TBL_NAME)
2963 			continue;
2964 
2965 		if (ntlv->idx != uidx)
2966 			continue;
2967 
2968 		if (ipfw_check_table_name(ntlv->name) != 0)
2969 			return (NULL);
2970 
2971 		return (ntlv);
2972 	}
2973 
2974 	return (NULL);
2975 }
2976 
2977 /*
2978  * Finds table config based on either legacy index
2979  * or name in ntlv.
2980  * Note @ti structure contains unchecked data from userland.
2981  *
2982  * Returns 0 in success and fills in @tc with found config
2983  * Returns 0 on success and fills in @tc with the found config.
2984 static int
2985 find_table_err(struct namedobj_instance *ni, struct tid_info *ti,
2986     struct table_config **tc)
2987 {
2988 	char *name, bname[16];
2989 	struct named_object *no;
2990 	ipfw_obj_ntlv *ntlv;
2991 	uint32_t set;
2992 
2993 	if (ti->tlvs != NULL) {
2994 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
2995 		if (ntlv == NULL)
2996 			return (EINVAL);
2997 		name = ntlv->name;
2998 
2999 		/*
3000 		 * Use set provided by @ti instead of @ntlv one.
3001 		 * This is needed due to different sets behavior
3002 		 * controlled by V_fw_tables_sets.
3003 		 */
3004 		set = ti->set;
3005 	} else {
3006 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
3007 		name = bname;
3008 		set = 0;
3009 	}
3010 
3011 	no = ipfw_objhash_lookup_name(ni, set, name);
3012 	*tc = (struct table_config *)no;
3013 
3014 	return (0);
3015 }
3016 
3017 /*
3018  * Finds table config based on either legacy index
3019  * or name in ntlv.
3020  * Note @ti structure contains unchecked data from userland.
3021  *
3022  * Returns pointer to table_config or NULL.
3023  */
3024 static struct table_config *
3025 find_table(struct namedobj_instance *ni, struct tid_info *ti)
3026 {
3027 	struct table_config *tc;
3028 
3029 	if (find_table_err(ni, ti, &tc) != 0)
3030 		return (NULL);
3031 
3032 	return (tc);
3033 }
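
/*
 * Example (illustrative sketch, not part of the build): the two naming
 * schemes accepted here.  A modern caller passes a name TLV (converted
 * with ntlv_to_ti()), while a legacy caller passes a numeric index,
 * which is looked up as the string "N" in set 0.  The "example_" names
 * and the table name "badhosts" are hypothetical.
 */
#if 0
static struct table_config *
example_find_by_name(struct namedobj_instance *ni, uint8_t set)
{
	ipfw_obj_ntlv ntlv;
	struct tid_info ti;

	memset(&ntlv, 0, sizeof(ntlv));
	ntlv.head.type = IPFW_TLV_TBL_NAME;
	ntlv.head.length = sizeof(ntlv);
	ntlv.idx = 1;
	ntlv.set = set;
	strlcpy(ntlv.name, "badhosts", sizeof(ntlv.name));

	ntlv_to_ti(&ntlv, &ti);
	return (find_table(ni, &ti));
}

static struct table_config *
example_find_legacy(struct namedobj_instance *ni, uint16_t num)
{
	struct tid_info ti;

	memset(&ti, 0, sizeof(ti));
	ti.uidx = num;		/* looked up as e.g. "42" in set 0 */
	return (find_table(ni, &ti));
}
#endif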
3034 
3035 /*
3036  * Allocate new table config structure using
3037  * specified @algo and @aname.
3038  *
3039  * Returns pointer to config or NULL.
3040  */
3041 static struct table_config *
3042 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti,
3043     struct table_algo *ta, char *aname, uint8_t tflags)
3044 {
3045 	char *name, bname[16];
3046 	struct table_config *tc;
3047 	int error;
3048 	ipfw_obj_ntlv *ntlv;
3049 	uint32_t set;
3050 
3051 	if (ti->tlvs != NULL) {
3052 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
3053 		if (ntlv == NULL)
3054 			return (NULL);
3055 		name = ntlv->name;
3056 		set = ntlv->set;
3057 	} else {
3058 		/* Compat part: convert number to string representation */
3059 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
3060 		name = bname;
3061 		set = 0;
3062 	}
3063 
3064 	tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO);
3065 	tc->no.name = tc->tablename;
3066 	tc->no.subtype = ta->type;
3067 	tc->no.set = set;
3068 	tc->tflags = tflags;
3069 	tc->ta = ta;
3070 	strlcpy(tc->tablename, name, sizeof(tc->tablename));
3071 	/* Set "shared" value type by default */
3072 	tc->vshared = 1;
3073 
3074 	/* Preallocate data structures for new tables */
3075 	error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags);
3076 	if (error != 0) {
3077 		free(tc, M_IPFW);
3078 		return (NULL);
3079 	}
3080 
3081 	return (tc);
3082 }
3083 
3084 /*
3085  * Destroys table state and config.
3086  */
3087 static void
3088 free_table_config(struct namedobj_instance *ni, struct table_config *tc)
3089 {
3090 
3091 	KASSERT(tc->linked == 0, ("free() on linked config"));
3092 	/* UH lock MUST NOT be held */
3093 
3094 	/*
3095 	 * We're using ta without any locking/referencing.
3096 	 * TODO: fix this if we're going to use unloadable algos.
3097 	 */
3098 	tc->ta->destroy(tc->astate, &tc->ti_copy);
3099 	free(tc, M_IPFW);
3100 }
3101 
3102 /*
3103  * Links @tc to the @ch table named-object instance.
3104  * Sets the appropriate type/state in the @ch table info.
3105  */
3106 static void
3107 link_table(struct ip_fw_chain *ch, struct table_config *tc)
3108 {
3109 	struct namedobj_instance *ni;
3110 	struct table_info *ti;
3111 	uint16_t kidx;
3112 
3113 	IPFW_UH_WLOCK_ASSERT(ch);
3114 	IPFW_WLOCK_ASSERT(ch);
3115 
3116 	ni = CHAIN_TO_NI(ch);
3117 	kidx = tc->no.kidx;
3118 
3119 	ipfw_objhash_add(ni, &tc->no);
3120 
3121 	ti = KIDX_TO_TI(ch, kidx);
3122 	*ti = tc->ti_copy;
3123 
3124 	/* Notify algo on real @ti address */
3125 	if (tc->ta->change_ti != NULL)
3126 		tc->ta->change_ti(tc->astate, ti);
3127 
3128 	tc->linked = 1;
3129 	tc->ta->refcnt++;
3130 }
3131 
3132 /*
3133  * Unlinks @tc from the @ch table named-object instance.
3134  * Zeroes the state in @ch; a copy is kept in @tc.
3135  */
3136 static void
3137 unlink_table(struct ip_fw_chain *ch, struct table_config *tc)
3138 {
3139 	struct namedobj_instance *ni;
3140 	struct table_info *ti;
3141 	uint16_t kidx;
3142 
3143 	IPFW_UH_WLOCK_ASSERT(ch);
3144 	IPFW_WLOCK_ASSERT(ch);
3145 
3146 	ni = CHAIN_TO_NI(ch);
3147 	kidx = tc->no.kidx;
3148 
3149 	/* Clear state. @ti copy is already saved inside @tc */
3150 	ipfw_objhash_del(ni, &tc->no);
3151 	ti = KIDX_TO_TI(ch, kidx);
3152 	memset(ti, 0, sizeof(struct table_info));
3153 	tc->linked = 0;
3154 	tc->ta->refcnt--;
3155 
3156 	/* Notify algo on real @ti address */
3157 	if (tc->ta->change_ti != NULL)
3158 		tc->ta->change_ti(tc->astate, NULL);
3159 }
3160 
3161 struct swap_table_args {
3162 	int set;
3163 	int new_set;
3164 	int mv;
3165 };
3166 
3167 /*
3168  * Change set for each matching table.
3169  *
3170  * Ensure we dispatch each table once by setting/checking the
3171  * ochanged field.
3172  */
3173 static void
3174 swap_table_set(struct namedobj_instance *ni, struct named_object *no,
3175     void *arg)
3176 {
3177 	struct table_config *tc;
3178 	struct swap_table_args *sta;
3179 
3180 	tc = (struct table_config *)no;
3181 	sta = (struct swap_table_args *)arg;
3182 
3183 	if (no->set != sta->set && (no->set != sta->new_set || sta->mv != 0))
3184 		return;
3185 
3186 	if (tc->ochanged != 0)
3187 		return;
3188 
3189 	tc->ochanged = 1;
3190 	ipfw_objhash_del(ni, no);
3191 	if (no->set == sta->set)
3192 		no->set = sta->new_set;
3193 	else
3194 		no->set = sta->set;
3195 	ipfw_objhash_add(ni, no);
3196 }
3197 
3198 /*
3199  * Cleans up ochange field for all tables.
3200  */
3201 static void
3202 clean_table_set_data(struct namedobj_instance *ni, struct named_object *no,
3203     void *arg)
3204 {
3205 	struct table_config *tc;
3206 	struct swap_table_args *sta;
3207 
3208 	tc = (struct table_config *)no;
3209 	sta = (struct swap_table_args *)arg;
3210 
3211 	tc->ochanged = 0;
3212 }
3213 
3214 /*
3215  * Swaps tables within two sets.
3216  */
3217 void
3218 ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t set,
3219     uint32_t new_set, int mv)
3220 {
3221 	struct swap_table_args sta;
3222 
3223 	IPFW_UH_WLOCK_ASSERT(ch);
3224 
3225 	sta.set = set;
3226 	sta.new_set = new_set;
3227 	sta.mv = mv;
3228 
3229 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), swap_table_set, &sta);
3230 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), clean_table_set_data, &sta);
3231 }
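
/*
 * Example (illustrative sketch, not part of the build): with @mv == 0
 * the tables of sets 4 and 5 trade places; with @mv != 0 tables of set
 * 4 are moved into set 5 while tables already in set 5 stay put.  The
 * caller must hold the UH write lock.  The "example_" name and the set
 * numbers are hypothetical.
 */
#if 0
static void
example_set_ops(struct ip_fw_chain *ch)
{

	IPFW_UH_WLOCK(ch);
	ipfw_swap_tables_sets(ch, 4, 5, 0);	/* swap sets 4 and 5 */
	ipfw_swap_tables_sets(ch, 4, 5, 1);	/* move set 4 into set 5 */
	IPFW_UH_WUNLOCK(ch);
}
#endif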
3232 
3233 /*
3234  * Moves all tables which are referenced by rules in @rt to set @new_set.
3235  * Makes sure that all relevant tables are referenced ONLY by given rules.
3236  *
3237  * Returns 0 on success.
3238  */
3239 int
3240 ipfw_move_tables_sets(struct ip_fw_chain *ch, ipfw_range_tlv *rt,
3241     uint32_t new_set)
3242 {
3243 	struct ip_fw *rule;
3244 	struct table_config *tc;
3245 	struct named_object *no;
3246 	struct namedobj_instance *ni;
3247 	int bad, i, l, cmdlen;
3248 	uint16_t kidx;
3249 	ipfw_insn *cmd;
3250 
3251 	IPFW_UH_WLOCK_ASSERT(ch);
3252 
3253 	ni = CHAIN_TO_NI(ch);
3254 
3255 	/* Stage 1: count number of references by given rules */
3256 	for (i = 0; i < ch->n_rules - 1; i++) {
3257 		rule = ch->map[i];
3258 		if (ipfw_match_range(rule, rt) == 0)
3259 			continue;
3260 
3261 		l = rule->cmd_len;
3262 		cmd = rule->cmd;
3263 		cmdlen = 0;
3264 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3265 			cmdlen = F_LEN(cmd);
3266 			if (classify_opcode_kidx(cmd, &kidx) != 0)
3267 				continue;
3268 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3269 			KASSERT(no != NULL,
3270 			    ("objhash lookup failed on index %d", kidx));
3271 			tc = (struct table_config *)no;
3272 			tc->ocount++;
3273 		}
3274 
3275 	}
3276 
3277 	/* Stage 2: verify "ownership" */
3278 	bad = 0;
3279 	for (i = 0; i < ch->n_rules - 1; i++) {
3280 		rule = ch->map[i];
3281 		if (ipfw_match_range(rule, rt) == 0)
3282 			continue;
3283 
3284 		l = rule->cmd_len;
3285 		cmd = rule->cmd;
3286 		cmdlen = 0;
3287 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3288 			cmdlen = F_LEN(cmd);
3289 			if (classify_opcode_kidx(cmd, &kidx) != 0)
3290 				continue;
3291 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3292 			KASSERT(no != NULL,
3293 			    ("objhash lookup failed on index %d", kidx));
3294 			tc = (struct table_config *)no;
3295 			if (tc->no.refcnt != tc->ocount) {
3296 
3297 				/*
3298 				 * Number of references differs:
3299 				 * other rule(s) are holding a reference to
3300 				 * the given table, so it is not possible
3301 				 * to change its set.
3302 				 *
3303 				 * Note that refcnt may account for
3304 				 * references from some yet-to-be-added rules.
3305 				 * Since we don't know their number (and even
3306 				 * whether they will be added) it is perfectly
3307 				 * OK to return an error here.
3307 				 */
3308 				bad = 1;
3309 				break;
3310 			}
3311 		}
3312 
3313 		if (bad != 0)
3314 			break;
3315 	}
3316 
3317 	/* Stage 3: change set or cleanup */
3318 	for (i = 0; i < ch->n_rules - 1; i++) {
3319 		rule = ch->map[i];
3320 		if (ipfw_match_range(rule, rt) == 0)
3321 			continue;
3322 
3323 		l = rule->cmd_len;
3324 		cmd = rule->cmd;
3325 		cmdlen = 0;
3326 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3327 			cmdlen = F_LEN(cmd);
3328 			if (classify_opcode_kidx(cmd, &kidx) != 0)
3329 				continue;
3330 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3331 			KASSERT(no != NULL,
3332 			    ("objhash lookup failed on index %d", kidx));
3333 			tc = (struct table_config *)no;
3334 
3335 			tc->ocount = 0;
3336 			if (bad != 0)
3337 				continue;
3338 
3339 			/* Actually change set. */
3340 			ipfw_objhash_del(ni, no);
3341 			no->set = new_set;
3342 			ipfw_objhash_add(ni, no);
3343 		}
3344 	}
3345 
3346 	return (bad);
3347 }
3348 
3349 /*
3350  * Finds and bumps refcount for objects referenced by given @rule.
3351  * Auto-creates non-existing tables.
3352  * Fills in @oib array with userland/kernel indexes.
3353  *
3354  * Returns 0 on success.
3355  */
3356 static int
3357 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
3358     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
3359 {
3360 	int cmdlen, error, l, numnew;
3361 	ipfw_insn *cmd;
3362 	struct obj_idx *pidx;
3363 	int found, unresolved;
3364 
3365 	pidx = oib;
3366 	l = rule->cmd_len;
3367 	cmd = rule->cmd;
3368 	cmdlen = 0;
3369 	error = 0;
3370 	numnew = 0;
3371 	found = 0;
3372 	unresolved = 0;
3373 
3374 	IPFW_UH_WLOCK(ch);
3375 
3376 	/* Increase refcount on each existing referenced table. */
3377 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3378 		cmdlen = F_LEN(cmd);
3379 
3380 		error = ref_opcode_object(ch, cmd, ti, pidx, &found, &unresolved);
3381 		if (error != 0)
3382 			break;
3383 		if (found || unresolved) {
3384 			pidx->off = rule->cmd_len - l;
3385 			pidx++;
3386 		}
3387 		/*
3388 		 * Compatibility stuff for old clients:
3389 		 * prepare to manually create non-existing objects.
3390 		 */
3391 		if (unresolved)
3392 			numnew++;
3393 	}
3394 
3395 	if (error != 0) {
3396 		/* Unref everything we have already done */
3397 		unref_oib_objects(ch, rule->cmd, oib, pidx);
3398 		IPFW_UH_WUNLOCK(ch);
3399 		return (error);
3400 	}
3401 
3402 	IPFW_UH_WUNLOCK(ch);
3403 
3404 	found = pidx - oib;
3405 	KASSERT(found == ci->object_opcodes,
3406 	    ("refcount inconsistency: found: %d total: %d",
3407 	    found, ci->object_opcodes));
3408 
3409 	/* Perform auto-creation for non-existing objects */
3410 	if (numnew != 0)
3411 		error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);
3412 
3413 	return (error);
3414 }
3415 
3416 /*
3417  * Checks if an opcode is referencing a table of the appropriate type.
3418  * Adds a reference on the found table if so.
3419  * Rewrites user-supplied opcode values with kernel ones.
3420  *
3421  * Returns 0 on success and appropriate error code otherwise.
3422  */
3423 int
3424 ipfw_rewrite_rule_uidx(struct ip_fw_chain *chain,
3425     struct rule_check_info *ci)
3426 {
3427 	int error;
3428 	ipfw_insn *cmd;
3429 	uint8_t type;
3430 	struct obj_idx *p, *pidx_first, *pidx_last;
3431 	struct tid_info ti;
3432 
3433 	/*
3434 	 * Prepare an array for storing opcode indices.
3435 	 * Use stack allocation by default.
3436 	 */
3437 	if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
3438 		/* Stack */
3439 		pidx_first = ci->obuf;
3440 	} else
3441 		pidx_first = malloc(ci->object_opcodes * sizeof(struct obj_idx),
3442 		    M_IPFW, M_WAITOK | M_ZERO);
3443 
3444 	pidx_last = pidx_first + ci->object_opcodes;
3445 	error = 0;
3446 	type = 0;
3447 	memset(&ti, 0, sizeof(ti));
3448 
3449 	/*
3450 	 * Use default set for looking up tables (old way) or
3451 	 * use set rule is assigned to (new way).
3452 	 */
3453 	ti.set = (V_fw_tables_sets != 0) ? ci->krule->set : 0;
3454 	if (ci->ctlv != NULL) {
3455 		ti.tlvs = (void *)(ci->ctlv + 1);
3456 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
3457 	}
3458 
3459 	/* Reference all used tables and other objects */
3460 	error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
3461 	if (error != 0)
3462 		goto free;
3463 
3464 	/* Perform rule rewrite */
3465 	for (p = pidx_first; p < pidx_last; p++) {
3467 		cmd = ci->krule->cmd + p->off;
3468 		update_opcode_kidx(cmd, p->kidx);
3469 	}
3470 
3471 free:
3472 	if (pidx_first != ci->obuf)
3473 		free(pidx_first, M_IPFW);
3474 
3475 	return (error);
3476 }
3477 
3478 static struct ipfw_sopt_handler	scodes[] = {
3479 	{ IP_FW_TABLE_XCREATE,	0,	HDIR_SET,	create_table },
3480 	{ IP_FW_TABLE_XDESTROY,	0,	HDIR_SET,	flush_table_v0 },
3481 	{ IP_FW_TABLE_XFLUSH,	0,	HDIR_SET,	flush_table_v0 },
3482 	{ IP_FW_TABLE_XMODIFY,	0,	HDIR_BOTH,	modify_table },
3483 	{ IP_FW_TABLE_XINFO,	0,	HDIR_GET,	describe_table },
3484 	{ IP_FW_TABLES_XLIST,	0,	HDIR_GET,	list_tables },
3485 	{ IP_FW_TABLE_XLIST,	0,	HDIR_GET,	dump_table_v0 },
3486 	{ IP_FW_TABLE_XLIST,	1,	HDIR_GET,	dump_table_v1 },
3487 	{ IP_FW_TABLE_XADD,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3488 	{ IP_FW_TABLE_XADD,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3489 	{ IP_FW_TABLE_XDEL,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3490 	{ IP_FW_TABLE_XDEL,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3491 	{ IP_FW_TABLE_XFIND,	0,	HDIR_GET,	find_table_entry },
3492 	{ IP_FW_TABLE_XSWAP,	0,	HDIR_SET,	swap_table },
3493 	{ IP_FW_TABLES_ALIST,	0,	HDIR_GET,	list_table_algo },
3494 	{ IP_FW_TABLE_XGETSIZE,	0,	HDIR_GET,	get_table_size },
3495 };
3496 
3497 static void
3498 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no,
3499     void *arg)
3500 {
3501 
3502 	unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no);
3503 	if (ipfw_objhash_free_idx(ni, no->kidx) != 0)
3504 		printf("Error unlinking kidx %d from table %s\n",
3505 		    no->kidx, no->name);
3506 	free_table_config(ni, (struct table_config *)no);
3507 }
3508 
3509 /*
3510  * Shuts tables module down.
3511  */
3512 void
3513 ipfw_destroy_tables(struct ip_fw_chain *ch, int last)
3514 {
3515 
3516 	IPFW_DEL_SOPT_HANDLER(last, scodes);
3517 	IPFW_DEL_OBJ_REWRITER(last, opcodes);
3518 
3519 	/* Remove all tables from working set */
3520 	IPFW_UH_WLOCK(ch);
3521 	IPFW_WLOCK(ch);
3522 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch);
3523 	IPFW_WUNLOCK(ch);
3524 	IPFW_UH_WUNLOCK(ch);
3525 
3526 	/* Free the table state array itself */
3527 	free(ch->tablestate, M_IPFW);
3528 
3529 	ipfw_table_value_destroy(ch, last);
3530 	ipfw_table_algo_destroy(ch);
3531 
3532 	ipfw_objhash_destroy(CHAIN_TO_NI(ch));
3533 	free(CHAIN_TO_TCFG(ch), M_IPFW);
3534 }
3535 
3536 /*
3537  * Starts tables module.
3538  */
3539 int
3540 ipfw_init_tables(struct ip_fw_chain *ch, int first)
3541 {
3542 	struct tables_config *tcfg;
3543 
3544 	/* Allocate pointers */
3545 	ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info),
3546 	    M_IPFW, M_WAITOK | M_ZERO);
3547 
3548 	tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO);
3549 	tcfg->namehash = ipfw_objhash_create(V_fw_tables_max);
3550 	ch->tblcfg = tcfg;
3551 
3552 	ipfw_table_value_init(ch, first);
3553 	ipfw_table_algo_init(ch);
3554 
3555 	IPFW_ADD_OBJ_REWRITER(first, opcodes);
3556 	IPFW_ADD_SOPT_HANDLER(first, scodes);
3557 	return (0);
3558 }
3559 