xref: /freebsd/sys/netpfil/ipfw/ip_fw_table.c (revision 7e00348e7605b9906601438008341ffc37c00e2c)
1 /*-
2  * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
3  * Copyright (c) 2014 Yandex LLC
4  * Copyright (c) 2014 Alexander V. Chernikov
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * Lookup table support for ipfw.
33  *
34  * This file contains handlers for all generic tables' operations:
35  * add/del/flush entries, list/dump tables, etc.
36  *
37  * Table data modification is protected by both the UH and runtime locks,
38  * while reading configuration/data is protected by the UH lock.
39  *
40  * Lookup algorithms for all table types are located in ip_fw_table_algo.c
41  */
42 
43 #include "opt_ipfw.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/rwlock.h>
51 #include <sys/rmlock.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/queue.h>
55 #include <net/if.h>	/* ip_fw.h requires IFNAMSIZ */
56 
57 #include <netinet/in.h>
58 #include <netinet/ip_var.h>	/* struct ipfw_rule_ref */
59 #include <netinet/ip_fw.h>
60 
61 #include <netpfil/ipfw/ip_fw_private.h>
62 #include <netpfil/ipfw/ip_fw_table.h>
63 
64 /*
65  * Table has the following `type` concepts:
66  *
67  * `no.type` represents lookup key type (addr, ifp, uid, etc..)
68  * vmask represents bitmask of table values which are present at the moment.
69  * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents old
70  * single-value-for-all approach.
71  */
72 struct table_config {
73 	struct named_object	no;
74 	uint8_t		tflags;		/* type flags */
75 	uint8_t		locked;		/* 1 if locked from changes */
76 	uint8_t		linked;		/* 1 if already linked */
77 	uint8_t		ochanged;	/* used by set swapping */
78 	uint8_t		vshared;	/* 1 if using shared value array */
79 	uint8_t		spare[3];
80 	uint32_t	count;		/* Number of records */
81 	uint32_t	limit;		/* Max number of records */
82 	uint32_t	vmask;		/* bitmask with supported values */
83 	uint32_t	ocount;		/* used by set swapping */
84 	uint64_t	gencnt;		/* generation count */
85 	char		tablename[64];	/* table name */
86 	struct table_algo	*ta;	/* Callbacks for given algo */
87 	void		*astate;	/* algorithm state */
88 	struct table_info	ti_copy;	/* data to put to table_info */
89 	struct namedobj_instance	*vi;
90 };
91 
92 static struct table_config *find_table(struct namedobj_instance *ni,
93     struct tid_info *ti);
94 static struct table_config *alloc_table_config(struct ip_fw_chain *ch,
95     struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags);
96 static void free_table_config(struct namedobj_instance *ni,
97     struct table_config *tc);
98 static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
99     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref);
100 static void link_table(struct ip_fw_chain *ch, struct table_config *tc);
101 static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc);
102 static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
103     struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc);
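/* Operation codes passed to find_ref_table() and the batch add/del helpers */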
104 #define	OP_ADD	1
105 #define	OP_DEL	0
106 static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
107     struct sockopt_data *sd);
108 static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
109     ipfw_xtable_info *i);
110 static int dump_table_tentry(void *e, void *arg);
111 static int dump_table_xentry(void *e, void *arg);
112 
113 static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
114     struct tid_info *b);
115 
116 static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
117     struct table_config *tc, struct table_info *ti, uint32_t count);
118 static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);
119 
120 static struct table_algo *find_table_algo(struct tables_config *tableconf,
121     struct tid_info *ti, char *name);
122 
123 static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti);
124 static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);
125 static int classify_table_opcode(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype);
126 
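/*
 * CHAIN_TO_NI() resolves the table name hash of a chain;
 * KIDX_TO_TI() resolves the runtime table_info slot for kernel index @k
 * within ch->tablestate.
 */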
127 #define	CHAIN_TO_NI(chain)	(CHAIN_TO_TCFG(chain)->namehash)
128 #define	KIDX_TO_TI(ch, k)	(&(((struct table_info *)(ch)->tablestate)[k]))
129 
130 #define	TA_BUF_SZ	128	/* On-stack buffer for add/delete state */
131 
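/*
 * Walks the per-chain list of in-flight table operations and calls each
 * registered handler with @object (the table config or chain being changed)
 * so that affected operations can roll back their partial state and restart.
 */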
132 void
133 rollback_toperation_state(struct ip_fw_chain *ch, void *object)
134 {
135 	struct tables_config *tcfg;
136 	struct op_state *os;
137 
138 	tcfg = CHAIN_TO_TCFG(ch);
139 	TAILQ_FOREACH(os, &tcfg->state_list, next)
140 		os->func(object, os);
141 }
142 
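/*
 * Registers (add) / deregisters (del) an in-flight table operation state
 * on the per-chain state list walked by rollback_toperation_state().
 */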
143 void
144 add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
145 {
146 	struct tables_config *tcfg;
147 
148 	tcfg = CHAIN_TO_TCFG(ch);
149 	TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
150 }
151 
152 void
153 del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
154 {
155 	struct tables_config *tcfg;
156 
157 	tcfg = CHAIN_TO_TCFG(ch);
158 	TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
159 }
160 
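/*
 * Reference counting helpers for table configs.
 * The counter is part of tc->no and is protected by the UH lock.
 */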
161 void
162 tc_ref(struct table_config *tc)
163 {
164 
165 	tc->no.refcnt++;
166 }
167 
168 void
169 tc_unref(struct table_config *tc)
170 {
171 
172 	tc->no.refcnt--;
173 }
174 
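/*
 * Returns a pointer to the table_value slot for value index @kidx
 * inside the per-chain value state array.
 */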
175 static struct table_value *
176 get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
177 {
178 	struct table_value *pval;
179 
180 	pval = (struct table_value *)ch->valuestate;
181 
182 	return (&pval[kidx]);
183 }
184 
185 
186 /*
187  * Checks if we're able to insert/update entry @tei into table
188  * w.r.t @tc limits.
189  * May alter @tei to indicate insertion error / insert
190  * options.
191  *
192  * Returns 0 if the operation can be performed.
193  */
194 static int
195 check_table_limit(struct table_config *tc, struct tentry_info *tei)
196 {
197 
198 	if (tc->limit == 0 || tc->count < tc->limit)
199 		return (0);
200 
201 	if ((tei->flags & TEI_FLAGS_UPDATE) == 0) {
202 		/* Notify userland on error cause */
203 		tei->flags |= TEI_FLAGS_LIMIT;
204 		return (EFBIG);
205 	}
206 
207 	/*
208 	 * We have UPDATE flag set.
209 	 * Permit updating record (if found),
210 	 * but restrict adding new one since we've
211 	 * already hit the limit.
212 	 */
213 	tei->flags |= TEI_FLAGS_DONTADD;
214 
215 	return (0);
216 }
217 
218 /*
219  * Convert algorithm callback return code into
220  * one of pre-defined states known by userland.
221  */
222 static void
223 store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num)
224 {
225 	int flag;
226 
227 	flag = 0;
228 
229 	switch (error) {
230 	case 0:
231 		if (op == OP_ADD && num != 0)
232 			flag = TEI_FLAGS_ADDED;
233 		if (op == OP_DEL)
234 			flag = TEI_FLAGS_DELETED;
235 		break;
236 	case ENOENT:
237 		flag = TEI_FLAGS_NOTFOUND;
238 		break;
239 	case EEXIST:
240 		flag = TEI_FLAGS_EXISTS;
241 		break;
242 	default:
243 		flag = TEI_FLAGS_ERROR;
244 	}
245 
246 	tei->flags |= flag;
247 }
248 
249 /*
250  * Creates and references table with default parameters.
251  * Saves the kidx of the created/referenced table into @pkidx,
252  * if @pkidx is non-NULL.
253  * Used for table auto-creation to support old binaries.
254  *
255  * Returns 0 on success.
256  */
257 static int
258 create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
259     uint16_t *pkidx)
260 {
261 	ipfw_xtable_info xi;
262 	int error;
263 
264 	memset(&xi, 0, sizeof(xi));
265 	/* Set default value mask for legacy clients */
266 	xi.vmask = IPFW_VTYPE_LEGACY;
267 
268 	error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1);
269 	if (error != 0)
270 		return (error);
271 
272 	return (0);
273 }
274 
275 /*
276  * Find and reference existing table optionally
277  * creating new one.
278  *
279  * Saves found table config into @ptc.
280  * Note function may drop/acquire UH_WLOCK.
281  * Returns 0 if table was found/created and referenced
282  * or non-zero return code.
283  */
284 static int
285 find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
286     struct tentry_info *tei, uint32_t count, int op,
287     struct table_config **ptc)
288 {
289 	struct namedobj_instance *ni;
290 	struct table_config *tc;
291 	uint16_t kidx;
292 	int error;
293 
294 	IPFW_UH_WLOCK_ASSERT(ch);
295 
296 	ni = CHAIN_TO_NI(ch);
297 	tc = NULL;
298 	if ((tc = find_table(ni, ti)) != NULL) {
299 		/* check table type */
300 		if (tc->no.type != ti->type)
301 			return (EINVAL);
302 
303 		if (tc->locked != 0)
304 			return (EACCES);
305 
306 		/* Try to exit early on limit hit */
307 		if (op == OP_ADD && count == 1 &&
308 		    check_table_limit(tc, tei) != 0)
309 			return (EFBIG);
310 
311 		/* Reference and return */
312 		tc->no.refcnt++;
313 		*ptc = tc;
314 		return (0);
315 	}
316 
317 	if (op == OP_DEL)
318 		return (ESRCH);
319 
320 	/* Compatibility mode: create new table for old clients */
321 	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
322 		return (ESRCH);
323 
324 	IPFW_UH_WUNLOCK(ch);
325 	error = create_table_compat(ch, ti, &kidx);
326 	IPFW_UH_WLOCK(ch);
327 
328 	if (error != 0)
329 		return (error);
330 
331 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
332 	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));
333 
334 	/* OK, now we've got referenced table. */
335 	*ptc = tc;
336 	return (0);
337 }
338 
339 /*
340  * Rolls back the @added entries already inserted into @tc using state
341  * array @ta_buf_m. Assumes the following layout:
342  * 1) ADD state (ta_buf_m[0] ... ta_buf_m[added - 1]) for handling update cases
343  * 2) DEL state (ta_buf_m[count] ... ta_buf_m[count + added - 1])
344  *   for storing deleted state
345  */
346 static void
347 rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
348     struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
349     uint32_t count, uint32_t added)
350 {
351 	struct table_algo *ta;
352 	struct tentry_info *ptei;
353 	caddr_t v, vv;
354 	size_t ta_buf_sz;
355 	int error, i;
356 	uint32_t num;
357 
358 	IPFW_UH_WLOCK_ASSERT(ch);
359 
360 	ta = tc->ta;
361 	ta_buf_sz = ta->ta_buf_size;
362 	v = ta_buf_m;
363 	vv = v + count * ta_buf_sz;
364 	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
365 		ptei = &tei[i];
366 		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
367 
368 			/*
369 			 * We have old value stored by previous
370 			 * call in @ptei->value. Do add once again
371 			 * to restore it.
372 			 */
373 			error = ta->add(tc->astate, tinfo, ptei, v, &num);
374 			KASSERT(error == 0, ("rollback UPDATE fail"));
375 			KASSERT(num == 0, ("rollback UPDATE fail2"));
376 			continue;
377 		}
378 
379 		error = ta->prepare_del(ch, ptei, vv);
380 		KASSERT(error == 0, ("pre-rollback INSERT failed"));
381 		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
382 		KASSERT(error == 0, ("rollback INSERT failed"));
383 		tc->count -= num;
384 	}
385 }
386 
387 /*
388  * Prepares add/del state for all @count entries in @tei.
389  * Uses either stack buffer (@ta_buf) or allocates a new one.
390  * Stores pointer to allocated buffer back to @ta_buf.
391  *
392  * Returns 0 on success.
393  */
394 static int
395 prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
396     struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
397 {
398 	caddr_t ta_buf_m, v;
399 	size_t ta_buf_sz, sz;
400 	struct tentry_info *ptei;
401 	int error, i;
402 
403 	error = 0;
404 	ta_buf_sz = ta->ta_buf_size;
405 	if (count == 1) {
406 		/* Single add/delete, use on-stack buffer */
407 		memset(*ta_buf, 0, TA_BUF_SZ);
408 		ta_buf_m = *ta_buf;
409 	} else {
410 
411 		/*
412 		 * Multiple adds/deletes, allocate larger buffer
413 		 *
414 		 * Note we need a 2x count buffer for the add case:
415 		 * we have to hold both ADD state
416 		 * and DELETE state (this may be needed
417 		 * if we need to roll back all changes)
418 		 */
419 		sz = count * ta_buf_sz;
420 		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
421 		    M_WAITOK | M_ZERO);
422 	}
423 
424 	v = ta_buf_m;
425 	for (i = 0; i < count; i++, v += ta_buf_sz) {
426 		ptei = &tei[i];
427 		error = (op == OP_ADD) ?
428 		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);
429 
430 		/*
431 		 * Some syntax error (incorrect mask, or address, or
432 		 * anything). Return error regardless of atomicity
433 		 * settings.
434 		 */
435 		if (error != 0)
436 			break;
437 	}
438 
439 	*ta_buf = ta_buf_m;
440 	return (error);
441 }
442 
443 /*
444  * Flushes allocated state for each @count entries in @tei.
445  * Frees @ta_buf_m if differs from stack buffer @ta_buf.
446  */
447 static void
448 flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
449     struct tentry_info *tei, uint32_t count, int rollback,
450     caddr_t ta_buf_m, caddr_t ta_buf)
451 {
452 	caddr_t v;
453 	struct tentry_info *ptei;
454 	size_t ta_buf_sz;
455 	int i;
456 
457 	ta_buf_sz = ta->ta_buf_size;
458 
459 	/* Run cleaning callback anyway */
460 	v = ta_buf_m;
461 	for (i = 0; i < count; i++, v += ta_buf_sz) {
462 		ptei = &tei[i];
463 		ta->flush_entry(ch, ptei, v);
464 		if (ptei->ptv != NULL) {
465 			free(ptei->ptv, M_IPFW);
466 			ptei->ptv = NULL;
467 		}
468 	}
469 
470 	/* Clean up "deleted" state in case of rollback */
471 	if (rollback != 0) {
472 		v = ta_buf_m + count * ta_buf_sz;
473 		for (i = 0; i < count; i++, v += ta_buf_sz)
474 			ta->flush_entry(ch, &tei[i], v);
475 	}
476 
477 	if (ta_buf_m != ta_buf)
478 		free(ta_buf_m, M_TEMP);
479 }
480 
481 
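/*
 * Per-operation rollback handler registered by add_table_entry().
 * If @object matches the table (or chain) this operation works on,
 * roll back the value linkage performed so far and mark the operation
 * state as modified so its owner restarts.
 */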
482 static void
483 rollback_add_entry(void *object, struct op_state *_state)
484 {
485 	struct ip_fw_chain *ch;
486 	struct tableop_state *ts;
487 
488 	ts = (struct tableop_state *)_state;
489 
490 	if (ts->tc != object && ts->ch != object)
491 		return;
492 
493 	ch = ts->ch;
494 
495 	IPFW_UH_WLOCK_ASSERT(ch);
496 
497 	/* Call specific unlockers */
498 	rollback_table_values(ts);
499 
500 	/* Indicate we've called */
501 	ts->modified = 1;
502 }
503 
504 /*
505  * Adds/updates one or more entries in table @ti.
506  *
507  * Function may drop/reacquire UH wlock multiple times due to
508  * items alloc, algorithm callbacks (check_space), value linkage
509  * (new values, value storage realloc), etc..
510  * Other processes like other adds (which may involve storage resize),
511  * table swaps (which changes table data and may change algo type),
512  * table modify (which may change value mask) may be executed
513  * simultaneously so we need to deal with it.
514  *
515  * The following approach was implemented:
516  * we have per-chain linked list, protected with UH lock.
517  * add_table_entry prepares a special on-stack structure which is passed
518  * to its descendants. Users add this structure to this list before unlock.
519  * After performing needed operations and acquiring UH lock back, each user
520  * checks if structure has changed. If true, it rolls local state back and
521  * returns without error to the caller.
522  * add_table_entry() on its own checks if structure has changed and restarts
523  * its operation from the beginning (goto restart).
524  *
525  * Functions which modify fields of interest (currently
526  *   resize_shared_value_storage() and swap_tables())
527  * traverse the given list while holding the UH lock immediately before
528  * performing their operations, calling the function provided by each list
529  * entry (currently rollback_add_entry), which performs rollback for all
530  * necessary state and sets appropriate values in the structure indicating
531  * rollback has happened.
532  *
533  * Algo interaction:
534  * Function references @ti first to ensure table won't
535  * disappear or change its type.
536  * After that, prepare_add callback is called for each @tei entry.
537  * Next, we try to add each entry under UH+WLOCK
538  * using add() callback.
539  * Finally, we free all state by calling flush_entry callback
540  * for each @tei.
541  *
542  * Returns 0 on success.
543  */
544 int
545 add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
546     struct tentry_info *tei, uint8_t flags, uint32_t count)
547 {
548 	struct table_config *tc;
549 	struct table_algo *ta;
550 	uint16_t kidx;
551 	int error, first_error, i, rollback;
552 	uint32_t num, numadd;
553 	struct tentry_info *ptei;
554 	struct tableop_state ts;
555 	char ta_buf[TA_BUF_SZ];
556 	caddr_t ta_buf_m, v;
557 
558 	memset(&ts, 0, sizeof(ts));
559 	ta = NULL;
560 	IPFW_UH_WLOCK(ch);
561 
562 	/*
563 	 * Find and reference existing table.
564 	 */
565 restart:
566 	if (ts.modified != 0) {
567 		IPFW_UH_WUNLOCK(ch);
568 		flush_batch_buffer(ch, ta, tei, count, rollback,
569 		    ta_buf_m, ta_buf);
570 		memset(&ts, 0, sizeof(ts));
571 		ta = NULL;
572 		IPFW_UH_WLOCK(ch);
573 	}
574 
575 	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
576 	if (error != 0) {
577 		IPFW_UH_WUNLOCK(ch);
578 		return (error);
579 	}
580 	ta = tc->ta;
581 
582 	/* Fill in tablestate */
583 	ts.ch = ch;
584 	ts.opstate.func = rollback_add_entry;
585 	ts.tc = tc;
586 	ts.vshared = tc->vshared;
587 	ts.vmask = tc->vmask;
588 	ts.ta = ta;
589 	ts.tei = tei;
590 	ts.count = count;
591 	rollback = 0;
592 	add_toperation_state(ch, &ts);
593 	IPFW_UH_WUNLOCK(ch);
594 
595 	/* Allocate memory and prepare record(s) */
596 	/* Pass stack buffer by default */
597 	ta_buf_m = ta_buf;
598 	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);
599 
600 	IPFW_UH_WLOCK(ch);
601 	del_toperation_state(ch, &ts);
602 	/* Drop reference we've used in first search */
603 	tc->no.refcnt--;
604 
605 	/* Check prepare_batch_buffer() error */
606 	if (error != 0)
607 		goto cleanup;
608 
609 	/*
610 	 * Check if table swap has happened (so table algo might be
611 	 * changed). Restart operation to achieve consistent behavior.
612 	 */
613 	if (ts.modified != 0)
614 		goto restart;
614 
615 	/*
616 	 * Link all values to the shared/per-table value array.
617 	 *
618 	 * May release/reacquire UH_WLOCK.
619 	 */
620 	error = ipfw_link_table_values(ch, &ts);
621 	if (error != 0)
622 		goto cleanup;
623 	if (ts.modified != 0)
624 		goto restart;
625 
626 	/*
627 	 * Ensure we are able to add all entries without additional
628 	 * memory allocations. May release/reacquire UH_WLOCK.
629 	 */
630 	kidx = tc->no.kidx;
631 	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
632 	if (error != 0)
633 		goto cleanup;
634 	if (ts.modified != 0)
635 		goto restart;
636 
637 	/* We've got valid table in @tc. Let's try to add data */
638 	kidx = tc->no.kidx;
639 	ta = tc->ta;
640 	numadd = 0;
641 	first_error = 0;
642 
643 	IPFW_WLOCK(ch);
644 
645 	v = ta_buf_m;
646 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
647 		ptei = &tei[i];
648 		num = 0;
649 		/* check limit before adding */
650 		if ((error = check_table_limit(tc, ptei)) == 0) {
651 			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
652 			    ptei, v, &num);
653 			/* Set status flag to inform userland */
654 			store_tei_result(ptei, OP_ADD, error, num);
655 		}
656 		if (error == 0) {
657 			/* Update number of records to ease limit checking */
658 			tc->count += num;
659 			numadd += num;
660 			continue;
661 		}
662 
663 		if (first_error == 0)
664 			first_error = error;
665 
666 		/*
667 		 * Some error has happened. Check our atomicity
668 		 * settings: continue if atomicity is not required,
669 		 * rollback changes otherwise.
670 		 */
671 		if ((flags & IPFW_CTF_ATOMIC) == 0)
672 			continue;
673 
674 		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
675 		    tei, ta_buf_m, count, i);
676 
677 		rollback = 1;
678 		break;
679 	}
680 
681 	IPFW_WUNLOCK(ch);
682 
683 	ipfw_garbage_table_values(ch, tc, tei, count, rollback);
684 
685 	/* Permit post-add algorithm grow/rehash. */
686 	if (numadd != 0)
687 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
688 
689 	/* Return first error to user, if any */
690 	error = first_error;
691 
692 cleanup:
693 	IPFW_UH_WUNLOCK(ch);
694 
695 	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);
696 
697 	return (error);
698 }
699 
700 /*
701  * Deletes one or more entries in table @ti.
702  *
703  * Returns 0 on success.
704  */
705 int
706 del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
707     struct tentry_info *tei, uint8_t flags, uint32_t count)
708 {
709 	struct table_config *tc;
710 	struct table_algo *ta;
711 	struct tentry_info *ptei;
712 	uint16_t kidx;
713 	int error, first_error, i;
714 	uint32_t num, numdel;
715 	char ta_buf[TA_BUF_SZ];
716 	caddr_t ta_buf_m, v;
717 
718 	/*
719 	 * Find and reference existing table.
720 	 */
721 	IPFW_UH_WLOCK(ch);
722 	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
723 	if (error != 0) {
724 		IPFW_UH_WUNLOCK(ch);
725 		return (error);
726 	}
727 	ta = tc->ta;
728 	IPFW_UH_WUNLOCK(ch);
729 
730 	/* Allocate memory and prepare record(s) */
731 	/* Pass stack buffer by default */
732 	ta_buf_m = ta_buf;
733 	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
734 
735 	IPFW_UH_WLOCK(ch);
736 
737 	/* Drop reference we've used in first search */
738 	tc->no.refcnt--;
739 
740 	/* Check prepare_batch_buffer() error */
741 	if (error != 0) {
742 		IPFW_UH_WUNLOCK(ch);
743 		goto cleanup;
744 	}
745 
742 	/*
743 	 * Check if table algo is still the same.
744 	 * (changed ta may be the result of table swap).
745 	 */
746 	if (ta != tc->ta) {
747 		IPFW_UH_WUNLOCK(ch);
748 		error = EINVAL;
749 		goto cleanup;
750 	}
751 
752 	kidx = tc->no.kidx;
753 	numdel = 0;
754 	first_error = 0;
755 
756 	IPFW_WLOCK(ch);
757 	v = ta_buf_m;
758 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
759 		ptei = &tei[i];
760 		num = 0;
761 		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
762 		    &num);
763 		/* Save state for userland */
764 		store_tei_result(ptei, OP_DEL, error, num);
765 		if (error != 0 && first_error == 0)
766 			first_error = error;
767 		tc->count -= num;
768 		numdel += num;
769 	}
770 	IPFW_WUNLOCK(ch);
771 
772 	/* Unlink non-used values */
773 	ipfw_garbage_table_values(ch, tc, tei, count, 0);
774 
775 	if (numdel != 0) {
776 		/* Run post-del hook to permit shrinking */
777 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
778 	}
779 
780 	IPFW_UH_WUNLOCK(ch);
781 
782 	/* Return first error to user, if any */
783 	error = first_error;
784 
785 cleanup:
786 	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);
787 
788 	return (error);
789 }
790 
791 /*
792  * Ensure that table @tc has enough space to add @count entries without
793  * need for reallocation.
794  *
795  * Callbacks order:
796  * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
797  *
798  * 1) prepare_mod (no locks, M_WAITOK) - alloc new state based on @pflags.
799  * 2) fill_mod (UH_WLOCK) - copy old data into new storage
800  * 3) modify (UH_WLOCK + WLOCK) - switch pointers
801  * 4) flush_mod (UH_WLOCK) - free state, if needed
802  *
803  * Returns 0 on success.
804  */
805 static int
806 check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
807     struct table_config *tc, struct table_info *ti, uint32_t count)
808 {
809 	struct table_algo *ta;
810 	uint64_t pflags;
811 	char ta_buf[TA_BUF_SZ];
812 	int error;
813 
814 	IPFW_UH_WLOCK_ASSERT(ch);
815 
816 	error = 0;
817 	ta = tc->ta;
818 	if (ta->need_modify == NULL)
819 		return (0);
820 
821 	/* Acquire reference so as not to lose @tc between locks/unlocks */
822 	tc->no.refcnt++;
823 
824 	/*
825 	 * TODO: think about avoiding race between large add/large delete
826 	 * operation on algorithm which implements shrinking along with
827 	 * growing.
828 	 */
829 	while (true) {
830 		pflags = 0;
831 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
832 			error = 0;
833 			break;
834 		}
835 
836 		/* We have to shrink/grow table */
837 		if (ts != NULL)
838 			add_toperation_state(ch, ts);
839 		IPFW_UH_WUNLOCK(ch);
840 
841 		memset(&ta_buf, 0, sizeof(ta_buf));
842 		error = ta->prepare_mod(ta_buf, &pflags);
843 
844 		IPFW_UH_WLOCK(ch);
845 		if (ts != NULL)
846 			del_toperation_state(ch, ts);
847 
848 		if (error != 0)
849 			break;
850 
851 		if (ts != NULL && ts->modified != 0) {
852 
853 			/*
854 			 * Swap operation has happened
855 			 * so we're currently operating on other
856 			 * table data. Stop doing this.
857 			 */
858 			ta->flush_mod(ta_buf);
859 			break;
860 		}
861 
862 		/* Check if we still need to alter table */
863 		ti = KIDX_TO_TI(ch, tc->no.kidx);
864 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
865 			IPFW_UH_WUNLOCK(ch);
866 
867 			/*
868 			 * Other thread has already performed resize.
869 			 * Flush our state and return.
870 			 */
871 			ta->flush_mod(ta_buf);
872 			break;
873 		}
874 
875 		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
876 		if (error == 0) {
877 			/* Do actual modification */
878 			IPFW_WLOCK(ch);
879 			ta->modify(tc->astate, ti, ta_buf, pflags);
880 			IPFW_WUNLOCK(ch);
881 		}
882 
883 		/* Anyway, flush data and retry */
884 		ta->flush_mod(ta_buf);
885 	}
886 
887 	tc->no.refcnt--;
888 	return (error);
889 }
890 
891 /*
892  * Adds or deletes record in table.
893  * Data layout (v0):
894  * Request: [ ip_fw3_opheader ipfw_table_xentry ]
895  *
896  * Returns 0 on success
897  */
898 static int
899 manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
900     struct sockopt_data *sd)
901 {
902 	ipfw_table_xentry *xent;
903 	struct tentry_info tei;
904 	struct tid_info ti;
905 	struct table_value v;
906 	int error, hdrlen, read;
907 
908 	hdrlen = offsetof(ipfw_table_xentry, k);
909 
910 	/* Check minimum header size */
911 	if (sd->valsize < (sizeof(*op3) + hdrlen))
912 		return (EINVAL);
913 
914 	read = sizeof(ip_fw3_opheader);
915 
916 	/* Check if xentry len field is valid */
917 	xent = (ipfw_table_xentry *)(op3 + 1);
918 	if (xent->len < hdrlen || xent->len + read > sd->valsize)
919 		return (EINVAL);
920 
921 	memset(&tei, 0, sizeof(tei));
922 	tei.paddr = &xent->k;
923 	tei.masklen = xent->masklen;
924 	ipfw_import_table_value_legacy(xent->value, &v);
925 	tei.pvalue = &v;
926 	/* Old requests compatibility */
927 	tei.flags = TEI_FLAGS_COMPAT;
928 	if (xent->type == IPFW_TABLE_ADDR) {
929 		if (xent->len - hdrlen == sizeof(in_addr_t))
930 			tei.subtype = AF_INET;
931 		else
932 			tei.subtype = AF_INET6;
933 	}
934 
935 	memset(&ti, 0, sizeof(ti));
936 	ti.uidx = xent->tbl;
937 	ti.type = xent->type;
938 
939 	error = (op3->opcode == IP_FW_TABLE_XADD) ?
940 	    add_table_entry(ch, &ti, &tei, 0, 1) :
941 	    del_table_entry(ch, &ti, &tei, 0, 1);
942 
943 	return (error);
944 }
945 
946 /*
947  * Adds or deletes record in table.
948  * Data layout (v1)(current):
949  * Request: [ ipfw_obj_header
950  *   ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ]
951  * ]
952  *
953  * Returns 0 on success
954  */
955 static int
956 manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
957     struct sockopt_data *sd)
958 {
959 	ipfw_obj_tentry *tent, *ptent;
960 	ipfw_obj_ctlv *ctlv;
961 	ipfw_obj_header *oh;
962 	struct tentry_info *ptei, tei, *tei_buf;
963 	struct tid_info ti;
964 	int error, i, kidx, read;
965 
966 	/* Check minimum header size */
967 	if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv)))
968 		return (EINVAL);
969 
970 	/* Check if passed data is too long */
971 	if (sd->valsize != sd->kavail)
972 		return (EINVAL);
973 
974 	oh = (ipfw_obj_header *)sd->kbuf;
975 
976 	/* Basic length checks for TLVs */
977 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
978 		return (EINVAL);
979 
980 	read = sizeof(*oh);
981 
982 	ctlv = (ipfw_obj_ctlv *)(oh + 1);
983 	if (ctlv->head.length + read != sd->valsize)
984 		return (EINVAL);
985 
986 	read += sizeof(*ctlv);
987 	tent = (ipfw_obj_tentry *)(ctlv + 1);
988 	if (ctlv->count * sizeof(*tent) + read != sd->valsize)
989 		return (EINVAL);
990 
991 	if (ctlv->count == 0)
992 		return (0);
993 
994 	/*
995 	 * Mark entire buffer as "read".
996 	 * This instructs the sopt API to write it back
997 	 * after the function returns.
998 	 */
999 	ipfw_get_sopt_header(sd, sd->valsize);
1000 
1001 	/* Perform basic checks for each entry */
1002 	ptent = tent;
1003 	kidx = tent->idx;
1004 	for (i = 0; i < ctlv->count; i++, ptent++) {
1005 		if (ptent->head.length != sizeof(*ptent))
1006 			return (EINVAL);
1007 		if (ptent->idx != kidx)
1008 			return (ENOTSUP);
1009 	}
1010 
1011 	/* Convert data into kernel request objects */
1012 	objheader_to_ti(oh, &ti);
1013 	ti.type = oh->ntlv.type;
1014 	ti.uidx = kidx;
1015 
1016 	/* Use on-stack buffer for single add/del */
1017 	if (ctlv->count == 1) {
1018 		memset(&tei, 0, sizeof(tei));
1019 		tei_buf = &tei;
1020 	} else
1021 		tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP,
1022 		    M_WAITOK | M_ZERO);
1023 
1024 	ptei = tei_buf;
1025 	ptent = tent;
1026 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1027 		ptei->paddr = &ptent->k;
1028 		ptei->subtype = ptent->subtype;
1029 		ptei->masklen = ptent->masklen;
1030 		if (ptent->head.flags & IPFW_TF_UPDATE)
1031 			ptei->flags |= TEI_FLAGS_UPDATE;
1032 
1033 		ipfw_import_table_value_v1(&ptent->v.value);
1034 		ptei->pvalue = (struct table_value *)&ptent->v.value;
1035 	}
1036 
1037 	error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ?
1038 	    add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) :
1039 	    del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count);
1040 
1041 	/* Translate result back to userland */
1042 	ptei = tei_buf;
1043 	ptent = tent;
1044 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1045 		if (ptei->flags & TEI_FLAGS_ADDED)
1046 			ptent->result = IPFW_TR_ADDED;
1047 		else if (ptei->flags & TEI_FLAGS_DELETED)
1048 			ptent->result = IPFW_TR_DELETED;
1049 		else if (ptei->flags & TEI_FLAGS_UPDATED)
1050 			ptent->result = IPFW_TR_UPDATED;
1051 		else if (ptei->flags & TEI_FLAGS_LIMIT)
1052 			ptent->result = IPFW_TR_LIMIT;
1053 		else if (ptei->flags & TEI_FLAGS_ERROR)
1054 			ptent->result = IPFW_TR_ERROR;
1055 		else if (ptei->flags & TEI_FLAGS_NOTFOUND)
1056 			ptent->result = IPFW_TR_NOTFOUND;
1057 		else if (ptei->flags & TEI_FLAGS_EXISTS)
1058 			ptent->result = IPFW_TR_EXISTS;
1059 		ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value);
1060 	}
1061 
1062 	if (tei_buf != &tei)
1063 		free(tei_buf, M_TEMP);
1064 
1065 	return (error);
1066 }
1067 
1068 /*
1069  * Looks up an entry in given table.
1070  * Data layout (v0)(current):
1071  * Request: [ ipfw_obj_header ipfw_obj_tentry ]
1072  * Reply: [ ipfw_obj_header ipfw_obj_tentry ]
1073  *
1074  * Returns 0 on success
1075  */
1076 static int
1077 find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1078     struct sockopt_data *sd)
1079 {
1080 	ipfw_obj_tentry *tent;
1081 	ipfw_obj_header *oh;
1082 	struct tid_info ti;
1083 	struct table_config *tc;
1084 	struct table_algo *ta;
1085 	struct table_info *kti;
1086 	struct namedobj_instance *ni;
1087 	int error;
1088 	size_t sz;
1089 
1090 	/* Check minimum header size */
1091 	sz = sizeof(*oh) + sizeof(*tent);
1092 	if (sd->valsize != sz)
1093 		return (EINVAL);
1094 
1095 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1096 	tent = (ipfw_obj_tentry *)(oh + 1);
1097 
1098 	/* Basic length checks for TLVs */
1099 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
1100 		return (EINVAL);
1101 
1102 	objheader_to_ti(oh, &ti);
1103 	ti.type = oh->ntlv.type;
1104 	ti.uidx = tent->idx;
1105 
1106 	IPFW_UH_RLOCK(ch);
1107 	ni = CHAIN_TO_NI(ch);
1108 
1109 	/*
1110 	 * Find existing table and check its type.
1111 	 */
1112 	ta = NULL;
1113 	if ((tc = find_table(ni, &ti)) == NULL) {
1114 		IPFW_UH_RUNLOCK(ch);
1115 		return (ESRCH);
1116 	}
1117 
1118 	/* check table type */
1119 	if (tc->no.type != ti.type) {
1120 		IPFW_UH_RUNLOCK(ch);
1121 		return (EINVAL);
1122 	}
1123 
1124 	kti = KIDX_TO_TI(ch, tc->no.kidx);
1125 	ta = tc->ta;
1126 
1127 	if (ta->find_tentry == NULL)
1128 		error = ENOTSUP;
1129 	else
1130 		error = ta->find_tentry(tc->astate, kti, tent);
1131 
1132 	IPFW_UH_RUNLOCK(ch);
1133 
1134 	return (error);
1135 }
1136 
1137 /*
1138  * Flushes all entries or destroys given table.
1139  * Data layout (v0)(current):
1140  * Request: [ ipfw_obj_header ]
1141  *
1142  * Returns 0 on success
1143  */
1144 static int
1145 flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1146     struct sockopt_data *sd)
1147 {
1148 	int error;
1149 	struct _ipfw_obj_header *oh;
1150 	struct tid_info ti;
1151 
1152 	if (sd->valsize != sizeof(*oh))
1153 		return (EINVAL);
1154 
1155 	oh = (struct _ipfw_obj_header *)op3;
1156 	objheader_to_ti(oh, &ti);
1157 
1158 	if (op3->opcode == IP_FW_TABLE_XDESTROY)
1159 		error = destroy_table(ch, &ti);
1160 	else if (op3->opcode == IP_FW_TABLE_XFLUSH)
1161 		error = flush_table(ch, &ti);
1162 	else
1163 		return (ENOTSUP);
1164 
1165 	return (error);
1166 }
1167 
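/*
 * Per-operation handler registered by flush_table(): if the table
 * being flushed is the changed @object, mark the state modified so
 * flush_table() restarts with fresh parameters.
 */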
1168 static void
1169 restart_flush(void *object, struct op_state *_state)
1170 {
1171 	struct tableop_state *ts;
1172 
1173 	ts = (struct tableop_state *)_state;
1174 
1175 	if (ts->tc != object)
1176 		return;
1177 
1178 	/* Indicate we've called */
1179 	ts->modified = 1;
1180 }
1181 
1182 /*
1183  * Flushes given table.
1184  *
1185  * The function creates a new table instance with the same
1186  * parameters, swaps it with the old one and
1187  * flushes the old state without holding the runtime WLOCK.
1188  *
1189  * Returns 0 on success.
1190  */
1191 int
1192 flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
1193 {
1194 	struct namedobj_instance *ni;
1195 	struct table_config *tc;
1196 	struct table_algo *ta;
1197 	struct table_info ti_old, ti_new, *tablestate;
1198 	void *astate_old, *astate_new;
1199 	char algostate[64], *pstate;
1200 	struct tableop_state ts;
1201 	int error;
1202 	uint16_t kidx;
1203 	uint8_t tflags;
1204 
1205 	/*
1206 	 * Stage 1: save table algorithm.
1207 	 * Reference found table to ensure it won't disappear.
1208 	 */
1209 	IPFW_UH_WLOCK(ch);
1210 	ni = CHAIN_TO_NI(ch);
1211 	if ((tc = find_table(ni, ti)) == NULL) {
1212 		IPFW_UH_WUNLOCK(ch);
1213 		return (ESRCH);
1214 	}
1215 restart:
1216 	/* Set up swap handler */
1217 	memset(&ts, 0, sizeof(ts));
1218 	ts.opstate.func = restart_flush;
1219 	ts.tc = tc;
1220 
1221 	ta = tc->ta;
1222 	/* Do not flush readonly tables */
1223 	if ((ta->flags & TA_FLAG_READONLY) != 0) {
1224 		IPFW_UH_WUNLOCK(ch);
1225 		return (EACCES);
1226 	}
1227 	/* Save startup algo parameters */
1228 	if (ta->print_config != NULL) {
1229 		ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx),
1230 		    algostate, sizeof(algostate));
1231 		pstate = algostate;
1232 	} else
1233 		pstate = NULL;
1234 	tflags = tc->tflags;
1235 	tc->no.refcnt++;
1236 	add_toperation_state(ch, &ts);
1237 	IPFW_UH_WUNLOCK(ch);
1238 
1239 	/*
1240 	 * Stage 2: allocate new table instance using same algo.
1241 	 */
1242 	memset(&ti_new, 0, sizeof(struct table_info));
1243 	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);
1244 
1245 	/*
1246 	 * Stage 3: swap old state pointers with newly-allocated ones.
1247 	 * Decrease refcount.
1248 	 */
1249 	IPFW_UH_WLOCK(ch);
1250 	tc->no.refcnt--;
1251 	del_toperation_state(ch, &ts);
1252 
1253 	if (error != 0) {
1254 		IPFW_UH_WUNLOCK(ch);
1255 		return (error);
1256 	}
1257 
1258 	/*
1259 	 * Restart operation if table swap has happened:
1260 	 * even if algo may be the same, algo init parameters
1261 	 * may change. Restart operation instead of doing
1262 	 * complex checks.
1263 	 */
1264 	if (ts.modified != 0) {
1265 		ta->destroy(astate_new, &ti_new);
1266 		goto restart;
1267 	}
1268 
1269 	ni = CHAIN_TO_NI(ch);
1270 	kidx = tc->no.kidx;
1271 	tablestate = (struct table_info *)ch->tablestate;
1272 
1273 	IPFW_WLOCK(ch);
1274 	ti_old = tablestate[kidx];
1275 	tablestate[kidx] = ti_new;
1276 	IPFW_WUNLOCK(ch);
1277 
1278 	astate_old = tc->astate;
1279 	tc->astate = astate_new;
1280 	tc->ti_copy = ti_new;
1281 	tc->count = 0;
1282 
1283 	/* Notify algo on real @ti address */
1284 	if (ta->change_ti != NULL)
1285 		ta->change_ti(tc->astate, &tablestate[kidx]);
1286 
1287 	/*
1288 	 * Stage 4: unref values.
1289 	 */
1290 	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
1291 	IPFW_UH_WUNLOCK(ch);
1292 
1293 	/*
1294 	 * Stage 5: perform real flush/destroy.
1295 	 */
1296 	ta->destroy(astate_old, &ti_old);
1297 
1298 	return (0);
1299 }
1300 
1301 /*
1302  * Swaps two tables.
1303  * Data layout (v0)(current):
1304  * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
1305  *
1306  * Returns 0 on success
1307  */
1308 static int
1309 swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1310     struct sockopt_data *sd)
1311 {
1312 	int error;
1313 	struct _ipfw_obj_header *oh;
1314 	struct tid_info ti_a, ti_b;
1315 
1316 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
1317 		return (EINVAL);
1318 
1319 	oh = (struct _ipfw_obj_header *)op3;
1320 	ntlv_to_ti(&oh->ntlv, &ti_a);
1321 	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);
1322 
1323 	error = swap_tables(ch, &ti_a, &ti_b);
1324 
1325 	return (error);
1326 }
1327 
1328 /*
1329  * Swaps two tables of the same type/valtype.
1330  *
1331  * Checks if the tables are compatible and their limits
1332  * permit the swap, then actually performs the swap.
1333  *
1334  * Each table consists of 2 different parts:
1335  * config:
1336  *   @tc (with name, set, kidx) and rule bindings, which is "stable".
1337  *   number of items
1338  *   table algo
1339  * runtime:
1340  *   runtime data @ti (ch->tablestate)
1341  *   runtime cache in @tc
1342  *   algo-specific data (@tc->astate)
1343  *
1344  * So we switch:
1345  *  all runtime data
1346  *   number of items
1347  *   table algo
1348  *
1349  * After that we call @ti change handler for each table.
1350  *
1351  * Note that referencing @tc won't protect tc->ta from change.
1352  * XXX: Do we need to restrict swap between locked tables?
1353  * XXX: Do we need to exchange ftype?
1354  *
1355  * Returns 0 on success.
1356  */
1357 static int
1358 swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
1359     struct tid_info *b)
1360 {
1361 	struct namedobj_instance *ni;
1362 	struct table_config *tc_a, *tc_b;
1363 	struct table_algo *ta;
1364 	struct table_info ti, *tablestate;
1365 	void *astate;
1366 	uint32_t count;
1367 
1368 	/*
1369 	 * Stage 1: find both tables and ensure they are of
1370 	 * the same type.
1371 	 */
1372 	IPFW_UH_WLOCK(ch);
1373 	ni = CHAIN_TO_NI(ch);
1374 	if ((tc_a = find_table(ni, a)) == NULL) {
1375 		IPFW_UH_WUNLOCK(ch);
1376 		return (ESRCH);
1377 	}
1378 	if ((tc_b = find_table(ni, b)) == NULL) {
1379 		IPFW_UH_WUNLOCK(ch);
1380 		return (ESRCH);
1381 	}
1382 
1383 	/* Swapping a table with itself is a trivial no-op */
1384 	if (tc_a == tc_b) {
1385 		IPFW_UH_WUNLOCK(ch);
1386 		return (0);
1387 	}
1388 
1389 	/* Check type and value are the same */
1390 	if (tc_a->no.type != tc_b->no.type || tc_a->tflags != tc_b->tflags) {
1391 		IPFW_UH_WUNLOCK(ch);
1392 		return (EINVAL);
1393 	}
1394 
1395 	/* Check limits before swap */
1396 	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
1397 	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
1398 		IPFW_UH_WUNLOCK(ch);
1399 		return (EFBIG);
1400 	}
1401 
1402 	/* Check if one of the tables is readonly */
1403 	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
1404 		IPFW_UH_WUNLOCK(ch);
1405 		return (EACCES);
1406 	}
1407 
1408 	/* Notify we're going to swap */
1409 	rollback_toperation_state(ch, tc_a);
1410 	rollback_toperation_state(ch, tc_b);
1411 
1412 	/* Everything is fine, prepare to swap */
1413 	tablestate = (struct table_info *)ch->tablestate;
1414 	ti = tablestate[tc_a->no.kidx];
1415 	ta = tc_a->ta;
1416 	astate = tc_a->astate;
1417 	count = tc_a->count;
1418 
1419 	IPFW_WLOCK(ch);
1420 	/* a <- b */
1421 	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
1422 	tc_a->ta = tc_b->ta;
1423 	tc_a->astate = tc_b->astate;
1424 	tc_a->count = tc_b->count;
1425 	/* b <- a */
1426 	tablestate[tc_b->no.kidx] = ti;
1427 	tc_b->ta = ta;
1428 	tc_b->astate = astate;
1429 	tc_b->count = count;
1430 	IPFW_WUNLOCK(ch);
1431 
1432 	/* Ensure tc.ti copies are in sync */
1433 	tc_a->ti_copy = tablestate[tc_a->no.kidx];
1434 	tc_b->ti_copy = tablestate[tc_b->no.kidx];
1435 
1436 	/* Notify both tables on @ti change */
1437 	if (tc_a->ta->change_ti != NULL)
1438 		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
1439 	if (tc_b->ta->change_ti != NULL)
1440 		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);
1441 
1442 	IPFW_UH_WUNLOCK(ch);
1443 
1444 	return (0);
1445 }
1446 
1447 /*
1448  * Destroys table specified by @ti.
1449  * Data layout (v0)(current):
1450  * Request: [ ip_fw3_opheader ]
1451  *
1452  * Returns 0 on success
1453  */
1454 static int
1455 destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
1456 {
1457 	struct namedobj_instance *ni;
1458 	struct table_config *tc;
1459 
1460 	IPFW_UH_WLOCK(ch);
1461 
1462 	ni = CHAIN_TO_NI(ch);
1463 	if ((tc = find_table(ni, ti)) == NULL) {
1464 		IPFW_UH_WUNLOCK(ch);
1465 		return (ESRCH);
1466 	}
1467 
1468 	/* Do not permit destroying referenced tables */
1469 	if (tc->no.refcnt > 0) {
1470 		IPFW_UH_WUNLOCK(ch);
1471 		return (EBUSY);
1472 	}
1473 
1474 	IPFW_WLOCK(ch);
1475 	unlink_table(ch, tc);
1476 	IPFW_WUNLOCK(ch);
1477 
1478 	/* Free obj index */
1479 	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
1480 		printf("Error unlinking kidx %d from table %s\n",
1481 		    tc->no.kidx, tc->tablename);
1482 
1483 	/* Unref values used in tables while holding UH lock */
1484 	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
1485 	IPFW_UH_WUNLOCK(ch);
1486 
1487 	free_table_config(ni, tc);
1488 
1489 	return (0);
1490 }
1491 
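/*
 * Rounds @v up to the next power of 2 by smearing the highest set bit
 * into all lower bits; e.g. roundup2p(33) == 64, while an exact power
 * of 2 is returned unchanged.
 */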
1492 static uint32_t
1493 roundup2p(uint32_t v)
1494 {
1495 
1496 	v--;
1497 	v |= v >> 1;
1498 	v |= v >> 2;
1499 	v |= v >> 4;
1500 	v |= v >> 8;
1501 	v |= v >> 16;
1502 	v++;
1503 
1504 	return (v);
1505 }
1506 
1507 /*
1508  * Grow tables index.
1509  *
1510  * Returns 0 on success.
1511  */
1512 int
1513 ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
1514 {
1515 	unsigned int ntables_old, tbl;
1516 	struct namedobj_instance *ni;
1517 	void *new_idx, *old_tablestate, *tablestate;
1518 	struct table_info *ti;
1519 	struct table_config *tc;
1520 	int i, new_blocks;
1521 
1522 	/* Check new value for validity */
1523 	if (ntables == 0)
1524 		return (EINVAL);
1525 	if (ntables > IPFW_TABLES_MAX)
1526 		ntables = IPFW_TABLES_MAX;
1527 	/* Align to the nearest power of 2 */
1528 	ntables = (unsigned int)roundup2p(ntables);
1529 
1530 	/* Allocate new pointers */
1531 	tablestate = malloc(ntables * sizeof(struct table_info),
1532 	    M_IPFW, M_WAITOK | M_ZERO);
1533 
1534 	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);
1535 
1536 	IPFW_UH_WLOCK(ch);
1537 
1538 	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
1539 	ni = CHAIN_TO_NI(ch);
1540 
1541 	/* Temporarily restrict decreasing max_tables */
1542 	if (ntables < V_fw_tables_max) {
1543 
1544 		/*
1545 		 * FIXME: Check if we really can shrink
1546 		 */
1547 		IPFW_UH_WUNLOCK(ch);
1548 		return (EINVAL);
1549 	}
1550 
1551 	/* Copy table info/indices */
1552 	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
1553 	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
1554 
1555 	IPFW_WLOCK(ch);
1556 
1557 	/* Change pointers */
1558 	old_tablestate = ch->tablestate;
1559 	ch->tablestate = tablestate;
1560 	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);
1561 
1562 	ntables_old = V_fw_tables_max;
1563 	V_fw_tables_max = ntables;
1564 
1565 	IPFW_WUNLOCK(ch);
1566 
1567 	/* Notify all consumers that their @ti pointer has changed */
1568 	ti = (struct table_info *)ch->tablestate;
1569 	for (i = 0; i < tbl; i++, ti++) {
1570 		if (ti->lookup == NULL)
1571 			continue;
1572 		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
1573 		if (tc == NULL || tc->ta->change_ti == NULL)
1574 			continue;
1575 
1576 		tc->ta->change_ti(tc->astate, ti);
1577 	}
1578 
1579 	IPFW_UH_WUNLOCK(ch);
1580 
1581 	/* Free old pointers */
1582 	free(old_tablestate, M_IPFW);
1583 	ipfw_objhash_bitmap_free(new_idx, new_blocks);
1584 
1585 	return (0);
1586 }
1587 
1588 /*
1589  * Switch between "set 0" and "rule's set" table binding.
1590  * Check all ruleset bindings and permit changing
1591  * IFF each binding has both rule AND table in the default set (set 0).
1592  *
1593  * Returns 0 on success.
1594  */
1595 int
1596 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets)
1597 {
1598 	struct namedobj_instance *ni;
1599 	struct named_object *no;
1600 	struct ip_fw *rule;
1601 	ipfw_insn *cmd;
1602 	int cmdlen, i, l;
1603 	uint16_t kidx;
1604 	uint8_t type;
1605 
1606 	IPFW_UH_WLOCK(ch);
1607 
1608 	if (V_fw_tables_sets == sets) {
1609 		IPFW_UH_WUNLOCK(ch);
1610 		return (0);
1611 	}
1612 
1613 	ni = CHAIN_TO_NI(ch);
1614 
1615 	/*
1616 	 * Scan all rules and examine tables opcodes.
1617 	 */
1618 	for (i = 0; i < ch->n_rules; i++) {
1619 		rule = ch->map[i];
1620 
1621 		l = rule->cmd_len;
1622 		cmd = rule->cmd;
1623 		cmdlen = 0;
1624 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1625 			cmdlen = F_LEN(cmd);
1626 
1627 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
1628 				continue;
1629 
1630 			no = ipfw_objhash_lookup_kidx(ni, kidx);
1631 
1632 			/* Check if both table object and rule has the set 0 */
1633 			/* Check if both table object and rule are in set 0 */
1634 				IPFW_UH_WUNLOCK(ch);
1635 				return (EBUSY);
1636 			}
1637 
1638 		}
1639 	}
1640 	V_fw_tables_sets = sets;
1641 
1642 	IPFW_UH_WUNLOCK(ch);
1643 
1644 	return (0);
1645 }
1646 
1647 /*
1648  * Lookup an IP @addr in table @tbl.
1649  * Stores found value in @val.
1650  *
1651  * Returns 1 if @addr was found.
1652  */
1653 int
1654 ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
1655     uint32_t *val)
1656 {
1657 	struct table_info *ti;
1658 
1659 	ti = KIDX_TO_TI(ch, tbl);
1660 
1661 	return (ti->lookup(ti, &addr, sizeof(in_addr_t), val));
1662 }
1663 
1664 /*
1665  * Lookup an arbitrary key @paddr of length @plen in table @tbl.
1666  * Stores found value in @val.
1667  *
1668  * Returns 1 if key was found.
1669  */
1670 int
1671 ipfw_lookup_table_extended(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
1672     void *paddr, uint32_t *val)
1673 {
1674 	struct table_info *ti;
1675 
1676 	ti = KIDX_TO_TI(ch, tbl);
1677 
1678 	return (ti->lookup(ti, paddr, plen, val));
1679 }
1680 
1681 /*
1682  * Info/List/dump support for tables.
1683  *
1684  */
1685 
1686 /*
1687  * High-level 'get' cmds sysctl handlers
1688  */
1689 
1690 /*
1691  * Lists all tables currently available in kernel.
1692  * Data layout (v0)(current):
1693  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
1694  * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ]
1695  *
1696  * Returns 0 on success
1697  */
1698 static int
1699 list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1700     struct sockopt_data *sd)
1701 {
1702 	struct _ipfw_obj_lheader *olh;
1703 	int error;
1704 
1705 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
1706 	if (olh == NULL)
1707 		return (EINVAL);
1708 	if (sd->valsize < olh->size)
1709 		return (EINVAL);
1710 
1711 	IPFW_UH_RLOCK(ch);
1712 	error = export_tables(ch, olh, sd);
1713 	IPFW_UH_RUNLOCK(ch);
1714 
1715 	return (error);
1716 }
1717 
1718 /*
1719  * Store table info to buffer provided by @sd.
1720  * Data layout (v0)(current):
1721  * Request: [ ipfw_obj_header ipfw_xtable_info(empty)]
1722  * Reply: [ ipfw_obj_header ipfw_xtable_info ]
1723  *
1724  * Returns 0 on success.
1725  */
1726 static int
1727 describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1728     struct sockopt_data *sd)
1729 {
1730 	struct _ipfw_obj_header *oh;
1731 	struct table_config *tc;
1732 	struct tid_info ti;
1733 	size_t sz;
1734 
1735 	sz = sizeof(*oh) + sizeof(ipfw_xtable_info);
1736 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1737 	if (oh == NULL)
1738 		return (EINVAL);
1739 
1740 	objheader_to_ti(oh, &ti);
1741 
1742 	IPFW_UH_RLOCK(ch);
1743 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
1744 		IPFW_UH_RUNLOCK(ch);
1745 		return (ESRCH);
1746 	}
1747 
1748 	export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1));
1749 	IPFW_UH_RUNLOCK(ch);
1750 
1751 	return (0);
1752 }
1753 
1754 /*
1755  * Modifies existing table.
1756  * Data layout (v0)(current):
1757  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1758  *
1759  * Returns 0 on success
1760  */
1761 static int
1762 modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1763     struct sockopt_data *sd)
1764 {
1765 	struct _ipfw_obj_header *oh;
1766 	ipfw_xtable_info *i;
1767 	char *tname;
1768 	struct tid_info ti;
1769 	struct namedobj_instance *ni;
1770 	struct table_config *tc;
1771 
1772 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1773 		return (EINVAL);
1774 
1775 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1776 	i = (ipfw_xtable_info *)(oh + 1);
1777 
1778 	/*
1779 	 * Verify user-supplied strings.
1780 	 * Check for null-terminated/zero-length strings.
1781 	 */
1782 	tname = oh->ntlv.name;
1783 	if (ipfw_check_table_name(tname) != 0)
1784 		return (EINVAL);
1785 
1786 	objheader_to_ti(oh, &ti);
1787 	ti.type = i->type;
1788 
1789 	IPFW_UH_WLOCK(ch);
1790 	ni = CHAIN_TO_NI(ch);
1791 	if ((tc = find_table(ni, &ti)) == NULL) {
1792 		IPFW_UH_WUNLOCK(ch);
1793 		return (ESRCH);
1794 	}
1795 
1796 	/* Do not support any modifications for readonly tables */
1797 	if ((tc->ta->flags & TA_FLAG_READONLY) != 0) {
1798 		IPFW_UH_WUNLOCK(ch);
1799 		return (EACCES);
1800 	}
1801 
1802 	if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0)
1803 		tc->limit = i->limit;
1804 	if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0)
1805 		tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0);
1806 	IPFW_UH_WUNLOCK(ch);
1807 
1808 	return (0);
1809 }
1810 
1811 /*
1812  * Creates new table.
1813  * Data layout (v0)(current):
1814  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1815  *
1816  * Returns 0 on success
1817  */
1818 static int
1819 create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1820     struct sockopt_data *sd)
1821 {
1822 	struct _ipfw_obj_header *oh;
1823 	ipfw_xtable_info *i;
1824 	char *tname, *aname;
1825 	struct tid_info ti;
1826 	struct namedobj_instance *ni;
1827 
1828 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1829 		return (EINVAL);
1830 
1831 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1832 	i = (ipfw_xtable_info *)(oh + 1);
1833 
1834 	/*
1835 	 * Verify user-supplied strings.
1836 	 * Check for null-terminated/zero-length strings.
1837 	 */
1838 	tname = oh->ntlv.name;
1839 	aname = i->algoname;
1840 	if (ipfw_check_table_name(tname) != 0 ||
1841 	    strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname))
1842 		return (EINVAL);
1843 
1844 	if (aname[0] == '\0') {
1845 		/* Use default algorithm */
1846 		aname = NULL;
1847 	}
1848 
1849 	objheader_to_ti(oh, &ti);
1850 	ti.type = i->type;
1851 
1852 	ni = CHAIN_TO_NI(ch);
1853 
1854 	IPFW_UH_RLOCK(ch);
1855 	if (find_table(ni, &ti) != NULL) {
1856 		IPFW_UH_RUNLOCK(ch);
1857 		return (EEXIST);
1858 	}
1859 	IPFW_UH_RUNLOCK(ch);
1860 
1861 	return (create_table_internal(ch, &ti, aname, i, NULL, 0));
1862 }
1863 
1864 /*
1865  * Creates new table based on @ti and @aname.
1866  *
1867  * Relies on table name checking inside find_name_tlv()
1868  * Assume @aname to be checked and valid.
1869  * Stores allocated table kidx inside @pkidx (if non-NULL).
1870  * Reference created table if @compat is non-zero.
1871  *
1872  * Returns 0 on success.
1873  */
1874 static int
1875 create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
1876     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat)
1877 {
1878 	struct namedobj_instance *ni;
1879 	struct table_config *tc, *tc_new, *tmp;
1880 	struct table_algo *ta;
1881 	uint16_t kidx;
1882 
1883 	ni = CHAIN_TO_NI(ch);
1884 
1885 	ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
1886 	if (ta == NULL)
1887 		return (ENOTSUP);
1888 
1889 	tc = alloc_table_config(ch, ti, ta, aname, i->tflags);
1890 	if (tc == NULL)
1891 		return (ENOMEM);
1892 
1893 	tc->vmask = i->vmask;
1894 	tc->limit = i->limit;
1895 	if (ta->flags & TA_FLAG_READONLY)
1896 		tc->locked = 1;
1897 	else
1898 		tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;
1899 
1900 	IPFW_UH_WLOCK(ch);
1901 
1902 	/* Check if table has been already created */
1903 	tc_new = find_table(ni, ti);
1904 	if (tc_new != NULL) {
1905 
1906 		/*
1907 		 * Compat: do not fail if we're
1908 		 * requesting to create existing table
1909 		 * which has the same type
1910 		 */
1911 		if (compat == 0 || tc_new->no.type != tc->no.type) {
1912 			IPFW_UH_WUNLOCK(ch);
1913 			free_table_config(ni, tc);
1914 			return (EEXIST);
1915 		}
1916 
1917 		/* Exchange tc and tc_new for proper refcounting & freeing */
1918 		tmp = tc;
1919 		tc = tc_new;
1920 		tc_new = tmp;
1921 	} else {
1922 		/* New table */
1923 		if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
1924 			IPFW_UH_WUNLOCK(ch);
1925 			printf("Unable to allocate table index."
1926 			    " Consider increasing net.inet.ip.fw.tables_max\n");
1927 			free_table_config(ni, tc);
1928 			return (EBUSY);
1929 		}
1930 		tc->no.kidx = kidx;
1931 
1932 		IPFW_WLOCK(ch);
1933 		link_table(ch, tc);
1934 		IPFW_WUNLOCK(ch);
1935 	}
1936 
1937 	if (compat != 0)
1938 		tc->no.refcnt++;
1939 	if (pkidx != NULL)
1940 		*pkidx = tc->no.kidx;
1941 
1942 	IPFW_UH_WUNLOCK(ch);
1943 
1944 	if (tc_new != NULL)
1945 		free_table_config(ni, tc_new);
1946 
1947 	return (0);
1948 }
1949 
1950 static void
1951 ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti)
1952 {
1953 
1954 	memset(ti, 0, sizeof(struct tid_info));
1955 	ti->set = ntlv->set;
1956 	ti->uidx = ntlv->idx;
1957 	ti->tlvs = ntlv;
1958 	ti->tlen = ntlv->head.length;
1959 }
1960 
1961 static void
1962 objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti)
1963 {
1964 
1965 	ntlv_to_ti(&oh->ntlv, ti);
1966 }
1967 
1968 /*
1969  * Exports basic table info as name TLV.
1970  * Used inside dump_static_rules() to provide info
1971  * about all tables referenced by current ruleset.
1972  *
1973  * Returns 0 on success.
1974  */
1975 int
1976 ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
1977     struct sockopt_data *sd)
1978 {
1979 	struct namedobj_instance *ni;
1980 	struct named_object *no;
1981 	ipfw_obj_ntlv *ntlv;
1982 
1983 	ni = CHAIN_TO_NI(ch);
1984 
1985 	no = ipfw_objhash_lookup_kidx(ni, kidx);
1986 	KASSERT(no != NULL, ("invalid table kidx passed"));
1987 
1988 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
1989 	if (ntlv == NULL)
1990 		return (ENOMEM);
1991 
1992 	ntlv->head.type = IPFW_TLV_TBL_NAME;
1993 	ntlv->head.length = sizeof(*ntlv);
1994 	ntlv->idx = no->kidx;
1995 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
1996 
1997 	return (0);
1998 }
1999 
2000 /*
2001  * Marks every table kidx used in @rule with bit in @bmask.
2002  * Used to generate bitmask of referenced tables for given ruleset.
2003  *
2004  * Returns number of newly-referenced tables.
2005  */
2006 int
2007 ipfw_mark_table_kidx(struct ip_fw_chain *chain, struct ip_fw *rule,
2008     uint32_t *bmask)
2009 {
2010 	int cmdlen, l, count;
2011 	ipfw_insn *cmd;
2012 	uint16_t kidx;
2013 	uint8_t type;
2014 
2015 	l = rule->cmd_len;
2016 	cmd = rule->cmd;
2017 	cmdlen = 0;
2018 	count = 0;
2019 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2020 		cmdlen = F_LEN(cmd);
2021 
2022 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
2023 			continue;
2024 
2025 		if ((bmask[kidx / 32] & (1 << (kidx % 32))) == 0)
2026 			count++;
2027 
2028 		bmask[kidx / 32] |= 1 << (kidx % 32);
2029 	}
2030 
2031 	return (count);
2032 }
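
/*
 * Illustrative note (editorial, not from the original source): the bitmask
 * above is addressed one bit per table kidx, so e.g. kidx 37 maps to bit
 * 37 % 32 == 5 of bmask[37 / 32] == bmask[1].  The caller is expected to
 * supply a bitmask wide enough to cover every table index in use.
 */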
2033 
2034 struct dump_args {
2035 	struct ip_fw_chain *ch;		/* chain we are dumping from */
2036 	struct table_info *ti;		/* runtime table info */
2037 	struct table_config *tc;	/* table configuration */
2038 	struct sockopt_data *sd;	/* reply buffer */
2039 	uint32_t cnt;			/* number of entries dumped so far */
2040 	uint16_t uidx;			/* table index reported to userland */
2041 	int error;			/* error from the dump callback */
2042 	uint32_t size;			/* max number of entries (legacy dump) */
2043 	ipfw_table_entry *ent;		/* next legacy entry slot */
2044 	ta_foreach_f *f;		/* external consumer callback */
2045 	void *farg;			/* callback argument */
2046 	ipfw_obj_tentry tent;		/* scratch entry for conversions */
2047 };
2048 
2049 static int
2050 count_ext_entries(void *e, void *arg)
2051 {
2052 	struct dump_args *da;
2053 
2054 	da = (struct dump_args *)arg;
2055 	da->cnt++;
2056 
2057 	return (0);
2058 }
2059 
2060 /*
2061  * Gets number of items from table either using
2062  * internal counter or calling algo callback for
2063  * externally-managed tables.
2064  *
2065  * Returns number of records.
2066  */
2067 static uint32_t
2068 table_get_count(struct ip_fw_chain *ch, struct table_config *tc)
2069 {
2070 	struct table_info *ti;
2071 	struct table_algo *ta;
2072 	struct dump_args da;
2073 
2074 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2075 	ta = tc->ta;
2076 
2077 	/* Use internal counter for self-managed tables */
2078 	if ((ta->flags & TA_FLAG_READONLY) == 0)
2079 		return (tc->count);
2080 
2081 	/* Use callback to quickly get number of items */
2082 	if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0)
2083 		return (ta->get_count(tc->astate, ti));
2084 
2085 	/* Count the number of items ourselves */
2086 	memset(&da, 0, sizeof(da));
2087 	ta->foreach(tc->astate, ti, count_ext_entries, &da);
2088 
2089 	return (da.cnt);
2090 }
2091 
2092 /*
2093  * Exports table @tc info into standard ipfw_xtable_info format.
2094  */
2095 static void
2096 export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
2097     ipfw_xtable_info *i)
2098 {
2099 	struct table_info *ti;
2100 	struct table_algo *ta;
2101 
2102 	i->type = tc->no.type;
2103 	i->tflags = tc->tflags;
2104 	i->vmask = tc->vmask;
2105 	i->set = tc->no.set;
2106 	i->kidx = tc->no.kidx;
2107 	i->refcnt = tc->no.refcnt;
2108 	i->count = table_get_count(ch, tc);
2109 	i->limit = tc->limit;
2110 	i->flags |= (tc->locked != 0) ? IPFW_TGFLAGS_LOCKED : 0;
2111 	i->size = tc->count * sizeof(ipfw_obj_tentry);
2112 	i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2113 	strlcpy(i->tablename, tc->tablename, sizeof(i->tablename));
2114 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2115 	ta = tc->ta;
2116 	if (ta->print_config != NULL) {
2117 		/* Use algo function to print table config to string */
2118 		ta->print_config(tc->astate, ti, i->algoname,
2119 		    sizeof(i->algoname));
2120 	} else
2121 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2122 	/* Dump algo-specific data, if possible */
2123 	if (ta->dump_tinfo != NULL) {
2124 		ta->dump_tinfo(tc->astate, ti, &i->ta_info);
2125 		i->ta_info.flags |= IPFW_TATFLAGS_DATA;
2126 	}
2127 }
2128 
2129 struct dump_table_args {
2130 	struct ip_fw_chain *ch;
2131 	struct sockopt_data *sd;
2132 };
2133 
2134 static void
2135 export_table_internal(struct namedobj_instance *ni, struct named_object *no,
2136     void *arg)
2137 {
2138 	ipfw_xtable_info *i;
2139 	struct dump_table_args *dta;
2140 
2141 	dta = (struct dump_table_args *)arg;
2142 
2143 	i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i));
2144 	KASSERT(i != NULL, ("previously checked buffer is not enough"));
2145 
2146 	export_table_info(dta->ch, (struct table_config *)no, i);
2147 }
2148 
2149 /*
2150  * Export all tables as ipfw_xtable_info structures to
2151  * storage provided by @sd.
2152  *
2153  * If supplied buffer is too small, fills in required size
2154  * and returns ENOMEM.
2155  * Returns 0 on success.
2156  */
2157 static int
2158 export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
2159     struct sockopt_data *sd)
2160 {
2161 	uint32_t size;
2162 	uint32_t count;
2163 	struct dump_table_args dta;
2164 
2165 	count = ipfw_objhash_count(CHAIN_TO_NI(ch));
2166 	size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader);
2167 
2168 	/* Fill in header regardless of buffer size */
2169 	olh->count = count;
2170 	olh->objsize = sizeof(ipfw_xtable_info);
2171 
2172 	if (size > olh->size) {
2173 		olh->size = size;
2174 		return (ENOMEM);
2175 	}
2176 
2177 	olh->size = size;
2178 
2179 	dta.ch = ch;
2180 	dta.sd = sd;
2181 
2182 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta);
2183 
2184 	return (0);
2185 }
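
/*
 * Editorial note: userland typically drives this in two passes. A first
 * request whose buffer only fits the ipfw_obj_lheader makes the code above
 * report the required size in olh->size and fail with ENOMEM, after which
 * the caller reallocates to that size and retries.
 */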
2186 
2187 /*
2188  * Dumps all table data
2189  * Data layout (v1)(current):
2190  * Request: [ ipfw_obj_header ], size = ipfw_xtable_info.size
2191  * Reply: [ ipfw_obj_header ipfw_xtable_info ipfw_obj_tentry x N ]
2192  *
2193  * Returns 0 on success
2194  */
2195 static int
2196 dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2197     struct sockopt_data *sd)
2198 {
2199 	struct _ipfw_obj_header *oh;
2200 	ipfw_xtable_info *i;
2201 	struct tid_info ti;
2202 	struct table_config *tc;
2203 	struct table_algo *ta;
2204 	struct dump_args da;
2205 	uint32_t sz;
2206 
2207 	sz = sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2208 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
2209 	if (oh == NULL)
2210 		return (EINVAL);
2211 
2212 	i = (ipfw_xtable_info *)(oh + 1);
2213 	objheader_to_ti(oh, &ti);
2214 
2215 	IPFW_UH_RLOCK(ch);
2216 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2217 		IPFW_UH_RUNLOCK(ch);
2218 		return (ESRCH);
2219 	}
2220 	export_table_info(ch, tc, i);
2221 
2222 	if (sd->valsize < i->size) {
2223 
2224 		/*
2225 		 * The submitted buffer is too small.
2226 		 * We've already filled in the @i structure with the
2227 		 * relevant table info, including size, so we can
2228 		 * return. The buffer will be flushed automatically.
2229 		 */
2230 		IPFW_UH_RUNLOCK(ch);
2231 		return (ENOMEM);
2232 	}
2233 
2234 	/*
2235 	 * Do the actual dump in eXtended format
2236 	 */
2237 	memset(&da, 0, sizeof(da));
2238 	da.ch = ch;
2239 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2240 	da.tc = tc;
2241 	da.sd = sd;
2242 
2243 	ta = tc->ta;
2244 
2245 	ta->foreach(tc->astate, da.ti, dump_table_tentry, &da);
2246 	IPFW_UH_RUNLOCK(ch);
2247 
2248 	return (da.error);
2249 }
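
/*
 * Editorial note: as with export_tables(), a first call whose buffer only
 * fits the header still gets @i filled in (including i->size), so the
 * caller learns how much space a full dump requires before retrying.
 */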
2250 
2251 /*
2252  * Dumps all table data
2253  * Data layout (version 0)(legacy):
2254  * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE()
2255  * Reply: [ ipfw_xtable ipfw_table_xentry x N ]
2256  *
2257  * Returns 0 on success
2258  */
2259 static int
2260 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2261     struct sockopt_data *sd)
2262 {
2263 	ipfw_xtable *xtbl;
2264 	struct tid_info ti;
2265 	struct table_config *tc;
2266 	struct table_algo *ta;
2267 	struct dump_args da;
2268 	size_t sz, count;
2269 
2270 	xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable));
2271 	if (xtbl == NULL)
2272 		return (EINVAL);
2273 
2274 	memset(&ti, 0, sizeof(ti));
2275 	ti.uidx = xtbl->tbl;
2276 
2277 	IPFW_UH_RLOCK(ch);
2278 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2279 		IPFW_UH_RUNLOCK(ch);
2280 		return (0);
2281 	}
2282 	count = table_get_count(ch, tc);
2283 	sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable);
2284 
2285 	xtbl->cnt = count;
2286 	xtbl->size = sz;
2287 	xtbl->type = tc->no.type;
2288 	xtbl->tbl = ti.uidx;
2289 
2290 	if (sd->valsize < sz) {
2291 
2292 		/*
2293 		 * The submitted buffer is too small.
2294 		 * We've already filled in @xtbl with the relevant
2295 		 * table info, including size, so we can return.
2296 		 * The buffer will be flushed automatically.
2297 		 */
2298 		IPFW_UH_RUNLOCK(ch);
2299 		return (ENOMEM);
2300 	}
2301 
2302 	/* Do the actual dump in eXtended format */
2303 	memset(&da, 0, sizeof(da));
2304 	da.ch = ch;
2305 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2306 	da.tc = tc;
2307 	da.sd = sd;
2308 
2309 	ta = tc->ta;
2310 
2311 	ta->foreach(tc->astate, da.ti, dump_table_xentry, &da);
2312 	IPFW_UH_RUNLOCK(ch);
2313 
2314 	return (0);
2315 }
2316 
2317 /*
2318  * Legacy function to retrieve number of items in table.
2319  */
2320 static int
2321 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2322     struct sockopt_data *sd)
2323 {
2324 	uint32_t *tbl;
2325 	struct tid_info ti;
2326 	size_t sz;
2327 	int error;
2328 
2329 	sz = sizeof(*op3) + sizeof(uint32_t);
2330 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz);
2331 	if (op3 == NULL)
2332 		return (EINVAL);
2333 
2334 	tbl = (uint32_t *)(op3 + 1);
2335 	memset(&ti, 0, sizeof(ti));
2336 	ti.uidx = *tbl;
2337 	IPFW_UH_RLOCK(ch);
2338 	error = ipfw_count_xtable(ch, &ti, tbl);
2339 	IPFW_UH_RUNLOCK(ch);
2340 	return (error);
2341 }
2342 
2343 /*
2344  * Legacy IP_FW_TABLE_GETSIZE handler
2345  */
2346 int
2347 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2348 {
2349 	struct table_config *tc;
2350 
2351 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2352 		return (ESRCH);
2353 	*cnt = table_get_count(ch, tc);
2354 	return (0);
2355 }
2356 
2357 /*
2358  * Legacy IP_FW_TABLE_XGETSIZE handler
2359  */
2360 int
2361 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2362 {
2363 	struct table_config *tc;
2364 	uint32_t count;
2365 
2366 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) {
2367 		*cnt = 0;
2368 		return (0); /* 'table all list' requires success */
2369 	}
2370 
2371 	count = table_get_count(ch, tc);
2372 	*cnt = count * sizeof(ipfw_table_xentry);
2373 	if (count > 0)
2374 		*cnt += sizeof(ipfw_xtable);
2375 	return (0);
2376 }
2377 
2378 static int
2379 dump_table_entry(void *e, void *arg)
2380 {
2381 	struct dump_args *da;
2382 	struct table_config *tc;
2383 	struct table_algo *ta;
2384 	ipfw_table_entry *ent;
2385 	struct table_value *pval;
2386 	int error;
2387 
2388 	da = (struct dump_args *)arg;
2389 
2390 	tc = da->tc;
2391 	ta = tc->ta;
2392 
2393 	/* Out of space in the caller-supplied buffer, stop */
2394 	if (da->cnt == da->size)
2395 		return (1);
2396 	ent = da->ent++;
2397 	ent->tbl = da->uidx;
2398 	da->cnt++;
2399 
2400 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2401 	if (error != 0)
2402 		return (error);
2403 
2404 	ent->addr = da->tent.k.addr.s_addr;
2405 	ent->masklen = da->tent.masklen;
2406 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2407 	ent->value = ipfw_export_table_value_legacy(pval);
2408 
2409 	return (0);
2410 }
2411 
2412 /*
2413  * Dumps table in pre-8.1 legacy format.
2414  */
2415 int
2416 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti,
2417     ipfw_table *tbl)
2418 {
2419 	struct table_config *tc;
2420 	struct table_algo *ta;
2421 	struct dump_args da;
2422 
2423 	tbl->cnt = 0;
2424 
2425 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2426 		return (0);	/* XXX: We should return ESRCH */
2427 
2428 	ta = tc->ta;
2429 
2430 	/* This dump format supports IPv4 only */
2431 	if (tc->no.type != IPFW_TABLE_ADDR)
2432 		return (0);
2433 
2434 	memset(&da, 0, sizeof(da));
2435 	da.ch = ch;
2436 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2437 	da.tc = tc;
2438 	da.ent = &tbl->ent[0];
2439 	da.size = tbl->size;
2440 
2441 	tbl->cnt = 0;
2442 	ta->foreach(tc->astate, da.ti, dump_table_entry, &da);
2443 	tbl->cnt = da.cnt;
2444 
2445 	return (0);
2446 }
2447 
2448 /*
2449  * Dumps table entry in eXtended format (v1)(current).
2450  */
2451 static int
2452 dump_table_tentry(void *e, void *arg)
2453 {
2454 	struct dump_args *da;
2455 	struct table_config *tc;
2456 	struct table_algo *ta;
2457 	struct table_value *pval;
2458 	ipfw_obj_tentry *tent;
2459 	int error;
2460 
2461 	da = (struct dump_args *)arg;
2462 
2463 	tc = da->tc;
2464 	ta = tc->ta;
2465 
2466 	tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent));
2467 	/* Out of memory, returning */
2468 	if (tent == NULL) {
2469 		da->error = ENOMEM;
2470 		return (1);
2471 	}
2472 	tent->head.length = sizeof(ipfw_obj_tentry);
2473 	tent->idx = da->uidx;
2474 
2475 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2476 	if (error != 0)
2477 		return (error);
2478 
2479 	pval = get_table_value(da->ch, da->tc, tent->v.kidx);
2480 	ipfw_export_table_value_v1(pval, &tent->v.value);
2481 
2482 	return (0);
2483 }
2484 
2485 /*
2486  * Dumps table entry in eXtended format (v0).
2487  */
2488 static int
2489 dump_table_xentry(void *e, void *arg)
2490 {
2491 	struct dump_args *da;
2492 	struct table_config *tc;
2493 	struct table_algo *ta;
2494 	ipfw_table_xentry *xent;
2495 	ipfw_obj_tentry *tent;
2496 	struct table_value *pval;
2497 	int error;
2498 
2499 	da = (struct dump_args *)arg;
2500 
2501 	tc = da->tc;
2502 	ta = tc->ta;
2503 
2504 	xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent));
2505 	/* Out of memory, returning */
2506 	if (xent == NULL)
2507 		return (1);
2508 	xent->len = sizeof(ipfw_table_xentry);
2509 	xent->tbl = da->uidx;
2510 
2511 	memset(&da->tent, 0, sizeof(da->tent));
2512 	tent = &da->tent;
2513 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2514 	if (error != 0)
2515 		return (error);
2516 
2517 	/* Convert current format to previous one */
2518 	xent->masklen = tent->masklen;
2519 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2520 	xent->value = ipfw_export_table_value_legacy(pval);
2521 	/* Apply some hacks */
2522 	if (tc->no.type == IPFW_TABLE_ADDR && tent->subtype == AF_INET) {
2523 		xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr;
2524 		xent->flags = IPFW_TCF_INET;
2525 	} else
2526 		memcpy(&xent->k, &tent->k, sizeof(xent->k));
2527 
2528 	return (0);
2529 }
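
/*
 * Illustrative example (editorial): for an IPv4 entry such as 192.0.2.1/32
 * the conversion above places the address in xent->k.addr6.s6_addr32[3]
 * and sets IPFW_TCF_INET, which is how legacy consumers are expected to
 * tell IPv4 keys from IPv6 ones.
 */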
2530 
2531 /*
2532  * Helper function to export table algo data
2533  * to tentry format before calling user function.
2534  *
2535  * Returns 0 on success.
2536  */
2537 static int
2538 prepare_table_tentry(void *e, void *arg)
2539 {
2540 	struct dump_args *da;
2541 	struct table_config *tc;
2542 	struct table_algo *ta;
2543 	int error;
2544 
2545 	da = (struct dump_args *)arg;
2546 
2547 	tc = da->tc;
2548 	ta = tc->ta;
2549 
2550 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2551 	if (error != 0)
2552 		return (error);
2553 
2554 	da->f(&da->tent, da->farg);
2555 
2556 	return (0);
2557 }
2558 
2559 /*
2560  * Allow external consumers to read table entries in standard format.
2561  */
2562 int
2563 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx,
2564     ta_foreach_f *f, void *arg)
2565 {
2566 	struct namedobj_instance *ni;
2567 	struct table_config *tc;
2568 	struct table_algo *ta;
2569 	struct dump_args da;
2570 
2571 	ni = CHAIN_TO_NI(ch);
2572 
2573 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
2574 	if (tc == NULL)
2575 		return (ESRCH);
2576 
2577 	ta = tc->ta;
2578 
2579 	memset(&da, 0, sizeof(da));
2580 	da.ch = ch;
2581 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2582 	da.tc = tc;
2583 	da.f = f;
2584 	da.farg = arg;
2585 
2586 	ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da);
2587 
2588 	return (0);
2589 }
2590 
2591 /*
2592  * Table algorithms
2593  */
2594 
2595 /*
2596  * Finds algorithm by index, table type or supplied name.
2597  *
2598  * Returns pointer to algo or NULL.
2599  */
2600 static struct table_algo *
2601 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name)
2602 {
2603 	int i, l;
2604 	struct table_algo *ta;
2605 
2606 	if (ti->type > IPFW_TABLE_MAXTYPE)
2607 		return (NULL);
2608 
2609 	/* Search by index */
2610 	if (ti->atype != 0) {
2611 		if (ti->atype > tcfg->algo_count)
2612 			return (NULL);
2613 		return (tcfg->algo[ti->atype]);
2614 	}
2615 
2616 	if (name == NULL) {
2617 		/* Return default algorithm for given type if set */
2618 		return (tcfg->def_algo[ti->type]);
2619 	}
2620 
2621 	/* Search by name */
2622 	/* TODO: better search */
2623 	for (i = 1; i <= tcfg->algo_count; i++) {
2624 		ta = tcfg->algo[i];
2625 
2626 		/*
2627 		 * One can supply additional algorithm
2628 		 * parameters so we compare only the first word
2629 		 * of supplied name:
2630 		 * 'addr:chash hsize=32'
2631 		 * '^^^^^^^^^'
2632 		 *
2633 		 */
2634 		l = strlen(ta->name);
2635 		if (strncmp(name, ta->name, l) != 0)
2636 			continue;
2637 		if (name[l] != '\0' && name[l] != ' ')
2638 			continue;
2639 		/* Check if we're requesting proper table type */
2640 		if (ti->type != 0 && ti->type != ta->type)
2641 			return (NULL);
2642 		return (ta);
2643 	}
2644 
2645 	return (NULL);
2646 }
2647 
2648 /*
2649  * Register new table algo @ta.
2650  * Stores algo id inside @idx.
2651  *
2652  * Returns 0 on success.
2653  */
2654 int
2655 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size,
2656     int *idx)
2657 {
2658 	struct tables_config *tcfg;
2659 	struct table_algo *ta_new;
2660 	size_t sz;
2661 
2662 	if (size > sizeof(struct table_algo))
2663 		return (EINVAL);
2664 
2665 	/* Check for the required on-stack size for add/del */
2666 	sz = roundup2(ta->ta_buf_size, sizeof(void *));
2667 	if (sz > TA_BUF_SZ)
2668 		return (EINVAL);
2669 
2670 	KASSERT(ta->type <= IPFW_TABLE_MAXTYPE,("Increase IPFW_TABLE_MAXTYPE"));
2671 
2672 	/* Copy algorithm data to stable storage. */
2673 	ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO);
2674 	memcpy(ta_new, ta, size);
2675 
2676 	tcfg = CHAIN_TO_TCFG(ch);
2677 
2678 	KASSERT(tcfg->algo_count < 255, ("Increase algo array size"));
2679 
2680 	tcfg->algo[++tcfg->algo_count] = ta_new;
2681 	ta_new->idx = tcfg->algo_count;
2682 
2683 	/* Set algorithm as default one for given type */
2684 	if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 &&
2685 	    tcfg->def_algo[ta_new->type] == NULL)
2686 		tcfg->def_algo[ta_new->type] = ta_new;
2687 
2688 	*idx = ta_new->idx;
2689 
2690 	return (0);
2691 }
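
/*
 * Registration sketch (editorial; the names below are hypothetical): an
 * algorithm implementation typically declares a filled-in struct
 * table_algo and registers it during initialization, e.g.
 *
 *	struct table_algo addr_example = {
 *		.name		= "addr:example",
 *		.type		= IPFW_TABLE_ADDR,
 *		.ta_buf_size	= sizeof(struct ta_buf_example),
 *		.init		= ta_init_example,
 *		.destroy	= ta_destroy_example,
 *	};
 *	ipfw_add_table_algo(ch, &addr_example, sizeof(addr_example), &idx);
 *
 * The first algorithm registered with TA_FLAG_DEFAULT for a given type
 * becomes the default used when no algorithm name is supplied.
 */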
2692 
2693 /*
2694  * Unregisters table algo using @idx as id.
2695  * XXX: It is NOT safe to call this function in any place
2696  * other than ipfw instance destroy handler.
2697  */
2698 void
2699 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx)
2700 {
2701 	struct tables_config *tcfg;
2702 	struct table_algo *ta;
2703 
2704 	tcfg = CHAIN_TO_TCFG(ch);
2705 
2706 	KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d",
2707 	    idx, tcfg->algo_count));
2708 
2709 	ta = tcfg->algo[idx];
2710 	KASSERT(ta != NULL, ("algo idx %d is NULL", idx));
2711 
2712 	if (tcfg->def_algo[ta->type] == ta)
2713 		tcfg->def_algo[ta->type] = NULL;
2714 
2715 	free(ta, M_IPFW);
2716 }
2717 
2718 /*
2719  * Lists all table algorithms currently available.
2720  * Data layout (v0)(current):
2721  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2722  * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ]
2723  *
2724  * Returns 0 on success
2725  */
2726 static int
2727 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2728     struct sockopt_data *sd)
2729 {
2730 	struct _ipfw_obj_lheader *olh;
2731 	struct tables_config *tcfg;
2732 	ipfw_ta_info *i;
2733 	struct table_algo *ta;
2734 	uint32_t count, n, size;
2735 
2736 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
2737 	if (olh == NULL)
2738 		return (EINVAL);
2739 	if (sd->valsize < olh->size)
2740 		return (EINVAL);
2741 
2742 	IPFW_UH_RLOCK(ch);
2743 	tcfg = CHAIN_TO_TCFG(ch);
2744 	count = tcfg->algo_count;
2745 	size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader);
2746 
2747 	/* Fill in header regardless of buffer size */
2748 	olh->count = count;
2749 	olh->objsize = sizeof(ipfw_ta_info);
2750 
2751 	if (size > olh->size) {
2752 		olh->size = size;
2753 		IPFW_UH_RUNLOCK(ch);
2754 		return (ENOMEM);
2755 	}
2756 	olh->size = size;
2757 
2758 	for (n = 1; n <= count; n++) {
2759 		i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2760 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
2761 		ta = tcfg->algo[n];
2762 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2763 		i->type = ta->type;
2764 		i->refcnt = ta->refcnt;
2765 	}
2766 
2767 	IPFW_UH_RUNLOCK(ch);
2768 
2769 	return (0);
2770 }
2771 
2772 /*
2773  * Tables rewriting code
2774  */
2775 
2776 /*
2777  * Determine table number and lookup type for @cmd.
2778  * Determine table number and lookup type for @cmd.
2779  * Fill @puidx and @ptype with appropriate values.
2780  */
2781 static int
2782 classify_table_opcode(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
2783 {
2784 	ipfw_insn_if *cmdif;
2785 	int skip;
2786 	uint16_t v;
2787 
2788 	skip = 1;
2789 
2790 	switch (cmd->opcode) {
2791 	case O_IP_SRC_LOOKUP:
2792 	case O_IP_DST_LOOKUP:
2793 		/* Basic IPv4/IPv6 or u32 lookups */
2794 		*puidx = cmd->arg1;
2795 		/* Assume ADDR by default */
2796 		*ptype = IPFW_TABLE_ADDR;
2797 		skip = 0;
2798 
2799 		if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) {
2800 			/*
2801 			 * Generic lookup: the key must be
2802 			 * in 32-bit big-endian format.
2803 			 */
2804 			v = ((ipfw_insn_u32 *)cmd)->d[1];
2805 			switch (v) {
2806 			case 0:
2807 			case 1:
2808 				/* IPv4 src/dst */
2809 				break;
2810 			case 2:
2811 			case 3:
2812 				/* src/dst port */
2813 				*ptype = IPFW_TABLE_NUMBER;
2814 				break;
2815 			case 4:
2816 				/* uid/gid */
2817 				*ptype = IPFW_TABLE_NUMBER;
2818 				break;
2819 			case 5:
2820 				/* jid */
2821 				*ptype = IPFW_TABLE_NUMBER;
2822 				break;
2823 			case 6:
2824 				/* dscp */
2825 				*ptype = IPFW_TABLE_NUMBER;
2826 				break;
2827 			}
2828 		}
2829 		break;
2830 	case O_XMIT:
2831 	case O_RECV:
2832 	case O_VIA:
2833 		/* Interface table, possibly */
2834 		cmdif = (ipfw_insn_if *)cmd;
2835 		if (cmdif->name[0] != '\1')
2836 			break;
2837 
2838 		*ptype = IPFW_TABLE_INTERFACE;
2839 		*puidx = cmdif->p.kidx;
2840 		skip = 0;
2841 		break;
2842 	case O_IP_FLOW_LOOKUP:
2843 		*puidx = cmd->arg1;
2844 		*ptype = IPFW_TABLE_FLOW;
2845 		skip = 0;
2846 		break;
2847 	}
2848 
2849 	return (skip);
2850 }
2851 
2852 /*
2853  * Sets a new table index for the given opcode.
2854  * Assumes the same opcodes as classify_table_opcode().
2855  */
2856 static void
2857 update_table_opcode(ipfw_insn *cmd, uint16_t idx)
2858 {
2859 	ipfw_insn_if *cmdif;
2860 
2861 	switch (cmd->opcode) {
2862 	case O_IP_SRC_LOOKUP:
2863 	case O_IP_DST_LOOKUP:
2864 		/* Basic IPv4/IPv6 or u32 lookups */
2865 		cmd->arg1 = idx;
2866 		break;
2867 	case O_XMIT:
2868 	case O_RECV:
2869 	case O_VIA:
2870 		/* Interface table, possibly */
2871 		cmdif = (ipfw_insn_if *)cmd;
2872 		cmdif->p.kidx = idx;
2873 		break;
2874 	case O_IP_FLOW_LOOKUP:
2875 		cmd->arg1 = idx;
2876 		break;
2877 	}
2878 }
2879 
2880 /*
2881  * Checks table name for validity.
2882  * Enforce basic length checks; the rest
2883  * should be done in userland.
2884  *
2885  * Returns 0 if name is considered valid.
2886  */
2887 int
2888 ipfw_check_table_name(char *name)
2889 {
2890 	int nsize;
2891 	ipfw_obj_ntlv *ntlv = NULL;
2892 
2893 	nsize = sizeof(ntlv->name);
2894 
2895 	if (strnlen(name, nsize) == nsize)
2896 		return (EINVAL);
2897 
2898 	if (name[0] == '\0')
2899 		return (EINVAL);
2900 
2901 	/*
2902 	 * TODO: do some more complicated checks
2903 	 */
2904 
2905 	return (0);
2906 }
2907 
2908 /*
2909  * Find tablename TLV by @uidx.
2910  * Check @tlvs for valid data inside.
2911  *
2912  * Returns pointer to found TLV or NULL.
2913  */
2914 static ipfw_obj_ntlv *
2915 find_name_tlv(void *tlvs, int len, uint16_t uidx)
2916 {
2917 	ipfw_obj_ntlv *ntlv;
2918 	uintptr_t pa, pe;
2919 	int l;
2920 
2921 	pa = (uintptr_t)tlvs;
2922 	pe = pa + len;
2923 	l = 0;
2924 	for (; pa < pe; pa += l) {
2925 		ntlv = (ipfw_obj_ntlv *)pa;
2926 		l = ntlv->head.length;
2927 
2928 		if (l != sizeof(*ntlv))
2929 			return (NULL);
2930 
2931 		if (ntlv->head.type != IPFW_TLV_TBL_NAME)
2932 			continue;
2933 
2934 		if (ntlv->idx != uidx)
2935 			continue;
2936 
2937 		if (ipfw_check_table_name(ntlv->name) != 0)
2938 			return (NULL);
2939 
2940 		return (ntlv);
2941 	}
2942 
2943 	return (NULL);
2944 }
2945 
2946 /*
2947  * Finds table config based on either legacy index
2948  * or name in ntlv.
2949  * Note @ti structure contains unchecked data from userland.
2950  *
2951  * Returns pointer to table_config or NULL.
2952  */
2953 static struct table_config *
2954 find_table(struct namedobj_instance *ni, struct tid_info *ti)
2955 {
2956 	char *name, bname[16];
2957 	struct named_object *no;
2958 	ipfw_obj_ntlv *ntlv;
2959 	uint32_t set;
2960 
2961 	if (ti->tlvs != NULL) {
2962 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
2963 		if (ntlv == NULL)
2964 			return (NULL);
2965 		name = ntlv->name;
2966 
2967 		/*
2968 		 * Use the set provided by @ti instead of the @ntlv one.
2969 		 * This is needed because set handling differs
2970 		 * depending on V_fw_tables_sets.
2971 		 */
2972 		set = ti->set;
2973 	} else {
2974 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
2975 		name = bname;
2976 		set = 0;
2977 	}
2978 
2979 	no = ipfw_objhash_lookup_name(ni, set, name);
2980 
2981 	return ((struct table_config *)no);
2982 }
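
/*
 * Editorial note: for legacy (non-TLV) requests the table is looked up by
 * the string form of its number in set 0, i.e. old-style "table 5" maps to
 * a named object called "5".  alloc_table_config() below uses the same
 * naming when it has to create such a table.
 */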
2983 
2984 /*
2985  * Allocate new table config structure using
2986  * specified @algo and @aname.
2987  *
2988  * Returns pointer to config or NULL.
2989  */
2990 static struct table_config *
2991 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti,
2992     struct table_algo *ta, char *aname, uint8_t tflags)
2993 {
2994 	char *name, bname[16];
2995 	struct table_config *tc;
2996 	int error;
2997 	ipfw_obj_ntlv *ntlv;
2998 	uint32_t set;
2999 
3000 	if (ti->tlvs != NULL) {
3001 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
3002 		if (ntlv == NULL)
3003 			return (NULL);
3004 		name = ntlv->name;
3005 		set = ntlv->set;
3006 	} else {
3007 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
3008 		name = bname;
3009 		set = 0;
3010 	}
3011 
3012 	tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO);
3013 	tc->no.name = tc->tablename;
3014 	tc->no.type = ta->type;
3015 	tc->no.set = set;
3016 	tc->tflags = tflags;
3017 	tc->ta = ta;
3018 	strlcpy(tc->tablename, name, sizeof(tc->tablename));
3019 	/* Set "shared" value type by default */
3020 	tc->vshared = 1;
3021 
3022 	if (ti->tlvs == NULL) {
3023 		tc->no.compat = 1;
3024 		tc->no.uidx = ti->uidx;
3025 	}
3026 
3027 	/* Preallocate data structures for new tables */
3028 	error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags);
3029 	if (error != 0) {
3030 		free(tc, M_IPFW);
3031 		return (NULL);
3032 	}
3033 
3034 	return (tc);
3035 }
3036 
3037 /*
3038  * Destroys table state and config.
3039  */
3040 static void
3041 free_table_config(struct namedobj_instance *ni, struct table_config *tc)
3042 {
3043 
3044 	KASSERT(tc->linked == 0, ("free() on linked config"));
3045 
3046 	/*
3047 	 * We're using ta without any locking/referencing.
3048 	 * TODO: fix this if we're going to use unloadable algos.
3049 	 */
3050 	tc->ta->destroy(tc->astate, &tc->ti_copy);
3051 	free(tc, M_IPFW);
3052 }
3053 
3054 /*
3055  * Links @tc to @chain table named instance.
3056  * Sets appropriate type/states in @chain table info.
3057  */
3058 static void
3059 link_table(struct ip_fw_chain *ch, struct table_config *tc)
3060 {
3061 	struct namedobj_instance *ni;
3062 	struct table_info *ti;
3063 	uint16_t kidx;
3064 
3065 	IPFW_UH_WLOCK_ASSERT(ch);
3066 	IPFW_WLOCK_ASSERT(ch);
3067 
3068 	ni = CHAIN_TO_NI(ch);
3069 	kidx = tc->no.kidx;
3070 
3071 	ipfw_objhash_add(ni, &tc->no);
3072 
3073 	ti = KIDX_TO_TI(ch, kidx);
3074 	*ti = tc->ti_copy;
3075 
3076 	/* Notify algo on real @ti address */
3077 	if (tc->ta->change_ti != NULL)
3078 		tc->ta->change_ti(tc->astate, ti);
3079 
3080 	tc->linked = 1;
3081 	tc->ta->refcnt++;
3082 }
3083 
3084 /*
3085  * Unlinks @tc from @chain table named instance.
3086  * Zeroes states in @chain and stores them in @tc.
3087  */
3088 static void
3089 unlink_table(struct ip_fw_chain *ch, struct table_config *tc)
3090 {
3091 	struct namedobj_instance *ni;
3092 	struct table_info *ti;
3093 	uint16_t kidx;
3094 
3095 	IPFW_UH_WLOCK_ASSERT(ch);
3096 	IPFW_WLOCK_ASSERT(ch);
3097 
3098 	ni = CHAIN_TO_NI(ch);
3099 	kidx = tc->no.kidx;
3100 
3101 	/* Clear state. @ti copy is already saved inside @tc */
3102 	ipfw_objhash_del(ni, &tc->no);
3103 	ti = KIDX_TO_TI(ch, kidx);
3104 	memset(ti, 0, sizeof(struct table_info));
3105 	tc->linked = 0;
3106 	tc->ta->refcnt--;
3107 
3108 	/* Notify algo on real @ti address */
3109 	if (tc->ta->change_ti != NULL)
3110 		tc->ta->change_ti(tc->astate, NULL);
3111 }
3112 
3113 struct swap_table_args {
3114 	int set;
3115 	int new_set;
3116 	int mv;
3117 };
3118 
3119 /*
3120  * Change set for each matching table.
3121  *
3122  * Ensure we dispatch each table once by setting/checking the
3123  * ochanged field.
3124  */
3125 static void
3126 swap_table_set(struct namedobj_instance *ni, struct named_object *no,
3127     void *arg)
3128 {
3129 	struct table_config *tc;
3130 	struct swap_table_args *sta;
3131 
3132 	tc = (struct table_config *)no;
3133 	sta = (struct swap_table_args *)arg;
3134 
3135 	if (no->set != sta->set && (no->set != sta->new_set || sta->mv != 0))
3136 		return;
3137 
3138 	if (tc->ochanged != 0)
3139 		return;
3140 
3141 	tc->ochanged = 1;
3142 	ipfw_objhash_del(ni, no);
3143 	if (no->set == sta->set)
3144 		no->set = sta->new_set;
3145 	else
3146 		no->set = sta->set;
3147 	ipfw_objhash_add(ni, no);
3148 }
3149 
3150 /*
3151  * Cleans up the ochanged field for all tables.
3152  */
3153 static void
3154 clean_table_set_data(struct namedobj_instance *ni, struct named_object *no,
3155     void *arg)
3156 {
3157 	struct table_config *tc;
3158 	struct swap_table_args *sta;
3159 
3160 	tc = (struct table_config *)no;
3161 	sta = (struct swap_table_args *)arg;
3162 
3163 	tc->ochanged = 0;
3164 }
3165 
3166 /*
3167  * Swaps tables within two sets.
3168  */
3169 void
3170 ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t set,
3171     uint32_t new_set, int mv)
3172 {
3173 	struct swap_table_args sta;
3174 
3175 	IPFW_UH_WLOCK_ASSERT(ch);
3176 
3177 	sta.set = set;
3178 	sta.new_set = new_set;
3179 	sta.mv = mv;
3180 
3181 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), swap_table_set, &sta);
3182 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), clean_table_set_data, &sta);
3183 }
3184 
3185 /*
3186  * Move all tables which are referenced by rules in @rt to set @new_set.
3187  * Makes sure that all relevant tables are referenced ONLY by the given rules.
3188  *
3189  * Returns 0 on success.
3190  */
3191 int
3192 ipfw_move_tables_sets(struct ip_fw_chain *ch, ipfw_range_tlv *rt,
3193     uint32_t new_set)
3194 {
3195 	struct ip_fw *rule;
3196 	struct table_config *tc;
3197 	struct named_object *no;
3198 	struct namedobj_instance *ni;
3199 	int bad, i, l, cmdlen;
3200 	uint16_t kidx;
3201 	uint8_t type;
3202 	ipfw_insn *cmd;
3203 
3204 	IPFW_UH_WLOCK_ASSERT(ch);
3205 
3206 	ni = CHAIN_TO_NI(ch);
3207 
3208 	/* Stage 1: count number of references by given rules */
3209 	for (i = 0; i < ch->n_rules - 1; i++) {
3210 		rule = ch->map[i];
3211 		if (ipfw_match_range(rule, rt) == 0)
3212 			continue;
3213 
3214 		l = rule->cmd_len;
3215 		cmd = rule->cmd;
3216 		cmdlen = 0;
3217 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3218 			cmdlen = F_LEN(cmd);
3219 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3220 				continue;
3221 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3222 			KASSERT(no != NULL,
3223 			    ("objhash lookup failed on index %d", kidx));
3224 			tc = (struct table_config *)no;
3225 			tc->ocount++;
3226 		}
3227 
3228 	}
3229 
3230 	/* Stage 2: verify "ownership" */
3231 	bad = 0;
3232 	for (i = 0; i < ch->n_rules - 1; i++) {
3233 		rule = ch->map[i];
3234 		if (ipfw_match_range(rule, rt) == 0)
3235 			continue;
3236 
3237 		l = rule->cmd_len;
3238 		cmd = rule->cmd;
3239 		cmdlen = 0;
3240 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3241 			cmdlen = F_LEN(cmd);
3242 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3243 				continue;
3244 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3245 			KASSERT(no != NULL,
3246 			    ("objhash lookup failed on index %d", kidx));
3247 			tc = (struct table_config *)no;
3248 			if (tc->no.refcnt != tc->ocount) {
3249 
3250 				/*
3251 				 * Reference counts differ:
3252 				 * other rule(s) hold a reference to the
3253 				 * given table, so its set cannot be changed.
3254 				 *
3255 				 * Note that refcnt may account for
3256 				 * references from some yet-to-be-added rules.
3257 				 * Since we don't know their numbers (and even
3258 				 * whether they will be added) it is perfectly
3259 				 * OK to return an error here.
3260 				 */
3261 				bad = 1;
3262 				break;
3263 			}
3264 		}
3265 
3266 		if (bad != 0)
3267 			break;
3268 	}
3269 
3270 	/* Stage 3: change set or cleanup */
3271 	for (i = 0; i < ch->n_rules - 1; i++) {
3272 		rule = ch->map[i];
3273 		if (ipfw_match_range(rule, rt) == 0)
3274 			continue;
3275 
3276 		l = rule->cmd_len;
3277 		cmd = rule->cmd;
3278 		cmdlen = 0;
3279 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3280 			cmdlen = F_LEN(cmd);
3281 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3282 				continue;
3283 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3284 			KASSERT(no != NULL,
3285 			    ("objhash lookup failed on index %d", kidx));
3286 			tc = (struct table_config *)no;
3287 
3288 			tc->ocount = 0;
3289 			if (bad != 0)
3290 				continue;
3291 
3292 			/* Actually change set. */
3293 			ipfw_objhash_del(ni, no);
3294 			no->set = new_set;
3295 			ipfw_objhash_add(ni, no);
3296 		}
3297 	}
3298 
3299 	return (bad);
3300 }
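
/*
 * Editorial note: the three stages above form a check-then-commit scheme
 * under the UH write lock: stage 1 counts references held by the selected
 * rules (in ocount), stage 2 verifies no other rule references the same
 * tables, and stage 3 either moves the tables to @new_set or merely resets
 * the temporary ocount counters when the move was rejected.
 */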
3301 
3302 /*
3303  * Finds and bumps refcount for tables referenced by given @rule.
3304  * Auto-creates non-existing tables.
3305  * Fills in @oib array with userland/kernel indexes.
3306  * First free oidx pointer is saved back in @oib.
3307  *
3308  * Returns 0 on success.
3309  */
3310 static int
3311 find_ref_rule_tables(struct ip_fw_chain *ch, struct ip_fw *rule,
3312     struct rule_check_info *ci, struct obj_idx **oib, struct tid_info *ti)
3313 {
3314 	struct table_config *tc;
3315 	struct namedobj_instance *ni;
3316 	struct named_object *no;
3317 	int cmdlen, error, l, numnew;
3318 	uint16_t kidx;
3319 	ipfw_insn *cmd;
3320 	struct obj_idx *pidx, *pidx_first, *p;
3321 
3322 	pidx_first = *oib;
3323 	pidx = pidx_first;
3324 	l = rule->cmd_len;
3325 	cmd = rule->cmd;
3326 	cmdlen = 0;
3327 	error = 0;
3328 	numnew = 0;
3329 
3330 	IPFW_UH_WLOCK(ch);
3331 	ni = CHAIN_TO_NI(ch);
3332 
3333 	/* Increase refcount on each existing referenced table. */
3334 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3335 		cmdlen = F_LEN(cmd);
3336 
3337 		if (classify_table_opcode(cmd, &ti->uidx, &ti->type) != 0)
3338 			continue;
3339 
3340 		pidx->uidx = ti->uidx;
3341 		pidx->type = ti->type;
3342 
3343 		if ((tc = find_table(ni, ti)) != NULL) {
3344 			if (tc->no.type != ti->type) {
3345 				/* Incompatible types */
3346 				error = EINVAL;
3347 				break;
3348 			}
3349 
3350 			/* Reference found table and save kidx */
3351 			tc->no.refcnt++;
3352 			pidx->kidx = tc->no.kidx;
3353 			pidx++;
3354 			continue;
3355 		}
3356 
3357 		/*
3358 		 * Compatibility stuff for old clients:
3359 		 * prepare to manually create non-existing tables.
3360 		 */
3361 		pidx++;
3362 		numnew++;
3363 	}
3364 
3365 	if (error != 0) {
3366 		/* Unref everything we have already done */
3367 		for (p = *oib; p < pidx; p++) {
3368 			if (p->kidx == 0)
3369 				continue;
3370 
3371 			/* Find & unref by existing idx */
3372 			no = ipfw_objhash_lookup_kidx(ni, p->kidx);
3373 			KASSERT(no != NULL, ("Ref'd table %d disappeared",
3374 			    p->kidx));
3375 
3376 			no->refcnt--;
3377 		}
3378 	}
3379 
3380 	IPFW_UH_WUNLOCK(ch);
3381 
3382 	if (numnew == 0) {
3383 		*oib = pidx;
3384 		return (error);
3385 	}
3386 
3387 	/*
3388 	 * Compatibility stuff: do actual creation for non-existing,
3389 	 * but referenced tables.
3390 	 */
3391 	for (p = pidx_first; p < pidx; p++) {
3392 		if (p->kidx != 0)
3393 			continue;
3394 
3395 		ti->uidx = p->uidx;
3396 		ti->type = p->type;
3397 		ti->atype = 0;
3398 
3399 		error = create_table_compat(ch, ti, &kidx);
3400 		if (error == 0) {
3401 			p->kidx = kidx;
3402 			continue;
3403 		}
3404 
3405 		/* Error. We have to drop references */
3406 		IPFW_UH_WLOCK(ch);
3407 		for (p = pidx_first; p < pidx; p++) {
3408 			if (p->kidx == 0)
3409 				continue;
3410 
3411 			/* Find & unref by existing idx */
3412 			no = ipfw_objhash_lookup_kidx(ni, p->kidx);
3413 			KASSERT(no != NULL, ("Ref'd table %d disappeared",
3414 			    p->kidx));
3415 
3416 			no->refcnt--;
3417 		}
3418 		IPFW_UH_WUNLOCK(ch);
3419 
3420 		return (error);
3421 	}
3422 
3423 	*oib = pidx;
3424 
3425 	return (error);
3426 }
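
/*
 * Editorial note: table auto-creation is done with the UH lock dropped,
 * presumably because create_table_compat() may sleep; the function above
 * therefore references existing tables first, creates any missing ones
 * afterwards, and rolls back all acquired references if creation fails.
 */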
3427 
3428 /*
3429  * Remove references from every table used in @rule.
3430  */
3431 void
3432 ipfw_unref_rule_tables(struct ip_fw_chain *chain, struct ip_fw *rule)
3433 {
3434 	int cmdlen, l;
3435 	ipfw_insn *cmd;
3436 	struct namedobj_instance *ni;
3437 	struct named_object *no;
3438 	uint16_t kidx;
3439 	uint8_t type;
3440 
3441 	IPFW_UH_WLOCK_ASSERT(chain);
3442 	ni = CHAIN_TO_NI(chain);
3443 
3444 	l = rule->cmd_len;
3445 	cmd = rule->cmd;
3446 	cmdlen = 0;
3447 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3448 		cmdlen = F_LEN(cmd);
3449 
3450 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
3451 			continue;
3452 
3453 		no = ipfw_objhash_lookup_kidx(ni, kidx);
3454 
3455 		KASSERT(no != NULL, ("table id %d not found", kidx));
3456 		KASSERT(no->type == type, ("wrong type %d (%d) for table id %d",
3457 		    no->type, type, kidx));
3458 		KASSERT(no->refcnt > 0, ("refcount for table %d is %d",
3459 		    kidx, no->refcnt));
3460 
3461 		no->refcnt--;
3462 	}
3463 }
3464 
3465 /*
3466  * Compatibility function for old ipfw(8) binaries.
3467  * Rewrites table kernel indices with userland ones.
3468  * Convert tables matching '/^\d+$/' to their atoi() value.
3469  * Use number 65535 for other tables.
3470  *
3471  * Returns 0 on success.
3472  */
3473 int
3474 ipfw_rewrite_table_kidx(struct ip_fw_chain *chain, struct ip_fw_rule0 *rule)
3475 {
3476 	int cmdlen, error, l;
3477 	ipfw_insn *cmd;
3478 	uint16_t kidx, uidx;
3479 	uint8_t type;
3480 	struct named_object *no;
3481 	struct namedobj_instance *ni;
3482 
3483 	ni = CHAIN_TO_NI(chain);
3484 	error = 0;
3485 
3486 	l = rule->cmd_len;
3487 	cmd = rule->cmd;
3488 	cmdlen = 0;
3489 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3490 		cmdlen = F_LEN(cmd);
3491 
3492 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
3493 			continue;
3494 
3495 		if ((no = ipfw_objhash_lookup_kidx(ni, kidx)) == NULL)
3496 			return (1);
3497 
3498 		uidx = no->uidx;
3499 		if (no->compat == 0) {
3500 
3501 			/*
3502 			 * We were called via a legacy opcode.
3503 			 * Save the error and show the table as a fake
3504 			 * number so that ipfw(8) does not hang.
3505 			 */
3506 			uidx = 65535;
3507 			error = 2;
3508 		}
3509 
3510 		update_table_opcode(cmd, uidx);
3511 	}
3512 
3513 	return (error);
3514 }
3515 
3516 /*
3517  * Checks if the opcode references a table of the appropriate type.
3518  * Bumps the reference count of the found table if so.
3519  * Rewrites user-supplied opcode values with kernel ones.
3520  *
3521  * Returns 0 on success and appropriate error code otherwise.
3522  */
3523 int
3524 ipfw_rewrite_table_uidx(struct ip_fw_chain *chain,
3525     struct rule_check_info *ci)
3526 {
3527 	int cmdlen, error, l;
3528 	ipfw_insn *cmd;
3529 	uint16_t uidx;
3530 	uint8_t type;
3531 	struct namedobj_instance *ni;
3532 	struct obj_idx *p, *pidx_first, *pidx_last;
3533 	struct tid_info ti;
3534 
3535 	ni = CHAIN_TO_NI(chain);
3536 
3537 	/*
3538 	 * Prepare an array for storing opcode indices.
3539 	 * Use stack allocation by default.
3540 	 */
3541 	if (ci->table_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
3542 		/* Stack */
3543 		pidx_first = ci->obuf;
3544 	} else
3545 		pidx_first = malloc(ci->table_opcodes * sizeof(struct obj_idx),
3546 		    M_IPFW, M_WAITOK | M_ZERO);
3547 
3548 	pidx_last = pidx_first;
3549 	error = 0;
3550 	type = 0;
3551 	memset(&ti, 0, sizeof(ti));
3552 
3553 	/*
3554 	 * Use the default set for looking up tables (old way) or
3555 	 * use the set the rule is assigned to (new way).
3556 	 */
3557 	ti.set = (V_fw_tables_sets != 0) ? ci->krule->set : 0;
3558 	if (ci->ctlv != NULL) {
3559 		ti.tlvs = (void *)(ci->ctlv + 1);
3560 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
3561 	}
3562 
3563 	/* Reference all used tables */
3564 	error = find_ref_rule_tables(chain, ci->krule, ci, &pidx_last, &ti);
3565 	if (error != 0)
3566 		goto free;
3567 
3568 	IPFW_UH_WLOCK(chain);
3569 
3570 	/* Perform rule rewrite */
3571 	l = ci->krule->cmd_len;
3572 	cmd = ci->krule->cmd;
3573 	cmdlen = 0;
3574 	p = pidx_first;
3575 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3576 		cmdlen = F_LEN(cmd);
3577 		if (classify_table_opcode(cmd, &uidx, &type) != 0)
3578 			continue;
3579 		update_table_opcode(cmd, p->kidx);
3580 		p++;
3581 	}
3582 
3583 	IPFW_UH_WUNLOCK(chain);
3584 
3585 free:
3586 	if (pidx_first != ci->obuf)
3587 		free(pidx_first, M_IPFW);
3588 
3589 	return (error);
3590 }
3591 
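/*
 * Sockopt handlers exported by this file; each entry pairs an opcode and
 * version with a transfer direction and a handler, so e.g. IP_FW_TABLE_XLIST
 * is served by dump_table_v0 for version 0 requests and by dump_table_v1
 * for version 1.
 */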
3592 static struct ipfw_sopt_handler	scodes[] = {
3593 	{ IP_FW_TABLE_XCREATE,	0,	HDIR_SET,	create_table },
3594 	{ IP_FW_TABLE_XDESTROY,	0,	HDIR_SET,	flush_table_v0 },
3595 	{ IP_FW_TABLE_XFLUSH,	0,	HDIR_SET,	flush_table_v0 },
3596 	{ IP_FW_TABLE_XMODIFY,	0,	HDIR_BOTH,	modify_table },
3597 	{ IP_FW_TABLE_XINFO,	0,	HDIR_GET,	describe_table },
3598 	{ IP_FW_TABLES_XLIST,	0,	HDIR_GET,	list_tables },
3599 	{ IP_FW_TABLE_XLIST,	0,	HDIR_GET,	dump_table_v0 },
3600 	{ IP_FW_TABLE_XLIST,	1,	HDIR_GET,	dump_table_v1 },
3601 	{ IP_FW_TABLE_XADD,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3602 	{ IP_FW_TABLE_XADD,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3603 	{ IP_FW_TABLE_XDEL,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3604 	{ IP_FW_TABLE_XDEL,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3605 	{ IP_FW_TABLE_XFIND,	0,	HDIR_GET,	find_table_entry },
3606 	{ IP_FW_TABLE_XSWAP,	0,	HDIR_SET,	swap_table },
3607 	{ IP_FW_TABLES_ALIST,	0,	HDIR_GET,	list_table_algo },
3608 	{ IP_FW_TABLE_XGETSIZE,	0,	HDIR_GET,	get_table_size },
3609 };
3610 
3611 static void
3612 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no,
3613     void *arg)
3614 {
3615 
3616 	unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no);
3617 	if (ipfw_objhash_free_idx(ni, no->kidx) != 0)
3618 		printf("Error unlinking kidx %d from table %s\n",
3619 		    no->kidx, no->name);
3620 	free_table_config(ni, (struct table_config *)no);
3621 }
3622 
3623 /*
3624  * Shuts tables module down.
3625  */
3626 void
3627 ipfw_destroy_tables(struct ip_fw_chain *ch, int last)
3628 {
3629 
3630 	IPFW_DEL_SOPT_HANDLER(last, scodes);
3631 
3632 	/* Remove all tables from working set */
3633 	IPFW_UH_WLOCK(ch);
3634 	IPFW_WLOCK(ch);
3635 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch);
3636 	IPFW_WUNLOCK(ch);
3637 	IPFW_UH_WUNLOCK(ch);
3638 
3639 	/* Free the per-table state array itself */
3640 	free(ch->tablestate, M_IPFW);
3641 
3642 	ipfw_table_value_destroy(ch, last);
3643 	ipfw_table_algo_destroy(ch);
3644 
3645 	ipfw_objhash_destroy(CHAIN_TO_NI(ch));
3646 	free(CHAIN_TO_TCFG(ch), M_IPFW);
3647 }
3648 
3649 /*
3650  * Starts tables module.
3651  */
3652 int
3653 ipfw_init_tables(struct ip_fw_chain *ch, int first)
3654 {
3655 	struct tables_config *tcfg;
3656 
3657 	/* Allocate pointers */
3658 	ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info),
3659 	    M_IPFW, M_WAITOK | M_ZERO);
3660 
3661 	tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO);
3662 	tcfg->namehash = ipfw_objhash_create(V_fw_tables_max);
3663 	ch->tblcfg = tcfg;
3664 
3665 	ipfw_table_value_init(ch, first);
3666 	ipfw_table_algo_init(ch);
3667 
3668 	IPFW_ADD_SOPT_HANDLER(first, scodes);
3669 	return (0);
3670 }
3671 
3672 
3673 
3674