xref: /freebsd/sys/netpfil/ipfw/ip_fw_table.c (revision 313376588638950ba1e93c403dd8c97bc52fd3a2)
1 /*-
2  * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
3  * Copyright (c) 2014 Yandex LLC
4  * Copyright (c) 2014 Alexander V. Chernikov
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * Lookup table support for ipfw.
33  *
34  * This file contains handlers for all generic tables' operations:
35  * add/del/flush entries, list/dump tables, etc.
36  *
37  * Table data modification is protected by both the UH and runtime locks,
38  * while reading configuration/data is protected by the UH lock.
39  *
40  * Lookup algorithms for all table types are located in ip_fw_table_algo.c
41  */
42 
43 #include "opt_ipfw.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/rwlock.h>
51 #include <sys/rmlock.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/queue.h>
55 #include <net/if.h>	/* ip_fw.h requires IFNAMSIZ */
56 
57 #include <netinet/in.h>
58 #include <netinet/ip_var.h>	/* struct ipfw_rule_ref */
59 #include <netinet/ip_fw.h>
60 
61 #include <netpfil/ipfw/ip_fw_private.h>
62 #include <netpfil/ipfw/ip_fw_table.h>
63 
64 /*
65  * Table has the following `type` concepts:
66  *
67  * `no.type` represents lookup key type (addr, ifp, uid, etc.)
68  * vmask represents a bitmask of table values which are present at the moment.
69  * Special IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents the old
70  * single-value-for-all approach.
71  */
72 struct table_config {
73 	struct named_object	no;
74 	uint8_t		tflags;		/* type flags */
75 	uint8_t		locked;		/* 1 if locked from changes */
76 	uint8_t		linked;		/* 1 if already linked */
77 	uint8_t		ochanged;	/* used by set swapping */
78 	uint8_t		vshared;	/* 1 if using shared value array */
79 	uint8_t		spare[3];
80 	uint32_t	count;		/* Number of records */
81 	uint32_t	limit;		/* Max number of records */
82 	uint32_t	vmask;		/* bitmask with supported values */
83 	uint32_t	ocount;		/* used by set swapping */
84 	uint64_t	gencnt;		/* generation count */
85 	char		tablename[64];	/* table name */
86 	struct table_algo	*ta;	/* Callbacks for given algo */
87 	void		*astate;	/* algorithm state */
88 	struct table_info	ti_copy;	/* data to put to table_info */
89 	struct namedobj_instance	*vi;
90 };
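
/*
 * Minimal illustrative sketch (kept under #if 0, not compiled in) of how
 * the `vmask' field described above might be consulted.  IPFW_VTYPE_LEGACY
 * is the value named in the comment; the helper name itself is hypothetical.
 */
#if 0
static int
table_uses_legacy_values(const struct table_config *tc)
{

	/* Legacy tables advertise the single-value-for-all mask. */
	return (tc->vmask == IPFW_VTYPE_LEGACY);
}
#endif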
91 
92 static struct table_config *find_table(struct namedobj_instance *ni,
93     struct tid_info *ti);
94 static struct table_config *alloc_table_config(struct ip_fw_chain *ch,
95     struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags);
96 static void free_table_config(struct namedobj_instance *ni,
97     struct table_config *tc);
98 static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
99     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref);
100 static void link_table(struct ip_fw_chain *ch, struct table_config *tc);
101 static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc);
102 static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
103     struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc);
104 #define	OP_ADD	1
105 #define	OP_DEL	0
106 static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
107     struct sockopt_data *sd);
108 static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
109     ipfw_xtable_info *i);
110 static int dump_table_tentry(void *e, void *arg);
111 static int dump_table_xentry(void *e, void *arg);
112 
113 static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
114     struct tid_info *b);
115 
116 static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
117     struct table_config *tc, struct table_info *ti, uint32_t count);
118 static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);
119 
120 static struct table_algo *find_table_algo(struct tables_config *tableconf,
121     struct tid_info *ti, char *name);
122 
123 static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti);
124 static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);
125 static int classify_table_opcode(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype);
126 
127 #define	CHAIN_TO_NI(chain)	(CHAIN_TO_TCFG(chain)->namehash)
128 #define	KIDX_TO_TI(ch, k)	(&(((struct table_info *)(ch)->tablestate)[k]))
129 
130 #define	TA_BUF_SZ	128	/* On-stack buffer for add/delete state */
131 
132 void
133 rollback_toperation_state(struct ip_fw_chain *ch, void *object)
134 {
135 	struct tables_config *tcfg;
136 	struct op_state *os;
137 
138 	tcfg = CHAIN_TO_TCFG(ch);
139 	TAILQ_FOREACH(os, &tcfg->state_list, next)
140 		os->func(object, os);
141 }
142 
143 void
144 add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
145 {
146 	struct tables_config *tcfg;
147 
148 	tcfg = CHAIN_TO_TCFG(ch);
149 	TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
150 }
151 
152 void
153 del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
154 {
155 	struct tables_config *tcfg;
156 
157 	tcfg = CHAIN_TO_TCFG(ch);
158 	TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
159 }
160 
161 void
162 tc_ref(struct table_config *tc)
163 {
164 
165 	tc->no.refcnt++;
166 }
167 
168 void
169 tc_unref(struct table_config *tc)
170 {
171 
172 	tc->no.refcnt--;
173 }
174 
175 static struct table_value *
176 get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
177 {
178 	struct table_value *pval;
179 
180 	pval = (struct table_value *)ch->valuestate;
181 
182 	return (&pval[kidx]);
183 }
184 
185 
186 /*
187  * Checks if we're able to insert/update entry @tei into table
188  * w.r.t @tc limits.
189  * May alter @tei to indicate insertion error / insert
190  * options.
191  *
192  * Returns 0 if the operation can be performed.
193  */
194 static int
195 check_table_limit(struct table_config *tc, struct tentry_info *tei)
196 {
197 
198 	if (tc->limit == 0 || tc->count < tc->limit)
199 		return (0);
200 
201 	if ((tei->flags & TEI_FLAGS_UPDATE) == 0) {
202 		/* Notify userland on error cause */
203 		tei->flags |= TEI_FLAGS_LIMIT;
204 		return (EFBIG);
205 	}
206 
207 	/*
208 	 * We have UPDATE flag set.
209 	 * Permit updating record (if found),
210 	 * but restrict adding new one since we've
211 	 * already hit the limit.
212 	 */
213 	tei->flags |= TEI_FLAGS_DONTADD;
214 
215 	return (0);
216 }
217 
218 /*
219  * Convert algorithm callback return code into
220  * one of pre-defined states known by userland.
221  */
222 static void
223 store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num)
224 {
225 	int flag;
226 
227 	flag = 0;
228 
229 	switch (error) {
230 	case 0:
231 		if (op == OP_ADD && num != 0)
232 			flag = TEI_FLAGS_ADDED;
233 		if (op == OP_DEL)
234 			flag = TEI_FLAGS_DELETED;
235 		break;
236 	case ENOENT:
237 		flag = TEI_FLAGS_NOTFOUND;
238 		break;
239 	case EEXIST:
240 		flag = TEI_FLAGS_EXISTS;
241 		break;
242 	default:
243 		flag = TEI_FLAGS_ERROR;
244 	}
245 
246 	tei->flags |= flag;
247 }
248 
249 /*
250  * Creates and references table with default parameters.
251  * Saves the allocated table kidx into @pkidx if the latter is
252  * non-NULL.
253  * Used for table auto-creation to support old binaries.
254  *
255  * Returns 0 on success.
256  */
257 static int
258 create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
259     uint16_t *pkidx)
260 {
261 	ipfw_xtable_info xi;
262 	int error;
263 
264 	memset(&xi, 0, sizeof(xi));
265 	/* Set default value mask for legacy clients */
266 	xi.vmask = IPFW_VTYPE_LEGACY;
267 
268 	error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1);
269 	if (error != 0)
270 		return (error);
271 
272 	return (0);
273 }
274 
275 /*
276  * Finds and references an existing table, optionally
277  * creating a new one.
278  *
279  * Saves found table config into @ptc.
280  * Note the function may drop/reacquire the UH_WLOCK.
281  * Returns 0 if the table was found/created and referenced,
282  * non-zero error code otherwise.
283  */
284 static int
285 find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
286     struct tentry_info *tei, uint32_t count, int op,
287     struct table_config **ptc)
288 {
289 	struct namedobj_instance *ni;
290 	struct table_config *tc;
291 	uint16_t kidx;
292 	int error;
293 
294 	IPFW_UH_WLOCK_ASSERT(ch);
295 
296 	ni = CHAIN_TO_NI(ch);
297 	tc = NULL;
298 	if ((tc = find_table(ni, ti)) != NULL) {
299 		/* check table type */
300 		if (tc->no.type != ti->type)
301 			return (EINVAL);
302 
303 		if (tc->locked != 0)
304 			return (EACCES);
305 
306 		/* Try to exit early on limit hit */
307 		if (op == OP_ADD && count == 1 &&
308 		    check_table_limit(tc, tei) != 0)
309 			return (EFBIG);
310 
311 		/* Reference and return */
312 		tc->no.refcnt++;
313 		*ptc = tc;
314 		return (0);
315 	}
316 
317 	if (op == OP_DEL)
318 		return (ESRCH);
319 
320 	/* Compatibility mode: create new table for old clients */
321 	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
322 		return (ESRCH);
323 
324 	IPFW_UH_WUNLOCK(ch);
325 	error = create_table_compat(ch, ti, &kidx);
326 	IPFW_UH_WLOCK(ch);
327 
328 	if (error != 0)
329 		return (error);
330 
331 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
332 	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));
333 
334 	/* OK, now we've got referenced table. */
335 	*ptc = tc;
336 	return (0);
337 }
338 
339 /*
340  * Rolls back already @added to @tc entries using state array @ta_buf_m.
341  * Assume the following layout:
342  * 1) ADD state (ta_buf_m[0] ... t_buf_m[added - 1]) for handling update cases
343  * 2) DEL state (ta_buf_m[count[ ... t_buf_m[count + added - 1])
344  *   for storing deleted state
345  */
346 static void
347 rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
348     struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
349     uint32_t count, uint32_t added)
350 {
351 	struct table_algo *ta;
352 	struct tentry_info *ptei;
353 	caddr_t v, vv;
354 	size_t ta_buf_sz;
355 	int error, i;
356 	uint32_t num;
357 
358 	IPFW_UH_WLOCK_ASSERT(ch);
359 
360 	ta = tc->ta;
361 	ta_buf_sz = ta->ta_buf_size;
362 	v = ta_buf_m;
363 	vv = v + count * ta_buf_sz;
364 	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
365 		ptei = &tei[i];
366 		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
367 
368 			/*
369 			 * We have old value stored by previous
370 			 * call in @ptei->value. Do add once again
371 			 * to restore it.
372 			 */
373 			error = ta->add(tc->astate, tinfo, ptei, v, &num);
374 			KASSERT(error == 0, ("rollback UPDATE fail"));
375 			KASSERT(num == 0, ("rollback UPDATE fail2"));
376 			continue;
377 		}
378 
379 		error = ta->prepare_del(ch, ptei, vv);
380 		KASSERT(error == 0, ("pre-rollback INSERT failed"));
381 		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
382 		KASSERT(error == 0, ("rollback INSERT failed"));
383 		tc->count -= num;
384 	}
385 }
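
/*
 * Worked example of the ta_buf_m layout described above, for count = 4
 * prepared entries of which added = 2 succeeded before a failure:
 *
 *   slot 0 .. 1            ADD state of the two inserted entries
 *   slot 2 .. 3            ADD state of entries that were never applied
 *   slot count .. count+1  DEL state built here to undo the two inserts
 *
 * The helpers below (hypothetical names, not compiled in) show the offset
 * arithmetic used by the rollback loop above.
 */
#if 0
static caddr_t
add_state_slot(caddr_t ta_buf_m, size_t ta_buf_sz, uint32_t i)
{

	return (ta_buf_m + i * ta_buf_sz);
}

static caddr_t
del_state_slot(caddr_t ta_buf_m, size_t ta_buf_sz, uint32_t count, uint32_t i)
{

	return (ta_buf_m + (count + i) * ta_buf_sz);
}
#endif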
386 
387 /*
388  * Prepares add/del state for all @count entries in @tei.
389  * Uses either stack buffer (@ta_buf) or allocates a new one.
390  * Stores pointer to allocated buffer back to @ta_buf.
391  *
392  * Returns 0 on success.
393  */
394 static int
395 prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
396     struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
397 {
398 	caddr_t ta_buf_m, v;
399 	size_t ta_buf_sz, sz;
400 	struct tentry_info *ptei;
401 	int error, i;
402 
403 	error = 0;
404 	ta_buf_sz = ta->ta_buf_size;
405 	if (count == 1) {
406 		/* Sigle add/delete, use on-stack buffer */
407 		memset(*ta_buf, 0, TA_BUF_SZ);
408 		ta_buf_m = *ta_buf;
409 	} else {
410 
411 		/*
412 		 * Multiple adds/deletes, allocate larger buffer
413 		 *
414 		 * Note we need 2xcount buffer for add case:
415 		 * we have hold both ADD state
416 		 * and DELETE state (this may be needed
417 		 * if we need to rollback all changes)
418 		 */
419 		sz = count * ta_buf_sz;
420 		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
421 		    M_WAITOK | M_ZERO);
422 	}
423 
424 	v = ta_buf_m;
425 	for (i = 0; i < count; i++, v += ta_buf_sz) {
426 		ptei = &tei[i];
427 		error = (op == OP_ADD) ?
428 		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);
429 
430 		/*
431 		 * Some syntax error (incorrect mask, or address, or
432 		 * anything). Return error regardless of atomicity
433 		 * settings.
434 		 */
435 		if (error != 0)
436 			break;
437 	}
438 
439 	*ta_buf = ta_buf_m;
440 	return (error);
441 }
442 
443 /*
444  * Flushes allocated state for each of the @count entries in @tei.
445  * Frees @ta_buf_m if differs from stack buffer @ta_buf.
446  */
447 static void
448 flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
449     struct tentry_info *tei, uint32_t count, int rollback,
450     caddr_t ta_buf_m, caddr_t ta_buf)
451 {
452 	caddr_t v;
453 	struct tentry_info *ptei;
454 	size_t ta_buf_sz;
455 	int i;
456 
457 	ta_buf_sz = ta->ta_buf_size;
458 
459 	/* Run cleaning callback anyway */
460 	v = ta_buf_m;
461 	for (i = 0; i < count; i++, v += ta_buf_sz) {
462 		ptei = &tei[i];
463 		ta->flush_entry(ch, ptei, v);
464 		if (ptei->ptv != NULL) {
465 			free(ptei->ptv, M_IPFW);
466 			ptei->ptv = NULL;
467 		}
468 	}
469 
470 	/* Clean up "deleted" state in case of rollback */
471 	if (rollback != 0) {
472 		v = ta_buf_m + count * ta_buf_sz;
473 		for (i = 0; i < count; i++, v += ta_buf_sz)
474 			ta->flush_entry(ch, &tei[i], v);
475 	}
476 
477 	if (ta_buf_m != ta_buf)
478 		free(ta_buf_m, M_TEMP);
479 }
480 
481 
482 static void
483 rollback_add_entry(void *object, struct op_state *_state)
484 {
485 	struct ip_fw_chain *ch;
486 	struct tableop_state *ts;
487 
488 	ts = (struct tableop_state *)_state;
489 
490 	if (ts->tc != object && ts->ch != object)
491 		return;
492 
493 	ch = ts->ch;
494 
495 	IPFW_UH_WLOCK_ASSERT(ch);
496 
497 	/* Call specified unlockers */
498 	rollback_table_values(ts);
499 
500 	/* Indicate we've been called */
501 	ts->modified = 1;
502 }
503 
504 /*
505  * Adds/updates one or more entries in table @ti.
506  *
507  * Function may drop/reacquire UH wlock multiple times due to
508  * items alloc, algorithm callbacks (check_space), value linkage
509  * (new values, value storage realloc), etc..
510  * Other processes like other adds (which may involve storage resize),
511  * table swaps (which changes table data and may change algo type),
512  * table modify (which may change value mask) may be executed
513  * simultaneously so we need to deal with it.
514  *
515  * The following approach was implemented:
516  * we have per-chain linked list, protected with UH lock.
517  * add_table_entry prepares a special on-stack structure which is passed
518  * to its descendants. Users add this structure to this list before unlock.
519  * After performing needed operations and acquiring UH lock back, each user
520  * checks if structure has changed. If true, it rolls local state back and
521  * returns without error to the caller.
522  * add_table_entry() on its own checks if structure has changed and restarts
523  * its operation from the beginning (goto restart).
524  *
525  * Functions which modify the fields of interest (currently
526  *   resize_shared_value_storage() and swap_tables() )
527  * traverse the given list while holding the UH lock immediately before
528  * performing their operations, calling the function provided by the list
529  * entry ( currently rollback_add_entry ), which performs rollback of all
530  * necessary state and sets appropriate values in the structure indicating
531  * that a rollback has happened.
532  *
533  * Algo interaction:
534  * Function references @ti first to ensure table won't
535  * disappear or change its type.
536  * After that, prepare_add callback is called for each @tei entry.
537  * Next, we try to add each entry under UH+WLOCK
538  * using add() callback.
539  * Finally, we free all state by calling flush_entry callback
540  * for each @tei.
541  *
542  * Returns 0 on success.
543  */
544 int
545 add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
546     struct tentry_info *tei, uint8_t flags, uint32_t count)
547 {
548 	struct table_config *tc;
549 	struct table_algo *ta;
550 	uint16_t kidx;
551 	int error, first_error, i, rollback;
552 	uint32_t num, numadd;
553 	struct tentry_info *ptei;
554 	struct tableop_state ts;
555 	char ta_buf[TA_BUF_SZ];
556 	caddr_t ta_buf_m, v;
557 
558 	memset(&ts, 0, sizeof(ts));
559 	ta = NULL;
560 	IPFW_UH_WLOCK(ch);
561 
562 	/*
563 	 * Find and reference existing table.
564 	 */
565 restart:
566 	if (ts.modified != 0) {
567 		IPFW_UH_WUNLOCK(ch);
568 		flush_batch_buffer(ch, ta, tei, count, rollback,
569 		    ta_buf_m, ta_buf);
570 		memset(&ts, 0, sizeof(ts));
571 		ta = NULL;
572 		IPFW_UH_WLOCK(ch);
573 	}
574 
575 	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
576 	if (error != 0) {
577 		IPFW_UH_WUNLOCK(ch);
578 		return (error);
579 	}
580 	ta = tc->ta;
581 
582 	/* Fill in tablestate */
583 	ts.ch = ch;
584 	ts.opstate.func = rollback_add_entry;
585 	ts.tc = tc;
586 	ts.vshared = tc->vshared;
587 	ts.vmask = tc->vmask;
588 	ts.ta = ta;
589 	ts.tei = tei;
590 	ts.count = count;
591 	rollback = 0;
592 	add_toperation_state(ch, &ts);
593 	IPFW_UH_WUNLOCK(ch);
594 
595 	/* Allocate memory and prepare record(s) */
596 	/* Pass stack buffer by default */
597 	ta_buf_m = ta_buf;
598 	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);
599 	if (error != 0)
600 		goto cleanup;
601 
602 	IPFW_UH_WLOCK(ch);
603 	/* Drop reference we've used in first search */
604 	tc->no.refcnt--;
605 
606 	/*
607 	 * Check if table swap has happened.
608 	 * (if so, the table algo might have changed).
609 	 * Restart operation to achieve consistent behavior.
610 	 */
611 	del_toperation_state(ch, &ts);
612 	if (ts.modified != 0)
613 		goto restart;
614 
615 	/*
616 	 * Link all values to the shared/per-table value array.
617 	 *
618 	 * May release/reacquire UH_WLOCK.
619 	 */
620 	error = ipfw_link_table_values(ch, &ts);
621 	if (error != 0)
622 		goto cleanup;
623 	if (ts.modified != 0)
624 		goto restart;
625 
626 	/*
627 	 * Ensure we are able to add all entries without additional
628 	 * memory allocations. May release/reacquire UH_WLOCK.
629 	 */
630 	kidx = tc->no.kidx;
631 	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
632 	if (error != 0)
633 		goto cleanup;
634 	if (ts.modified != 0)
635 		goto restart;
636 
637 	/* We've got valid table in @tc. Let's try to add data */
638 	kidx = tc->no.kidx;
639 	ta = tc->ta;
640 	numadd = 0;
641 	first_error = 0;
642 
643 	IPFW_WLOCK(ch);
644 
645 	v = ta_buf_m;
646 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
647 		ptei = &tei[i];
648 		num = 0;
649 		/* check limit before adding */
650 		if ((error = check_table_limit(tc, ptei)) == 0) {
651 			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
652 			    ptei, v, &num);
653 			/* Set status flag to inform userland */
654 			store_tei_result(ptei, OP_ADD, error, num);
655 		}
656 		if (error == 0) {
657 			/* Update number of records to ease limit checking */
658 			tc->count += num;
659 			numadd += num;
660 			continue;
661 		}
662 
663 		if (first_error == 0)
664 			first_error = error;
665 
666 		/*
667 		 * Some error has happened. Check our atomicity
668 		 * settings: continue if atomicity is not required,
669 		 * rollback changes otherwise.
670 		 */
671 		if ((flags & IPFW_CTF_ATOMIC) == 0)
672 			continue;
673 
674 		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
675 		    tei, ta_buf_m, count, i);
676 
677 		rollback = 1;
678 		break;
679 	}
680 
681 	IPFW_WUNLOCK(ch);
682 
683 	ipfw_garbage_table_values(ch, tc, tei, count, rollback);
684 
685 	/* Permit post-add algorithm grow/rehash. */
686 	if (numadd != 0)
687 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
688 
689 	/* Return first error to user, if any */
690 	error = first_error;
691 
692 cleanup:
693 	IPFW_UH_WUNLOCK(ch);
694 
695 	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);
696 
697 	return (error);
698 }
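
/*
 * Condensed sketch (not compiled in) of the rollback-notification pattern
 * described before add_table_entry(): publish a tableop_state on the
 * per-chain list before dropping the UH lock, then restart if a concurrent
 * swap/resize marked it as modified.  The caller is assumed to hold the
 * UH write lock; do_sleepable_work() is a hypothetical placeholder.
 */
#if 0
static void
toperation_retry_sketch(struct ip_fw_chain *ch, struct tableop_state *ts)
{

	for (;;) {
		add_toperation_state(ch, ts);
		IPFW_UH_WUNLOCK(ch);

		do_sleepable_work(ts);		/* may allocate and sleep */

		IPFW_UH_WLOCK(ch);
		del_toperation_state(ch, ts);
		if (ts->modified == 0)
			break;		/* our table state is still valid */
		/*
		 * Somebody swapped/resized the table: the real code rolls
		 * its local state back and redoes the work from scratch.
		 */
		ts->modified = 0;
	}
}
#endif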
699 
700 /*
701  * Deletes one or more entries in table @ti.
702  *
703  * Returns 0 on success.
704  */
705 int
706 del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
707     struct tentry_info *tei, uint8_t flags, uint32_t count)
708 {
709 	struct table_config *tc;
710 	struct table_algo *ta;
711 	struct tentry_info *ptei;
712 	uint16_t kidx;
713 	int error, first_error, i;
714 	uint32_t num, numdel;
715 	char ta_buf[TA_BUF_SZ];
716 	caddr_t ta_buf_m, v;
717 
718 	/*
719 	 * Find and reference existing table.
720 	 */
721 	IPFW_UH_WLOCK(ch);
722 	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
723 	if (error != 0) {
724 		IPFW_UH_WUNLOCK(ch);
725 		return (error);
726 	}
727 	ta = tc->ta;
728 	IPFW_UH_WUNLOCK(ch);
729 
730 	/* Allocate memory and prepare record(s) */
731 	/* Pass stack buffer by default */
732 	ta_buf_m = ta_buf;
733 	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
734 	if (error != 0)
735 		goto cleanup;
736 
737 	IPFW_UH_WLOCK(ch);
738 
739 	/* Drop reference we've used in first search */
740 	tc->no.refcnt--;
741 
742 	/*
743 	 * Check if table algo is still the same.
744 	 * (changed ta may be the result of table swap).
745 	 */
746 	if (ta != tc->ta) {
747 		IPFW_UH_WUNLOCK(ch);
748 		error = EINVAL;
749 		goto cleanup;
750 	}
751 
752 	kidx = tc->no.kidx;
753 	numdel = 0;
754 	first_error = 0;
755 
756 	IPFW_WLOCK(ch);
757 	v = ta_buf_m;
758 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
759 		ptei = &tei[i];
760 		num = 0;
761 		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
762 		    &num);
763 		/* Save state for userland */
764 		store_tei_result(ptei, OP_DEL, error, num);
765 		if (error != 0 && first_error == 0)
766 			first_error = error;
767 		tc->count -= num;
768 		numdel += num;
769 	}
770 	IPFW_WUNLOCK(ch);
771 
772 	/* Unlink non-used values */
773 	ipfw_garbage_table_values(ch, tc, tei, count, 0);
774 
775 	if (numdel != 0) {
776 		/* Run post-del hook to permit shrinking */
777 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
778 	}
779 
780 	IPFW_UH_WUNLOCK(ch);
781 
782 	/* Return first error to user, if any */
783 	error = first_error;
784 
785 cleanup:
786 	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);
787 
788 	return (error);
789 }
790 
791 /*
792  * Ensure that table @tc has enough space to add @count entries without
793  * need for reallocation.
794  *
795  * Callbacks order:
796  * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
797  *
798  * 1) prepare_mod (no locks, M_WAITOK) - allocate new state based on @pflags.
799  * 2) fill_mod (UH_WLOCK) - copy old data into the new storage.
800  * 3) modify (UH_WLOCK + WLOCK) - switch pointers.
801  * 4) flush_mod (UH_WLOCK) - free state, if needed.
802  *
803  * Returns 0 on success.
804  */
805 static int
806 check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
807     struct table_config *tc, struct table_info *ti, uint32_t count)
808 {
809 	struct table_algo *ta;
810 	uint64_t pflags;
811 	char ta_buf[TA_BUF_SZ];
812 	int error;
813 
814 	IPFW_UH_WLOCK_ASSERT(ch);
815 
816 	error = 0;
817 	ta = tc->ta;
818 	if (ta->need_modify == NULL)
819 		return (0);
820 
821 	/* Acquire a reference so as not to lose @tc between locks/unlocks */
822 	tc->no.refcnt++;
823 
824 	/*
825 	 * TODO: think about avoiding race between large add/large delete
826 	 * operation on algorithm which implements shrinking along with
827 	 * growing.
828 	 */
829 	while (true) {
830 		pflags = 0;
831 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
832 			error = 0;
833 			break;
834 		}
835 
836 		/* We have to shrink/grow table */
837 		if (ts != NULL)
838 			add_toperation_state(ch, ts);
839 		IPFW_UH_WUNLOCK(ch);
840 
841 		memset(&ta_buf, 0, sizeof(ta_buf));
842 		error = ta->prepare_mod(ta_buf, &pflags);
843 
844 		IPFW_UH_WLOCK(ch);
845 		if (ts != NULL)
846 			del_toperation_state(ch, ts);
847 
848 		if (error != 0)
849 			break;
850 
851 		if (ts != NULL && ts->modified != 0) {
852 
853 			/*
854 			 * Swap operation has happened
855 			 * so we're currently operating on other
856 			 * table data. Stop doing this.
857 			 */
858 			ta->flush_mod(ta_buf);
859 			break;
860 		}
861 
862 		/* Check if we still need to alter table */
863 		ti = KIDX_TO_TI(ch, tc->no.kidx);
864 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
865 			IPFW_UH_WUNLOCK(ch);
866 
867 			/*
868 			 * Other thread has already performed resize.
869 			 * Flush our state and return.
870 			 */
871 			ta->flush_mod(ta_buf);
872 			break;
873 		}
874 
875 		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
876 		if (error == 0) {
877 			/* Do actual modification */
878 			IPFW_WLOCK(ch);
879 			ta->modify(tc->astate, ti, ta_buf, pflags);
880 			IPFW_WUNLOCK(ch);
881 		}
882 
883 		/* Anyway, flush data and retry */
884 		ta->flush_mod(ta_buf);
885 	}
886 
887 	tc->no.refcnt--;
888 	return (error);
889 }
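
/*
 * Minimal sketch (not compiled in) of the callback sequence documented
 * above, with the locking, restart and race handling stripped away.  It
 * mirrors the ta->need_modify/prepare_mod/fill_mod/modify/flush_mod calls
 * made by check_table_space(); the function name is hypothetical.
 */
#if 0
static int
grow_table_sketch(struct table_config *tc, struct table_info *ti,
    uint32_t count)
{
	struct table_algo *ta;
	uint64_t pflags;
	char ta_buf[TA_BUF_SZ];
	int error;

	ta = tc->ta;
	pflags = 0;
	if (ta->need_modify(tc->astate, ti, count, &pflags) == 0)
		return (0);		/* enough room already */

	memset(&ta_buf, 0, sizeof(ta_buf));
	error = ta->prepare_mod(ta_buf, &pflags);	/* may sleep */
	if (error == 0)
		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
	if (error == 0)
		ta->modify(tc->astate, ti, ta_buf, pflags);	/* WLOCK */
	ta->flush_mod(ta_buf);
	return (error);
}
#endif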
890 
891 /*
892  * Adds or deletes record in table.
893  * Data layout (v0):
894  * Request: [ ip_fw3_opheader ipfw_table_xentry ]
895  *
896  * Returns 0 on success
897  */
898 static int
899 manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
900     struct sockopt_data *sd)
901 {
902 	ipfw_table_xentry *xent;
903 	struct tentry_info tei;
904 	struct tid_info ti;
905 	struct table_value v;
906 	int error, hdrlen, read;
907 
908 	hdrlen = offsetof(ipfw_table_xentry, k);
909 
910 	/* Check minimum header size */
911 	if (sd->valsize < (sizeof(*op3) + hdrlen))
912 		return (EINVAL);
913 
914 	read = sizeof(ip_fw3_opheader);
915 
916 	/* Check if xentry len field is valid */
917 	xent = (ipfw_table_xentry *)(op3 + 1);
918 	if (xent->len < hdrlen || xent->len + read > sd->valsize)
919 		return (EINVAL);
920 
921 	memset(&tei, 0, sizeof(tei));
922 	tei.paddr = &xent->k;
923 	tei.masklen = xent->masklen;
924 	ipfw_import_table_value_legacy(xent->value, &v);
925 	tei.pvalue = &v;
926 	/* Old requests compatibility */
927 	tei.flags = TEI_FLAGS_COMPAT;
928 	if (xent->type == IPFW_TABLE_ADDR) {
929 		if (xent->len - hdrlen == sizeof(in_addr_t))
930 			tei.subtype = AF_INET;
931 		else
932 			tei.subtype = AF_INET6;
933 	}
934 
935 	memset(&ti, 0, sizeof(ti));
936 	ti.uidx = xent->tbl;
937 	ti.type = xent->type;
938 
939 	error = (op3->opcode == IP_FW_TABLE_XADD) ?
940 	    add_table_entry(ch, &ti, &tei, 0, 1) :
941 	    del_table_entry(ch, &ti, &tei, 0, 1);
942 
943 	return (error);
944 }
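
/*
 * Userland-side sketch (not compiled in) of the v0 request layout handled
 * above: [ ip_fw3_opheader ipfw_table_xentry ] pushed through the IP_FW3
 * socket option.  Only the fields referenced by the handler are filled in;
 * socket setup, opcode/version dispatch details and the helper name are
 * assumptions, not definitive API usage.
 */
#if 0
static int
v0_add_addr_sketch(int sock, uint16_t tbl, struct in_addr addr,
    uint8_t masklen, uint32_t value)
{
	char buf[sizeof(ip_fw3_opheader) + sizeof(ipfw_table_xentry)];
	ip_fw3_opheader *op3;
	ipfw_table_xentry *xent;

	memset(buf, 0, sizeof(buf));
	op3 = (ip_fw3_opheader *)buf;
	op3->opcode = IP_FW_TABLE_XADD;

	xent = (ipfw_table_xentry *)(op3 + 1);
	xent->len = offsetof(ipfw_table_xentry, k) + sizeof(in_addr_t);
	xent->type = IPFW_TABLE_ADDR;
	xent->masklen = masklen;
	xent->tbl = tbl;
	xent->value = value;
	memcpy(&xent->k, &addr, sizeof(addr));

	return (setsockopt(sock, IPPROTO_IP, IP_FW3, buf, sizeof(buf)));
}
#endif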
945 
946 /*
947  * Adds or deletes record in table.
948  * Data layout (v1)(current):
949  * Request: [ ipfw_obj_header
950  *   ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ]
951  * ]
952  *
953  * Returns 0 on success
954  */
955 static int
956 manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
957     struct sockopt_data *sd)
958 {
959 	ipfw_obj_tentry *tent, *ptent;
960 	ipfw_obj_ctlv *ctlv;
961 	ipfw_obj_header *oh;
962 	struct tentry_info *ptei, tei, *tei_buf;
963 	struct tid_info ti;
964 	int error, i, kidx, read;
965 
966 	/* Check minimum header size */
967 	if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv)))
968 		return (EINVAL);
969 
970 	/* Check if passed data is too long */
971 	if (sd->valsize != sd->kavail)
972 		return (EINVAL);
973 
974 	oh = (ipfw_obj_header *)sd->kbuf;
975 
976 	/* Basic length checks for TLVs */
977 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
978 		return (EINVAL);
979 
980 	read = sizeof(*oh);
981 
982 	ctlv = (ipfw_obj_ctlv *)(oh + 1);
983 	if (ctlv->head.length + read != sd->valsize)
984 		return (EINVAL);
985 
986 	read += sizeof(*ctlv);
987 	tent = (ipfw_obj_tentry *)(ctlv + 1);
988 	if (ctlv->count * sizeof(*tent) + read != sd->valsize)
989 		return (EINVAL);
990 
991 	if (ctlv->count == 0)
992 		return (0);
993 
994 	/*
995 	 * Mark entire buffer as "read".
996 	 * This instructs the sopt API to write it back
997 	 * after the function returns.
998 	 */
999 	ipfw_get_sopt_header(sd, sd->valsize);
1000 
1001 	/* Perform basic checks for each entry */
1002 	ptent = tent;
1003 	kidx = tent->idx;
1004 	for (i = 0; i < ctlv->count; i++, ptent++) {
1005 		if (ptent->head.length != sizeof(*ptent))
1006 			return (EINVAL);
1007 		if (ptent->idx != kidx)
1008 			return (ENOTSUP);
1009 	}
1010 
1011 	/* Convert data into kernel request objects */
1012 	objheader_to_ti(oh, &ti);
1013 	ti.type = oh->ntlv.type;
1014 	ti.uidx = kidx;
1015 
1016 	/* Use on-stack buffer for single add/del */
1017 	if (ctlv->count == 1) {
1018 		memset(&tei, 0, sizeof(tei));
1019 		tei_buf = &tei;
1020 	} else
1021 		tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP,
1022 		    M_WAITOK | M_ZERO);
1023 
1024 	ptei = tei_buf;
1025 	ptent = tent;
1026 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1027 		ptei->paddr = &ptent->k;
1028 		ptei->subtype = ptent->subtype;
1029 		ptei->masklen = ptent->masklen;
1030 		if (ptent->head.flags & IPFW_TF_UPDATE)
1031 			ptei->flags |= TEI_FLAGS_UPDATE;
1032 
1033 		ipfw_import_table_value_v1(&ptent->v.value);
1034 		ptei->pvalue = (struct table_value *)&ptent->v.value;
1035 	}
1036 
1037 	error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ?
1038 	    add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) :
1039 	    del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count);
1040 
1041 	/* Translate result back to userland */
1042 	ptei = tei_buf;
1043 	ptent = tent;
1044 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1045 		if (ptei->flags & TEI_FLAGS_ADDED)
1046 			ptent->result = IPFW_TR_ADDED;
1047 		else if (ptei->flags & TEI_FLAGS_DELETED)
1048 			ptent->result = IPFW_TR_DELETED;
1049 		else if (ptei->flags & TEI_FLAGS_UPDATED)
1050 			ptent->result = IPFW_TR_UPDATED;
1051 		else if (ptei->flags & TEI_FLAGS_LIMIT)
1052 			ptent->result = IPFW_TR_LIMIT;
1053 		else if (ptei->flags & TEI_FLAGS_ERROR)
1054 			ptent->result = IPFW_TR_ERROR;
1055 		else if (ptei->flags & TEI_FLAGS_NOTFOUND)
1056 			ptent->result = IPFW_TR_NOTFOUND;
1057 		else if (ptei->flags & TEI_FLAGS_EXISTS)
1058 			ptent->result = IPFW_TR_EXISTS;
1059 		ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value);
1060 	}
1061 
1062 	if (tei_buf != &tei)
1063 		free(tei_buf, M_TEMP);
1064 
1065 	return (error);
1066 }
1067 
1068 /*
1069  * Looks up an entry in given table.
1070  * Data layout (v0)(current):
1071  * Request: [ ipfw_obj_header ipfw_obj_tentry ]
1072  * Reply: [ ipfw_obj_header ipfw_obj_tentry ]
1073  *
1074  * Returns 0 on success
1075  */
1076 static int
1077 find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1078     struct sockopt_data *sd)
1079 {
1080 	ipfw_obj_tentry *tent;
1081 	ipfw_obj_header *oh;
1082 	struct tid_info ti;
1083 	struct table_config *tc;
1084 	struct table_algo *ta;
1085 	struct table_info *kti;
1086 	struct namedobj_instance *ni;
1087 	int error;
1088 	size_t sz;
1089 
1090 	/* Check minimum header size */
1091 	sz = sizeof(*oh) + sizeof(*tent);
1092 	if (sd->valsize != sz)
1093 		return (EINVAL);
1094 
1095 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1096 	tent = (ipfw_obj_tentry *)(oh + 1);
1097 
1098 	/* Basic length checks for TLVs */
1099 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
1100 		return (EINVAL);
1101 
1102 	objheader_to_ti(oh, &ti);
1103 	ti.type = oh->ntlv.type;
1104 	ti.uidx = tent->idx;
1105 
1106 	IPFW_UH_RLOCK(ch);
1107 	ni = CHAIN_TO_NI(ch);
1108 
1109 	/*
1110 	 * Find existing table and check its type.
1111 	 */
1112 	ta = NULL;
1113 	if ((tc = find_table(ni, &ti)) == NULL) {
1114 		IPFW_UH_RUNLOCK(ch);
1115 		return (ESRCH);
1116 	}
1117 
1118 	/* check table type */
1119 	if (tc->no.type != ti.type) {
1120 		IPFW_UH_RUNLOCK(ch);
1121 		return (EINVAL);
1122 	}
1123 
1124 	kti = KIDX_TO_TI(ch, tc->no.kidx);
1125 	ta = tc->ta;
1126 
1127 	if (ta->find_tentry == NULL)
1128 		error = ENOTSUP;
1129 	else
1130 		error = ta->find_tentry(tc->astate, kti, tent);
1131 
1132 	IPFW_UH_RUNLOCK(ch);
1133 
1134 	return (error);
1135 }
1136 
1137 /*
1138  * Flushes all entries or destroys given table.
1139  * Data layout (v0)(current):
1140  * Request: [ ipfw_obj_header ]
1141  *
1142  * Returns 0 on success
1143  */
1144 static int
1145 flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1146     struct sockopt_data *sd)
1147 {
1148 	int error;
1149 	struct _ipfw_obj_header *oh;
1150 	struct tid_info ti;
1151 
1152 	if (sd->valsize != sizeof(*oh))
1153 		return (EINVAL);
1154 
1155 	oh = (struct _ipfw_obj_header *)op3;
1156 	objheader_to_ti(oh, &ti);
1157 
1158 	if (op3->opcode == IP_FW_TABLE_XDESTROY)
1159 		error = destroy_table(ch, &ti);
1160 	else if (op3->opcode == IP_FW_TABLE_XFLUSH)
1161 		error = flush_table(ch, &ti);
1162 	else
1163 		return (ENOTSUP);
1164 
1165 	return (error);
1166 }
1167 
1168 static void
1169 restart_flush(void *object, struct op_state *_state)
1170 {
1171 	struct tableop_state *ts;
1172 
1173 	ts = (struct tableop_state *)_state;
1174 
1175 	if (ts->tc != object)
1176 		return;
1177 
1178 	/* Indicate we've been called */
1179 	ts->modified = 1;
1180 }
1181 
1182 /*
1183  * Flushes given table.
1184  *
1185  * The function creates a new table instance with the same
1186  * parameters, swaps it with old one and
1187  * flushes state without holding runtime WLOCK.
1188  *
1189  * Returns 0 on success.
1190  */
1191 int
1192 flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
1193 {
1194 	struct namedobj_instance *ni;
1195 	struct table_config *tc;
1196 	struct table_algo *ta;
1197 	struct table_info ti_old, ti_new, *tablestate;
1198 	void *astate_old, *astate_new;
1199 	char algostate[64], *pstate;
1200 	struct tableop_state ts;
1201 	int error;
1202 	uint16_t kidx;
1203 	uint8_t tflags;
1204 
1205 	/*
1206 	 * Stage 1: save table algorithm.
1207 	 * Reference found table to ensure it won't disappear.
1208 	 */
1209 	IPFW_UH_WLOCK(ch);
1210 	ni = CHAIN_TO_NI(ch);
1211 	if ((tc = find_table(ni, ti)) == NULL) {
1212 		IPFW_UH_WUNLOCK(ch);
1213 		return (ESRCH);
1214 	}
1215 restart:
1216 	/* Set up swap handler */
1217 	memset(&ts, 0, sizeof(ts));
1218 	ts.opstate.func = restart_flush;
1219 	ts.tc = tc;
1220 
1221 	ta = tc->ta;
1222 	/* Do not flush readonly tables */
1223 	if ((ta->flags & TA_FLAG_READONLY) != 0) {
1224 		IPFW_UH_WUNLOCK(ch);
1225 		return (EACCES);
1226 	}
1227 	/* Save startup algo parameters */
1228 	if (ta->print_config != NULL) {
1229 		ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx),
1230 		    algostate, sizeof(algostate));
1231 		pstate = algostate;
1232 	} else
1233 		pstate = NULL;
1234 	tflags = tc->tflags;
1235 	tc->no.refcnt++;
1236 	add_toperation_state(ch, &ts);
1237 	IPFW_UH_WUNLOCK(ch);
1238 
1239 	/*
1240 	 * Stage 2: allocate new table instance using same algo.
1241 	 */
1242 	memset(&ti_new, 0, sizeof(struct table_info));
1243 	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);
1244 
1245 	/*
1246 	 * Stage 3: swap old state pointers with newly-allocated ones.
1247 	 * Decrease refcount.
1248 	 */
1249 	IPFW_UH_WLOCK(ch);
1250 	tc->no.refcnt--;
1251 	del_toperation_state(ch, &ts);
1252 
1253 	if (error != 0) {
1254 		IPFW_UH_WUNLOCK(ch);
1255 		return (error);
1256 	}
1257 
1258 	/*
1259 	 * Restart operation if table swap has happened:
1260 	 * even if algo may be the same, algo init parameters
1261 	 * may change. Restart operation instead of doing
1262 	 * complex checks.
1263 	 */
1264 	if (ts.modified != 0) {
1265 		ta->destroy(astate_new, &ti_new);
1266 		goto restart;
1267 	}
1268 
1269 	ni = CHAIN_TO_NI(ch);
1270 	kidx = tc->no.kidx;
1271 	tablestate = (struct table_info *)ch->tablestate;
1272 
1273 	IPFW_WLOCK(ch);
1274 	ti_old = tablestate[kidx];
1275 	tablestate[kidx] = ti_new;
1276 	IPFW_WUNLOCK(ch);
1277 
1278 	astate_old = tc->astate;
1279 	tc->astate = astate_new;
1280 	tc->ti_copy = ti_new;
1281 	tc->count = 0;
1282 
1283 	/* Notify algo on real @ti address */
1284 	if (ta->change_ti != NULL)
1285 		ta->change_ti(tc->astate, &tablestate[kidx]);
1286 
1287 	/*
1288 	 * Stage 4: unref values.
1289 	 */
1290 	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
1291 	IPFW_UH_WUNLOCK(ch);
1292 
1293 	/*
1294 	 * Stage 5: perform real flush/destroy.
1295 	 */
1296 	ta->destroy(astate_old, &ti_old);
1297 
1298 	return (0);
1299 }
1300 
1301 /*
1302  * Swaps two tables.
1303  * Data layout (v0)(current):
1304  * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
1305  *
1306  * Returns 0 on success
1307  */
1308 static int
1309 swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1310     struct sockopt_data *sd)
1311 {
1312 	int error;
1313 	struct _ipfw_obj_header *oh;
1314 	struct tid_info ti_a, ti_b;
1315 
1316 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
1317 		return (EINVAL);
1318 
1319 	oh = (struct _ipfw_obj_header *)op3;
1320 	ntlv_to_ti(&oh->ntlv, &ti_a);
1321 	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);
1322 
1323 	error = swap_tables(ch, &ti_a, &ti_b);
1324 
1325 	return (error);
1326 }
1327 
1328 /*
1329  * Swaps two tables of the same type/valtype.
1330  *
1331  * Checks if the tables are compatible and their limits
1332  * permit the swap, then actually performs the swap.
1333  *
1334  * Each table consists of 2 different parts:
1335  * config:
1336  *   @tc (with name, set, kidx) and rule bindings, which is "stable".
1337  *   number of items
1338  *   table algo
1339  * runtime:
1340  *   runtime data @ti (ch->tablestate)
1341  *   runtime cache in @tc
1342  *   algo-specific data (@tc->astate)
1343  *
1344  * So we switch:
1345  *  all runtime data
1346  *   number of items
1347  *   table algo
1348  *
1349  * After that we call @ti change handler for each table.
1350  *
1351  * Note that referencing @tc won't protect tc->ta from change.
1352  * XXX: Do we need to restrict swap between locked tables?
1353  * XXX: Do we need to exchange ftype?
1354  *
1355  * Returns 0 on success.
1356  */
1357 static int
1358 swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
1359     struct tid_info *b)
1360 {
1361 	struct namedobj_instance *ni;
1362 	struct table_config *tc_a, *tc_b;
1363 	struct table_algo *ta;
1364 	struct table_info ti, *tablestate;
1365 	void *astate;
1366 	uint32_t count;
1367 
1368 	/*
1369 	 * Stage 1: find both tables and ensure they are of
1370 	 * the same type.
1371 	 */
1372 	IPFW_UH_WLOCK(ch);
1373 	ni = CHAIN_TO_NI(ch);
1374 	if ((tc_a = find_table(ni, a)) == NULL) {
1375 		IPFW_UH_WUNLOCK(ch);
1376 		return (ESRCH);
1377 	}
1378 	if ((tc_b = find_table(ni, b)) == NULL) {
1379 		IPFW_UH_WUNLOCK(ch);
1380 		return (ESRCH);
1381 	}
1382 
1383 	/* Swapping a table with itself is a trivial no-op */
1384 	if (tc_a == tc_b) {
1385 		IPFW_UH_WUNLOCK(ch);
1386 		return (0);
1387 	}
1388 
1389 	/* Check type and flags are the same */
1390 	if (tc_a->no.type != tc_b->no.type || tc_a->tflags != tc_b->tflags) {
1391 		IPFW_UH_WUNLOCK(ch);
1392 		return (EINVAL);
1393 	}
1394 
1395 	/* Check limits before swap */
1396 	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
1397 	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
1398 		IPFW_UH_WUNLOCK(ch);
1399 		return (EFBIG);
1400 	}
1401 
1402 	/* Check if one of the tables is readonly */
1403 	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
1404 		IPFW_UH_WUNLOCK(ch);
1405 		return (EACCES);
1406 	}
1407 
1408 	/* Notify we're going to swap */
1409 	rollback_toperation_state(ch, tc_a);
1410 	rollback_toperation_state(ch, tc_b);
1411 
1412 	/* Everything is fine, prepare to swap */
1413 	tablestate = (struct table_info *)ch->tablestate;
1414 	ti = tablestate[tc_a->no.kidx];
1415 	ta = tc_a->ta;
1416 	astate = tc_a->astate;
1417 	count = tc_a->count;
1418 
1419 	IPFW_WLOCK(ch);
1420 	/* a <- b */
1421 	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
1422 	tc_a->ta = tc_b->ta;
1423 	tc_a->astate = tc_b->astate;
1424 	tc_a->count = tc_b->count;
1425 	/* b <- a */
1426 	tablestate[tc_b->no.kidx] = ti;
1427 	tc_b->ta = ta;
1428 	tc_b->astate = astate;
1429 	tc_b->count = count;
1430 	IPFW_WUNLOCK(ch);
1431 
1432 	/* Ensure tc.ti copies are in sync */
1433 	tc_a->ti_copy = tablestate[tc_a->no.kidx];
1434 	tc_b->ti_copy = tablestate[tc_b->no.kidx];
1435 
1436 	/* Notify both tables on @ti change */
1437 	if (tc_a->ta->change_ti != NULL)
1438 		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
1439 	if (tc_b->ta->change_ti != NULL)
1440 		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);
1441 
1442 	IPFW_UH_WUNLOCK(ch);
1443 
1444 	return (0);
1445 }
1446 
1447 /*
1448  * Destroys table specified by @ti.
1449  * Data layout (v0)(current):
1450  * Request: [ ip_fw3_opheader ]
1451  *
1452  * Returns 0 on success
1453  */
1454 static int
1455 destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
1456 {
1457 	struct namedobj_instance *ni;
1458 	struct table_config *tc;
1459 
1460 	IPFW_UH_WLOCK(ch);
1461 
1462 	ni = CHAIN_TO_NI(ch);
1463 	if ((tc = find_table(ni, ti)) == NULL) {
1464 		IPFW_UH_WUNLOCK(ch);
1465 		return (ESRCH);
1466 	}
1467 
1468 	/* Do not permit destroying referenced tables */
1469 	if (tc->no.refcnt > 0) {
1470 		IPFW_UH_WUNLOCK(ch);
1471 		return (EBUSY);
1472 	}
1473 
1474 	IPFW_WLOCK(ch);
1475 	unlink_table(ch, tc);
1476 	IPFW_WUNLOCK(ch);
1477 
1478 	/* Free obj index */
1479 	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
1480 		printf("Error unlinking kidx %d from table %s\n",
1481 		    tc->no.kidx, tc->tablename);
1482 
1483 	/* Unref values used in tables while holding UH lock */
1484 	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
1485 	IPFW_UH_WUNLOCK(ch);
1486 
1487 	free_table_config(ni, tc);
1488 
1489 	return (0);
1490 }
1491 
1492 static uint32_t
1493 roundup2p(uint32_t v)
1494 {
1495 
1496 	v--;
1497 	v |= v >> 1;
1498 	v |= v >> 2;
1499 	v |= v >> 4;
1500 	v |= v >> 8;
1501 	v |= v >> 16;
1502 	v++;
1503 
1504 	return (v);
1505 }
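
/*
 * Worked example of the bit-smearing trick above: for v = 1000 (0x3E8),
 * the decrement gives 0x3E7, the shifted ORs set every bit below the
 * highest one (0x3FF), and the final increment yields 0x400 = 1024.
 * Exact powers of two map to themselves thanks to the initial decrement.
 */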
1506 
1507 /*
1508  * Grow tables index.
1509  *
1510  * Returns 0 on success.
1511  */
1512 int
1513 ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
1514 {
1515 	unsigned int ntables_old, tbl;
1516 	struct namedobj_instance *ni;
1517 	void *new_idx, *old_tablestate, *tablestate;
1518 	struct table_info *ti;
1519 	struct table_config *tc;
1520 	int i, new_blocks;
1521 
1522 	/* Check new value for validity */
1523 	if (ntables == 0)
1524 		return (EINVAL);
1525 	if (ntables > IPFW_TABLES_MAX)
1526 		ntables = IPFW_TABLES_MAX;
1527 	/* Align to the nearest power of 2 */
1528 	ntables = (unsigned int)roundup2p(ntables);
1529 
1530 	/* Allocate new pointers */
1531 	tablestate = malloc(ntables * sizeof(struct table_info),
1532 	    M_IPFW, M_WAITOK | M_ZERO);
1533 
1534 	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);
1535 
1536 	IPFW_UH_WLOCK(ch);
1537 
1538 	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
1539 	ni = CHAIN_TO_NI(ch);
1540 
1541 	/* Temporarily restrict decreasing max_tables */
1542 	if (ntables < V_fw_tables_max) {
1543 
1544 		/*
1545 		 * FIXME: Check if we really can shrink
1546 		 */
1547 		IPFW_UH_WUNLOCK(ch);
1548 		return (EINVAL);
1549 	}
1550 
1551 	/* Copy table info/indices */
1552 	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
1553 	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
1554 
1555 	IPFW_WLOCK(ch);
1556 
1557 	/* Change pointers */
1558 	old_tablestate = ch->tablestate;
1559 	ch->tablestate = tablestate;
1560 	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);
1561 
1562 	ntables_old = V_fw_tables_max;
1563 	V_fw_tables_max = ntables;
1564 
1565 	IPFW_WUNLOCK(ch);
1566 
1567 	/* Notify all consumers that their @ti pointer has changed */
1568 	ti = (struct table_info *)ch->tablestate;
1569 	for (i = 0; i < tbl; i++, ti++) {
1570 		if (ti->lookup == NULL)
1571 			continue;
1572 		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
1573 		if (tc == NULL || tc->ta->change_ti == NULL)
1574 			continue;
1575 
1576 		tc->ta->change_ti(tc->astate, ti);
1577 	}
1578 
1579 	IPFW_UH_WUNLOCK(ch);
1580 
1581 	/* Free old pointers */
1582 	free(old_tablestate, M_IPFW);
1583 	ipfw_objhash_bitmap_free(new_idx, new_blocks);
1584 
1585 	return (0);
1586 }
1587 
1588 /*
1589  * Switches between "set 0" and "rule's set" table binding.
1590  * Checks all ruleset bindings and permits the change
1591  * IFF each binding has both rule AND table in the default set (set 0).
1592  *
1593  * Returns 0 on success.
1594  */
1595 int
1596 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets)
1597 {
1598 	struct namedobj_instance *ni;
1599 	struct named_object *no;
1600 	struct ip_fw *rule;
1601 	ipfw_insn *cmd;
1602 	int cmdlen, i, l;
1603 	uint16_t kidx;
1604 	uint8_t type;
1605 
1606 	IPFW_UH_WLOCK(ch);
1607 
1608 	if (V_fw_tables_sets == sets) {
1609 		IPFW_UH_WUNLOCK(ch);
1610 		return (0);
1611 	}
1612 
1613 	ni = CHAIN_TO_NI(ch);
1614 
1615 	/*
1616 	 * Scan all rules and examine table opcodes.
1617 	 */
1618 	for (i = 0; i < ch->n_rules; i++) {
1619 		rule = ch->map[i];
1620 
1621 		l = rule->cmd_len;
1622 		cmd = rule->cmd;
1623 		cmdlen = 0;
1624 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1625 			cmdlen = F_LEN(cmd);
1626 
1627 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
1628 				continue;
1629 
1630 			no = ipfw_objhash_lookup_kidx(ni, kidx);
1631 
1632 			/* Check if both table object and rule have set 0 */
1633 			if (no->set != 0 || rule->set != 0) {
1634 				IPFW_UH_WUNLOCK(ch);
1635 				return (EBUSY);
1636 			}
1637 
1638 		}
1639 	}
1640 	V_fw_tables_sets = sets;
1641 
1642 	IPFW_UH_WUNLOCK(ch);
1643 
1644 	return (0);
1645 }
1646 
1647 /*
1648  * Lookup an IP @addr in table @tbl.
1649  * Stores found value in @val.
1650  *
1651  * Returns 1 if @addr was found.
1652  */
1653 int
1654 ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
1655     uint32_t *val)
1656 {
1657 	struct table_info *ti;
1658 
1659 	ti = KIDX_TO_TI(ch, tbl);
1660 
1661 	return (ti->lookup(ti, &addr, sizeof(in_addr_t), val));
1662 }
1663 
1664 /*
1665  * Lookup an arbitrary key @paddr of length @plen in table @tbl.
1666  * Stores found value in @val.
1667  *
1668  * Returns 1 if key was found.
1669  */
1670 int
1671 ipfw_lookup_table_extended(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
1672     void *paddr, uint32_t *val)
1673 {
1674 	struct table_info *ti;
1675 
1676 	ti = KIDX_TO_TI(ch, tbl);
1677 
1678 	return (ti->lookup(ti, paddr, plen, val));
1679 }
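
/*
 * Illustrative sketch (not compiled in): an IPv6 lookup is expected to go
 * through this extended entry point with the key length set to the
 * address size.  The wrapper name is hypothetical.
 */
#if 0
static int
lookup_v6_sketch(struct ip_fw_chain *ch, uint16_t tbl, struct in6_addr *a6,
    uint32_t *val)
{

	return (ipfw_lookup_table_extended(ch, tbl, sizeof(*a6), a6, val));
}
#endif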
1680 
1681 /*
1682  * Info/List/dump support for tables.
1683  *
1684  */
1685 
1686 /*
1687  * High-level 'get' cmds sysctl handlers
1688  */
1689 
1690 /*
1691  * Lists all tables currently available in kernel.
1692  * Data layout (v0)(current):
1693  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
1694  * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ]
1695  *
1696  * Returns 0 on success
1697  */
1698 static int
1699 list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1700     struct sockopt_data *sd)
1701 {
1702 	struct _ipfw_obj_lheader *olh;
1703 	int error;
1704 
1705 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
1706 	if (olh == NULL)
1707 		return (EINVAL);
1708 	if (sd->valsize < olh->size)
1709 		return (EINVAL);
1710 
1711 	IPFW_UH_RLOCK(ch);
1712 	error = export_tables(ch, olh, sd);
1713 	IPFW_UH_RUNLOCK(ch);
1714 
1715 	return (error);
1716 }
1717 
1718 /*
1719  * Store table info to buffer provided by @sd.
1720  * Data layout (v0)(current):
1721  * Request: [ ipfw_obj_header ipfw_xtable_info(empty)]
1722  * Reply: [ ipfw_obj_header ipfw_xtable_info ]
1723  *
1724  * Returns 0 on success.
1725  */
1726 static int
1727 describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1728     struct sockopt_data *sd)
1729 {
1730 	struct _ipfw_obj_header *oh;
1731 	struct table_config *tc;
1732 	struct tid_info ti;
1733 	size_t sz;
1734 
1735 	sz = sizeof(*oh) + sizeof(ipfw_xtable_info);
1736 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1737 	if (oh == NULL)
1738 		return (EINVAL);
1739 
1740 	objheader_to_ti(oh, &ti);
1741 
1742 	IPFW_UH_RLOCK(ch);
1743 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
1744 		IPFW_UH_RUNLOCK(ch);
1745 		return (ESRCH);
1746 	}
1747 
1748 	export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1));
1749 	IPFW_UH_RUNLOCK(ch);
1750 
1751 	return (0);
1752 }
1753 
1754 /*
1755  * Modifies existing table.
1756  * Data layout (v0)(current):
1757  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1758  *
1759  * Returns 0 on success
1760  */
1761 static int
1762 modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1763     struct sockopt_data *sd)
1764 {
1765 	struct _ipfw_obj_header *oh;
1766 	ipfw_xtable_info *i;
1767 	char *tname;
1768 	struct tid_info ti;
1769 	struct namedobj_instance *ni;
1770 	struct table_config *tc;
1771 
1772 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1773 		return (EINVAL);
1774 
1775 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1776 	i = (ipfw_xtable_info *)(oh + 1);
1777 
1778 	/*
1779 	 * Verify user-supplied strings.
1780 	 * Check for null-terminated/zero-length strings/
1781 	 */
1782 	tname = oh->ntlv.name;
1783 	if (ipfw_check_table_name(tname) != 0)
1784 		return (EINVAL);
1785 
1786 	objheader_to_ti(oh, &ti);
1787 	ti.type = i->type;
1788 
1789 	IPFW_UH_WLOCK(ch);
1790 	ni = CHAIN_TO_NI(ch);
1791 	if ((tc = find_table(ni, &ti)) == NULL) {
1792 		IPFW_UH_WUNLOCK(ch);
1793 		return (ESRCH);
1794 	}
1795 
1796 	/* Do not support any modifications for readonly tables */
1797 	if ((tc->ta->flags & TA_FLAG_READONLY) != 0) {
1798 		IPFW_UH_WUNLOCK(ch);
1799 		return (EACCES);
1800 	}
1801 
1802 	if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0)
1803 		tc->limit = i->limit;
1804 	if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0)
1805 		tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0);
1806 	IPFW_UH_WUNLOCK(ch);
1807 
1808 	return (0);
1809 }
1810 
1811 /*
1812  * Creates new table.
1813  * Data layout (v0)(current):
1814  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1815  *
1816  * Returns 0 on success
1817  */
1818 static int
1819 create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1820     struct sockopt_data *sd)
1821 {
1822 	struct _ipfw_obj_header *oh;
1823 	ipfw_xtable_info *i;
1824 	char *tname, *aname;
1825 	struct tid_info ti;
1826 	struct namedobj_instance *ni;
1827 	struct table_config *tc;
1828 
1829 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1830 		return (EINVAL);
1831 
1832 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1833 	i = (ipfw_xtable_info *)(oh + 1);
1834 
1835 	/*
1836 	 * Verify user-supplied strings.
1837 	 * Check for null-terminated/zero-length strings/
1838 	 */
1839 	tname = oh->ntlv.name;
1840 	aname = i->algoname;
1841 	if (ipfw_check_table_name(tname) != 0 ||
1842 	    strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname))
1843 		return (EINVAL);
1844 
1845 	if (aname[0] == '\0') {
1846 		/* Use default algorithm */
1847 		aname = NULL;
1848 	}
1849 
1850 	objheader_to_ti(oh, &ti);
1851 	ti.type = i->type;
1852 
1853 	ni = CHAIN_TO_NI(ch);
1854 
1855 	IPFW_UH_RLOCK(ch);
1856 	if ((tc = find_table(ni, &ti)) != NULL) {
1857 		IPFW_UH_RUNLOCK(ch);
1858 		return (EEXIST);
1859 	}
1860 	IPFW_UH_RUNLOCK(ch);
1861 
1862 	return (create_table_internal(ch, &ti, aname, i, NULL, 0));
1863 }
1864 
1865 /*
1866  * Creates new table based on @ti and @aname.
1867  *
1868  * Relies on table name checking inside find_name_tlv()
1869  * Assume @aname to be checked and valid.
1870  * Stores allocated table kidx inside @pkidx (if non-NULL).
1871  * Reference created table if @compat is non-zero.
1872  *
1873  * Returns 0 on success.
1874  */
1875 static int
1876 create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
1877     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat)
1878 {
1879 	struct namedobj_instance *ni;
1880 	struct table_config *tc, *tc_new, *tmp;
1881 	struct table_algo *ta;
1882 	uint16_t kidx;
1883 
1884 	ni = CHAIN_TO_NI(ch);
1885 
1886 	ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
1887 	if (ta == NULL)
1888 		return (ENOTSUP);
1889 
1890 	tc = alloc_table_config(ch, ti, ta, aname, i->tflags);
1891 	if (tc == NULL)
1892 		return (ENOMEM);
1893 
1894 	tc->vmask = i->vmask;
1895 	tc->limit = i->limit;
1896 	if (ta->flags & TA_FLAG_READONLY)
1897 		tc->locked = 1;
1898 	else
1899 		tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;
1900 
1901 	IPFW_UH_WLOCK(ch);
1902 
1903 	/* Check if table has been already created */
1904 	tc_new = find_table(ni, ti);
1905 	if (tc_new != NULL) {
1906 
1907 		/*
1908 		 * Compat: do not fail if we're
1909 		 * requesting to create existing table
1910 		 * which has the same type
1911 		 */
1912 		if (compat == 0 || tc_new->no.type != tc->no.type) {
1913 			IPFW_UH_WUNLOCK(ch);
1914 			free_table_config(ni, tc);
1915 			return (EEXIST);
1916 		}
1917 
1918 		/* Exchange tc and tc_new for proper refcounting & freeing */
1919 		tmp = tc;
1920 		tc = tc_new;
1921 		tc_new = tmp;
1922 	} else {
1923 		/* New table */
1924 		if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
1925 			IPFW_UH_WUNLOCK(ch);
1926 			printf("Unable to allocate table index."
1927 			    " Consider increasing net.inet.ip.fw.tables_max\n");
1928 			free_table_config(ni, tc);
1929 			return (EBUSY);
1930 		}
1931 		tc->no.kidx = kidx;
1932 
1933 		IPFW_WLOCK(ch);
1934 		link_table(ch, tc);
1935 		IPFW_WUNLOCK(ch);
1936 	}
1937 
1938 	if (compat != 0)
1939 		tc->no.refcnt++;
1940 	if (pkidx != NULL)
1941 		*pkidx = tc->no.kidx;
1942 
1943 	IPFW_UH_WUNLOCK(ch);
1944 
1945 	if (tc_new != NULL)
1946 		free_table_config(ni, tc_new);
1947 
1948 	return (0);
1949 }
1950 
1951 static void
1952 ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti)
1953 {
1954 
1955 	memset(ti, 0, sizeof(struct tid_info));
1956 	ti->set = ntlv->set;
1957 	ti->uidx = ntlv->idx;
1958 	ti->tlvs = ntlv;
1959 	ti->tlen = ntlv->head.length;
1960 }
1961 
1962 static void
1963 objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti)
1964 {
1965 
1966 	ntlv_to_ti(&oh->ntlv, ti);
1967 }
1968 
1969 /*
1970  * Exports basic table info as name TLV.
1971  * Used inside dump_static_rules() to provide info
1972  * about all tables referenced by current ruleset.
1973  *
1974  * Returns 0 on success.
1975  */
1976 int
1977 ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
1978     struct sockopt_data *sd)
1979 {
1980 	struct namedobj_instance *ni;
1981 	struct named_object *no;
1982 	ipfw_obj_ntlv *ntlv;
1983 
1984 	ni = CHAIN_TO_NI(ch);
1985 
1986 	no = ipfw_objhash_lookup_kidx(ni, kidx);
1987 	KASSERT(no != NULL, ("invalid table kidx passed"));
1988 
1989 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
1990 	if (ntlv == NULL)
1991 		return (ENOMEM);
1992 
1993 	ntlv->head.type = IPFW_TLV_TBL_NAME;
1994 	ntlv->head.length = sizeof(*ntlv);
1995 	ntlv->idx = no->kidx;
1996 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
1997 
1998 	return (0);
1999 }
2000 
2001 /*
2002  * Marks every table kidx used in @rule with bit in @bmask.
2003  * Used to generate bitmask of referenced tables for given ruleset.
2004  *
2005  * Returns number of newly-referenced tables.
2006  */
2007 int
2008 ipfw_mark_table_kidx(struct ip_fw_chain *chain, struct ip_fw *rule,
2009     uint32_t *bmask)
2010 {
2011 	int cmdlen, l, count;
2012 	ipfw_insn *cmd;
2013 	uint16_t kidx;
2014 	uint8_t type;
2015 
2016 	l = rule->cmd_len;
2017 	cmd = rule->cmd;
2018 	cmdlen = 0;
2019 	count = 0;
2020 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2021 		cmdlen = F_LEN(cmd);
2022 
2023 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
2024 			continue;
2025 
2026 		if ((bmask[kidx / 32] & (1 << (kidx % 32))) == 0)
2027 			count++;
2028 
2029 		bmask[kidx / 32] |= 1 << (kidx % 32);
2030 	}
2031 
2032 	return (count);
2033 }
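
/*
 * Worked example for the bitmask arithmetic above (descriptive note):
 * table kidx 37 lands in bmask[37 / 32] == bmask[1], bit 37 % 32 == 5,
 * so the test-and-set pair effectively becomes
 *
 *	if ((bmask[1] & (1 << 5)) == 0)
 *		count++;
 *	bmask[1] |= 1 << 5;
 *
 * The caller must size @bmask large enough to cover every possible
 * table kidx.
 */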
2034 
2035 struct dump_args {
2036 	struct ip_fw_chain *ch;
2037 	struct table_info *ti;
2038 	struct table_config *tc;
2039 	struct sockopt_data *sd;
2040 	uint32_t cnt;
2041 	uint16_t uidx;
2042 	int error;
2043 	uint32_t size;
2044 	ipfw_table_entry *ent;
2045 	ta_foreach_f *f;
2046 	void *farg;
2047 	ipfw_obj_tentry tent;
2048 };
2049 
2050 static int
2051 count_ext_entries(void *e, void *arg)
2052 {
2053 	struct dump_args *da;
2054 
2055 	da = (struct dump_args *)arg;
2056 	da->cnt++;
2057 
2058 	return (0);
2059 }
2060 
2061 /*
2062  * Gets the number of items in a table, either using the
2063  * internal counter or by calling the algo callback for
2064  * externally-managed tables.
2065  *
2066  * Returns number of records.
2067  */
2068 static uint32_t
2069 table_get_count(struct ip_fw_chain *ch, struct table_config *tc)
2070 {
2071 	struct table_info *ti;
2072 	struct table_algo *ta;
2073 	struct dump_args da;
2074 
2075 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2076 	ta = tc->ta;
2077 
2078 	/* Use internal counter for self-managed tables */
2079 	if ((ta->flags & TA_FLAG_READONLY) == 0)
2080 		return (tc->count);
2081 
2082 	/* Use callback to quickly get number of items */
2083 	if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0)
2084 		return (ta->get_count(tc->astate, ti));
2085 
2086 	/* Count the number of items ourselves */
2087 	memset(&da, 0, sizeof(da));
2088 	ta->foreach(tc->astate, ti, count_ext_entries, &da);
2089 
2090 	return (da.cnt);
2091 }
2092 
2093 /*
2094  * Exports table @tc info into standard ipfw_xtable_info format.
2095  */
2096 static void
2097 export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
2098     ipfw_xtable_info *i)
2099 {
2100 	struct table_info *ti;
2101 	struct table_algo *ta;
2102 
2103 	i->type = tc->no.type;
2104 	i->tflags = tc->tflags;
2105 	i->vmask = tc->vmask;
2106 	i->set = tc->no.set;
2107 	i->kidx = tc->no.kidx;
2108 	i->refcnt = tc->no.refcnt;
2109 	i->count = table_get_count(ch, tc);
2110 	i->limit = tc->limit;
2111 	i->flags |= (tc->locked != 0) ? IPFW_TGFLAGS_LOCKED : 0;
2112 	i->size = tc->count * sizeof(ipfw_obj_tentry);
2113 	i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2114 	strlcpy(i->tablename, tc->tablename, sizeof(i->tablename));
2115 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2116 	ta = tc->ta;
2117 	if (ta->print_config != NULL) {
2118 		/* Use algo function to print table config to string */
2119 		ta->print_config(tc->astate, ti, i->algoname,
2120 		    sizeof(i->algoname));
2121 	} else
2122 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2123 	/* Dump algo-specific data, if possible */
2124 	if (ta->dump_tinfo != NULL) {
2125 		ta->dump_tinfo(tc->astate, ti, &i->ta_info);
2126 		i->ta_info.flags |= IPFW_TATFLAGS_DATA;
2127 	}
2128 }
2129 
2130 struct dump_table_args {
2131 	struct ip_fw_chain *ch;
2132 	struct sockopt_data *sd;
2133 };
2134 
2135 static void
2136 export_table_internal(struct namedobj_instance *ni, struct named_object *no,
2137     void *arg)
2138 {
2139 	ipfw_xtable_info *i;
2140 	struct dump_table_args *dta;
2141 
2142 	dta = (struct dump_table_args *)arg;
2143 
2144 	i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i));
2145 	KASSERT(i != NULL, ("previously checked buffer is not enough"));
2146 
2147 	export_table_info(dta->ch, (struct table_config *)no, i);
2148 }
2149 
2150 /*
2151  * Export all tables as ipfw_xtable_info structures to
2152  * storage provided by @sd.
2153  *
2154  * If supplied buffer is too small, fills in required size
2155  * and returns ENOMEM.
2156  * Returns 0 on success.
2157  */
2158 static int
2159 export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
2160     struct sockopt_data *sd)
2161 {
2162 	uint32_t size;
2163 	uint32_t count;
2164 	struct dump_table_args dta;
2165 
2166 	count = ipfw_objhash_count(CHAIN_TO_NI(ch));
2167 	size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader);
2168 
2169 	/* Fill in header regardless of buffer size */
2170 	olh->count = count;
2171 	olh->objsize = sizeof(ipfw_xtable_info);
2172 
2173 	if (size > olh->size) {
2174 		olh->size = size;
2175 		return (ENOMEM);
2176 	}
2177 
2178 	olh->size = size;
2179 
2180 	dta.ch = ch;
2181 	dta.sd = sd;
2182 
2183 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta);
2184 
2185 	return (0);
2186 }
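
/*
 * Illustration of the resize protocol above (not part of the kernel
 * code): a userland consumer is expected to retry with the size the
 * kernel reports back in olh->size whenever ENOMEM is returned,
 * roughly along the lines of ipfw(8)'s IP_FW3 getsockopt wrapper
 * (called do_get3() here purely as an assumption; error handling
 * omitted):
 *
 *	len = sizeof(*olh);
 *	olh->size = len;
 *	while (do_get3(IP_FW_TABLES_XLIST, &olh->opheader, &len) == ENOMEM) {
 *		len = olh->size;	// required size filled in by kernel
 *		olh = realloc(olh, len);
 *		olh->size = len;
 *	}
 */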
2187 
2188 /*
2189  * Dumps all table data
2190  * Data layout (v1)(current):
2191  * Request: [ ipfw_obj_header ], size = ipfw_xtable_info.size
2192  * Reply: [ ipfw_obj_header ipfw_xtable_info ipfw_obj_tentry x N ]
2193  *
2194  * Returns 0 on success
2195  */
2196 static int
2197 dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2198     struct sockopt_data *sd)
2199 {
2200 	struct _ipfw_obj_header *oh;
2201 	ipfw_xtable_info *i;
2202 	struct tid_info ti;
2203 	struct table_config *tc;
2204 	struct table_algo *ta;
2205 	struct dump_args da;
2206 	uint32_t sz;
2207 
2208 	sz = sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2209 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
2210 	if (oh == NULL)
2211 		return (EINVAL);
2212 
2213 	i = (ipfw_xtable_info *)(oh + 1);
2214 	objheader_to_ti(oh, &ti);
2215 
2216 	IPFW_UH_RLOCK(ch);
2217 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2218 		IPFW_UH_RUNLOCK(ch);
2219 		return (ESRCH);
2220 	}
2221 	export_table_info(ch, tc, i);
2222 
2223 	if (sd->valsize < i->size) {
2224 
2225 		/*
2226 		 * The submitted buffer is too small.
2227 		 * We've already filled in the @i structure with
2228 		 * the relevant table info, including size, so we
2229 		 * can return. The buffer will be flushed automatically.
2230 		 */
2231 		IPFW_UH_RUNLOCK(ch);
2232 		return (ENOMEM);
2233 	}
2234 
2235 	/*
2236 	 * Do the actual dump in eXtended format
2237 	 */
2238 	memset(&da, 0, sizeof(da));
2239 	da.ch = ch;
2240 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2241 	da.tc = tc;
2242 	da.sd = sd;
2243 
2244 	ta = tc->ta;
2245 
2246 	ta->foreach(tc->astate, da.ti, dump_table_tentry, &da);
2247 	IPFW_UH_RUNLOCK(ch);
2248 
2249 	return (da.error);
2250 }
2251 
2252 /*
2253  * Dumps all table data
2254  * Data layout (version 0)(legacy):
2255  * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE()
2256  * Reply: [ ipfw_xtable ipfw_table_xentry x N ]
2257  *
2258  * Returns 0 on success
2259  */
2260 static int
2261 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2262     struct sockopt_data *sd)
2263 {
2264 	ipfw_xtable *xtbl;
2265 	struct tid_info ti;
2266 	struct table_config *tc;
2267 	struct table_algo *ta;
2268 	struct dump_args da;
2269 	size_t sz, count;
2270 
2271 	xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable));
2272 	if (xtbl == NULL)
2273 		return (EINVAL);
2274 
2275 	memset(&ti, 0, sizeof(ti));
2276 	ti.uidx = xtbl->tbl;
2277 
2278 	IPFW_UH_RLOCK(ch);
2279 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2280 		IPFW_UH_RUNLOCK(ch);
2281 		return (0);
2282 	}
2283 	count = table_get_count(ch, tc);
2284 	sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable);
2285 
2286 	xtbl->cnt = count;
2287 	xtbl->size = sz;
2288 	xtbl->type = tc->no.type;
2289 	xtbl->tbl = ti.uidx;
2290 
2291 	if (sd->valsize < sz) {
2292 
2293 		/*
2294 		 * The submitted buffer is too small.
2295 		 * We've already filled in the @xtbl structure with
2296 		 * the relevant table info, including size, so we
2297 		 * can return. The buffer will be flushed automatically.
2298 		 */
2299 		IPFW_UH_RUNLOCK(ch);
2300 		return (ENOMEM);
2301 	}
2302 
2303 	/* Do the actual dump in eXtended format */
2304 	memset(&da, 0, sizeof(da));
2305 	da.ch = ch;
2306 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2307 	da.tc = tc;
2308 	da.sd = sd;
2309 
2310 	ta = tc->ta;
2311 
2312 	ta->foreach(tc->astate, da.ti, dump_table_xentry, &da);
2313 	IPFW_UH_RUNLOCK(ch);
2314 
2315 	return (0);
2316 }
2317 
2318 /*
2319  * Legacy function to retrieve number of items in table.
2320  */
2321 static int
2322 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2323     struct sockopt_data *sd)
2324 {
2325 	uint32_t *tbl;
2326 	struct tid_info ti;
2327 	size_t sz;
2328 	int error;
2329 
2330 	sz = sizeof(*op3) + sizeof(uint32_t);
2331 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz);
2332 	if (op3 == NULL)
2333 		return (EINVAL);
2334 
2335 	tbl = (uint32_t *)(op3 + 1);
2336 	memset(&ti, 0, sizeof(ti));
2337 	ti.uidx = *tbl;
2338 	IPFW_UH_RLOCK(ch);
2339 	error = ipfw_count_xtable(ch, &ti, tbl);
2340 	IPFW_UH_RUNLOCK(ch);
2341 	return (error);
2342 }
2343 
2344 /*
2345  * Legacy IP_FW_TABLE_GETSIZE handler
2346  */
2347 int
2348 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2349 {
2350 	struct table_config *tc;
2351 
2352 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2353 		return (ESRCH);
2354 	*cnt = table_get_count(ch, tc);
2355 	return (0);
2356 }
2357 
2358 /*
2359  * Legacy IP_FW_TABLE_XGETSIZE handler
2360  */
2361 int
2362 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2363 {
2364 	struct table_config *tc;
2365 	uint32_t count;
2366 
2367 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) {
2368 		*cnt = 0;
2369 		return (0); /* 'table all list' requires success */
2370 	}
2371 
2372 	count = table_get_count(ch, tc);
2373 	*cnt = count * sizeof(ipfw_table_xentry);
2374 	if (count > 0)
2375 		*cnt += sizeof(ipfw_xtable);
2376 	return (0);
2377 }
2378 
2379 static int
2380 dump_table_entry(void *e, void *arg)
2381 {
2382 	struct dump_args *da;
2383 	struct table_config *tc;
2384 	struct table_algo *ta;
2385 	ipfw_table_entry *ent;
2386 	struct table_value *pval;
2387 	int error;
2388 
2389 	da = (struct dump_args *)arg;
2390 
2391 	tc = da->tc;
2392 	ta = tc->ta;
2393 
2394 	/* Output buffer is full, stop the dump */
2395 	if (da->cnt == da->size)
2396 		return (1);
2397 	ent = da->ent++;
2398 	ent->tbl = da->uidx;
2399 	da->cnt++;
2400 
2401 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2402 	if (error != 0)
2403 		return (error);
2404 
2405 	ent->addr = da->tent.k.addr.s_addr;
2406 	ent->masklen = da->tent.masklen;
2407 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2408 	ent->value = ipfw_export_table_value_legacy(pval);
2409 
2410 	return (0);
2411 }
2412 
2413 /*
2414  * Dumps table in pre-8.1 legacy format.
2415  */
2416 int
2417 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti,
2418     ipfw_table *tbl)
2419 {
2420 	struct table_config *tc;
2421 	struct table_algo *ta;
2422 	struct dump_args da;
2423 
2424 	tbl->cnt = 0;
2425 
2426 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2427 		return (0);	/* XXX: We should return ESRCH */
2428 
2429 	ta = tc->ta;
2430 
2431 	/* This dump format supports IPv4 only */
2432 	if (tc->no.type != IPFW_TABLE_ADDR)
2433 		return (0);
2434 
2435 	memset(&da, 0, sizeof(da));
2436 	da.ch = ch;
2437 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2438 	da.tc = tc;
2439 	da.ent = &tbl->ent[0];
2440 	da.size = tbl->size;
2441 
2442 	tbl->cnt = 0;
2443 	ta->foreach(tc->astate, da.ti, dump_table_entry, &da);
2444 	tbl->cnt = da.cnt;
2445 
2446 	return (0);
2447 }
2448 
2449 /*
2450  * Dumps table entry in eXtended format (v1)(current).
2451  */
2452 static int
2453 dump_table_tentry(void *e, void *arg)
2454 {
2455 	struct dump_args *da;
2456 	struct table_config *tc;
2457 	struct table_algo *ta;
2458 	struct table_value *pval;
2459 	ipfw_obj_tentry *tent;
2460 	int error;
2461 
2462 	da = (struct dump_args *)arg;
2463 
2464 	tc = da->tc;
2465 	ta = tc->ta;
2466 
2467 	tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent));
2468 	/* Out of memory, returning */
2469 	if (tent == NULL) {
2470 		da->error = ENOMEM;
2471 		return (1);
2472 	}
2473 	tent->head.length = sizeof(ipfw_obj_tentry);
2474 	tent->idx = da->uidx;
2475 
2476 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2477 	if (error != 0)
2478 		return (error);
2479 
2480 	pval = get_table_value(da->ch, da->tc, tent->v.kidx);
2481 	ipfw_export_table_value_v1(pval, &tent->v.value);
2482 
2483 	return (0);
2484 }
2485 
2486 /*
2487  * Dumps table entry in eXtended format (v0).
2488  */
2489 static int
2490 dump_table_xentry(void *e, void *arg)
2491 {
2492 	struct dump_args *da;
2493 	struct table_config *tc;
2494 	struct table_algo *ta;
2495 	ipfw_table_xentry *xent;
2496 	ipfw_obj_tentry *tent;
2497 	struct table_value *pval;
2498 	int error;
2499 
2500 	da = (struct dump_args *)arg;
2501 
2502 	tc = da->tc;
2503 	ta = tc->ta;
2504 
2505 	xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent));
2506 	/* Out of memory, returning */
2507 	if (xent == NULL)
2508 		return (1);
2509 	xent->len = sizeof(ipfw_table_xentry);
2510 	xent->tbl = da->uidx;
2511 
2512 	memset(&da->tent, 0, sizeof(da->tent));
2513 	tent = &da->tent;
2514 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2515 	if (error != 0)
2516 		return (error);
2517 
2518 	/* Convert current format to previous one */
2519 	xent->masklen = tent->masklen;
2520 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2521 	xent->value = ipfw_export_table_value_legacy(pval);
2522 	/* Legacy format embeds IPv4 keys in the IPv6 address field */
2523 	if (tc->no.type == IPFW_TABLE_ADDR && tent->subtype == AF_INET) {
2524 		xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr;
2525 		xent->flags = IPFW_TCF_INET;
2526 	} else
2527 		memcpy(&xent->k, &tent->k, sizeof(xent->k));
2528 
2529 	return (0);
2530 }
2531 
2532 /*
2533  * Helper function to export table algo data
2534  * to tentry format before calling user function.
2535  *
2536  * Returns 0 on success.
2537  */
2538 static int
2539 prepare_table_tentry(void *e, void *arg)
2540 {
2541 	struct dump_args *da;
2542 	struct table_config *tc;
2543 	struct table_algo *ta;
2544 	int error;
2545 
2546 	da = (struct dump_args *)arg;
2547 
2548 	tc = da->tc;
2549 	ta = tc->ta;
2550 
2551 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2552 	if (error != 0)
2553 		return (error);
2554 
2555 	da->f(&da->tent, da->farg);
2556 
2557 	return (0);
2558 }
2559 
2560 /*
2561  * Allow external consumers to read table entries in standard format.
2562  */
2563 int
2564 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx,
2565     ta_foreach_f *f, void *arg)
2566 {
2567 	struct namedobj_instance *ni;
2568 	struct table_config *tc;
2569 	struct table_algo *ta;
2570 	struct dump_args da;
2571 
2572 	ni = CHAIN_TO_NI(ch);
2573 
2574 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
2575 	if (tc == NULL)
2576 		return (ESRCH);
2577 
2578 	ta = tc->ta;
2579 
2580 	memset(&da, 0, sizeof(da));
2581 	da.ch = ch;
2582 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2583 	da.tc = tc;
2584 	da.f = f;
2585 	da.farg = arg;
2586 
2587 	ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da);
2588 
2589 	return (0);
2590 }
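
/*
 * Minimal consumer sketch for ipfw_foreach_table_tentry() (illustrative
 * only; the callback name and counting logic below are invented for the
 * example).  Each record is handed to the callback already converted
 * to ipfw_obj_tentry:
 *
 *	static int
 *	count_inet_entries(void *e, void *arg)
 *	{
 *		ipfw_obj_tentry *tent = e;
 *		uint32_t *cnt = arg;
 *
 *		if (tent->subtype == AF_INET)
 *			(*cnt)++;
 *		return (0);
 *	}
 *
 *	uint32_t cnt = 0;
 *	ipfw_foreach_table_tentry(ch, kidx, count_inet_entries, &cnt);
 */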
2591 
2592 /*
2593  * Table algorithms
2594  */
2595 
2596 /*
2597  * Finds algorithm by index, table type or supplied name.
2598  *
2599  * Returns pointer to algo or NULL.
2600  */
2601 static struct table_algo *
2602 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name)
2603 {
2604 	int i, l;
2605 	struct table_algo *ta;
2606 
2607 	if (ti->type > IPFW_TABLE_MAXTYPE)
2608 		return (NULL);
2609 
2610 	/* Search by index */
2611 	if (ti->atype != 0) {
2612 		if (ti->atype > tcfg->algo_count)
2613 			return (NULL);
2614 		return (tcfg->algo[ti->atype]);
2615 	}
2616 
2617 	if (name == NULL) {
2618 		/* Return default algorithm for given type if set */
2619 		return (tcfg->def_algo[ti->type]);
2620 	}
2621 
2622 	/* Search by name */
2623 	/* TODO: better search */
2624 	for (i = 1; i <= tcfg->algo_count; i++) {
2625 		ta = tcfg->algo[i];
2626 
2627 		/*
2628 		 * One can supply additional algorithm
2629 		 * parameters, so we compare only the first word
2630 		 * of the supplied name:
2631 		 * 'addr:chash hsize=32'
2632 		 * '^^^^^^^^^'
2633 		 *
2634 		 */
2635 		l = strlen(ta->name);
2636 		if (strncmp(name, ta->name, l) != 0)
2637 			continue;
2638 		if (name[l] != '\0' && name[l] != ' ')
2639 			continue;
2640 		/* Check if we're requesting proper table type */
2641 		if (ti->type != 0 && ti->type != ta->type)
2642 			return (NULL);
2643 		return (ta);
2644 	}
2645 
2646 	return (NULL);
2647 }
2648 
2649 /*
2650  * Register new table algo @ta.
2651  * Stores algo id inside @idx.
2652  *
2653  * Returns 0 on success.
2654  */
2655 int
2656 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size,
2657     int *idx)
2658 {
2659 	struct tables_config *tcfg;
2660 	struct table_algo *ta_new;
2661 	size_t sz;
2662 
2663 	if (size > sizeof(struct table_algo))
2664 		return (EINVAL);
2665 
2666 	/* Check for the required on-stack size for add/del */
2667 	sz = roundup2(ta->ta_buf_size, sizeof(void *));
2668 	if (sz > TA_BUF_SZ)
2669 		return (EINVAL);
2670 
2671 	KASSERT(ta->type <= IPFW_TABLE_MAXTYPE,("Increase IPFW_TABLE_MAXTYPE"));
2672 
2673 	/* Copy algorithm data to stable storage. */
2674 	ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO);
2675 	memcpy(ta_new, ta, size);
2676 
2677 	tcfg = CHAIN_TO_TCFG(ch);
2678 
2679 	KASSERT(tcfg->algo_count < 255, ("Increase algo array size"));
2680 
2681 	tcfg->algo[++tcfg->algo_count] = ta_new;
2682 	ta_new->idx = tcfg->algo_count;
2683 
2684 	/* Set algorithm as default one for given type */
2685 	if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 &&
2686 	    tcfg->def_algo[ta_new->type] == NULL)
2687 		tcfg->def_algo[ta_new->type] = ta_new;
2688 
2689 	*idx = ta_new->idx;
2690 
2691 	return (0);
2692 }
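
/*
 * Rough registration sketch: algorithm implementations (see
 * ip_fw_table_algo.c) describe themselves with a struct table_algo
 * template and register it at init time, roughly like the fragment
 * below.  Field and callback names are quoted from memory and
 * abridged, so treat them as illustrative rather than exact:
 *
 *	static struct table_algo addr_radix = {
 *		.name		= "addr:radix",
 *		.type		= IPFW_TABLE_ADDR,
 *		.flags		= TA_FLAG_DEFAULT,
 *		.ta_buf_size	= sizeof(struct ta_buf_radix),
 *		.init		= ta_init_radix,
 *		// remaining callbacks omitted
 *	};
 *
 *	ipfw_add_table_algo(ch, &addr_radix, sizeof(addr_radix),
 *	    &addr_radix.idx);
 */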
2693 
2694 /*
2695  * Unregisters table algo using @idx as id.
2696  * XXX: It is NOT safe to call this function in any place
2697  * other than ipfw instance destroy handler.
2698  */
2699 void
2700 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx)
2701 {
2702 	struct tables_config *tcfg;
2703 	struct table_algo *ta;
2704 
2705 	tcfg = CHAIN_TO_TCFG(ch);
2706 
2707 	KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d",
2708 	    idx, tcfg->algo_count));
2709 
2710 	ta = tcfg->algo[idx];
2711 	KASSERT(ta != NULL, ("algo idx %d is NULL", idx));
2712 
2713 	if (tcfg->def_algo[ta->type] == ta)
2714 		tcfg->def_algo[ta->type] = NULL;
2715 
2716 	free(ta, M_IPFW);
2717 }
2718 
2719 /*
2720  * Lists all table algorithms currently available.
2721  * Data layout (v0)(current):
2722  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2723  * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ]
2724  *
2725  * Returns 0 on success
2726  */
2727 static int
2728 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2729     struct sockopt_data *sd)
2730 {
2731 	struct _ipfw_obj_lheader *olh;
2732 	struct tables_config *tcfg;
2733 	ipfw_ta_info *i;
2734 	struct table_algo *ta;
2735 	uint32_t count, n, size;
2736 
2737 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
2738 	if (olh == NULL)
2739 		return (EINVAL);
2740 	if (sd->valsize < olh->size)
2741 		return (EINVAL);
2742 
2743 	IPFW_UH_RLOCK(ch);
2744 	tcfg = CHAIN_TO_TCFG(ch);
2745 	count = tcfg->algo_count;
2746 	size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader);
2747 
2748 	/* Fill in header regardless of buffer size */
2749 	olh->count = count;
2750 	olh->objsize = sizeof(ipfw_ta_info);
2751 
2752 	if (size > olh->size) {
2753 		olh->size = size;
2754 		IPFW_UH_RUNLOCK(ch);
2755 		return (ENOMEM);
2756 	}
2757 	olh->size = size;
2758 
2759 	for (n = 1; n <= count; n++) {
2760 		i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2761 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
2762 		ta = tcfg->algo[n];
2763 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2764 		i->type = ta->type;
2765 		i->refcnt = ta->refcnt;
2766 	}
2767 
2768 	IPFW_UH_RUNLOCK(ch);
2769 
2770 	return (0);
2771 }
2772 
2773 /*
2774  * Tables rewriting code
2775  */
2776 
2777 /*
2778  * Determine table number and lookup type for @cmd.
2779  * Fill @tbl and @type with appropriate values.
2780  * Fill @puidx and @ptype with appropriate values.
2781  */
2782 static int
2783 classify_table_opcode(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
2784 {
2785 	ipfw_insn_if *cmdif;
2786 	int skip;
2787 	uint16_t v;
2788 
2789 	skip = 1;
2790 
2791 	switch (cmd->opcode) {
2792 	case O_IP_SRC_LOOKUP:
2793 	case O_IP_DST_LOOKUP:
2794 		/* Basic IPv4/IPv6 or u32 lookups */
2795 		*puidx = cmd->arg1;
2796 		/* Assume ADDR by default */
2797 		*ptype = IPFW_TABLE_ADDR;
2798 		skip = 0;
2799 
2800 		if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) {
2801 			/*
2802 			 * generic lookup. The key must be
2803 			 * in 32bit big-endian format.
2804 			 */
2805 			v = ((ipfw_insn_u32 *)cmd)->d[1];
2806 			switch (v) {
2807 			case 0:
2808 			case 1:
2809 				/* IPv4 src/dst */
2810 				break;
2811 			case 2:
2812 			case 3:
2813 				/* src/dst port */
2814 				*ptype = IPFW_TABLE_NUMBER;
2815 				break;
2816 			case 4:
2817 				/* uid/gid */
2818 				*ptype = IPFW_TABLE_NUMBER;
2819 				break;
2820 			case 5:
2821 				/* jid */
2822 				*ptype = IPFW_TABLE_NUMBER;
2823 				break;
2824 			case 6:
2825 				/* dscp */
2826 				*ptype = IPFW_TABLE_NUMBER;
2827 				break;
2828 			}
2829 		}
2830 		break;
2831 	case O_XMIT:
2832 	case O_RECV:
2833 	case O_VIA:
2834 		/* Interface table, possibly */
2835 		cmdif = (ipfw_insn_if *)cmd;
2836 		if (cmdif->name[0] != '\1')
2837 			break;
2838 
2839 		*ptype = IPFW_TABLE_INTERFACE;
2840 		*puidx = cmdif->p.kidx;
2841 		skip = 0;
2842 		break;
2843 	case O_IP_FLOW_LOOKUP:
2844 		*puidx = cmd->arg1;
2845 		*ptype = IPFW_TABLE_FLOW;
2846 		skip = 0;
2847 		break;
2848 	}
2849 
2850 	return (skip);
2851 }
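
/*
 * Descriptive examples for the classification above (rule syntax as
 * accepted by ipfw(8)):
 *
 *	"allow ip from table(13) to any"
 *		-> O_IP_SRC_LOOKUP, *puidx = 13, *ptype = IPFW_TABLE_ADDR
 *	"allow ip from any to any via table(9)"
 *		-> O_VIA with cmdif->name[0] == '\1', *puidx = 9,
 *		   *ptype = IPFW_TABLE_INTERFACE
 *	"allow ip from any to any flow table(21)"
 *		-> O_IP_FLOW_LOOKUP, *puidx = 21, *ptype = IPFW_TABLE_FLOW
 */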
2852 
2853 /*
2854  * Sets a new table index for the given opcode.
2855  * Assumes the same opcodes as classify_table_opcode().
2856  */
2857 static void
2858 update_table_opcode(ipfw_insn *cmd, uint16_t idx)
2859 {
2860 	ipfw_insn_if *cmdif;
2861 
2862 	switch (cmd->opcode) {
2863 	case O_IP_SRC_LOOKUP:
2864 	case O_IP_DST_LOOKUP:
2865 		/* Basic IPv4/IPv6 or u32 lookups */
2866 		cmd->arg1 = idx;
2867 		break;
2868 	case O_XMIT:
2869 	case O_RECV:
2870 	case O_VIA:
2871 		/* Interface table, possibly */
2872 		cmdif = (ipfw_insn_if *)cmd;
2873 		cmdif->p.kidx = idx;
2874 		break;
2875 	case O_IP_FLOW_LOOKUP:
2876 		cmd->arg1 = idx;
2877 		break;
2878 	}
2879 }
2880 
2881 /*
2882  * Checks table name for validity.
2883  * Enforces basic length checks; the rest
2884  * should be done in userland.
2885  *
2886  * Returns 0 if name is considered valid.
2887  */
2888 int
2889 ipfw_check_table_name(char *name)
2890 {
2891 	int nsize;
2892 	ipfw_obj_ntlv *ntlv = NULL;
2893 
2894 	nsize = sizeof(ntlv->name);
2895 
2896 	if (strnlen(name, nsize) == nsize)
2897 		return (EINVAL);
2898 
2899 	if (name[0] == '\0')
2900 		return (EINVAL);
2901 
2902 	/*
2903 	 * TODO: do some more complicated checks
2904 	 */
2905 
2906 	return (0);
2907 }
2908 
2909 /*
2910  * Find tablename TLV by @uidx.
2911  * Check @tlvs for valid data inside.
2912  *
2913  * Returns pointer to found TLV or NULL.
2914  */
2915 static ipfw_obj_ntlv *
2916 find_name_tlv(void *tlvs, int len, uint16_t uidx)
2917 {
2918 	ipfw_obj_ntlv *ntlv;
2919 	uintptr_t pa, pe;
2920 	int l;
2921 
2922 	pa = (uintptr_t)tlvs;
2923 	pe = pa + len;
2924 	l = 0;
2925 	for (; pa < pe; pa += l) {
2926 		ntlv = (ipfw_obj_ntlv *)pa;
2927 		l = ntlv->head.length;
2928 
2929 		if (l != sizeof(*ntlv))
2930 			return (NULL);
2931 
2932 		if (ntlv->head.type != IPFW_TLV_TBL_NAME)
2933 			continue;
2934 
2935 		if (ntlv->idx != uidx)
2936 			continue;
2937 
2938 		if (ipfw_check_table_name(ntlv->name) != 0)
2939 			return (NULL);
2940 
2941 		return (ntlv);
2942 	}
2943 
2944 	return (NULL);
2945 }
2946 
2947 /*
2948  * Finds table config based on either legacy index
2949  * or name in ntlv.
2950  * Note @ti structure contains unchecked data from userland.
2951  *
2952  * Returns pointer to table_config or NULL.
2953  */
2954 static struct table_config *
2955 find_table(struct namedobj_instance *ni, struct tid_info *ti)
2956 {
2957 	char *name, bname[16];
2958 	struct named_object *no;
2959 	ipfw_obj_ntlv *ntlv;
2960 	uint32_t set;
2961 
2962 	if (ti->tlvs != NULL) {
2963 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
2964 		if (ntlv == NULL)
2965 			return (NULL);
2966 		name = ntlv->name;
2967 
2968 		/*
2969 		 * Use set provided by @ti instead of @ntlv one.
2970 		 * This is needed due to different sets behavior
2971 		 * controlled by V_fw_tables_sets.
2972 		 */
2973 		set = ti->set;
2974 	} else {
2975 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
2976 		name = bname;
2977 		set = 0;
2978 	}
2979 
2980 	no = ipfw_objhash_lookup_name(ni, set, name);
2981 
2982 	return ((struct table_config *)no);
2983 }
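
/*
 * Note: legacy clients reference tables by number only, so such tables
 * live under their decimal string representation in set 0; e.g. a rule
 * using "table(5)" without a name TLV resolves to the named object "5".
 */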
2984 
2985 /*
2986  * Allocate new table config structure using
2987  * the specified algorithm @ta and @aname.
2988  *
2989  * Returns pointer to config or NULL.
2990  */
2991 static struct table_config *
2992 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti,
2993     struct table_algo *ta, char *aname, uint8_t tflags)
2994 {
2995 	char *name, bname[16];
2996 	struct table_config *tc;
2997 	int error;
2998 	ipfw_obj_ntlv *ntlv;
2999 	uint32_t set;
3000 
3001 	if (ti->tlvs != NULL) {
3002 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
3003 		if (ntlv == NULL)
3004 			return (NULL);
3005 		name = ntlv->name;
3006 		set = ntlv->set;
3007 	} else {
3008 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
3009 		name = bname;
3010 		set = 0;
3011 	}
3012 
3013 	tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO);
3014 	tc->no.name = tc->tablename;
3015 	tc->no.type = ta->type;
3016 	tc->no.set = set;
3017 	tc->tflags = tflags;
3018 	tc->ta = ta;
3019 	strlcpy(tc->tablename, name, sizeof(tc->tablename));
3020 	/* Set "shared" value type by default */
3021 	tc->vshared = 1;
3022 
3023 	if (ti->tlvs == NULL) {
3024 		tc->no.compat = 1;
3025 		tc->no.uidx = ti->uidx;
3026 	}
3027 
3028 	/* Preallocate data structures for new tables */
3029 	error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags);
3030 	if (error != 0) {
3031 		free(tc, M_IPFW);
3032 		return (NULL);
3033 	}
3034 
3035 	return (tc);
3036 }
3037 
3038 /*
3039  * Destroys table state and config.
3040  */
3041 static void
3042 free_table_config(struct namedobj_instance *ni, struct table_config *tc)
3043 {
3044 
3045 	KASSERT(tc->linked == 0, ("free() on linked config"));
3046 
3047 	/*
3048 	 * We're using ta without any locking/referencing.
3049 	 * TODO: fix this if we're going to use unloadable algos.
3050 	 */
3051 	tc->ta->destroy(tc->astate, &tc->ti_copy);
3052 	free(tc, M_IPFW);
3053 }
3054 
3055 /*
3056  * Links @tc to the @ch table named instance.
3057  * Sets appropriate type/states in the @ch table info.
3058  */
3059 static void
3060 link_table(struct ip_fw_chain *ch, struct table_config *tc)
3061 {
3062 	struct namedobj_instance *ni;
3063 	struct table_info *ti;
3064 	uint16_t kidx;
3065 
3066 	IPFW_UH_WLOCK_ASSERT(ch);
3067 	IPFW_WLOCK_ASSERT(ch);
3068 
3069 	ni = CHAIN_TO_NI(ch);
3070 	kidx = tc->no.kidx;
3071 
3072 	ipfw_objhash_add(ni, &tc->no);
3073 
3074 	ti = KIDX_TO_TI(ch, kidx);
3075 	*ti = tc->ti_copy;
3076 
3077 	/* Notify algo on real @ti address */
3078 	if (tc->ta->change_ti != NULL)
3079 		tc->ta->change_ti(tc->astate, ti);
3080 
3081 	tc->linked = 1;
3082 	tc->ta->refcnt++;
3083 }
3084 
3085 /*
3086  * Unlinks @tc from the @ch table named instance.
3087  * Zeroes states in @ch and stores them in @tc.
3088  */
3089 static void
3090 unlink_table(struct ip_fw_chain *ch, struct table_config *tc)
3091 {
3092 	struct namedobj_instance *ni;
3093 	struct table_info *ti;
3094 	uint16_t kidx;
3095 
3096 	IPFW_UH_WLOCK_ASSERT(ch);
3097 	IPFW_WLOCK_ASSERT(ch);
3098 
3099 	ni = CHAIN_TO_NI(ch);
3100 	kidx = tc->no.kidx;
3101 
3102 	/* Clear state. @ti copy is already saved inside @tc */
3103 	ipfw_objhash_del(ni, &tc->no);
3104 	ti = KIDX_TO_TI(ch, kidx);
3105 	memset(ti, 0, sizeof(struct table_info));
3106 	tc->linked = 0;
3107 	tc->ta->refcnt--;
3108 
3109 	/* Notify algo on real @ti address */
3110 	if (tc->ta->change_ti != NULL)
3111 		tc->ta->change_ti(tc->astate, NULL);
3112 }
3113 
3114 struct swap_table_args {
3115 	int set;
3116 	int new_set;
3117 	int mv;
3118 };
3119 
3120 /*
3121  * Change set for each matching table.
3122  *
3123  * Ensure we process each table only once by setting/checking the
3124  * ochanged field.
3125  */
3126 static void
3127 swap_table_set(struct namedobj_instance *ni, struct named_object *no,
3128     void *arg)
3129 {
3130 	struct table_config *tc;
3131 	struct swap_table_args *sta;
3132 
3133 	tc = (struct table_config *)no;
3134 	sta = (struct swap_table_args *)arg;
3135 
3136 	if (no->set != sta->set && (no->set != sta->new_set || sta->mv != 0))
3137 		return;
3138 
3139 	if (tc->ochanged != 0)
3140 		return;
3141 
3142 	tc->ochanged = 1;
3143 	ipfw_objhash_del(ni, no);
3144 	if (no->set == sta->set)
3145 		no->set = sta->new_set;
3146 	else
3147 		no->set = sta->set;
3148 	ipfw_objhash_add(ni, no);
3149 }
3150 
3151 /*
3152  * Cleans up the ochanged field for all tables.
3153  */
3154 static void
3155 clean_table_set_data(struct namedobj_instance *ni, struct named_object *no,
3156     void *arg)
3157 {
3158 	struct table_config *tc;
3159 	struct swap_table_args *sta;
3160 
3161 	tc = (struct table_config *)no;
3162 	sta = (struct swap_table_args *)arg;
3163 
3164 	tc->ochanged = 0;
3165 }
3166 
3167 /*
3168  * Swaps tables within two sets.
3169  */
3170 void
3171 ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t set,
3172     uint32_t new_set, int mv)
3173 {
3174 	struct swap_table_args sta;
3175 
3176 	IPFW_UH_WLOCK_ASSERT(ch);
3177 
3178 	sta.set = set;
3179 	sta.new_set = new_set;
3180 	sta.mv = mv;
3181 
3182 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), swap_table_set, &sta);
3183 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), clean_table_set_data, &sta);
3184 }
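
/*
 * Example (descriptive): ipfw_swap_tables_sets(ch, 1, 2, 0) exchanges
 * every table between sets 1 and 2, while the same call with mv != 0
 * only moves tables from set 1 into set 2.  The ochanged flag set in
 * swap_table_set() and cleared by clean_table_set_data() ensures a
 * table re-hashed into its new set is not picked up a second time by
 * the same ipfw_objhash_foreach() pass.
 */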
3185 
3186 /*
3187  * Move all tables referenced by rules in @rt to set @new_set.
3188  * Makes sure that all relevant tables are referenced ONLY by the given rules.
3189  *
3190  * Returns 0 on success.
3191  */
3192 int
3193 ipfw_move_tables_sets(struct ip_fw_chain *ch, ipfw_range_tlv *rt,
3194     uint32_t new_set)
3195 {
3196 	struct ip_fw *rule;
3197 	struct table_config *tc;
3198 	struct named_object *no;
3199 	struct namedobj_instance *ni;
3200 	int bad, i, l, cmdlen;
3201 	uint16_t kidx;
3202 	uint8_t type;
3203 	ipfw_insn *cmd;
3204 
3205 	IPFW_UH_WLOCK_ASSERT(ch);
3206 
3207 	ni = CHAIN_TO_NI(ch);
3208 
3209 	/* Stage 1: count number of references by given rules */
3210 	for (i = 0; i < ch->n_rules - 1; i++) {
3211 		rule = ch->map[i];
3212 		if (ipfw_match_range(rule, rt) == 0)
3213 			continue;
3214 
3215 		l = rule->cmd_len;
3216 		cmd = rule->cmd;
3217 		cmdlen = 0;
3218 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3219 			cmdlen = F_LEN(cmd);
3220 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3221 				continue;
3222 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3223 			KASSERT(no != NULL,
3224 			    ("objhash lookup failed on index %d", kidx));
3225 			tc = (struct table_config *)no;
3226 			tc->ocount++;
3227 		}
3228 
3229 	}
3230 
3231 	/* Stage 2: verify "ownership" */
3232 	bad = 0;
3233 	for (i = 0; i < ch->n_rules - 1; i++) {
3234 		rule = ch->map[i];
3235 		if (ipfw_match_range(rule, rt) == 0)
3236 			continue;
3237 
3238 		l = rule->cmd_len;
3239 		cmd = rule->cmd;
3240 		cmdlen = 0;
3241 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3242 			cmdlen = F_LEN(cmd);
3243 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3244 				continue;
3245 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3246 			KASSERT(no != NULL,
3247 			    ("objhash lookup failed on index %d", kidx));
3248 			tc = (struct table_config *)no;
3249 			if (tc->no.refcnt != tc->ocount) {
3250 
3251 				/*
3252 				 * Reference counts differ:
3253 				 * other rule(s) are holding a reference to the
3254 				 * given table, so its set cannot be changed.
3255 				 *
3256 				 * Note that refcnt may account for references
3257 				 * from some rules that are about to be added.
3258 				 * Since we don't know their numbers (or even
3259 				 * whether they will be added) it is perfectly
3260 				 * OK to return an error here.
3261 				 */
3262 				bad = 1;
3263 				break;
3264 			}
3265 		}
3266 
3267 		if (bad != 0)
3268 			break;
3269 	}
3270 
3271 	/* Stage 3: change set or cleanup */
3272 	for (i = 0; i < ch->n_rules - 1; i++) {
3273 		rule = ch->map[i];
3274 		if (ipfw_match_range(rule, rt) == 0)
3275 			continue;
3276 
3277 		l = rule->cmd_len;
3278 		cmd = rule->cmd;
3279 		cmdlen = 0;
3280 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3281 			cmdlen = F_LEN(cmd);
3282 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3283 				continue;
3284 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3285 			KASSERT(no != NULL,
3286 			    ("objhash lookup failed on index %d", kidx));
3287 			tc = (struct table_config *)no;
3288 
3289 			tc->ocount = 0;
3290 			if (bad != 0)
3291 				continue;
3292 
3293 			/* Actually change set. */
3294 			ipfw_objhash_del(ni, no);
3295 			no->set = new_set;
3296 			ipfw_objhash_add(ni, no);
3297 		}
3298 	}
3299 
3300 	return (bad);
3301 }
3302 
3303 /*
3304  * Finds and bumps refcount for tables referenced by given @rule.
3305  * Auto-creates non-existing tables.
3306  * Fills in @oib array with userland/kernel indexes.
3307  * First free oidx pointer is saved back in @oib.
3308  *
3309  * Returns 0 on success.
3310  */
3311 static int
3312 find_ref_rule_tables(struct ip_fw_chain *ch, struct ip_fw *rule,
3313     struct rule_check_info *ci, struct obj_idx **oib, struct tid_info *ti)
3314 {
3315 	struct table_config *tc;
3316 	struct namedobj_instance *ni;
3317 	struct named_object *no;
3318 	int cmdlen, error, l, numnew;
3319 	uint16_t kidx;
3320 	ipfw_insn *cmd;
3321 	struct obj_idx *pidx, *pidx_first, *p;
3322 
3323 	pidx_first = *oib;
3324 	pidx = pidx_first;
3325 	l = rule->cmd_len;
3326 	cmd = rule->cmd;
3327 	cmdlen = 0;
3328 	error = 0;
3329 	numnew = 0;
3330 
3331 	IPFW_UH_WLOCK(ch);
3332 	ni = CHAIN_TO_NI(ch);
3333 
3334 	/* Increase refcount on each existing referenced table. */
3335 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3336 		cmdlen = F_LEN(cmd);
3337 
3338 		if (classify_table_opcode(cmd, &ti->uidx, &ti->type) != 0)
3339 			continue;
3340 
3341 		pidx->uidx = ti->uidx;
3342 		pidx->type = ti->type;
3343 
3344 		if ((tc = find_table(ni, ti)) != NULL) {
3345 			if (tc->no.type != ti->type) {
3346 				/* Incompatible types */
3347 				error = EINVAL;
3348 				break;
3349 			}
3350 
3351 			/* Reference found table and save kidx */
3352 			tc->no.refcnt++;
3353 			pidx->kidx = tc->no.kidx;
3354 			pidx++;
3355 			continue;
3356 		}
3357 
3358 		/*
3359 		 * Compability stuff for old clients:
3360 		 * Compatibility stuff for old clients:
3361 		 */
3362 		pidx++;
3363 		numnew++;
3364 	}
3365 
3366 	if (error != 0) {
3367 		/* Unref everything we have already done */
3368 		for (p = *oib; p < pidx; p++) {
3369 			if (p->kidx == 0)
3370 				continue;
3371 
3372 			/* Find & unref by existing idx */
3373 			no = ipfw_objhash_lookup_kidx(ni, p->kidx);
3374 			KASSERT(no != NULL, ("Ref'd table %d disappeared",
3375 			    p->kidx));
3376 
3377 			no->refcnt--;
3378 		}
3379 	}
3380 
3381 	IPFW_UH_WUNLOCK(ch);
3382 
3383 	if (numnew == 0) {
3384 		*oib = pidx;
3385 		return (error);
3386 	}
3387 
3388 	/*
3389 	 * Compatibility stuff: do actual creation for non-existing,
3390 	 * but referenced tables.
3391 	 */
3392 	for (p = pidx_first; p < pidx; p++) {
3393 		if (p->kidx != 0)
3394 			continue;
3395 
3396 		ti->uidx = p->uidx;
3397 		ti->type = p->type;
3398 		ti->atype = 0;
3399 
3400 		error = create_table_compat(ch, ti, &kidx);
3401 		if (error == 0) {
3402 			p->kidx = kidx;
3403 			continue;
3404 		}
3405 
3406 		/* Error. We have to drop references */
3407 		IPFW_UH_WLOCK(ch);
3408 		for (p = pidx_first; p < pidx; p++) {
3409 			if (p->kidx == 0)
3410 				continue;
3411 
3412 			/* Find & unref by existing idx */
3413 			no = ipfw_objhash_lookup_kidx(ni, p->kidx);
3414 			KASSERT(no != NULL, ("Ref'd table %d disappeared",
3415 			    p->kidx));
3416 
3417 			no->refcnt--;
3418 		}
3419 		IPFW_UH_WUNLOCK(ch);
3420 
3421 		return (error);
3422 	}
3423 
3424 	*oib = pidx;
3425 
3426 	return (error);
3427 }
3428 
3429 /*
3430  * Remove references from every table used in @rule.
3431  */
3432 void
3433 ipfw_unref_rule_tables(struct ip_fw_chain *chain, struct ip_fw *rule)
3434 {
3435 	int cmdlen, l;
3436 	ipfw_insn *cmd;
3437 	struct namedobj_instance *ni;
3438 	struct named_object *no;
3439 	uint16_t kidx;
3440 	uint8_t type;
3441 
3442 	IPFW_UH_WLOCK_ASSERT(chain);
3443 	ni = CHAIN_TO_NI(chain);
3444 
3445 	l = rule->cmd_len;
3446 	cmd = rule->cmd;
3447 	cmdlen = 0;
3448 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3449 		cmdlen = F_LEN(cmd);
3450 
3451 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
3452 			continue;
3453 
3454 		no = ipfw_objhash_lookup_kidx(ni, kidx);
3455 
3456 		KASSERT(no != NULL, ("table id %d not found", kidx));
3457 		KASSERT(no->type == type, ("wrong type %d (%d) for table id %d",
3458 		    no->type, type, kidx));
3459 		KASSERT(no->refcnt > 0, ("refcount for table %d is %d",
3460 		    kidx, no->refcnt));
3461 
3462 		no->refcnt--;
3463 	}
3464 }
3465 
3466 /*
3467  * Compatibility function for old ipfw(8) binaries.
3468  * Rewrites table kernel indices with userland ones.
3469  * Convert tables matching '/^\d+$/' to their atoi() value.
3470  * Use number 65535 for other tables.
3471  *
3472  * Returns 0 on success.
3473  */
3474 int
3475 ipfw_rewrite_table_kidx(struct ip_fw_chain *chain, struct ip_fw_rule0 *rule)
3476 {
3477 	int cmdlen, error, l;
3478 	ipfw_insn *cmd;
3479 	uint16_t kidx, uidx;
3480 	uint8_t type;
3481 	struct named_object *no;
3482 	struct namedobj_instance *ni;
3483 
3484 	ni = CHAIN_TO_NI(chain);
3485 	error = 0;
3486 
3487 	l = rule->cmd_len;
3488 	cmd = rule->cmd;
3489 	cmdlen = 0;
3490 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3491 		cmdlen = F_LEN(cmd);
3492 
3493 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
3494 			continue;
3495 
3496 		if ((no = ipfw_objhash_lookup_kidx(ni, kidx)) == NULL)
3497 			return (1);
3498 
3499 		uidx = no->uidx;
3500 		if (no->compat == 0) {
3501 
3502 			/*
3503 			 * We are called via legacy opcode.
3504 			 * Save the error and report a fake table number
3505 			 * so that ipfw(8) does not hang.
3506 			 */
3507 			uidx = 65535;
3508 			error = 2;
3509 		}
3510 
3511 		update_table_opcode(cmd, uidx);
3512 	}
3513 
3514 	return (error);
3515 }
3516 
3517 /*
3518  * Checks if an opcode is referencing a table of the appropriate type.
3519  * Takes a reference on each table found.
3520  * Rewrites user-supplied opcode values with kernel ones.
3521  *
3522  * Returns 0 on success and appropriate error code otherwise.
3523  */
3524 int
3525 ipfw_rewrite_table_uidx(struct ip_fw_chain *chain,
3526     struct rule_check_info *ci)
3527 {
3528 	int cmdlen, error, l;
3529 	ipfw_insn *cmd;
3530 	uint16_t uidx;
3531 	uint8_t type;
3532 	struct namedobj_instance *ni;
3533 	struct obj_idx *p, *pidx_first, *pidx_last;
3534 	struct tid_info ti;
3535 
3536 	ni = CHAIN_TO_NI(chain);
3537 
3538 	/*
3539 	 * Prepare an array for storing opcode indices.
3540 	 * Use stack allocation by default.
3541 	 */
3542 	if (ci->table_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
3543 		/* Stack */
3544 		pidx_first = ci->obuf;
3545 	} else
3546 		pidx_first = malloc(ci->table_opcodes * sizeof(struct obj_idx),
3547 		    M_IPFW, M_WAITOK | M_ZERO);
3548 
3549 	pidx_last = pidx_first;
3550 	error = 0;
3551 	type = 0;
3552 	memset(&ti, 0, sizeof(ti));
3553 
3554 	/*
3555 	 * Use default set for looking up tables (old way) or
3556 	 * use the set the rule is assigned to (new way).
3557 	 */
3558 	ti.set = (V_fw_tables_sets != 0) ? ci->krule->set : 0;
3559 	if (ci->ctlv != NULL) {
3560 		ti.tlvs = (void *)(ci->ctlv + 1);
3561 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
3562 	}
3563 
3564 	/* Reference all used tables */
3565 	error = find_ref_rule_tables(chain, ci->krule, ci, &pidx_last, &ti);
3566 	if (error != 0)
3567 		goto free;
3568 
3569 	IPFW_UH_WLOCK(chain);
3570 
3571 	/* Perform rule rewrite */
3572 	l = ci->krule->cmd_len;
3573 	cmd = ci->krule->cmd;
3574 	cmdlen = 0;
3575 	p = pidx_first;
3576 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3577 		cmdlen = F_LEN(cmd);
3578 		if (classify_table_opcode(cmd, &uidx, &type) != 0)
3579 			continue;
3580 		update_table_opcode(cmd, p->kidx);
3581 		p++;
3582 	}
3583 
3584 	IPFW_UH_WUNLOCK(chain);
3585 
3586 free:
3587 	if (pidx_first != ci->obuf)
3588 		free(pidx_first, M_IPFW);
3589 
3590 	return (error);
3591 }
3592 
3593 static struct ipfw_sopt_handler	scodes[] = {
3594 	{ IP_FW_TABLE_XCREATE,	0,	HDIR_SET,	create_table },
3595 	{ IP_FW_TABLE_XDESTROY,	0,	HDIR_SET,	flush_table_v0 },
3596 	{ IP_FW_TABLE_XFLUSH,	0,	HDIR_SET,	flush_table_v0 },
3597 	{ IP_FW_TABLE_XMODIFY,	0,	HDIR_BOTH,	modify_table },
3598 	{ IP_FW_TABLE_XINFO,	0,	HDIR_GET,	describe_table },
3599 	{ IP_FW_TABLES_XLIST,	0,	HDIR_GET,	list_tables },
3600 	{ IP_FW_TABLE_XLIST,	0,	HDIR_GET,	dump_table_v0 },
3601 	{ IP_FW_TABLE_XLIST,	1,	HDIR_GET,	dump_table_v1 },
3602 	{ IP_FW_TABLE_XADD,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3603 	{ IP_FW_TABLE_XADD,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3604 	{ IP_FW_TABLE_XDEL,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3605 	{ IP_FW_TABLE_XDEL,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3606 	{ IP_FW_TABLE_XFIND,	0,	HDIR_GET,	find_table_entry },
3607 	{ IP_FW_TABLE_XSWAP,	0,	HDIR_SET,	swap_table },
3608 	{ IP_FW_TABLES_ALIST,	0,	HDIR_GET,	list_table_algo },
3609 	{ IP_FW_TABLE_XGETSIZE,	0,	HDIR_GET,	get_table_size },
3610 };
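
/*
 * Note on the handler table above: some opcodes are registered with
 * two version numbers (e.g. IP_FW_TABLE_XLIST 0 and 1), allowing old
 * and new ipfw(8) binaries to keep using their respective request and
 * reply layouts against the same kernel.
 */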
3611 
3612 static void
3613 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no,
3614     void *arg)
3615 {
3616 
3617 	unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no);
3618 	if (ipfw_objhash_free_idx(ni, no->kidx) != 0)
3619 		printf("Error unlinking kidx %d from table %s\n",
3620 		    no->kidx, no->name);
3621 	free_table_config(ni, (struct table_config *)no);
3622 }
3623 
3624 /*
3625  * Shuts tables module down.
3626  */
3627 void
3628 ipfw_destroy_tables(struct ip_fw_chain *ch, int last)
3629 {
3630 
3631 	IPFW_DEL_SOPT_HANDLER(last, scodes);
3632 
3633 	/* Remove all tables from working set */
3634 	IPFW_UH_WLOCK(ch);
3635 	IPFW_WLOCK(ch);
3636 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch);
3637 	IPFW_WUNLOCK(ch);
3638 	IPFW_UH_WUNLOCK(ch);
3639 
3640 	/* Free the table state array itself */
3641 	free(ch->tablestate, M_IPFW);
3642 
3643 	ipfw_table_value_destroy(ch, last);
3644 	ipfw_table_algo_destroy(ch);
3645 
3646 	ipfw_objhash_destroy(CHAIN_TO_NI(ch));
3647 	free(CHAIN_TO_TCFG(ch), M_IPFW);
3648 }
3649 
3650 /*
3651  * Starts tables module.
3652  */
3653 int
3654 ipfw_init_tables(struct ip_fw_chain *ch, int first)
3655 {
3656 	struct tables_config *tcfg;
3657 
3658 	/* Allocate pointers */
3659 	ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info),
3660 	    M_IPFW, M_WAITOK | M_ZERO);
3661 
3662 	tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO);
3663 	tcfg->namehash = ipfw_objhash_create(V_fw_tables_max);
3664 	ch->tblcfg = tcfg;
3665 
3666 	ipfw_table_value_init(ch, first);
3667 	ipfw_table_algo_init(ch);
3668 
3669 	IPFW_ADD_SOPT_HANDLER(first, scodes);
3670 	return (0);
3671 }
3672 
3673 
3674 
3675