xref: /freebsd/sys/netpfil/ipfw/ip_fw_table.c (revision 64de80195bba295c961a4cdf96dbe0e4979bdf2a)
1 /*-
2  * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
3  * Copyright (c) 2014 Yandex LLC
4  * Copyright (c) 2014 Alexander V. Chernikov
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * Lookup table support for ipfw.
33  *
34  * This file contains handlers for all generic tables' operations:
35  * add/del/flush entries, list/dump tables, etc.
36  *
37  * Table data modification is protected by both the UH and runtime locks,
38  * while reading configuration/data is protected by the UH lock.
39  *
40  * Lookup algorithms for all table types are located in ip_fw_table_algo.c
41  */
42 
43 #include "opt_ipfw.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/rwlock.h>
51 #include <sys/rmlock.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/queue.h>
55 #include <net/if.h>	/* ip_fw.h requires IFNAMSIZ */
56 
57 #include <netinet/in.h>
58 #include <netinet/ip_var.h>	/* struct ipfw_rule_ref */
59 #include <netinet/ip_fw.h>
60 
61 #include <netpfil/ipfw/ip_fw_private.h>
62 #include <netpfil/ipfw/ip_fw_table.h>
63 
64 /*
65  * Table has the following `type` concepts:
66  *
67  * `no.type` represents the lookup key type (addr, ifp, uid, etc.).
68  * `vmask` is the bitmask of value types currently present in the table.
69  * The special value IPFW_VTYPE_LEGACY ( (uint32_t)-1 ) represents the old
70  * single-value-for-all approach.
71  */
72 struct table_config {
73 	struct named_object	no;
74 	uint8_t		tflags;		/* type flags */
75 	uint8_t		locked;		/* 1 if locked from changes */
76 	uint8_t		linked;		/* 1 if already linked */
77 	uint8_t		ochanged;	/* used by set swapping */
78 	uint8_t		vshared;	/* 1 if using shared value array */
79 	uint8_t		spare[3];
80 	uint32_t	count;		/* Number of records */
81 	uint32_t	limit;		/* Max number of records */
82 	uint32_t	vmask;		/* bitmask with supported values */
83 	uint32_t	ocount;		/* used by set swapping */
84 	uint64_t	gencnt;		/* generation count */
85 	char		tablename[64];	/* table name */
86 	struct table_algo	*ta;	/* Callbacks for given algo */
87 	void		*astate;	/* algorithm state */
88 	struct table_info	ti_copy;	/* data to put to table_info */
89 	struct namedobj_instance	*vi;
90 };
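/*
 * A minimal illustrative sketch (comment only, not compiled) of the vmask
 * convention described above; the IPFW_VTYPE_* flags other than
 * IPFW_VTYPE_LEGACY are assumed to come from ip_fw.h:
 *
 *	if (tc->vmask == IPFW_VTYPE_LEGACY)
 *		;	// old-style table: one opaque 32-bit value per entry
 *	else if ((tc->vmask & (IPFW_VTYPE_SKIPTO | IPFW_VTYPE_FIB)) != 0)
 *		;	// entries carry typed values, e.g. skipto and/or fib
 */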
91 
92 static struct table_config *find_table(struct namedobj_instance *ni,
93     struct tid_info *ti);
94 static struct table_config *alloc_table_config(struct ip_fw_chain *ch,
95     struct tid_info *ti, struct table_algo *ta, char *adata, uint8_t tflags);
96 static void free_table_config(struct namedobj_instance *ni,
97     struct table_config *tc);
98 static int create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
99     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int ref);
100 static void link_table(struct ip_fw_chain *ch, struct table_config *tc);
101 static void unlink_table(struct ip_fw_chain *ch, struct table_config *tc);
102 static int find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
103     struct tentry_info *tei, uint32_t count, int op, struct table_config **ptc);
104 #define	OP_ADD	1
105 #define	OP_DEL	0
106 static int export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
107     struct sockopt_data *sd);
108 static void export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
109     ipfw_xtable_info *i);
110 static int dump_table_tentry(void *e, void *arg);
111 static int dump_table_xentry(void *e, void *arg);
112 
113 static int swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
114     struct tid_info *b);
115 
116 static int check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
117     struct table_config *tc, struct table_info *ti, uint32_t count);
118 static int destroy_table(struct ip_fw_chain *ch, struct tid_info *ti);
119 
120 static struct table_algo *find_table_algo(struct tables_config *tableconf,
121     struct tid_info *ti, char *name);
122 
123 static void objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti);
124 static void ntlv_to_ti(struct _ipfw_obj_ntlv *ntlv, struct tid_info *ti);
125 static int classify_table_opcode(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype);
126 
127 #define	CHAIN_TO_NI(chain)	(CHAIN_TO_TCFG(chain)->namehash)
128 #define	KIDX_TO_TI(ch, k)	(&(((struct table_info *)(ch)->tablestate)[k]))
129 
130 #define	TA_BUF_SZ	128	/* On-stack buffer for add/delete state */
131 
132 void
133 rollback_toperation_state(struct ip_fw_chain *ch, void *object)
134 {
135 	struct tables_config *tcfg;
136 	struct op_state *os;
137 
138 	tcfg = CHAIN_TO_TCFG(ch);
139 	TAILQ_FOREACH(os, &tcfg->state_list, next)
140 		os->func(object, os);
141 }
142 
143 void
144 add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
145 {
146 	struct tables_config *tcfg;
147 
148 	tcfg = CHAIN_TO_TCFG(ch);
149 	TAILQ_INSERT_HEAD(&tcfg->state_list, &ts->opstate, next);
150 }
151 
152 void
153 del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts)
154 {
155 	struct tables_config *tcfg;
156 
157 	tcfg = CHAIN_TO_TCFG(ch);
158 	TAILQ_REMOVE(&tcfg->state_list, &ts->opstate, next);
159 }
160 
161 void
162 tc_ref(struct table_config *tc)
163 {
164 
165 	tc->no.refcnt++;
166 }
167 
168 void
169 tc_unref(struct table_config *tc)
170 {
171 
172 	tc->no.refcnt--;
173 }
174 
175 static struct table_value *
176 get_table_value(struct ip_fw_chain *ch, struct table_config *tc, uint32_t kidx)
177 {
178 	struct table_value *pval;
179 
180 	pval = (struct table_value *)ch->valuestate;
181 
182 	return (&pval[kidx]);
183 }
184 
185 
186 /*
187  * Checks if we're able to insert/update entry @tei into table
188  * w.r.t. @tc limits.
189  * May alter @tei to indicate an insertion error or insert
190  * options.
191  *
192  * Returns 0 if the operation can be performed.
193  */
194 static int
195 check_table_limit(struct table_config *tc, struct tentry_info *tei)
196 {
197 
198 	if (tc->limit == 0 || tc->count < tc->limit)
199 		return (0);
200 
201 	if ((tei->flags & TEI_FLAGS_UPDATE) == 0) {
202 		/* Notify userland on error cause */
203 		tei->flags |= TEI_FLAGS_LIMIT;
204 		return (EFBIG);
205 	}
206 
207 	/*
208 	 * We have UPDATE flag set.
209 	 * Permit updating record (if found),
210 	 * but restrict adding new one since we've
211 	 * already hit the limit.
212 	 */
213 	tei->flags |= TEI_FLAGS_DONTADD;
214 
215 	return (0);
216 }
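/*
 * Worked example for the limit check above: with tc->limit == 1000 and
 * tc->count == 1000, a plain add fails with EFBIG and TEI_FLAGS_LIMIT is
 * reported to userland; the same request with TEI_FLAGS_UPDATE set is
 * allowed to proceed, but TEI_FLAGS_DONTADD restricts it to updating an
 * already-existing record.
 */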
217 
218 /*
219  * Convert algorithm callback return code into
220  * one of pre-defined states known by userland.
221  */
222 static void
223 store_tei_result(struct tentry_info *tei, int op, int error, uint32_t num)
224 {
225 	int flag;
226 
227 	flag = 0;
228 
229 	switch (error) {
230 	case 0:
231 		if (op == OP_ADD && num != 0)
232 			flag = TEI_FLAGS_ADDED;
233 		if (op == OP_DEL)
234 			flag = TEI_FLAGS_DELETED;
235 		break;
236 	case ENOENT:
237 		flag = TEI_FLAGS_NOTFOUND;
238 		break;
239 	case EEXIST:
240 		flag = TEI_FLAGS_EXISTS;
241 		break;
242 	default:
243 		flag = TEI_FLAGS_ERROR;
244 	}
245 
246 	tei->flags |= flag;
247 }
248 
249 /*
250  * Creates and references table with default parameters.
251  * Saves the allocated table kidx into @pkidx,
252  * if non-NULL.
253  * Used for table auto-creation to support old binaries.
254  *
255  * Returns 0 on success.
256  */
257 static int
258 create_table_compat(struct ip_fw_chain *ch, struct tid_info *ti,
259     uint16_t *pkidx)
260 {
261 	ipfw_xtable_info xi;
262 	int error;
263 
264 	memset(&xi, 0, sizeof(xi));
265 	/* Set default value mask for legacy clients */
266 	xi.vmask = IPFW_VTYPE_LEGACY;
267 
268 	error = create_table_internal(ch, ti, NULL, &xi, pkidx, 1);
269 	if (error != 0)
270 		return (error);
271 
272 	return (0);
273 }
274 
275 /*
276  * Finds and references an existing table, optionally
277  * creating a new one.
278  *
279  * Saves the found table config into @ptc.
280  * Note the function may drop and reacquire the UH_WLOCK.
281  * Returns 0 if the table was found/created and referenced,
282  * or a non-zero error code otherwise.
283  */
284 static int
285 find_ref_table(struct ip_fw_chain *ch, struct tid_info *ti,
286     struct tentry_info *tei, uint32_t count, int op,
287     struct table_config **ptc)
288 {
289 	struct namedobj_instance *ni;
290 	struct table_config *tc;
291 	uint16_t kidx;
292 	int error;
293 
294 	IPFW_UH_WLOCK_ASSERT(ch);
295 
296 	ni = CHAIN_TO_NI(ch);
297 	tc = NULL;
298 	if ((tc = find_table(ni, ti)) != NULL) {
299 		/* check table type */
300 		if (tc->no.type != ti->type)
301 			return (EINVAL);
302 
303 		if (tc->locked != 0)
304 			return (EACCES);
305 
306 		/* Try to exit early on limit hit */
307 		if (op == OP_ADD && count == 1 &&
308 		    check_table_limit(tc, tei) != 0)
309 			return (EFBIG);
310 
311 		/* Reference and return */
312 		tc->no.refcnt++;
313 		*ptc = tc;
314 		return (0);
315 	}
316 
317 	if (op == OP_DEL)
318 		return (ESRCH);
319 
320 	/* Compatibility mode: create a new table for old clients */
321 	if ((tei->flags & TEI_FLAGS_COMPAT) == 0)
322 		return (ESRCH);
323 
324 	IPFW_UH_WUNLOCK(ch);
325 	error = create_table_compat(ch, ti, &kidx);
326 	IPFW_UH_WLOCK(ch);
327 
328 	if (error != 0)
329 		return (error);
330 
331 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
332 	KASSERT(tc != NULL, ("create_table_compat returned bad idx %d", kidx));
333 
334 	/* OK, now we've got referenced table. */
335 	*ptc = tc;
336 	return (0);
337 }
338 
339 /*
340  * Rolls back the @added entries already inserted into @tc using the
341  * state array @ta_buf_m. Assumes the following layout:
342  * 1) ADD state (ta_buf_m[0] ... ta_buf_m[added - 1]) for handling update cases
343  * 2) DEL state (ta_buf_m[count] ... ta_buf_m[count + added - 1])
344  *    for storing deleted state
345  */
346 static void
347 rollback_added_entries(struct ip_fw_chain *ch, struct table_config *tc,
348     struct table_info *tinfo, struct tentry_info *tei, caddr_t ta_buf_m,
349     uint32_t count, uint32_t added)
350 {
351 	struct table_algo *ta;
352 	struct tentry_info *ptei;
353 	caddr_t v, vv;
354 	size_t ta_buf_sz;
355 	int error, i;
356 	uint32_t num;
357 
358 	IPFW_UH_WLOCK_ASSERT(ch);
359 
360 	ta = tc->ta;
361 	ta_buf_sz = ta->ta_buf_size;
362 	v = ta_buf_m;
363 	vv = v + count * ta_buf_sz;
364 	for (i = 0; i < added; i++, v += ta_buf_sz, vv += ta_buf_sz) {
365 		ptei = &tei[i];
366 		if ((ptei->flags & TEI_FLAGS_UPDATED) != 0) {
367 
368 			/*
369 			 * We have old value stored by previous
370 			 * call in @ptei->value. Do add once again
371 			 * to restore it.
372 			 */
373 			error = ta->add(tc->astate, tinfo, ptei, v, &num);
374 			KASSERT(error == 0, ("rollback UPDATE fail"));
375 			KASSERT(num == 0, ("rollback UPDATE fail2"));
376 			continue;
377 		}
378 
379 		error = ta->prepare_del(ch, ptei, vv);
380 		KASSERT(error == 0, ("pre-rollback INSERT failed"));
381 		error = ta->del(tc->astate, tinfo, ptei, vv, &num);
382 		KASSERT(error == 0, ("rollback INSERT failed"));
383 		tc->count -= num;
384 	}
385 }
386 
387 /*
388  * Prepares add/del state for all @count entries in @tei.
389  * Uses either stack buffer (@ta_buf) or allocates a new one.
390  * Stores pointer to allocated buffer back to @ta_buf.
391  *
392  * Returns 0 on success.
393  */
394 static int
395 prepare_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
396     struct tentry_info *tei, uint32_t count, int op, caddr_t *ta_buf)
397 {
398 	caddr_t ta_buf_m, v;
399 	size_t ta_buf_sz, sz;
400 	struct tentry_info *ptei;
401 	int error, i;
402 
403 	error = 0;
404 	ta_buf_sz = ta->ta_buf_size;
405 	if (count == 1) {
406 		/* Single add/delete, use on-stack buffer */
407 		memset(*ta_buf, 0, TA_BUF_SZ);
408 		ta_buf_m = *ta_buf;
409 	} else {
410 
411 		/*
412 		 * Multiple adds/deletes, allocate a larger buffer.
413 		 *
414 		 * Note we need a 2 x count buffer for the add case:
415 		 * we have to hold both ADD state
416 		 * and DELETE state (this may be needed
417 		 * if we have to roll back all changes)
418 		 */
419 		sz = count * ta_buf_sz;
420 		ta_buf_m = malloc((op == OP_ADD) ? sz * 2 : sz, M_TEMP,
421 		    M_WAITOK | M_ZERO);
422 	}
423 
424 	v = ta_buf_m;
425 	for (i = 0; i < count; i++, v += ta_buf_sz) {
426 		ptei = &tei[i];
427 		error = (op == OP_ADD) ?
428 		    ta->prepare_add(ch, ptei, v) : ta->prepare_del(ch, ptei, v);
429 
430 		/*
431 		 * Some syntax error (incorrect mask, or address, or
432 		 * anything). Return error regardless of atomicity
433 		 * settings.
434 		 */
435 		if (error != 0)
436 			break;
437 	}
438 
439 	*ta_buf = ta_buf_m;
440 	return (error);
441 }
442 
443 /*
444  * Flushes allocated state for each @count entries in @tei.
445  * Frees @ta_buf_m if differs from stack buffer @ta_buf.
446  */
447 static void
448 flush_batch_buffer(struct ip_fw_chain *ch, struct table_algo *ta,
449     struct tentry_info *tei, uint32_t count, int rollback,
450     caddr_t ta_buf_m, caddr_t ta_buf)
451 {
452 	caddr_t v;
453 	struct tentry_info *ptei;
454 	size_t ta_buf_sz;
455 	int i;
456 
457 	ta_buf_sz = ta->ta_buf_size;
458 
459 	/* Run cleaning callback anyway */
460 	v = ta_buf_m;
461 	for (i = 0; i < count; i++, v += ta_buf_sz) {
462 		ptei = &tei[i];
463 		ta->flush_entry(ch, ptei, v);
464 		if (ptei->ptv != NULL) {
465 			free(ptei->ptv, M_IPFW);
466 			ptei->ptv = NULL;
467 		}
468 	}
469 
470 	/* Clean up "deleted" state in case of rollback */
471 	if (rollback != 0) {
472 		v = ta_buf_m + count * ta_buf_sz;
473 		for (i = 0; i < count; i++, v += ta_buf_sz)
474 			ta->flush_entry(ch, &tei[i], v);
475 	}
476 
477 	if (ta_buf_m != ta_buf)
478 		free(ta_buf_m, M_TEMP);
479 }
480 
481 
482 static void
483 rollback_add_entry(void *object, struct op_state *_state)
484 {
485 	struct ip_fw_chain *ch;
486 	struct tableop_state *ts;
487 
488 	ts = (struct tableop_state *)_state;
489 
490 	if (ts->tc != object && ts->ch != object)
491 		return;
492 
493 	ch = ts->ch;
494 
495 	IPFW_UH_WLOCK_ASSERT(ch);
496 
497 	/* Call specified unlockers */
498 	rollback_table_values(ts);
499 
500 	/* Indicate we've called */
501 	ts->modified = 1;
502 }
503 
504 /*
505  * Adds/updates one or more entries in table @ti.
506  *
507  * Function may drop/reacquire UH wlock multiple times due to
508  * items alloc, algorithm callbacks (check_space), value linkage
509  * (new values, value storage realloc), etc..
510  * Other processes like other adds (which may involve storage resize),
511  * table swaps (which changes table data and may change algo type),
512  * table modify (which may change value mask) may be executed
513  * simultaneously so we need to deal with it.
514  *
515  * The following approach was implemented:
516  * we have a per-chain linked list, protected by the UH lock.
517  * add_table_entry prepares a special on-stack structure which is passed
518  * to its descendants. Users add this structure to the list before unlocking.
519  * After performing the needed operations and acquiring the UH lock back, each
520  * user checks if the structure has changed. If so, it rolls its local state
521  * back and returns to the caller without error.
522  * add_table_entry() itself also checks if the structure has changed and
523  * restarts its operation from the beginning (goto restart).
524  *
525  * Functions which modify fields of interest (currently
526  *   resize_shared_value_storage() and swap_tables() )
527  * traverse the list while holding the UH lock immediately before
528  * performing their operations, calling the function provided by each list
529  * entry ( currently rollback_add_entry ), which rolls back all necessary
530  * state and sets appropriate values in the structure indicating rollback
531  * has happened.
532  *
533  * Algo interaction:
534  * Function references @ti first to ensure table won't
535  * disappear or change its type.
536  * After that, prepare_add callback is called for each @tei entry.
537  * Next, we try to add each entry under UH+WLOCK
538  * using add() callback.
539  * Finally, we free all state by calling flush_entry callback
540  * for each @tei.
541  *
542  * Returns 0 on success.
543  */
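/*
 * A minimal sketch (illustrative comment only, not compiled) of the restart
 * pattern described above, using only names defined in this file:
 *
 *	add_toperation_state(ch, &ts);	// publish on-stack state on the list
 *	IPFW_UH_WUNLOCK(ch);
 *	// ... sleepable work: buffer allocation, ta->prepare_add() calls ...
 *	IPFW_UH_WLOCK(ch);
 *	del_toperation_state(ch, &ts);
 *	if (ts.modified != 0)
 *		goto restart;	// a swap/resize ran meanwhile; redo the work
 */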
544 int
545 add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
546     struct tentry_info *tei, uint8_t flags, uint32_t count)
547 {
548 	struct table_config *tc;
549 	struct table_algo *ta;
550 	uint16_t kidx;
551 	int error, first_error, i, rollback;
552 	uint32_t num, numadd;
553 	struct tentry_info *ptei;
554 	struct tableop_state ts;
555 	char ta_buf[TA_BUF_SZ];
556 	caddr_t ta_buf_m, v;
557 
558 	memset(&ts, 0, sizeof(ts));
559 	ta = NULL;
560 	IPFW_UH_WLOCK(ch);
561 
562 	/*
563 	 * Find and reference existing table.
564 	 */
565 restart:
566 	if (ts.modified != 0) {
567 		IPFW_UH_WUNLOCK(ch);
568 		flush_batch_buffer(ch, ta, tei, count, rollback,
569 		    ta_buf_m, ta_buf);
570 		memset(&ts, 0, sizeof(ts));
571 		ta = NULL;
572 		IPFW_UH_WLOCK(ch);
573 	}
574 
575 	error = find_ref_table(ch, ti, tei, count, OP_ADD, &tc);
576 	if (error != 0) {
577 		IPFW_UH_WUNLOCK(ch);
578 		return (error);
579 	}
580 	ta = tc->ta;
581 
582 	/* Fill in tablestate */
583 	ts.ch = ch;
584 	ts.opstate.func = rollback_add_entry;
585 	ts.tc = tc;
586 	ts.vshared = tc->vshared;
587 	ts.vmask = tc->vmask;
588 	ts.ta = ta;
589 	ts.tei = tei;
590 	ts.count = count;
591 	rollback = 0;
592 	add_toperation_state(ch, &ts);
593 	IPFW_UH_WUNLOCK(ch);
594 
595 	/* Allocate memory and prepare record(s) */
596 	/* Pass stack buffer by default */
597 	ta_buf_m = ta_buf;
598 	error = prepare_batch_buffer(ch, ta, tei, count, OP_ADD, &ta_buf_m);
599 	if (error != 0)
600 		goto cleanup;
601 
602 	IPFW_UH_WLOCK(ch);
603 	/* Drop reference we've used in first search */
604 	tc->no.refcnt--;
605 
606 	/*
607 	 * Check if table swap has happened.
608 	 * (so table algo might be changed).
609 	 * Restart operation to achieve consistent behavior.
610 	 */
611 	del_toperation_state(ch, &ts);
612 	if (ts.modified != 0)
613 		goto restart;
614 
615 	/*
616 	 * Link all values to the shared/per-table value array.
617 	 *
618 	 * May release/reacquire UH_WLOCK.
619 	 */
620 	error = ipfw_link_table_values(ch, &ts);
621 	if (error != 0)
622 		goto cleanup;
623 	if (ts.modified != 0)
624 		goto restart;
625 
626 	/*
627 	 * Ensure we are able to add all entries without additional
628 	 * memory allocations. May release/reacquire UH_WLOCK.
629 	 */
630 	kidx = tc->no.kidx;
631 	error = check_table_space(ch, &ts, tc, KIDX_TO_TI(ch, kidx), count);
632 	if (error != 0)
633 		goto cleanup;
634 	if (ts.modified != 0)
635 		goto restart;
636 
637 	/* We've got valid table in @tc. Let's try to add data */
638 	kidx = tc->no.kidx;
639 	ta = tc->ta;
640 	numadd = 0;
641 	first_error = 0;
642 
643 	IPFW_WLOCK(ch);
644 
645 	v = ta_buf_m;
646 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
647 		ptei = &tei[i];
648 		num = 0;
649 		/* check limit before adding */
650 		if ((error = check_table_limit(tc, ptei)) == 0) {
651 			error = ta->add(tc->astate, KIDX_TO_TI(ch, kidx),
652 			    ptei, v, &num);
653 			/* Set status flag to inform userland */
654 			store_tei_result(ptei, OP_ADD, error, num);
655 		}
656 		if (error == 0) {
657 			/* Update number of records to ease limit checking */
658 			tc->count += num;
659 			numadd += num;
660 			continue;
661 		}
662 
663 		if (first_error == 0)
664 			first_error = error;
665 
666 		/*
667 		 * An error has happened. Check our atomicity
668 		 * settings: continue if atomicity is not required,
669 		 * roll back the changes otherwise.
670 		 */
671 		if ((flags & IPFW_CTF_ATOMIC) == 0)
672 			continue;
673 
674 		rollback_added_entries(ch, tc, KIDX_TO_TI(ch, kidx),
675 		    tei, ta_buf_m, count, i);
676 
677 		rollback = 1;
678 		break;
679 	}
680 
681 	IPFW_WUNLOCK(ch);
682 
683 	ipfw_garbage_table_values(ch, tc, tei, count, rollback);
684 
685 	/* Permit post-add algorithm grow/rehash. */
686 	if (numadd != 0)
687 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
688 
689 	/* Return first error to user, if any */
690 	error = first_error;
691 
692 cleanup:
693 	IPFW_UH_WUNLOCK(ch);
694 
695 	flush_batch_buffer(ch, ta, tei, count, rollback, ta_buf_m, ta_buf);
696 
697 	return (error);
698 }
699 
700 /*
701  * Deletes one or more entries in table @ti.
702  *
703  * Returns 0 on success.
704  */
705 int
706 del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
707     struct tentry_info *tei, uint8_t flags, uint32_t count)
708 {
709 	struct table_config *tc;
710 	struct table_algo *ta;
711 	struct tentry_info *ptei;
712 	uint16_t kidx;
713 	int error, first_error, i;
714 	uint32_t num, numdel;
715 	char ta_buf[TA_BUF_SZ];
716 	caddr_t ta_buf_m, v;
717 
718 	/*
719 	 * Find and reference existing table.
720 	 */
721 	IPFW_UH_WLOCK(ch);
722 	error = find_ref_table(ch, ti, tei, count, OP_DEL, &tc);
723 	if (error != 0) {
724 		IPFW_UH_WUNLOCK(ch);
725 		return (error);
726 	}
727 	ta = tc->ta;
728 	IPFW_UH_WUNLOCK(ch);
729 
730 	/* Allocate memory and prepare record(s) */
731 	/* Pass stack buffer by default */
732 	ta_buf_m = ta_buf;
733 	error = prepare_batch_buffer(ch, ta, tei, count, OP_DEL, &ta_buf_m);
734 	if (error != 0)
735 		goto cleanup;
736 
737 	IPFW_UH_WLOCK(ch);
738 
739 	/* Drop reference we've used in first search */
740 	tc->no.refcnt--;
741 
742 	/*
743 	 * Check if the table algo is still the same
744 	 * (a changed ta may be the result of a table swap).
745 	 */
746 	if (ta != tc->ta) {
747 		IPFW_UH_WUNLOCK(ch);
748 		error = EINVAL;
749 		goto cleanup;
750 	}
751 
752 	kidx = tc->no.kidx;
753 	numdel = 0;
754 	first_error = 0;
755 
756 	IPFW_WLOCK(ch);
757 	v = ta_buf_m;
758 	for (i = 0; i < count; i++, v += ta->ta_buf_size) {
759 		ptei = &tei[i];
760 		num = 0;
761 		error = ta->del(tc->astate, KIDX_TO_TI(ch, kidx), ptei, v,
762 		    &num);
763 		/* Save state for userland */
764 		store_tei_result(ptei, OP_DEL, error, num);
765 		if (error != 0 && first_error == 0)
766 			first_error = error;
767 		tc->count -= num;
768 		numdel += num;
769 	}
770 	IPFW_WUNLOCK(ch);
771 
772 	/* Unlink unused values */
773 	ipfw_garbage_table_values(ch, tc, tei, count, 0);
774 
775 	if (numdel != 0) {
776 		/* Run post-del hook to permit shrinking */
777 		check_table_space(ch, NULL, tc, KIDX_TO_TI(ch, kidx), 0);
778 	}
779 
780 	IPFW_UH_WUNLOCK(ch);
781 
782 	/* Return first error to user, if any */
783 	error = first_error;
784 
785 cleanup:
786 	flush_batch_buffer(ch, ta, tei, count, 0, ta_buf_m, ta_buf);
787 
788 	return (error);
789 }
790 
791 /*
792  * Ensure that table @tc has enough space to add @count entries without
793  * need for reallocation.
794  *
795  * Callbacks order:
796  * 0) need_modify() (UH_WLOCK) - checks if @count items can be added w/o resize.
797  *
798  * 1) prepare_mod (no locks, M_WAITOK) - alloc new state based on @pflags.
799  * 2) fill_mod (UH_WLOCK) - copy old data into the new storage
800  * 3) modify (UH_WLOCK + WLOCK) - switch pointers
801  * 4) flush_mod (UH_WLOCK) - free state, if needed
802  *
803  * Returns 0 on success.
804  */
805 static int
806 check_table_space(struct ip_fw_chain *ch, struct tableop_state *ts,
807     struct table_config *tc, struct table_info *ti, uint32_t count)
808 {
809 	struct table_algo *ta;
810 	uint64_t pflags;
811 	char ta_buf[TA_BUF_SZ];
812 	int error;
813 
814 	IPFW_UH_WLOCK_ASSERT(ch);
815 
816 	error = 0;
817 	ta = tc->ta;
818 	if (ta->need_modify == NULL)
819 		return (0);
820 
821 	/* Acquire a reference so we don't lose @tc between locks/unlocks */
822 	tc->no.refcnt++;
823 
824 	/*
825 	 * TODO: think about avoiding race between large add/large delete
826 	 * operation on algorithm which implements shrinking along with
827 	 * growing.
828 	 */
829 	while (true) {
830 		pflags = 0;
831 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
832 			error = 0;
833 			break;
834 		}
835 
836 		/* We have to shrink/grow table */
837 		if (ts != NULL)
838 			add_toperation_state(ch, ts);
839 		IPFW_UH_WUNLOCK(ch);
840 
841 		memset(&ta_buf, 0, sizeof(ta_buf));
842 		error = ta->prepare_mod(ta_buf, &pflags);
843 
844 		IPFW_UH_WLOCK(ch);
845 		if (ts != NULL)
846 			del_toperation_state(ch, ts);
847 
848 		if (error != 0)
849 			break;
850 
851 		if (ts != NULL && ts->modified != 0) {
852 
853 			/*
854 			 * Swap operation has happened
855 			 * so we're currently operating on other
856 			 * table data. Stop doing this.
857 			 */
858 			ta->flush_mod(ta_buf);
859 			break;
860 		}
861 
862 		/* Check if we still need to alter table */
863 		ti = KIDX_TO_TI(ch, tc->no.kidx);
864 		if (ta->need_modify(tc->astate, ti, count, &pflags) == 0) {
865 			IPFW_UH_WUNLOCK(ch);
866 
867 			/*
868 			 * Other thread has already performed resize.
869 			 * Flush our state and return.
870 			 */
871 			ta->flush_mod(ta_buf);
872 			break;
873 		}
874 
875 		error = ta->fill_mod(tc->astate, ti, ta_buf, &pflags);
876 		if (error == 0) {
877 			/* Do actual modification */
878 			IPFW_WLOCK(ch);
879 			ta->modify(tc->astate, ti, ta_buf, pflags);
880 			IPFW_WUNLOCK(ch);
881 		}
882 
883 		/* Anyway, flush data and retry */
884 		ta->flush_mod(ta_buf);
885 	}
886 
887 	tc->no.refcnt--;
888 	return (error);
889 }
890 
891 /*
892  * Adds or deletes record in table.
893  * Data layout (v0):
894  * Request: [ ip_fw3_opheader ipfw_table_xentry ]
895  *
896  * Returns 0 on success
897  */
898 static int
899 manage_table_ent_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
900     struct sockopt_data *sd)
901 {
902 	ipfw_table_xentry *xent;
903 	struct tentry_info tei;
904 	struct tid_info ti;
905 	struct table_value v;
906 	int error, hdrlen, read;
907 
908 	hdrlen = offsetof(ipfw_table_xentry, k);
909 
910 	/* Check minimum header size */
911 	if (sd->valsize < (sizeof(*op3) + hdrlen))
912 		return (EINVAL);
913 
914 	read = sizeof(ip_fw3_opheader);
915 
916 	/* Check if xentry len field is valid */
917 	xent = (ipfw_table_xentry *)(op3 + 1);
918 	if (xent->len < hdrlen || xent->len + read > sd->valsize)
919 		return (EINVAL);
920 
921 	memset(&tei, 0, sizeof(tei));
922 	tei.paddr = &xent->k;
923 	tei.masklen = xent->masklen;
924 	ipfw_import_table_value_legacy(xent->value, &v);
925 	tei.pvalue = &v;
926 	/* Compatibility with old requests */
927 	tei.flags = TEI_FLAGS_COMPAT;
928 	if (xent->type == IPFW_TABLE_ADDR) {
929 		if (xent->len - hdrlen == sizeof(in_addr_t))
930 			tei.subtype = AF_INET;
931 		else
932 			tei.subtype = AF_INET6;
933 	}
934 
935 	memset(&ti, 0, sizeof(ti));
936 	ti.uidx = xent->tbl;
937 	ti.type = xent->type;
938 
939 	error = (op3->opcode == IP_FW_TABLE_XADD) ?
940 	    add_table_entry(ch, &ti, &tei, 0, 1) :
941 	    del_table_entry(ch, &ti, &tei, 0, 1);
942 
943 	return (error);
944 }
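/*
 * A hedged userland sketch (not part of this file) of the legacy v0 request
 * handled above, assuming the ipfw_table_xentry definition from ip_fw.h, a
 * raw IP socket @s, and no error handling:
 *
 *	char buf[sizeof(ip_fw3_opheader) + sizeof(ipfw_table_xentry)];
 *	ip_fw3_opheader *op3 = (ip_fw3_opheader *)buf;
 *	ipfw_table_xentry *xent = (ipfw_table_xentry *)(op3 + 1);
 *	struct in_addr a = { .s_addr = htonl(0x0a000001) };	// 10.0.0.1
 *
 *	memset(buf, 0, sizeof(buf));
 *	op3->opcode = IP_FW_TABLE_XADD;
 *	xent->len = offsetof(ipfw_table_xentry, k) + sizeof(in_addr_t);
 *	xent->tbl = 1;				// table number
 *	xent->type = IPFW_TABLE_ADDR;
 *	xent->masklen = 32;
 *	xent->value = 0;
 *	memcpy(&xent->k, &a, sizeof(a));	// IPv4 key goes into the k union
 *	setsockopt(s, IPPROTO_IP, IP_FW3, buf, sizeof(buf));
 */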
945 
946 /*
947  * Adds or deletes record in table.
948  * Data layout (v1)(current):
949  * Request: [ ipfw_obj_header
950  *   ipfw_obj_ctlv(IPFW_TLV_TBLENT_LIST) [ ipfw_obj_tentry x N ]
951  * ]
952  *
953  * Returns 0 on success
954  */
955 static int
956 manage_table_ent_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
957     struct sockopt_data *sd)
958 {
959 	ipfw_obj_tentry *tent, *ptent;
960 	ipfw_obj_ctlv *ctlv;
961 	ipfw_obj_header *oh;
962 	struct tentry_info *ptei, tei, *tei_buf;
963 	struct tid_info ti;
964 	int error, i, kidx, read;
965 
966 	/* Check minimum header size */
967 	if (sd->valsize < (sizeof(*oh) + sizeof(*ctlv)))
968 		return (EINVAL);
969 
970 	/* Check if passed data is too long */
971 	if (sd->valsize != sd->kavail)
972 		return (EINVAL);
973 
974 	oh = (ipfw_obj_header *)sd->kbuf;
975 
976 	/* Basic length checks for TLVs */
977 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
978 		return (EINVAL);
979 
980 	read = sizeof(*oh);
981 
982 	ctlv = (ipfw_obj_ctlv *)(oh + 1);
983 	if (ctlv->head.length + read != sd->valsize)
984 		return (EINVAL);
985 
986 	read += sizeof(*ctlv);
987 	tent = (ipfw_obj_tentry *)(ctlv + 1);
988 	if (ctlv->count * sizeof(*tent) + read != sd->valsize)
989 		return (EINVAL);
990 
991 	if (ctlv->count == 0)
992 		return (0);
993 
994 	/*
995 	 * Mark the entire buffer as "read".
996 	 * This instructs the sopt API to write it back
997 	 * after the function returns.
998 	 */
999 	ipfw_get_sopt_header(sd, sd->valsize);
1000 
1001 	/* Perform basic checks for each entry */
1002 	ptent = tent;
1003 	kidx = tent->idx;
1004 	for (i = 0; i < ctlv->count; i++, ptent++) {
1005 		if (ptent->head.length != sizeof(*ptent))
1006 			return (EINVAL);
1007 		if (ptent->idx != kidx)
1008 			return (ENOTSUP);
1009 	}
1010 
1011 	/* Convert data into kernel request objects */
1012 	objheader_to_ti(oh, &ti);
1013 	ti.type = oh->ntlv.type;
1014 	ti.uidx = kidx;
1015 
1016 	/* Use on-stack buffer for single add/del */
1017 	if (ctlv->count == 1) {
1018 		memset(&tei, 0, sizeof(tei));
1019 		tei_buf = &tei;
1020 	} else
1021 		tei_buf = malloc(ctlv->count * sizeof(tei), M_TEMP,
1022 		    M_WAITOK | M_ZERO);
1023 
1024 	ptei = tei_buf;
1025 	ptent = tent;
1026 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1027 		ptei->paddr = &ptent->k;
1028 		ptei->subtype = ptent->subtype;
1029 		ptei->masklen = ptent->masklen;
1030 		if (ptent->head.flags & IPFW_TF_UPDATE)
1031 			ptei->flags |= TEI_FLAGS_UPDATE;
1032 
1033 		ipfw_import_table_value_v1(&ptent->v.value);
1034 		ptei->pvalue = (struct table_value *)&ptent->v.value;
1035 	}
1036 
1037 	error = (oh->opheader.opcode == IP_FW_TABLE_XADD) ?
1038 	    add_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count) :
1039 	    del_table_entry(ch, &ti, tei_buf, ctlv->flags, ctlv->count);
1040 
1041 	/* Translate result back to userland */
1042 	ptei = tei_buf;
1043 	ptent = tent;
1044 	for (i = 0; i < ctlv->count; i++, ptent++, ptei++) {
1045 		if (ptei->flags & TEI_FLAGS_ADDED)
1046 			ptent->result = IPFW_TR_ADDED;
1047 		else if (ptei->flags & TEI_FLAGS_DELETED)
1048 			ptent->result = IPFW_TR_DELETED;
1049 		else if (ptei->flags & TEI_FLAGS_UPDATED)
1050 			ptent->result = IPFW_TR_UPDATED;
1051 		else if (ptei->flags & TEI_FLAGS_LIMIT)
1052 			ptent->result = IPFW_TR_LIMIT;
1053 		else if (ptei->flags & TEI_FLAGS_ERROR)
1054 			ptent->result = IPFW_TR_ERROR;
1055 		else if (ptei->flags & TEI_FLAGS_NOTFOUND)
1056 			ptent->result = IPFW_TR_NOTFOUND;
1057 		else if (ptei->flags & TEI_FLAGS_EXISTS)
1058 			ptent->result = IPFW_TR_EXISTS;
1059 		ipfw_export_table_value_v1(ptei->pvalue, &ptent->v.value);
1060 	}
1061 
1062 	if (tei_buf != &tei)
1063 		free(tei_buf, M_TEMP);
1064 
1065 	return (error);
1066 }
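/*
 * A hedged userland sketch (not part of this file) of the v1 request layout
 * handled above, for a single entry; the structure definitions are assumed
 * to come from ip_fw.h and error handling is omitted:
 *
 *	char buf[sizeof(ipfw_obj_header) + sizeof(ipfw_obj_ctlv) +
 *	    sizeof(ipfw_obj_tentry)];
 *	ipfw_obj_header *oh = (ipfw_obj_header *)buf;
 *	ipfw_obj_ctlv *ctlv = (ipfw_obj_ctlv *)(oh + 1);
 *	ipfw_obj_tentry *te = (ipfw_obj_tentry *)(ctlv + 1);
 *
 *	memset(buf, 0, sizeof(buf));
 *	oh->opheader.opcode = IP_FW_TABLE_XADD;
 *	oh->ntlv.head.type = IPFW_TLV_TBL_NAME;
 *	oh->ntlv.head.length = sizeof(oh->ntlv);
 *	oh->ntlv.idx = 1;
 *	strlcpy(oh->ntlv.name, "hosts", sizeof(oh->ntlv.name));
 *	ctlv->head.type = IPFW_TLV_TBLENT_LIST;
 *	ctlv->head.length = sizeof(*ctlv) + sizeof(*te);
 *	ctlv->count = 1;
 *	te->head.length = sizeof(*te);
 *	te->idx = oh->ntlv.idx;		// all entries must carry the same idx
 *	te->masklen = 32;		// plus key data in te->k, etc.
 *	// buf is then passed to the kernel via the IP_FW3 sockopt.
 */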
1067 
1068 /*
1069  * Looks up an entry in given table.
1070  * Data layout (v0)(current):
1071  * Request: [ ipfw_obj_header ipfw_obj_tentry ]
1072  * Reply: [ ipfw_obj_header ipfw_obj_tentry ]
1073  *
1074  * Returns 0 on success
1075  */
1076 static int
1077 find_table_entry(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1078     struct sockopt_data *sd)
1079 {
1080 	ipfw_obj_tentry *tent;
1081 	ipfw_obj_header *oh;
1082 	struct tid_info ti;
1083 	struct table_config *tc;
1084 	struct table_algo *ta;
1085 	struct table_info *kti;
1086 	struct namedobj_instance *ni;
1087 	int error;
1088 	size_t sz;
1089 
1090 	/* Check minimum header size */
1091 	sz = sizeof(*oh) + sizeof(*tent);
1092 	if (sd->valsize != sz)
1093 		return (EINVAL);
1094 
1095 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1096 	tent = (ipfw_obj_tentry *)(oh + 1);
1097 
1098 	/* Basic length checks for TLVs */
1099 	if (oh->ntlv.head.length != sizeof(oh->ntlv))
1100 		return (EINVAL);
1101 
1102 	objheader_to_ti(oh, &ti);
1103 	ti.type = oh->ntlv.type;
1104 	ti.uidx = tent->idx;
1105 
1106 	IPFW_UH_RLOCK(ch);
1107 	ni = CHAIN_TO_NI(ch);
1108 
1109 	/*
1110 	 * Find the existing table and check its type.
1111 	 */
1112 	ta = NULL;
1113 	if ((tc = find_table(ni, &ti)) == NULL) {
1114 		IPFW_UH_RUNLOCK(ch);
1115 		return (ESRCH);
1116 	}
1117 
1118 	/* check table type */
1119 	if (tc->no.type != ti.type) {
1120 		IPFW_UH_RUNLOCK(ch);
1121 		return (EINVAL);
1122 	}
1123 
1124 	kti = KIDX_TO_TI(ch, tc->no.kidx);
1125 	ta = tc->ta;
1126 
1127 	if (ta->find_tentry == NULL)
1128 		error = ENOTSUP;	/* do not return with the UH lock held */
1129 	else
1130 		error = ta->find_tentry(tc->astate, kti, tent);
1131 
1132 	IPFW_UH_RUNLOCK(ch);
1133 
1134 	return (error);
1135 }
1136 
1137 /*
1138  * Flushes all entries or destroys given table.
1139  * Data layout (v0)(current):
1140  * Request: [ ipfw_obj_header ]
1141  *
1142  * Returns 0 on success
1143  */
1144 static int
1145 flush_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1146     struct sockopt_data *sd)
1147 {
1148 	int error;
1149 	struct _ipfw_obj_header *oh;
1150 	struct tid_info ti;
1151 
1152 	if (sd->valsize != sizeof(*oh))
1153 		return (EINVAL);
1154 
1155 	oh = (struct _ipfw_obj_header *)op3;
1156 	objheader_to_ti(oh, &ti);
1157 
1158 	if (op3->opcode == IP_FW_TABLE_XDESTROY)
1159 		error = destroy_table(ch, &ti);
1160 	else if (op3->opcode == IP_FW_TABLE_XFLUSH)
1161 		error = flush_table(ch, &ti);
1162 	else
1163 		return (ENOTSUP);
1164 
1165 	return (error);
1166 }
1167 
1168 static void
1169 restart_flush(void *object, struct op_state *_state)
1170 {
1171 	struct tableop_state *ts;
1172 
1173 	ts = (struct tableop_state *)_state;
1174 
1175 	if (ts->tc != object)
1176 		return;
1177 
1178 	/* Indicate we've called */
1179 	ts->modified = 1;
1180 }
1181 
1182 /*
1183  * Flushes given table.
1184  *
1185  * The function creates a new table instance with the same
1186  * parameters, swaps it with the old one and
1187  * flushes the old state without holding the runtime WLOCK.
1188  *
1189  * Returns 0 on success.
1190  */
1191 int
1192 flush_table(struct ip_fw_chain *ch, struct tid_info *ti)
1193 {
1194 	struct namedobj_instance *ni;
1195 	struct table_config *tc;
1196 	struct table_algo *ta;
1197 	struct table_info ti_old, ti_new, *tablestate;
1198 	void *astate_old, *astate_new;
1199 	char algostate[64], *pstate;
1200 	struct tableop_state ts;
1201 	int error, need_gc;
1202 	uint16_t kidx;
1203 	uint8_t tflags;
1204 
1205 	/*
1206 	 * Stage 1: save table algorithm.
1207 	 * Reference the found table to ensure it won't disappear.
1208 	 */
1209 	IPFW_UH_WLOCK(ch);
1210 	ni = CHAIN_TO_NI(ch);
1211 	if ((tc = find_table(ni, ti)) == NULL) {
1212 		IPFW_UH_WUNLOCK(ch);
1213 		return (ESRCH);
1214 	}
1215 	need_gc = 0;
1216 	astate_new = NULL;
1217 	memset(&ti_new, 0, sizeof(ti_new));
1218 restart:
1219 	/* Set up swap handler */
1220 	memset(&ts, 0, sizeof(ts));
1221 	ts.opstate.func = restart_flush;
1222 	ts.tc = tc;
1223 
1224 	ta = tc->ta;
1225 	/* Do not flush readonly tables */
1226 	if ((ta->flags & TA_FLAG_READONLY) != 0) {
1227 		IPFW_UH_WUNLOCK(ch);
1228 		return (EACCES);
1229 	}
1230 	/* Save startup algo parameters */
1231 	if (ta->print_config != NULL) {
1232 		ta->print_config(tc->astate, KIDX_TO_TI(ch, tc->no.kidx),
1233 		    algostate, sizeof(algostate));
1234 		pstate = algostate;
1235 	} else
1236 		pstate = NULL;
1237 	tflags = tc->tflags;
1238 	tc->no.refcnt++;
1239 	add_toperation_state(ch, &ts);
1240 	IPFW_UH_WUNLOCK(ch);
1241 
1242 	/*
1243 	 * Stage 1.5: if this is not the first attempt, destroy previous state
1244 	 */
1245 	if (need_gc != 0) {
1246 		ta->destroy(astate_new, &ti_new);
1247 		need_gc = 0;
1248 	}
1249 
1250 	/*
1251 	 * Stage 2: allocate new table instance using same algo.
1252 	 */
1253 	memset(&ti_new, 0, sizeof(struct table_info));
1254 	error = ta->init(ch, &astate_new, &ti_new, pstate, tflags);
1255 
1256 	/*
1257 	 * Stage 3: swap old state pointers with newly-allocated ones.
1258 	 * Decrease refcount.
1259 	 */
1260 	IPFW_UH_WLOCK(ch);
1261 	tc->no.refcnt--;
1262 	del_toperation_state(ch, &ts);
1263 
1264 	if (error != 0) {
1265 		IPFW_UH_WUNLOCK(ch);
1266 		return (error);
1267 	}
1268 
1269 	/*
1270 	 * Restart operation if table swap has happened:
1271 	 * even if algo may be the same, algo init parameters
1272 	 * may change. Restart operation instead of doing
1273 	 * complex checks.
1274 	 */
1275 	if (ts.modified != 0) {
1276 		/* Delay destroying data since we're holding UH lock */
1277 		need_gc = 1;
1278 		goto restart;
1279 	}
1280 
1281 	ni = CHAIN_TO_NI(ch);
1282 	kidx = tc->no.kidx;
1283 	tablestate = (struct table_info *)ch->tablestate;
1284 
1285 	IPFW_WLOCK(ch);
1286 	ti_old = tablestate[kidx];
1287 	tablestate[kidx] = ti_new;
1288 	IPFW_WUNLOCK(ch);
1289 
1290 	astate_old = tc->astate;
1291 	tc->astate = astate_new;
1292 	tc->ti_copy = ti_new;
1293 	tc->count = 0;
1294 
1295 	/* Notify algo on real @ti address */
1296 	if (ta->change_ti != NULL)
1297 		ta->change_ti(tc->astate, &tablestate[kidx]);
1298 
1299 	/*
1300 	 * Stage 4: unref values.
1301 	 */
1302 	ipfw_unref_table_values(ch, tc, ta, astate_old, &ti_old);
1303 	IPFW_UH_WUNLOCK(ch);
1304 
1305 	/*
1306 	 * Stage 5: perform real flush/destroy.
1307 	 */
1308 	ta->destroy(astate_old, &ti_old);
1309 
1310 	return (0);
1311 }
1312 
1313 /*
1314  * Swaps two tables.
1315  * Data layout (v0)(current):
1316  * Request: [ ipfw_obj_header ipfw_obj_ntlv ]
1317  *
1318  * Returns 0 on success
1319  */
1320 static int
1321 swap_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1322     struct sockopt_data *sd)
1323 {
1324 	int error;
1325 	struct _ipfw_obj_header *oh;
1326 	struct tid_info ti_a, ti_b;
1327 
1328 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_obj_ntlv))
1329 		return (EINVAL);
1330 
1331 	oh = (struct _ipfw_obj_header *)op3;
1332 	ntlv_to_ti(&oh->ntlv, &ti_a);
1333 	ntlv_to_ti((ipfw_obj_ntlv *)(oh + 1), &ti_b);
1334 
1335 	error = swap_tables(ch, &ti_a, &ti_b);
1336 
1337 	return (error);
1338 }
1339 
1340 /*
1341  * Swaps two tables of the same type/valtype.
1342  *
1343  * Checks that the tables are compatible and their limits
1344  * permit the swap, then actually performs the swap.
1345  *
1346  * Each table consists of 2 different parts:
1347  * config:
1348  *   @tc (with name, set, kidx) and rule bindings, which is "stable".
1349  *   number of items
1350  *   table algo
1351  * runtime:
1352  *   runtime data @ti (ch->tablestate)
1353  *   runtime cache in @tc
1354  *   algo-specific data (@tc->astate)
1355  *
1356  * So we switch:
1357  *  all runtime data
1358  *   number of items
1359  *   table algo
1360  *
1361  * After that we call @ti change handler for each table.
1362  *
1363  * Note that referencing @tc won't protect tc->ta from change.
1364  * XXX: Do we need to restrict swap between locked tables?
1365  * XXX: Do we need to exchange ftype?
1366  *
1367  * Returns 0 on success.
1368  */
1369 static int
1370 swap_tables(struct ip_fw_chain *ch, struct tid_info *a,
1371     struct tid_info *b)
1372 {
1373 	struct namedobj_instance *ni;
1374 	struct table_config *tc_a, *tc_b;
1375 	struct table_algo *ta;
1376 	struct table_info ti, *tablestate;
1377 	void *astate;
1378 	uint32_t count;
1379 
1380 	/*
1381 	 * Stage 1: find both tables and ensure they are of
1382 	 * the same type.
1383 	 */
1384 	IPFW_UH_WLOCK(ch);
1385 	ni = CHAIN_TO_NI(ch);
1386 	if ((tc_a = find_table(ni, a)) == NULL) {
1387 		IPFW_UH_WUNLOCK(ch);
1388 		return (ESRCH);
1389 	}
1390 	if ((tc_b = find_table(ni, b)) == NULL) {
1391 		IPFW_UH_WUNLOCK(ch);
1392 		return (ESRCH);
1393 	}
1394 
1395 	/* Swapping a table with itself is a no-op */
1396 	if (tc_a == tc_b) {
1397 		IPFW_UH_WUNLOCK(ch);
1398 		return (0);
1399 	}
1400 
1401 	/* Check type and value are the same */
1402 	if (tc_a->no.type != tc_b->no.type || tc_a->tflags != tc_b->tflags) {
1403 		IPFW_UH_WUNLOCK(ch);
1404 		return (EINVAL);
1405 	}
1406 
1407 	/* Check limits before swap */
1408 	if ((tc_a->limit != 0 && tc_b->count > tc_a->limit) ||
1409 	    (tc_b->limit != 0 && tc_a->count > tc_b->limit)) {
1410 		IPFW_UH_WUNLOCK(ch);
1411 		return (EFBIG);
1412 	}
1413 
1414 	/* Check if one of the tables is readonly */
1415 	if (((tc_a->ta->flags | tc_b->ta->flags) & TA_FLAG_READONLY) != 0) {
1416 		IPFW_UH_WUNLOCK(ch);
1417 		return (EACCES);
1418 	}
1419 
1420 	/* Notify we're going to swap */
1421 	rollback_toperation_state(ch, tc_a);
1422 	rollback_toperation_state(ch, tc_b);
1423 
1424 	/* Everything is fine, prepare to swap */
1425 	tablestate = (struct table_info *)ch->tablestate;
1426 	ti = tablestate[tc_a->no.kidx];
1427 	ta = tc_a->ta;
1428 	astate = tc_a->astate;
1429 	count = tc_a->count;
1430 
1431 	IPFW_WLOCK(ch);
1432 	/* a <- b */
1433 	tablestate[tc_a->no.kidx] = tablestate[tc_b->no.kidx];
1434 	tc_a->ta = tc_b->ta;
1435 	tc_a->astate = tc_b->astate;
1436 	tc_a->count = tc_b->count;
1437 	/* b <- a */
1438 	tablestate[tc_b->no.kidx] = ti;
1439 	tc_b->ta = ta;
1440 	tc_b->astate = astate;
1441 	tc_b->count = count;
1442 	IPFW_WUNLOCK(ch);
1443 
1444 	/* Ensure tc.ti copies are in sync */
1445 	tc_a->ti_copy = tablestate[tc_a->no.kidx];
1446 	tc_b->ti_copy = tablestate[tc_b->no.kidx];
1447 
1448 	/* Notify both tables on @ti change */
1449 	if (tc_a->ta->change_ti != NULL)
1450 		tc_a->ta->change_ti(tc_a->astate, &tablestate[tc_a->no.kidx]);
1451 	if (tc_b->ta->change_ti != NULL)
1452 		tc_b->ta->change_ti(tc_b->astate, &tablestate[tc_b->no.kidx]);
1453 
1454 	IPFW_UH_WUNLOCK(ch);
1455 
1456 	return (0);
1457 }
1458 
1459 /*
1460  * Destroys table specified by @ti.
1461  * Data layout (v0)(current):
1462  * Request: [ ip_fw3_opheader ]
1463  *
1464  * Returns 0 on success
1465  */
1466 static int
1467 destroy_table(struct ip_fw_chain *ch, struct tid_info *ti)
1468 {
1469 	struct namedobj_instance *ni;
1470 	struct table_config *tc;
1471 
1472 	IPFW_UH_WLOCK(ch);
1473 
1474 	ni = CHAIN_TO_NI(ch);
1475 	if ((tc = find_table(ni, ti)) == NULL) {
1476 		IPFW_UH_WUNLOCK(ch);
1477 		return (ESRCH);
1478 	}
1479 
1480 	/* Do not permit destroying referenced tables */
1481 	if (tc->no.refcnt > 0) {
1482 		IPFW_UH_WUNLOCK(ch);
1483 		return (EBUSY);
1484 	}
1485 
1486 	IPFW_WLOCK(ch);
1487 	unlink_table(ch, tc);
1488 	IPFW_WUNLOCK(ch);
1489 
1490 	/* Free obj index */
1491 	if (ipfw_objhash_free_idx(ni, tc->no.kidx) != 0)
1492 		printf("Error unlinking kidx %d from table %s\n",
1493 		    tc->no.kidx, tc->tablename);
1494 
1495 	/* Unref values used in tables while holding UH lock */
1496 	ipfw_unref_table_values(ch, tc, tc->ta, tc->astate, &tc->ti_copy);
1497 	IPFW_UH_WUNLOCK(ch);
1498 
1499 	free_table_config(ni, tc);
1500 
1501 	return (0);
1502 }
1503 
1504 static uint32_t
1505 roundup2p(uint32_t v)
1506 {
1507 
1508 	v--;
1509 	v |= v >> 1;
1510 	v |= v >> 2;
1511 	v |= v >> 4;
1512 	v |= v >> 8;
1513 	v |= v >> 16;
1514 	v++;
1515 
1516 	return (v);
1517 }
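/*
 * Example: roundup2p(100) == 128 and roundup2p(128) == 128 (a power of two
 * maps to itself), so the tables index below is always sized to a power
 * of two.
 */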
1518 
1519 /*
1520  * Grow tables index.
1521  *
1522  * Returns 0 on success.
1523  */
1524 int
1525 ipfw_resize_tables(struct ip_fw_chain *ch, unsigned int ntables)
1526 {
1527 	unsigned int ntables_old, tbl;
1528 	struct namedobj_instance *ni;
1529 	void *new_idx, *old_tablestate, *tablestate;
1530 	struct table_info *ti;
1531 	struct table_config *tc;
1532 	int i, new_blocks;
1533 
1534 	/* Check new value for validity */
1535 	if (ntables == 0)
1536 		return (EINVAL);
1537 	if (ntables > IPFW_TABLES_MAX)
1538 		ntables = IPFW_TABLES_MAX;
1539 	/* Round up to the next power of 2 */
1540 	ntables = (unsigned int)roundup2p(ntables);
1541 
1542 	/* Allocate new pointers */
1543 	tablestate = malloc(ntables * sizeof(struct table_info),
1544 	    M_IPFW, M_WAITOK | M_ZERO);
1545 
1546 	ipfw_objhash_bitmap_alloc(ntables, (void *)&new_idx, &new_blocks);
1547 
1548 	IPFW_UH_WLOCK(ch);
1549 
1550 	tbl = (ntables >= V_fw_tables_max) ? V_fw_tables_max : ntables;
1551 	ni = CHAIN_TO_NI(ch);
1552 
1553 	/* Temporarily restrict decreasing max_tables */
1554 	if (ntables < V_fw_tables_max) {
1555 
1556 		/*
1557 		 * FIXME: Check if we really can shrink
1558 		 */
1559 		IPFW_UH_WUNLOCK(ch);
1560 		return (EINVAL);
1561 	}
1562 
1563 	/* Copy table info/indices */
1564 	memcpy(tablestate, ch->tablestate, sizeof(struct table_info) * tbl);
1565 	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
1566 
1567 	IPFW_WLOCK(ch);
1568 
1569 	/* Change pointers */
1570 	old_tablestate = ch->tablestate;
1571 	ch->tablestate = tablestate;
1572 	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);
1573 
1574 	ntables_old = V_fw_tables_max;
1575 	V_fw_tables_max = ntables;
1576 
1577 	IPFW_WUNLOCK(ch);
1578 
1579 	/* Notify all consumers that their @ti pointer has changed */
1580 	ti = (struct table_info *)ch->tablestate;
1581 	for (i = 0; i < tbl; i++, ti++) {
1582 		if (ti->lookup == NULL)
1583 			continue;
1584 		tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, i);
1585 		if (tc == NULL || tc->ta->change_ti == NULL)
1586 			continue;
1587 
1588 		tc->ta->change_ti(tc->astate, ti);
1589 	}
1590 
1591 	IPFW_UH_WUNLOCK(ch);
1592 
1593 	/* Free old pointers */
1594 	free(old_tablestate, M_IPFW);
1595 	ipfw_objhash_bitmap_free(new_idx, new_blocks);
1596 
1597 	return (0);
1598 }
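/*
 * For example, raising the net.inet.ip.fw.tables_max sysctl (mentioned in
 * create_table_internal() below) to 200 presumably lands here and is rounded
 * up to 256 by roundup2p(); shrinking below the current maximum is rejected
 * with EINVAL for now.
 */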
1599 
1600 /*
1601  * Switches between "set 0" and "rule's set" table binding.
1602  * Checks all ruleset bindings and permits the change
1603  * IFF each binding has both rule AND table in the default set (set 0).
1604  *
1605  * Returns 0 on success.
1606  */
1607 int
1608 ipfw_switch_tables_namespace(struct ip_fw_chain *ch, unsigned int sets)
1609 {
1610 	struct namedobj_instance *ni;
1611 	struct named_object *no;
1612 	struct ip_fw *rule;
1613 	ipfw_insn *cmd;
1614 	int cmdlen, i, l;
1615 	uint16_t kidx;
1616 	uint8_t type;
1617 
1618 	IPFW_UH_WLOCK(ch);
1619 
1620 	if (V_fw_tables_sets == sets) {
1621 		IPFW_UH_WUNLOCK(ch);
1622 		return (0);
1623 	}
1624 
1625 	ni = CHAIN_TO_NI(ch);
1626 
1627 	/*
1628 	 * Scan all rules and examine table opcodes.
1629 	 */
1630 	for (i = 0; i < ch->n_rules; i++) {
1631 		rule = ch->map[i];
1632 
1633 		l = rule->cmd_len;
1634 		cmd = rule->cmd;
1635 		cmdlen = 0;
1636 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1637 			cmdlen = F_LEN(cmd);
1638 
1639 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
1640 				continue;
1641 
1642 			no = ipfw_objhash_lookup_kidx(ni, kidx);
1643 
1644 			/* Check if both the table object and the rule are in set 0 */
1645 			if (no->set != 0 || rule->set != 0) {
1646 				IPFW_UH_WUNLOCK(ch);
1647 				return (EBUSY);
1648 			}
1649 
1650 		}
1651 	}
1652 	V_fw_tables_sets = sets;
1653 
1654 	IPFW_UH_WUNLOCK(ch);
1655 
1656 	return (0);
1657 }
1658 
1659 /*
1660  * Lookup an IP @addr in table @tbl.
1661  * Stores found value in @val.
1662  *
1663  * Returns 1 if @addr was found.
1664  */
1665 int
1666 ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
1667     uint32_t *val)
1668 {
1669 	struct table_info *ti;
1670 
1671 	ti = KIDX_TO_TI(ch, tbl);
1672 
1673 	return (ti->lookup(ti, &addr, sizeof(in_addr_t), val));
1674 }
1675 
1676 /*
1677  * Lookup an arbitrary key @paddr of length @plen in table @tbl.
1678  * Stores found value in @val.
1679  *
1680  * Returns 1 if key was found.
1681  */
1682 int
1683 ipfw_lookup_table_extended(struct ip_fw_chain *ch, uint16_t tbl, uint16_t plen,
1684     void *paddr, uint32_t *val)
1685 {
1686 	struct table_info *ti;
1687 
1688 	ti = KIDX_TO_TI(ch, tbl);
1689 
1690 	return (ti->lookup(ti, paddr, plen, val));
1691 }
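/*
 * A minimal illustrative sketch (comment only, not compiled here) of how a
 * caller in the packet path might use the two lookup helpers above; @kidx
 * and the ip/ip6 header pointers are assumed to come from the matching rule
 * opcode and the inspected packet:
 *
 *	uint32_t val;
 *
 *	if (ipfw_lookup_table(ch, kidx, ip->ip_src.s_addr, &val))
 *		;	// IPv4 source address found, table value in val
 *	if (ipfw_lookup_table_extended(ch, kidx, sizeof(struct in6_addr),
 *	    &ip6->ip6_src, &val))
 *		;	// IPv6 source address found
 */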
1692 
1693 /*
1694  * Info/List/dump support for tables.
1695  *
1696  */
1697 
1698 /*
1699  * High-level 'get' cmds sysctl handlers
1700  */
1701 
1702 /*
1703  * Lists all tables currently available in kernel.
1704  * Data layout (v0)(current):
1705  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
1706  * Reply: [ ipfw_obj_lheader ipfw_xtable_info x N ]
1707  *
1708  * Returns 0 on success
1709  */
1710 static int
1711 list_tables(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1712     struct sockopt_data *sd)
1713 {
1714 	struct _ipfw_obj_lheader *olh;
1715 	int error;
1716 
1717 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
1718 	if (olh == NULL)
1719 		return (EINVAL);
1720 	if (sd->valsize < olh->size)
1721 		return (EINVAL);
1722 
1723 	IPFW_UH_RLOCK(ch);
1724 	error = export_tables(ch, olh, sd);
1725 	IPFW_UH_RUNLOCK(ch);
1726 
1727 	return (error);
1728 }
1729 
1730 /*
1731  * Store table info to buffer provided by @sd.
1732  * Data layout (v0)(current):
1733  * Request: [ ipfw_obj_header ipfw_xtable_info(empty)]
1734  * Reply: [ ipfw_obj_header ipfw_xtable_info ]
1735  *
1736  * Returns 0 on success.
1737  */
1738 static int
1739 describe_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1740     struct sockopt_data *sd)
1741 {
1742 	struct _ipfw_obj_header *oh;
1743 	struct table_config *tc;
1744 	struct tid_info ti;
1745 	size_t sz;
1746 
1747 	sz = sizeof(*oh) + sizeof(ipfw_xtable_info);
1748 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
1749 	if (oh == NULL)
1750 		return (EINVAL);
1751 
1752 	objheader_to_ti(oh, &ti);
1753 
1754 	IPFW_UH_RLOCK(ch);
1755 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
1756 		IPFW_UH_RUNLOCK(ch);
1757 		return (ESRCH);
1758 	}
1759 
1760 	export_table_info(ch, tc, (ipfw_xtable_info *)(oh + 1));
1761 	IPFW_UH_RUNLOCK(ch);
1762 
1763 	return (0);
1764 }
1765 
1766 /*
1767  * Modifies existing table.
1768  * Data layout (v0)(current):
1769  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1770  *
1771  * Returns 0 on success
1772  */
1773 static int
1774 modify_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1775     struct sockopt_data *sd)
1776 {
1777 	struct _ipfw_obj_header *oh;
1778 	ipfw_xtable_info *i;
1779 	char *tname;
1780 	struct tid_info ti;
1781 	struct namedobj_instance *ni;
1782 	struct table_config *tc;
1783 
1784 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1785 		return (EINVAL);
1786 
1787 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1788 	i = (ipfw_xtable_info *)(oh + 1);
1789 
1790 	/*
1791 	 * Verify user-supplied strings.
1792 	 * Check for null-terminated/zero-length strings.
1793 	 */
1794 	tname = oh->ntlv.name;
1795 	if (ipfw_check_table_name(tname) != 0)
1796 		return (EINVAL);
1797 
1798 	objheader_to_ti(oh, &ti);
1799 	ti.type = i->type;
1800 
1801 	IPFW_UH_WLOCK(ch);
1802 	ni = CHAIN_TO_NI(ch);
1803 	if ((tc = find_table(ni, &ti)) == NULL) {
1804 		IPFW_UH_WUNLOCK(ch);
1805 		return (ESRCH);
1806 	}
1807 
1808 	/* Do not support any modifications for readonly tables */
1809 	if ((tc->ta->flags & TA_FLAG_READONLY) != 0) {
1810 		IPFW_UH_WUNLOCK(ch);
1811 		return (EACCES);
1812 	}
1813 
1814 	if ((i->mflags & IPFW_TMFLAGS_LIMIT) != 0)
1815 		tc->limit = i->limit;
1816 	if ((i->mflags & IPFW_TMFLAGS_LOCK) != 0)
1817 		tc->locked = ((i->flags & IPFW_TGFLAGS_LOCKED) != 0);
1818 	IPFW_UH_WUNLOCK(ch);
1819 
1820 	return (0);
1821 }
1822 
1823 /*
1824  * Creates new table.
1825  * Data layout (v0)(current):
1826  * Request: [ ipfw_obj_header ipfw_xtable_info ]
1827  *
1828  * Returns 0 on success
1829  */
1830 static int
1831 create_table(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
1832     struct sockopt_data *sd)
1833 {
1834 	struct _ipfw_obj_header *oh;
1835 	ipfw_xtable_info *i;
1836 	char *tname, *aname;
1837 	struct tid_info ti;
1838 	struct namedobj_instance *ni;
1839 
1840 	if (sd->valsize != sizeof(*oh) + sizeof(ipfw_xtable_info))
1841 		return (EINVAL);
1842 
1843 	oh = (struct _ipfw_obj_header *)sd->kbuf;
1844 	i = (ipfw_xtable_info *)(oh + 1);
1845 
1846 	/*
1847 	 * Verify user-supplied strings.
1848 	 * Check for null-terminated/zero-length strings.
1849 	 */
1850 	tname = oh->ntlv.name;
1851 	aname = i->algoname;
1852 	if (ipfw_check_table_name(tname) != 0 ||
1853 	    strnlen(aname, sizeof(i->algoname)) == sizeof(i->algoname))
1854 		return (EINVAL);
1855 
1856 	if (aname[0] == '\0') {
1857 		/* Use default algorithm */
1858 		aname = NULL;
1859 	}
1860 
1861 	objheader_to_ti(oh, &ti);
1862 	ti.type = i->type;
1863 
1864 	ni = CHAIN_TO_NI(ch);
1865 
1866 	IPFW_UH_RLOCK(ch);
1867 	if (find_table(ni, &ti) != NULL) {
1868 		IPFW_UH_RUNLOCK(ch);
1869 		return (EEXIST);
1870 	}
1871 	IPFW_UH_RUNLOCK(ch);
1872 
1873 	return (create_table_internal(ch, &ti, aname, i, NULL, 0));
1874 }
1875 
1876 /*
1877  * Creates new table based on @ti and @aname.
1878  *
1879  * Relies on table name checking inside find_name_tlv().
1880  * Assumes @aname has been checked and is valid.
1881  * Stores allocated table kidx inside @pkidx (if non-NULL).
1882  * Reference created table if @compat is non-zero.
1883  *
1884  * Returns 0 on success.
1885  */
1886 static int
1887 create_table_internal(struct ip_fw_chain *ch, struct tid_info *ti,
1888     char *aname, ipfw_xtable_info *i, uint16_t *pkidx, int compat)
1889 {
1890 	struct namedobj_instance *ni;
1891 	struct table_config *tc, *tc_new, *tmp;
1892 	struct table_algo *ta;
1893 	uint16_t kidx;
1894 
1895 	ni = CHAIN_TO_NI(ch);
1896 
1897 	ta = find_table_algo(CHAIN_TO_TCFG(ch), ti, aname);
1898 	if (ta == NULL)
1899 		return (ENOTSUP);
1900 
1901 	tc = alloc_table_config(ch, ti, ta, aname, i->tflags);
1902 	if (tc == NULL)
1903 		return (ENOMEM);
1904 
1905 	tc->vmask = i->vmask;
1906 	tc->limit = i->limit;
1907 	if (ta->flags & TA_FLAG_READONLY)
1908 		tc->locked = 1;
1909 	else
1910 		tc->locked = (i->flags & IPFW_TGFLAGS_LOCKED) != 0;
1911 
1912 	IPFW_UH_WLOCK(ch);
1913 
1914 	/* Check if the table has already been created */
1915 	tc_new = find_table(ni, ti);
1916 	if (tc_new != NULL) {
1917 
1918 		/*
1919 		 * Compat: do not fail if we're
1920 		 * requesting to create an existing table
1921 		 * of the same type
1922 		 */
1923 		if (compat == 0 || tc_new->no.type != tc->no.type) {
1924 			IPFW_UH_WUNLOCK(ch);
1925 			free_table_config(ni, tc);
1926 			return (EEXIST);
1927 		}
1928 
1929 		/* Exchange tc and tc_new for proper refcounting & freeing */
1930 		tmp = tc;
1931 		tc = tc_new;
1932 		tc_new = tmp;
1933 	} else {
1934 		/* New table */
1935 		if (ipfw_objhash_alloc_idx(ni, &kidx) != 0) {
1936 			IPFW_UH_WUNLOCK(ch);
1937 			printf("Unable to allocate table index."
1938 			    " Consider increasing net.inet.ip.fw.tables_max.\n");
1939 			free_table_config(ni, tc);
1940 			return (EBUSY);
1941 		}
1942 		tc->no.kidx = kidx;
1943 
1944 		IPFW_WLOCK(ch);
1945 		link_table(ch, tc);
1946 		IPFW_WUNLOCK(ch);
1947 	}
1948 
1949 	if (compat != 0)
1950 		tc->no.refcnt++;
1951 	if (pkidx != NULL)
1952 		*pkidx = tc->no.kidx;
1953 
1954 	IPFW_UH_WUNLOCK(ch);
1955 
1956 	if (tc_new != NULL)
1957 		free_table_config(ni, tc_new);
1958 
1959 	return (0);
1960 }
1961 
1962 static void
1963 ntlv_to_ti(ipfw_obj_ntlv *ntlv, struct tid_info *ti)
1964 {
1965 
1966 	memset(ti, 0, sizeof(struct tid_info));
1967 	ti->set = ntlv->set;
1968 	ti->uidx = ntlv->idx;
1969 	ti->tlvs = ntlv;
1970 	ti->tlen = ntlv->head.length;
1971 }
1972 
1973 static void
1974 objheader_to_ti(struct _ipfw_obj_header *oh, struct tid_info *ti)
1975 {
1976 
1977 	ntlv_to_ti(&oh->ntlv, ti);
1978 }
1979 
1980 /*
1981  * Exports basic table info as name TLV.
1982  * Used inside dump_static_rules() to provide info
1983  * about all tables referenced by current ruleset.
1984  *
1985  * Returns 0 on success.
1986  */
1987 int
1988 ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
1989     struct sockopt_data *sd)
1990 {
1991 	struct namedobj_instance *ni;
1992 	struct named_object *no;
1993 	ipfw_obj_ntlv *ntlv;
1994 
1995 	ni = CHAIN_TO_NI(ch);
1996 
1997 	no = ipfw_objhash_lookup_kidx(ni, kidx);
1998 	KASSERT(no != NULL, ("invalid table kidx passed"));
1999 
2000 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2001 	if (ntlv == NULL)
2002 		return (ENOMEM);
2003 
2004 	ntlv->head.type = IPFW_TLV_TBL_NAME;
2005 	ntlv->head.length = sizeof(*ntlv);
2006 	ntlv->idx = no->kidx;
2007 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
2008 
2009 	return (0);
2010 }
2011 
2012 /*
2013  * Marks every table kidx used in @rule with bit in @bmask.
2014  * Used to generate bitmask of referenced tables for given ruleset.
2015  *
2016  * Returns number of newly-referenced tables.
2017  */
2018 int
2019 ipfw_mark_table_kidx(struct ip_fw_chain *chain, struct ip_fw *rule,
2020     uint32_t *bmask)
2021 {
2022 	int cmdlen, l, count;
2023 	ipfw_insn *cmd;
2024 	uint16_t kidx;
2025 	uint8_t type;
2026 
2027 	l = rule->cmd_len;
2028 	cmd = rule->cmd;
2029 	cmdlen = 0;
2030 	count = 0;
2031 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2032 		cmdlen = F_LEN(cmd);
2033 
2034 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
2035 			continue;
2036 
2037 		if ((bmask[kidx / 32] & (1 << (kidx % 32))) == 0)
2038 			count++;
2039 
2040 		bmask[kidx / 32] |= 1 << (kidx % 32);
2041 	}
2042 
2043 	return (count);
2044 }
2045 
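/*
 * Example (illustrative sketch): the @bmask argument above is a plain
 * bitmap with one bit per table kidx, 32 indexes per uint32_t word.
 * The helpers below mirror the sizing and the bit test used by
 * ipfw_mark_table_kidx(); their names are hypothetical and M_TEMP is
 * used only for the sake of the example.
 */
#if 0
static uint32_t *
alloc_table_bmask(void)
{
	size_t words;

	/* One bit per possible table index. */
	words = roundup(V_fw_tables_max, 32) / 32;
	return (malloc(words * sizeof(uint32_t), M_TEMP, M_WAITOK | M_ZERO));
}

static int
table_kidx_is_marked(const uint32_t *bmask, uint16_t kidx)
{

	return ((bmask[kidx / 32] & (1u << (kidx % 32))) != 0);
}
#endif
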
2046 struct dump_args {
2047 	struct ip_fw_chain *ch;
2048 	struct table_info *ti;
2049 	struct table_config *tc;
2050 	struct sockopt_data *sd;
2051 	uint32_t cnt;
2052 	uint16_t uidx;
2053 	int error;
2054 	uint32_t size;
2055 	ipfw_table_entry *ent;
2056 	ta_foreach_f *f;
2057 	void *farg;
2058 	ipfw_obj_tentry tent;
2059 };
2060 
2061 static int
2062 count_ext_entries(void *e, void *arg)
2063 {
2064 	struct dump_args *da;
2065 
2066 	da = (struct dump_args *)arg;
2067 	da->cnt++;
2068 
2069 	return (0);
2070 }
2071 
2072 /*
2073  * Gets number of items from table either using
2074  * internal counter or calling algo callback for
2075  * externally-managed tables.
2076  *
2077  * Returns number of records.
2078  */
2079 static uint32_t
2080 table_get_count(struct ip_fw_chain *ch, struct table_config *tc)
2081 {
2082 	struct table_info *ti;
2083 	struct table_algo *ta;
2084 	struct dump_args da;
2085 
2086 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2087 	ta = tc->ta;
2088 
2089 	/* Use internal counter for self-managed tables */
2090 	if ((ta->flags & TA_FLAG_READONLY) == 0)
2091 		return (tc->count);
2092 
2093 	/* Use callback to quickly get number of items */
2094 	if ((ta->flags & TA_FLAG_EXTCOUNTER) != 0)
2095 		return (ta->get_count(tc->astate, ti));
2096 
2097 	/* Count the number of items ourselves */
2098 	memset(&da, 0, sizeof(da));
2099 	ta->foreach(tc->astate, ti, count_ext_entries, &da);
2100 
2101 	return (da.cnt);
2102 }
2103 
2104 /*
2105  * Exports table @tc info into standard ipfw_xtable_info format.
2106  */
2107 static void
2108 export_table_info(struct ip_fw_chain *ch, struct table_config *tc,
2109     ipfw_xtable_info *i)
2110 {
2111 	struct table_info *ti;
2112 	struct table_algo *ta;
2113 
2114 	i->type = tc->no.type;
2115 	i->tflags = tc->tflags;
2116 	i->vmask = tc->vmask;
2117 	i->set = tc->no.set;
2118 	i->kidx = tc->no.kidx;
2119 	i->refcnt = tc->no.refcnt;
2120 	i->count = table_get_count(ch, tc);
2121 	i->limit = tc->limit;
2122 	i->flags |= (tc->locked != 0) ? IPFW_TGFLAGS_LOCKED : 0;
2123 	i->size = tc->count * sizeof(ipfw_obj_tentry);
2124 	i->size += sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2125 	strlcpy(i->tablename, tc->tablename, sizeof(i->tablename));
2126 	ti = KIDX_TO_TI(ch, tc->no.kidx);
2127 	ta = tc->ta;
2128 	if (ta->print_config != NULL) {
2129 		/* Use algo function to print table config to string */
2130 		ta->print_config(tc->astate, ti, i->algoname,
2131 		    sizeof(i->algoname));
2132 	} else
2133 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2134 	/* Dump algo-specific data, if possible */
2135 	if (ta->dump_tinfo != NULL) {
2136 		ta->dump_tinfo(tc->astate, ti, &i->ta_info);
2137 		i->ta_info.flags |= IPFW_TATFLAGS_DATA;
2138 	}
2139 }
2140 
2141 struct dump_table_args {
2142 	struct ip_fw_chain *ch;
2143 	struct sockopt_data *sd;
2144 };
2145 
2146 static void
2147 export_table_internal(struct namedobj_instance *ni, struct named_object *no,
2148     void *arg)
2149 {
2150 	ipfw_xtable_info *i;
2151 	struct dump_table_args *dta;
2152 
2153 	dta = (struct dump_table_args *)arg;
2154 
2155 	i = (ipfw_xtable_info *)ipfw_get_sopt_space(dta->sd, sizeof(*i));
2156 	KASSERT(i != NULL, ("previously checked buffer is not enough"));
2157 
2158 	export_table_info(dta->ch, (struct table_config *)no, i);
2159 }
2160 
2161 /*
2162  * Export all tables as ipfw_xtable_info structures to
2163  * storage provided by @sd.
2164  *
2165  * If supplied buffer is too small, fills in required size
2166  * and returns ENOMEM.
2167  * Returns 0 on success.
2168  */
2169 static int
2170 export_tables(struct ip_fw_chain *ch, ipfw_obj_lheader *olh,
2171     struct sockopt_data *sd)
2172 {
2173 	uint32_t size;
2174 	uint32_t count;
2175 	struct dump_table_args dta;
2176 
2177 	count = ipfw_objhash_count(CHAIN_TO_NI(ch));
2178 	size = count * sizeof(ipfw_xtable_info) + sizeof(ipfw_obj_lheader);
2179 
2180 	/* Fill in header regardless of buffer size */
2181 	olh->count = count;
2182 	olh->objsize = sizeof(ipfw_xtable_info);
2183 
2184 	if (size > olh->size) {
2185 		olh->size = size;
2186 		return (ENOMEM);
2187 	}
2188 
2189 	olh->size = size;
2190 
2191 	dta.ch = ch;
2192 	dta.sd = sd;
2193 
2194 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), export_table_internal, &dta);
2195 
2196 	return (0);
2197 }
2198 
2199 /*
2200  * Dumps all table data
2201  * Data layout (v1)(current):
2202  * Request: [ ipfw_obj_header ], size = ipfw_xtable_info.size
2203  * Reply: [ ipfw_obj_header ipfw_xtable_info ipfw_obj_tentry x N ]
2204  *
2205  * Returns 0 on success
2206  */
2207 static int
2208 dump_table_v1(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2209     struct sockopt_data *sd)
2210 {
2211 	struct _ipfw_obj_header *oh;
2212 	ipfw_xtable_info *i;
2213 	struct tid_info ti;
2214 	struct table_config *tc;
2215 	struct table_algo *ta;
2216 	struct dump_args da;
2217 	uint32_t sz;
2218 
2219 	sz = sizeof(ipfw_obj_header) + sizeof(ipfw_xtable_info);
2220 	oh = (struct _ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
2221 	if (oh == NULL)
2222 		return (EINVAL);
2223 
2224 	i = (ipfw_xtable_info *)(oh + 1);
2225 	objheader_to_ti(oh, &ti);
2226 
2227 	IPFW_UH_RLOCK(ch);
2228 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2229 		IPFW_UH_RUNLOCK(ch);
2230 		return (ESRCH);
2231 	}
2232 	export_table_info(ch, tc, i);
2233 
2234 	if (sd->valsize < i->size) {
2235 
2236 		/*
2237 		 * Submitted buffer size is not enough.
2238 		 * We've already filled in the @i structure with
2239 		 * relevant table info including size, so we
2240 		 * can return. Buffer will be flushed automatically.
2241 		 */
2242 		IPFW_UH_RUNLOCK(ch);
2243 		return (ENOMEM);
2244 	}
2245 
2246 	/*
2247 	 * Do the actual dump in eXtended format
2248 	 */
2249 	memset(&da, 0, sizeof(da));
2250 	da.ch = ch;
2251 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2252 	da.tc = tc;
2253 	da.sd = sd;
2254 
2255 	ta = tc->ta;
2256 
2257 	ta->foreach(tc->astate, da.ti, dump_table_tentry, &da);
2258 	IPFW_UH_RUNLOCK(ch);
2259 
2260 	return (da.error);
2261 }
2262 
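/*
 * Example (userland-style sketch): walking the version 1 reply laid
 * out above, [ ipfw_obj_header ][ ipfw_xtable_info ][ ipfw_obj_tentry x N ].
 * Assumes a userland context (<stdio.h>, <arpa/inet.h>) and that @buf
 * already holds a successful IP_FW_TABLE_XLIST v1 reply; only the
 * IPv4 key case is decoded and the function name is hypothetical.
 */
#if 0
static void
print_table_dump_v1(const char *buf, size_t buflen)
{
	const ipfw_obj_header *oh;
	const ipfw_xtable_info *info;
	const ipfw_obj_tentry *tent;
	const char *p, *pe;
	uint32_t n;

	oh = (const ipfw_obj_header *)(const void *)buf;
	info = (const ipfw_xtable_info *)(oh + 1);
	p = (const char *)(info + 1);
	pe = buf + buflen;

	for (n = 0; n < info->count && p + sizeof(*tent) <= pe; n++) {
		tent = (const ipfw_obj_tentry *)(const void *)p;
		if (tent->subtype == AF_INET)
			printf("%s/%u\n", inet_ntoa(tent->k.addr),
			    (unsigned)tent->masklen);
		p += tent->head.length;
	}
}
#endif
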
2263 /*
2264  * Dumps all table data
2265  * Data layout (version 0)(legacy):
2266  * Request: [ ipfw_xtable ], size = IP_FW_TABLE_XGETSIZE()
2267  * Reply: [ ipfw_xtable ipfw_table_xentry x N ]
2268  *
2269  * Returns 0 on success
2270  */
2271 static int
2272 dump_table_v0(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2273     struct sockopt_data *sd)
2274 {
2275 	ipfw_xtable *xtbl;
2276 	struct tid_info ti;
2277 	struct table_config *tc;
2278 	struct table_algo *ta;
2279 	struct dump_args da;
2280 	size_t sz, count;
2281 
2282 	xtbl = (ipfw_xtable *)ipfw_get_sopt_header(sd, sizeof(ipfw_xtable));
2283 	if (xtbl == NULL)
2284 		return (EINVAL);
2285 
2286 	memset(&ti, 0, sizeof(ti));
2287 	ti.uidx = xtbl->tbl;
2288 
2289 	IPFW_UH_RLOCK(ch);
2290 	if ((tc = find_table(CHAIN_TO_NI(ch), &ti)) == NULL) {
2291 		IPFW_UH_RUNLOCK(ch);
2292 		return (0);
2293 	}
2294 	count = table_get_count(ch, tc);
2295 	sz = count * sizeof(ipfw_table_xentry) + sizeof(ipfw_xtable);
2296 
2297 	xtbl->cnt = count;
2298 	xtbl->size = sz;
2299 	xtbl->type = tc->no.type;
2300 	xtbl->tbl = ti.uidx;
2301 
2302 	if (sd->valsize < sz) {
2303 
2304 		/*
2305 		 * Submitted buffer size is not enough.
2306 		 * We've already filled in the @xtbl structure with
2307 		 * relevant table info including size, so we
2308 		 * can return. Buffer will be flushed automatically.
2309 		 */
2310 		IPFW_UH_RUNLOCK(ch);
2311 		return (ENOMEM);
2312 	}
2313 
2314 	/* Do the actual dump in eXtended format */
2315 	memset(&da, 0, sizeof(da));
2316 	da.ch = ch;
2317 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2318 	da.tc = tc;
2319 	da.sd = sd;
2320 
2321 	ta = tc->ta;
2322 
2323 	ta->foreach(tc->astate, da.ti, dump_table_xentry, &da);
2324 	IPFW_UH_RUNLOCK(ch);
2325 
2326 	return (0);
2327 }
2328 
2329 /*
2330  * Legacy function to retrieve number of items in table.
2331  */
2332 static int
2333 get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2334     struct sockopt_data *sd)
2335 {
2336 	uint32_t *tbl;
2337 	struct tid_info ti;
2338 	size_t sz;
2339 	int error;
2340 
2341 	sz = sizeof(*op3) + sizeof(uint32_t);
2342 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_header(sd, sz);
2343 	if (op3 == NULL)
2344 		return (EINVAL);
2345 
2346 	tbl = (uint32_t *)(op3 + 1);
2347 	memset(&ti, 0, sizeof(ti));
2348 	ti.uidx = *tbl;
2349 	IPFW_UH_RLOCK(ch);
2350 	error = ipfw_count_xtable(ch, &ti, tbl);
2351 	IPFW_UH_RUNLOCK(ch);
2352 	return (error);
2353 }
2354 
2355 /*
2356  * Legacy IP_FW_TABLE_GETSIZE handler
2357  */
2358 int
2359 ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2360 {
2361 	struct table_config *tc;
2362 
2363 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2364 		return (ESRCH);
2365 	*cnt = table_get_count(ch, tc);
2366 	return (0);
2367 }
2368 
2369 /*
2370  * Legacy IP_FW_TABLE_XGETSIZE handler
2371  */
2372 int
2373 ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti, uint32_t *cnt)
2374 {
2375 	struct table_config *tc;
2376 	uint32_t count;
2377 
2378 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL) {
2379 		*cnt = 0;
2380 		return (0); /* 'table all list' requires success */
2381 	}
2382 
2383 	count = table_get_count(ch, tc);
2384 	*cnt = count * sizeof(ipfw_table_xentry);
2385 	if (count > 0)
2386 		*cnt += sizeof(ipfw_xtable);
2387 	return (0);
2388 }
2389 
2390 static int
2391 dump_table_entry(void *e, void *arg)
2392 {
2393 	struct dump_args *da;
2394 	struct table_config *tc;
2395 	struct table_algo *ta;
2396 	ipfw_table_entry *ent;
2397 	struct table_value *pval;
2398 	int error;
2399 
2400 	da = (struct dump_args *)arg;
2401 
2402 	tc = da->tc;
2403 	ta = tc->ta;
2404 
2405 	/* Out of memory, returning */
2406 	if (da->cnt == da->size)
2407 		return (1);
2408 	ent = da->ent++;
2409 	ent->tbl = da->uidx;
2410 	da->cnt++;
2411 
2412 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2413 	if (error != 0)
2414 		return (error);
2415 
2416 	ent->addr = da->tent.k.addr.s_addr;
2417 	ent->masklen = da->tent.masklen;
2418 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2419 	ent->value = ipfw_export_table_value_legacy(pval);
2420 
2421 	return (0);
2422 }
2423 
2424 /*
2425  * Dumps table in pre-8.1 legacy format.
2426  */
2427 int
2428 ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti,
2429     ipfw_table *tbl)
2430 {
2431 	struct table_config *tc;
2432 	struct table_algo *ta;
2433 	struct dump_args da;
2434 
2435 	tbl->cnt = 0;
2436 
2437 	if ((tc = find_table(CHAIN_TO_NI(ch), ti)) == NULL)
2438 		return (0);	/* XXX: We should return ESRCH */
2439 
2440 	ta = tc->ta;
2441 
2442 	/* This dump format supports IPv4 only */
2443 	if (tc->no.type != IPFW_TABLE_ADDR)
2444 		return (0);
2445 
2446 	memset(&da, 0, sizeof(da));
2447 	da.ch = ch;
2448 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2449 	da.tc = tc;
2450 	da.ent = &tbl->ent[0];
2451 	da.size = tbl->size;
2452 
2453 	tbl->cnt = 0;
2454 	ta->foreach(tc->astate, da.ti, dump_table_entry, &da);
2455 	tbl->cnt = da.cnt;
2456 
2457 	return (0);
2458 }
2459 
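/*
 * Example (userland-style sketch): the legacy layout produced above is
 * simply an ipfw_table header followed by @cnt ipfw_table_entry
 * records.  Assumes a userland context with <stdio.h>/<arpa/inet.h>;
 * the function name is hypothetical.
 */
#if 0
static void
print_table_legacy(const ipfw_table *tbl)
{
	const ipfw_table_entry *ent;
	struct in_addr a;
	uint32_t n;

	for (n = 0; n < tbl->cnt; n++) {
		ent = &tbl->ent[n];
		a.s_addr = ent->addr;
		printf("%s/%u %u\n", inet_ntoa(a), (unsigned)ent->masklen,
		    (unsigned)ent->value);
	}
}
#endif
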
2460 /*
2461  * Dumps table entry in eXtended format (v1)(current).
2462  */
2463 static int
2464 dump_table_tentry(void *e, void *arg)
2465 {
2466 	struct dump_args *da;
2467 	struct table_config *tc;
2468 	struct table_algo *ta;
2469 	struct table_value *pval;
2470 	ipfw_obj_tentry *tent;
2471 	int error;
2472 
2473 	da = (struct dump_args *)arg;
2474 
2475 	tc = da->tc;
2476 	ta = tc->ta;
2477 
2478 	tent = (ipfw_obj_tentry *)ipfw_get_sopt_space(da->sd, sizeof(*tent));
2479 	/* Out of memory, returning */
2480 	if (tent == NULL) {
2481 		da->error = ENOMEM;
2482 		return (1);
2483 	}
2484 	tent->head.length = sizeof(ipfw_obj_tentry);
2485 	tent->idx = da->uidx;
2486 
2487 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2488 	if (error != 0)
2489 		return (error);
2490 
2491 	pval = get_table_value(da->ch, da->tc, tent->v.kidx);
2492 	ipfw_export_table_value_v1(pval, &tent->v.value);
2493 
2494 	return (0);
2495 }
2496 
2497 /*
2498  * Dumps table entry in eXtended format (v0).
2499  */
2500 static int
2501 dump_table_xentry(void *e, void *arg)
2502 {
2503 	struct dump_args *da;
2504 	struct table_config *tc;
2505 	struct table_algo *ta;
2506 	ipfw_table_xentry *xent;
2507 	ipfw_obj_tentry *tent;
2508 	struct table_value *pval;
2509 	int error;
2510 
2511 	da = (struct dump_args *)arg;
2512 
2513 	tc = da->tc;
2514 	ta = tc->ta;
2515 
2516 	xent = (ipfw_table_xentry *)ipfw_get_sopt_space(da->sd, sizeof(*xent));
2517 	/* Out of memory, returning */
2518 	if (xent == NULL)
2519 		return (1);
2520 	xent->len = sizeof(ipfw_table_xentry);
2521 	xent->tbl = da->uidx;
2522 
2523 	memset(&da->tent, 0, sizeof(da->tent));
2524 	tent = &da->tent;
2525 	error = ta->dump_tentry(tc->astate, da->ti, e, tent);
2526 	if (error != 0)
2527 		return (error);
2528 
2529 	/* Convert current format to previous one */
2530 	xent->masklen = tent->masklen;
2531 	pval = get_table_value(da->ch, da->tc, da->tent.v.kidx);
2532 	xent->value = ipfw_export_table_value_legacy(pval);
2533 	/* Apply some hacks */
2534 	if (tc->no.type == IPFW_TABLE_ADDR && tent->subtype == AF_INET) {
2535 		xent->k.addr6.s6_addr32[3] = tent->k.addr.s_addr;
2536 		xent->flags = IPFW_TCF_INET;
2537 	} else
2538 		memcpy(&xent->k, &tent->k, sizeof(xent->k));
2539 
2540 	return (0);
2541 }
2542 
2543 /*
2544  * Helper function to export table algo data
2545  * to tentry format before calling user function.
2546  *
2547  * Returns 0 on success.
2548  */
2549 static int
2550 prepare_table_tentry(void *e, void *arg)
2551 {
2552 	struct dump_args *da;
2553 	struct table_config *tc;
2554 	struct table_algo *ta;
2555 	int error;
2556 
2557 	da = (struct dump_args *)arg;
2558 
2559 	tc = da->tc;
2560 	ta = tc->ta;
2561 
2562 	error = ta->dump_tentry(tc->astate, da->ti, e, &da->tent);
2563 	if (error != 0)
2564 		return (error);
2565 
2566 	da->f(&da->tent, da->farg);
2567 
2568 	return (0);
2569 }
2570 
2571 /*
2572  * Allow external consumers to read table entries in standard format.
2573  */
2574 int
2575 ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx,
2576     ta_foreach_f *f, void *arg)
2577 {
2578 	struct namedobj_instance *ni;
2579 	struct table_config *tc;
2580 	struct table_algo *ta;
2581 	struct dump_args da;
2582 
2583 	ni = CHAIN_TO_NI(ch);
2584 
2585 	tc = (struct table_config *)ipfw_objhash_lookup_kidx(ni, kidx);
2586 	if (tc == NULL)
2587 		return (ESRCH);
2588 
2589 	ta = tc->ta;
2590 
2591 	memset(&da, 0, sizeof(da));
2592 	da.ch = ch;
2593 	da.ti = KIDX_TO_TI(ch, tc->no.kidx);
2594 	da.tc = tc;
2595 	da.f = f;
2596 	da.farg = arg;
2597 
2598 	ta->foreach(tc->astate, da.ti, prepare_table_tentry, &da);
2599 
2600 	return (0);
2601 }
2602 
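/*
 * Example (illustrative sketch): a kernel-side consumer of the
 * iterator above.  The callback gets the ipfw_obj_tentry prepared by
 * prepare_table_tentry(); ipfw_foreach_table_tentry() takes no locks
 * itself, so the caller is expected to hold the UH lock.  The names
 * below are hypothetical.
 */
#if 0
static int
count_inet_tentry(void *tentry, void *arg)
{
	ipfw_obj_tentry *tent;
	uint32_t *cnt;

	tent = (ipfw_obj_tentry *)tentry;
	cnt = (uint32_t *)arg;
	if (tent->subtype == AF_INET)
		(*cnt)++;
	return (0);
}

static uint32_t
count_table_inet_entries(struct ip_fw_chain *ch, uint16_t kidx)
{
	uint32_t cnt;

	cnt = 0;
	IPFW_UH_RLOCK(ch);
	ipfw_foreach_table_tentry(ch, kidx, count_inet_tentry, &cnt);
	IPFW_UH_RUNLOCK(ch);
	return (cnt);
}
#endif
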
2603 /*
2604  * Table algorithms
2605  */
2606 
2607 /*
2608  * Finds an algorithm by index, table type or supplied name.
2609  *
2610  * Returns pointer to algo or NULL.
2611  */
2612 static struct table_algo *
2613 find_table_algo(struct tables_config *tcfg, struct tid_info *ti, char *name)
2614 {
2615 	int i, l;
2616 	struct table_algo *ta;
2617 
2618 	if (ti->type > IPFW_TABLE_MAXTYPE)
2619 		return (NULL);
2620 
2621 	/* Search by index */
2622 	if (ti->atype != 0) {
2623 		if (ti->atype > tcfg->algo_count)
2624 			return (NULL);
2625 		return (tcfg->algo[ti->atype]);
2626 	}
2627 
2628 	if (name == NULL) {
2629 		/* Return default algorithm for given type if set */
2630 		return (tcfg->def_algo[ti->type]);
2631 	}
2632 
2633 	/* Search by name */
2634 	/* TODO: better search */
2635 	for (i = 1; i <= tcfg->algo_count; i++) {
2636 		ta = tcfg->algo[i];
2637 
2638 		/*
2639 		 * One can supply additional algorithm
2640 		 * parameters so we compare only the first word
2641 		 * of supplied name:
2642 		 * 'addr:chash hsize=32'
2643 		 * '^^^^^^^^^'
2644 		 *
2645 		 */
2646 		l = strlen(ta->name);
2647 		if (strncmp(name, ta->name, l) != 0)
2648 			continue;
2649 		if (name[l] != '\0' && name[l] != ' ')
2650 			continue;
2651 		/* Check if we're requesting proper table type */
2652 		if (ti->type != 0 && ti->type != ta->type)
2653 			return (NULL);
2654 		return (ta);
2655 	}
2656 
2657 	return (NULL);
2658 }
2659 
2660 /*
2661  * Register new table algo @ta.
2662  * Stores algo id inside @idx.
2663  *
2664  * Returns 0 on success.
2665  */
2666 int
2667 ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta, size_t size,
2668     int *idx)
2669 {
2670 	struct tables_config *tcfg;
2671 	struct table_algo *ta_new;
2672 	size_t sz;
2673 
2674 	if (size > sizeof(struct table_algo))
2675 		return (EINVAL);
2676 
2677 	/* Check for the required on-stack size for add/del */
2678 	sz = roundup2(ta->ta_buf_size, sizeof(void *));
2679 	if (sz > TA_BUF_SZ)
2680 		return (EINVAL);
2681 
2682 	KASSERT(ta->type <= IPFW_TABLE_MAXTYPE, ("Increase IPFW_TABLE_MAXTYPE"));
2683 
2684 	/* Copy algorithm data to stable storage. */
2685 	ta_new = malloc(sizeof(struct table_algo), M_IPFW, M_WAITOK | M_ZERO);
2686 	memcpy(ta_new, ta, size);
2687 
2688 	tcfg = CHAIN_TO_TCFG(ch);
2689 
2690 	KASSERT(tcfg->algo_count < 255, ("Increase algo array size"));
2691 
2692 	tcfg->algo[++tcfg->algo_count] = ta_new;
2693 	ta_new->idx = tcfg->algo_count;
2694 
2695 	/* Set algorithm as default one for given type */
2696 	if ((ta_new->flags & TA_FLAG_DEFAULT) != 0 &&
2697 	    tcfg->def_algo[ta_new->type] == NULL)
2698 		tcfg->def_algo[ta_new->type] = ta_new;
2699 
2700 	*idx = ta_new->idx;
2701 
2702 	return (0);
2703 }
2704 
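/*
 * Example (illustrative sketch): how a table algorithm could register
 * itself via ipfw_add_table_algo().  Everything named "myalgo" is
 * hypothetical, only a subset of the struct table_algo fields is
 * shown, and the remaining callbacks are omitted.
 */
#if 0
struct myalgo_tb {
	void	*ent_ptr;	/* per-add/del scratch state */
};

static struct table_algo myalgo = {
	.name		= "addr:myalgo",
	.type		= IPFW_TABLE_ADDR,
	.ta_buf_size	= sizeof(struct myalgo_tb),
	/* .init, .destroy, .foreach, .dump_tentry, ... would go here */
};

static void
myalgo_register(struct ip_fw_chain *ch)
{
	int idx;

	/* Rejected with EINVAL if ta_buf_size would not fit in TA_BUF_SZ. */
	ipfw_add_table_algo(ch, &myalgo, sizeof(myalgo), &idx);
}
#endif
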
2705 /*
2706  * Unregisters table algo using @idx as id.
2707  * XXX: It is NOT safe to call this function in any place
2708  * other than ipfw instance destroy handler.
2709  */
2710 void
2711 ipfw_del_table_algo(struct ip_fw_chain *ch, int idx)
2712 {
2713 	struct tables_config *tcfg;
2714 	struct table_algo *ta;
2715 
2716 	tcfg = CHAIN_TO_TCFG(ch);
2717 
2718 	KASSERT(idx <= tcfg->algo_count, ("algo idx %d out of range 1..%d",
2719 	    idx, tcfg->algo_count));
2720 
2721 	ta = tcfg->algo[idx];
2722 	KASSERT(ta != NULL, ("algo idx %d is NULL", idx));
2723 
2724 	if (tcfg->def_algo[ta->type] == ta)
2725 		tcfg->def_algo[ta->type] = NULL;
2726 
2727 	free(ta, M_IPFW);
2728 }
2729 
2730 /*
2731  * Lists all table algorithms currently available.
2732  * Data layout (v0)(current):
2733  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2734  * Reply: [ ipfw_obj_lheader ipfw_ta_info x N ]
2735  *
2736  * Returns 0 on success
2737  */
2738 static int
2739 list_table_algo(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
2740     struct sockopt_data *sd)
2741 {
2742 	struct _ipfw_obj_lheader *olh;
2743 	struct tables_config *tcfg;
2744 	ipfw_ta_info *i;
2745 	struct table_algo *ta;
2746 	uint32_t count, n, size;
2747 
2748 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
2749 	if (olh == NULL)
2750 		return (EINVAL);
2751 	if (sd->valsize < olh->size)
2752 		return (EINVAL);
2753 
2754 	IPFW_UH_RLOCK(ch);
2755 	tcfg = CHAIN_TO_TCFG(ch);
2756 	count = tcfg->algo_count;
2757 	size = count * sizeof(ipfw_ta_info) + sizeof(ipfw_obj_lheader);
2758 
2759 	/* Fill in header regardless of buffer size */
2760 	olh->count = count;
2761 	olh->objsize = sizeof(ipfw_ta_info);
2762 
2763 	if (size > olh->size) {
2764 		olh->size = size;
2765 		IPFW_UH_RUNLOCK(ch);
2766 		return (ENOMEM);
2767 	}
2768 	olh->size = size;
2769 
2770 	for (n = 1; n <= count; n++) {
2771 		i = (ipfw_ta_info *)ipfw_get_sopt_space(sd, sizeof(*i));
2772 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
2773 		ta = tcfg->algo[n];
2774 		strlcpy(i->algoname, ta->name, sizeof(i->algoname));
2775 		i->type = ta->type;
2776 		i->refcnt = ta->refcnt;
2777 	}
2778 
2779 	IPFW_UH_RUNLOCK(ch);
2780 
2781 	return (0);
2782 }
2783 
2784 /*
2785  * Tables rewriting code
2786  */
2787 
2788 /*
2789  * Determines table number and lookup type for @cmd.
2790  * Fills @puidx and @ptype with appropriate values.
2791  * Returns 0 for relevant opcodes, 1 otherwise.
2792  */
2793 static int
2794 classify_table_opcode(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
2795 {
2796 	ipfw_insn_if *cmdif;
2797 	int skip;
2798 	uint16_t v;
2799 
2800 	skip = 1;
2801 
2802 	switch (cmd->opcode) {
2803 	case O_IP_SRC_LOOKUP:
2804 	case O_IP_DST_LOOKUP:
2805 		/* Basic IPv4/IPv6 or u32 lookups */
2806 		*puidx = cmd->arg1;
2807 		/* Assume ADDR by default */
2808 		*ptype = IPFW_TABLE_ADDR;
2809 		skip = 0;
2810 
2811 		if (F_LEN(cmd) > F_INSN_SIZE(ipfw_insn_u32)) {
2812 			/*
2813 			 * generic lookup. The key must be
2814 			 * in 32bit big-endian format.
2815 			 */
2816 			v = ((ipfw_insn_u32 *)cmd)->d[1];
2817 			switch (v) {
2818 			case 0:
2819 			case 1:
2820 				/* IPv4 src/dst */
2821 				break;
2822 			case 2:
2823 			case 3:
2824 				/* src/dst port */
2825 				*ptype = IPFW_TABLE_NUMBER;
2826 				break;
2827 			case 4:
2828 				/* uid/gid */
2829 				*ptype = IPFW_TABLE_NUMBER;
2830 				break;
2831 			case 5:
2832 				/* jid */
2833 				*ptype = IPFW_TABLE_NUMBER;
2834 				break;
2835 			case 6:
2836 				/* dscp */
2837 				*ptype = IPFW_TABLE_NUMBER;
2838 				break;
2839 			}
2840 		}
2841 		break;
2842 	case O_XMIT:
2843 	case O_RECV:
2844 	case O_VIA:
2845 		/* Interface table, possibly */
2846 		cmdif = (ipfw_insn_if *)cmd;
2847 		if (cmdif->name[0] != '\1')
2848 			break;
2849 
2850 		*ptype = IPFW_TABLE_INTERFACE;
2851 		*puidx = cmdif->p.kidx;
2852 		skip = 0;
2853 		break;
2854 	case O_IP_FLOW_LOOKUP:
2855 		*puidx = cmd->arg1;
2856 		*ptype = IPFW_TABLE_FLOW;
2857 		skip = 0;
2858 		break;
2859 	}
2860 
2861 	return (skip);
2862 }
2863 
2864 /*
2865  * Sets a new table index for the given opcode.
2866  * Assumes the same opcodes as classify_table_opcode().
2867  */
2868 static void
2869 update_table_opcode(ipfw_insn *cmd, uint16_t idx)
2870 {
2871 	ipfw_insn_if *cmdif;
2872 
2873 	switch (cmd->opcode) {
2874 	case O_IP_SRC_LOOKUP:
2875 	case O_IP_DST_LOOKUP:
2876 		/* Basic IPv4/IPv6 or u32 lookups */
2877 		cmd->arg1 = idx;
2878 		break;
2879 	case O_XMIT:
2880 	case O_RECV:
2881 	case O_VIA:
2882 		/* Interface table, possibly */
2883 		cmdif = (ipfw_insn_if *)cmd;
2884 		cmdif->p.kidx = idx;
2885 		break;
2886 	case O_IP_FLOW_LOOKUP:
2887 		cmd->arg1 = idx;
2888 		break;
2889 	}
2890 }
2891 
2892 /*
2893  * Checks table name for validity.
2894  * Enforces basic length checks; the rest
2895  * should be done in userland.
2896  *
2897  * Returns 0 if name is considered valid.
2898  */
2899 int
2900 ipfw_check_table_name(char *name)
2901 {
2902 	int nsize;
2903 	ipfw_obj_ntlv *ntlv = NULL;
2904 
2905 	nsize = sizeof(ntlv->name);
2906 
2907 	if (strnlen(name, nsize) == nsize)
2908 		return (EINVAL);
2909 
2910 	if (name[0] == '\0')
2911 		return (EINVAL);
2912 
2913 	/*
2914 	 * TODO: do some more complicated checks
2915 	 */
2916 
2917 	return (0);
2918 }
2919 
2920 /*
2921  * Finds tablename TLV by @uidx.
2922  * Checks @tlvs for valid data inside.
2923  *
2924  * Returns pointer to found TLV or NULL.
2925  */
2926 static ipfw_obj_ntlv *
2927 find_name_tlv(void *tlvs, int len, uint16_t uidx)
2928 {
2929 	ipfw_obj_ntlv *ntlv;
2930 	uintptr_t pa, pe;
2931 	int l;
2932 
2933 	pa = (uintptr_t)tlvs;
2934 	pe = pa + len;
2935 	l = 0;
2936 	for (; pa < pe; pa += l) {
2937 		ntlv = (ipfw_obj_ntlv *)pa;
2938 		l = ntlv->head.length;
2939 
2940 		if (l != sizeof(*ntlv))
2941 			return (NULL);
2942 
2943 		if (ntlv->head.type != IPFW_TLV_TBL_NAME)
2944 			continue;
2945 
2946 		if (ntlv->idx != uidx)
2947 			continue;
2948 
2949 		if (ipfw_check_table_name(ntlv->name) != 0)
2950 			return (NULL);
2951 
2952 		return (ntlv);
2953 	}
2954 
2955 	return (NULL);
2956 }
2957 
2958 /*
2959  * Finds table config based on either legacy index
2960  * or name in ntlv.
2961  * Note @ti structure contains unchecked data from userland.
2962  *
2963  * Returns pointer to table_config or NULL.
2964  */
2965 static struct table_config *
2966 find_table(struct namedobj_instance *ni, struct tid_info *ti)
2967 {
2968 	char *name, bname[16];
2969 	struct named_object *no;
2970 	ipfw_obj_ntlv *ntlv;
2971 	uint32_t set;
2972 
2973 	if (ti->tlvs != NULL) {
2974 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
2975 		if (ntlv == NULL)
2976 			return (NULL);
2977 		name = ntlv->name;
2978 
2979 		/*
2980 		 * Use set provided by @ti instead of @ntlv one.
2981 		 * This is needed due to different sets behavior
2982 		 * controlled by V_fw_tables_sets.
2983 		 */
2984 		set = ti->set;
2985 	} else {
2986 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
2987 		name = bname;
2988 		set = 0;
2989 	}
2990 
2991 	no = ipfw_objhash_lookup_name(ni, set, name);
2992 
2993 	return ((struct table_config *)no);
2994 }
2995 
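/*
 * Example (illustrative sketch): how the legacy path above resolves a
 * numeric reference.  A rule produced by an old ipfw(8) refers to a
 * table simply by number, so the lookup effectively searches set 0 for
 * a table named after that decimal number.  The wrapper name is
 * hypothetical; as elsewhere in this file, the UH lock is expected to
 * be held around the lookup.
 */
#if 0
static struct table_config *
find_legacy_table(struct ip_fw_chain *ch, uint16_t num)
{
	struct tid_info ti;

	memset(&ti, 0, sizeof(ti));
	ti.uidx = num;		/* old-style numeric id, no name TLVs */
	return (find_table(CHAIN_TO_NI(ch), &ti));
}
#endif
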
2996 /*
2997  * Allocates a new table config structure using
2998  * the specified @ta and @aname.
2999  *
3000  * Returns pointer to config or NULL.
3001  */
3002 static struct table_config *
3003 alloc_table_config(struct ip_fw_chain *ch, struct tid_info *ti,
3004     struct table_algo *ta, char *aname, uint8_t tflags)
3005 {
3006 	char *name, bname[16];
3007 	struct table_config *tc;
3008 	int error;
3009 	ipfw_obj_ntlv *ntlv;
3010 	uint32_t set;
3011 
3012 	if (ti->tlvs != NULL) {
3013 		ntlv = find_name_tlv(ti->tlvs, ti->tlen, ti->uidx);
3014 		if (ntlv == NULL)
3015 			return (NULL);
3016 		name = ntlv->name;
3017 		set = ntlv->set;
3018 	} else {
3019 		snprintf(bname, sizeof(bname), "%d", ti->uidx);
3020 		name = bname;
3021 		set = 0;
3022 	}
3023 
3024 	tc = malloc(sizeof(struct table_config), M_IPFW, M_WAITOK | M_ZERO);
3025 	tc->no.name = tc->tablename;
3026 	tc->no.type = ta->type;
3027 	tc->no.set = set;
3028 	tc->tflags = tflags;
3029 	tc->ta = ta;
3030 	strlcpy(tc->tablename, name, sizeof(tc->tablename));
3031 	/* Set "shared" value type by default */
3032 	tc->vshared = 1;
3033 
3034 	if (ti->tlvs == NULL) {
3035 		tc->no.compat = 1;
3036 		tc->no.uidx = ti->uidx;
3037 	}
3038 
3039 	/* Preallocate data structures for new tables */
3040 	error = ta->init(ch, &tc->astate, &tc->ti_copy, aname, tflags);
3041 	if (error != 0) {
3042 		free(tc, M_IPFW);
3043 		return (NULL);
3044 	}
3045 
3046 	return (tc);
3047 }
3048 
3049 /*
3050  * Destroys table state and config.
3051  */
3052 static void
3053 free_table_config(struct namedobj_instance *ni, struct table_config *tc)
3054 {
3055 
3056 	KASSERT(tc->linked == 0, ("free() on linked config"));
3057 	/* UH lock MUST NOT be held */
3058 
3059 	/*
3060 	 * We're using ta without any locking/referencing.
3061 	 * TODO: fix this if we're going to use unloadable algos.
3062 	 */
3063 	tc->ta->destroy(tc->astate, &tc->ti_copy);
3064 	free(tc, M_IPFW);
3065 }
3066 
3067 /*
3068  * Links @tc to the @ch table named-object instance.
3069  * Sets the appropriate type/state in the @ch table info.
3070  */
3071 static void
3072 link_table(struct ip_fw_chain *ch, struct table_config *tc)
3073 {
3074 	struct namedobj_instance *ni;
3075 	struct table_info *ti;
3076 	uint16_t kidx;
3077 
3078 	IPFW_UH_WLOCK_ASSERT(ch);
3079 	IPFW_WLOCK_ASSERT(ch);
3080 
3081 	ni = CHAIN_TO_NI(ch);
3082 	kidx = tc->no.kidx;
3083 
3084 	ipfw_objhash_add(ni, &tc->no);
3085 
3086 	ti = KIDX_TO_TI(ch, kidx);
3087 	*ti = tc->ti_copy;
3088 
3089 	/* Notify algo on real @ti address */
3090 	if (tc->ta->change_ti != NULL)
3091 		tc->ta->change_ti(tc->astate, ti);
3092 
3093 	tc->linked = 1;
3094 	tc->ta->refcnt++;
3095 }
3096 
3097 /*
3098  * Unlinks @tc from the @ch table named-object instance.
3099  * Zeroes the runtime state in @ch; the copy is already kept in @tc.
3100  */
3101 static void
3102 unlink_table(struct ip_fw_chain *ch, struct table_config *tc)
3103 {
3104 	struct namedobj_instance *ni;
3105 	struct table_info *ti;
3106 	uint16_t kidx;
3107 
3108 	IPFW_UH_WLOCK_ASSERT(ch);
3109 	IPFW_WLOCK_ASSERT(ch);
3110 
3111 	ni = CHAIN_TO_NI(ch);
3112 	kidx = tc->no.kidx;
3113 
3114 	/* Clear state. @ti copy is already saved inside @tc */
3115 	ipfw_objhash_del(ni, &tc->no);
3116 	ti = KIDX_TO_TI(ch, kidx);
3117 	memset(ti, 0, sizeof(struct table_info));
3118 	tc->linked = 0;
3119 	tc->ta->refcnt--;
3120 
3121 	/* Notify algo on real @ti address */
3122 	if (tc->ta->change_ti != NULL)
3123 		tc->ta->change_ti(tc->astate, NULL);
3124 }
3125 
3126 struct swap_table_args {
3127 	int set;
3128 	int new_set;
3129 	int mv;
3130 };
3131 
3132 /*
3133  * Change set for each matching table.
3134  *
3135  * Ensures each table is processed only once by setting/checking
3136  * the ochanged field.
3137  */
3138 static void
3139 swap_table_set(struct namedobj_instance *ni, struct named_object *no,
3140     void *arg)
3141 {
3142 	struct table_config *tc;
3143 	struct swap_table_args *sta;
3144 
3145 	tc = (struct table_config *)no;
3146 	sta = (struct swap_table_args *)arg;
3147 
3148 	if (no->set != sta->set && (no->set != sta->new_set || sta->mv != 0))
3149 		return;
3150 
3151 	if (tc->ochanged != 0)
3152 		return;
3153 
3154 	tc->ochanged = 1;
3155 	ipfw_objhash_del(ni, no);
3156 	if (no->set == sta->set)
3157 		no->set = sta->new_set;
3158 	else
3159 		no->set = sta->set;
3160 	ipfw_objhash_add(ni, no);
3161 }
3162 
3163 /*
3164  * Cleans up the ochanged field for all tables.
3165  */
3166 static void
3167 clean_table_set_data(struct namedobj_instance *ni, struct named_object *no,
3168     void *arg)
3169 {
3170 	struct table_config *tc;
3171 	struct swap_table_args *sta;
3172 
3173 	tc = (struct table_config *)no;
3174 	sta = (struct swap_table_args *)arg;
3175 
3176 	tc->ochanged = 0;
3177 }
3178 
3179 /*
3180  * Swaps tables within two sets.
3181  */
3182 void
3183 ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t set,
3184     uint32_t new_set, int mv)
3185 {
3186 	struct swap_table_args sta;
3187 
3188 	IPFW_UH_WLOCK_ASSERT(ch);
3189 
3190 	sta.set = set;
3191 	sta.new_set = new_set;
3192 	sta.mv = mv;
3193 
3194 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), swap_table_set, &sta);
3195 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), clean_table_set_data, &sta);
3196 }
3197 
3198 /*
3199  * Moves all tables referenced by rules in @rt to set @new_set.
3200  * Makes sure that all relevant tables are referenced ONLY by the given rules.
3201  *
3202  * Returns 0 on success.
3203  */
3204 int
3205 ipfw_move_tables_sets(struct ip_fw_chain *ch, ipfw_range_tlv *rt,
3206     uint32_t new_set)
3207 {
3208 	struct ip_fw *rule;
3209 	struct table_config *tc;
3210 	struct named_object *no;
3211 	struct namedobj_instance *ni;
3212 	int bad, i, l, cmdlen;
3213 	uint16_t kidx;
3214 	uint8_t type;
3215 	ipfw_insn *cmd;
3216 
3217 	IPFW_UH_WLOCK_ASSERT(ch);
3218 
3219 	ni = CHAIN_TO_NI(ch);
3220 
3221 	/* Stage 1: count number of references by given rules */
3222 	for (i = 0; i < ch->n_rules - 1; i++) {
3223 		rule = ch->map[i];
3224 		if (ipfw_match_range(rule, rt) == 0)
3225 			continue;
3226 
3227 		l = rule->cmd_len;
3228 		cmd = rule->cmd;
3229 		cmdlen = 0;
3230 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3231 			cmdlen = F_LEN(cmd);
3232 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3233 				continue;
3234 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3235 			KASSERT(no != NULL,
3236 			    ("objhash lookup failed on index %d", kidx));
3237 			tc = (struct table_config *)no;
3238 			tc->ocount++;
3239 		}
3240 
3241 	}
3242 
3243 	/* Stage 2: verify "ownership" */
3244 	bad = 0;
3245 	for (i = 0; i < ch->n_rules - 1; i++) {
3246 		rule = ch->map[i];
3247 		if (ipfw_match_range(rule, rt) == 0)
3248 			continue;
3249 
3250 		l = rule->cmd_len;
3251 		cmd = rule->cmd;
3252 		cmdlen = 0;
3253 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3254 			cmdlen = F_LEN(cmd);
3255 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3256 				continue;
3257 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3258 			KASSERT(no != NULL,
3259 			    ("objhash lookup failed on index %d", kidx));
3260 			tc = (struct table_config *)no;
3261 			if (tc->no.refcnt != tc->ocount) {
3262 
3263 				/*
3264 				 * Reference counts differ:
3265 				 * other rule(s) are holding a reference to the
3266 				 * given table, so it is not possible to change its set.
3267 				 *
3268 				 * Note that refcnt may account for references from
3269 				 * rules that are about to be added.
3270 				 * Since we don't know their number (or even
3271 				 * whether they will be added at all) it is perfectly OK
3272 				 * to return an error here.
3273 				 */
3274 				bad = 1;
3275 				break;
3276 			}
3277 		}
3278 
3279 		if (bad != 0)
3280 			break;
3281 	}
3282 
3283 	/* Stage 3: change set or cleanup */
3284 	for (i = 0; i < ch->n_rules - 1; i++) {
3285 		rule = ch->map[i];
3286 		if (ipfw_match_range(rule, rt) == 0)
3287 			continue;
3288 
3289 		l = rule->cmd_len;
3290 		cmd = rule->cmd;
3291 		cmdlen = 0;
3292 		for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3293 			cmdlen = F_LEN(cmd);
3294 			if (classify_table_opcode(cmd, &kidx, &type) != 0)
3295 				continue;
3296 			no = ipfw_objhash_lookup_kidx(ni, kidx);
3297 			KASSERT(no != NULL,
3298 			    ("objhash lookup failed on index %d", kidx));
3299 			tc = (struct table_config *)no;
3300 
3301 			tc->ocount = 0;
3302 			if (bad != 0)
3303 				continue;
3304 
3305 			/* Actually change set. */
3306 			ipfw_objhash_del(ni, no);
3307 			no->set = new_set;
3308 			ipfw_objhash_add(ni, no);
3309 		}
3310 	}
3311 
3312 	return (bad);
3313 }
3314 
3315 /*
3316  * Finds and bumps refcount for tables referenced by given @rule.
3317  * Auto-creates non-existing tables.
3318  * Fills in @oib array with userland/kernel indexes.
3319  * First free oidx pointer is saved back in @oib.
3320  *
3321  * Returns 0 on success.
3322  */
3323 static int
3324 find_ref_rule_tables(struct ip_fw_chain *ch, struct ip_fw *rule,
3325     struct rule_check_info *ci, struct obj_idx **oib, struct tid_info *ti)
3326 {
3327 	struct table_config *tc;
3328 	struct namedobj_instance *ni;
3329 	struct named_object *no;
3330 	int cmdlen, error, l, numnew;
3331 	uint16_t kidx;
3332 	ipfw_insn *cmd;
3333 	struct obj_idx *pidx, *pidx_first, *p;
3334 
3335 	pidx_first = *oib;
3336 	pidx = pidx_first;
3337 	l = rule->cmd_len;
3338 	cmd = rule->cmd;
3339 	cmdlen = 0;
3340 	error = 0;
3341 	numnew = 0;
3342 
3343 	IPFW_UH_WLOCK(ch);
3344 	ni = CHAIN_TO_NI(ch);
3345 
3346 	/* Increase refcount on each existing referenced table. */
3347 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3348 		cmdlen = F_LEN(cmd);
3349 
3350 		if (classify_table_opcode(cmd, &ti->uidx, &ti->type) != 0)
3351 			continue;
3352 
3353 		pidx->uidx = ti->uidx;
3354 		pidx->type = ti->type;
3355 
3356 		if ((tc = find_table(ni, ti)) != NULL) {
3357 			if (tc->no.type != ti->type) {
3358 				/* Incompatible types */
3359 				error = EINVAL;
3360 				break;
3361 			}
3362 
3363 			/* Reference found table and save kidx */
3364 			tc->no.refcnt++;
3365 			pidx->kidx = tc->no.kidx;
3366 			pidx++;
3367 			continue;
3368 		}
3369 
3370 		/*
3371 		 * Compatibility stuff for old clients:
3372 		 * prepare to manually create non-existing tables.
3373 		 */
3374 		pidx++;
3375 		numnew++;
3376 	}
3377 
3378 	if (error != 0) {
3379 		/* Unref everything we have already done */
3380 		for (p = *oib; p < pidx; p++) {
3381 			if (p->kidx == 0)
3382 				continue;
3383 
3384 			/* Find & unref by existing idx */
3385 			no = ipfw_objhash_lookup_kidx(ni, p->kidx);
3386 			KASSERT(no != NULL, ("Ref'd table %d disappeared",
3387 			    p->kidx));
3388 
3389 			no->refcnt--;
3390 		}
3391 	}
3392 
3393 	IPFW_UH_WUNLOCK(ch);
3394 
3395 	if (numnew == 0) {
3396 		*oib = pidx;
3397 		return (error);
3398 	}
3399 
3400 	/*
3401 	 * Compatibility stuff: do actual creation for non-existing,
3402 	 * but referenced tables.
3403 	 */
3404 	for (p = pidx_first; p < pidx; p++) {
3405 		if (p->kidx != 0)
3406 			continue;
3407 
3408 		ti->uidx = p->uidx;
3409 		ti->type = p->type;
3410 		ti->atype = 0;
3411 
3412 		error = create_table_compat(ch, ti, &kidx);
3413 		if (error == 0) {
3414 			p->kidx = kidx;
3415 			continue;
3416 		}
3417 
3418 		/* Error. We have to drop references */
3419 		IPFW_UH_WLOCK(ch);
3420 		for (p = pidx_first; p < pidx; p++) {
3421 			if (p->kidx == 0)
3422 				continue;
3423 
3424 			/* Find & unref by existing idx */
3425 			no = ipfw_objhash_lookup_kidx(ni, p->kidx);
3426 			KASSERT(no != NULL, ("Ref'd table %d disappeared",
3427 			    p->kidx));
3428 
3429 			no->refcnt--;
3430 		}
3431 		IPFW_UH_WUNLOCK(ch);
3432 
3433 		return (error);
3434 	}
3435 
3436 	*oib = pidx;
3437 
3438 	return (error);
3439 }
3440 
3441 /*
3442  * Remove references from every table used in @rule.
3443  */
3444 void
3445 ipfw_unref_rule_tables(struct ip_fw_chain *chain, struct ip_fw *rule)
3446 {
3447 	int cmdlen, l;
3448 	ipfw_insn *cmd;
3449 	struct namedobj_instance *ni;
3450 	struct named_object *no;
3451 	uint16_t kidx;
3452 	uint8_t type;
3453 
3454 	IPFW_UH_WLOCK_ASSERT(chain);
3455 	ni = CHAIN_TO_NI(chain);
3456 
3457 	l = rule->cmd_len;
3458 	cmd = rule->cmd;
3459 	cmdlen = 0;
3460 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3461 		cmdlen = F_LEN(cmd);
3462 
3463 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
3464 			continue;
3465 
3466 		no = ipfw_objhash_lookup_kidx(ni, kidx);
3467 
3468 		KASSERT(no != NULL, ("table id %d not found", kidx));
3469 		KASSERT(no->type == type, ("wrong type %d (%d) for table id %d",
3470 		    no->type, type, kidx));
3471 		KASSERT(no->refcnt > 0, ("refcount for table %d is %d",
3472 		    kidx, no->refcnt));
3473 
3474 		no->refcnt--;
3475 	}
3476 }
3477 
3478 /*
3479  * Compatibility function for old ipfw(8) binaries.
3480  * Rewrites table kernel indices with userland ones.
3481  * Converts tables matching '/^\d+$/' to their atoi() value.
3482  * Uses number 65535 for other tables.
3483  *
3484  * Returns 0 on success.
3485  */
3486 int
3487 ipfw_rewrite_table_kidx(struct ip_fw_chain *chain, struct ip_fw_rule0 *rule)
3488 {
3489 	int cmdlen, error, l;
3490 	ipfw_insn *cmd;
3491 	uint16_t kidx, uidx;
3492 	uint8_t type;
3493 	struct named_object *no;
3494 	struct namedobj_instance *ni;
3495 
3496 	ni = CHAIN_TO_NI(chain);
3497 	error = 0;
3498 
3499 	l = rule->cmd_len;
3500 	cmd = rule->cmd;
3501 	cmdlen = 0;
3502 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3503 		cmdlen = F_LEN(cmd);
3504 
3505 		if (classify_table_opcode(cmd, &kidx, &type) != 0)
3506 			continue;
3507 
3508 		if ((no = ipfw_objhash_lookup_kidx(ni, kidx)) == NULL)
3509 			return (1);
3510 
3511 		uidx = no->uidx;
3512 		if (no->compat == 0) {
3513 
3514 			/*
3515 			 * We are called via legacy opcode.
3516 			 * Save the error and show the table as a fake
3517 			 * number so that ipfw(8) does not hang.
3518 			 */
3519 			uidx = 65535;
3520 			error = 2;
3521 		}
3522 
3523 		update_table_opcode(cmd, uidx);
3524 	}
3525 
3526 	return (error);
3527 }
3528 
3529 /*
3530  * Checks if the opcode is referencing a table of the appropriate type.
3531  * Adds a reference to the found table if so.
3532  * Rewrites user-supplied opcode values with kernel ones.
3533  *
3534  * Returns 0 on success and appropriate error code otherwise.
3535  */
3536 int
3537 ipfw_rewrite_table_uidx(struct ip_fw_chain *chain,
3538     struct rule_check_info *ci)
3539 {
3540 	int cmdlen, error, l;
3541 	ipfw_insn *cmd;
3542 	uint16_t uidx;
3543 	uint8_t type;
3544 	struct namedobj_instance *ni;
3545 	struct obj_idx *p, *pidx_first, *pidx_last;
3546 	struct tid_info ti;
3547 
3548 	ni = CHAIN_TO_NI(chain);
3549 
3550 	/*
3551 	 * Prepare an array for storing opcode indices.
3552 	 * Use stack allocation by default.
3553 	 */
3554 	if (ci->table_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
3555 		/* Stack */
3556 		pidx_first = ci->obuf;
3557 	} else
3558 		pidx_first = malloc(ci->table_opcodes * sizeof(struct obj_idx),
3559 		    M_IPFW, M_WAITOK | M_ZERO);
3560 
3561 	pidx_last = pidx_first;
3562 	error = 0;
3563 	type = 0;
3564 	memset(&ti, 0, sizeof(ti));
3565 
3566 	/*
3567 	 * Use the default set for looking up tables (old way) or
3568 	 * use the set the rule is assigned to (new way).
3569 	 */
3570 	ti.set = (V_fw_tables_sets != 0) ? ci->krule->set : 0;
3571 	if (ci->ctlv != NULL) {
3572 		ti.tlvs = (void *)(ci->ctlv + 1);
3573 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
3574 	}
3575 
3576 	/* Reference all used tables */
3577 	error = find_ref_rule_tables(chain, ci->krule, ci, &pidx_last, &ti);
3578 	if (error != 0)
3579 		goto free;
3580 
3581 	IPFW_UH_WLOCK(chain);
3582 
3583 	/* Perform rule rewrite */
3584 	l = ci->krule->cmd_len;
3585 	cmd = ci->krule->cmd;
3586 	cmdlen = 0;
3587 	p = pidx_first;
3588 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
3589 		cmdlen = F_LEN(cmd);
3590 		if (classify_table_opcode(cmd, &uidx, &type) != 0)
3591 			continue;
3592 		update_table_opcode(cmd, p->kidx);
3593 		p++;
3594 	}
3595 
3596 	IPFW_UH_WUNLOCK(chain);
3597 
3598 free:
3599 	if (pidx_first != ci->obuf)
3600 		free(pidx_first, M_IPFW);
3601 
3602 	return (error);
3603 }
3604 
3605 static struct ipfw_sopt_handler	scodes[] = {
3606 	{ IP_FW_TABLE_XCREATE,	0,	HDIR_SET,	create_table },
3607 	{ IP_FW_TABLE_XDESTROY,	0,	HDIR_SET,	flush_table_v0 },
3608 	{ IP_FW_TABLE_XFLUSH,	0,	HDIR_SET,	flush_table_v0 },
3609 	{ IP_FW_TABLE_XMODIFY,	0,	HDIR_BOTH,	modify_table },
3610 	{ IP_FW_TABLE_XINFO,	0,	HDIR_GET,	describe_table },
3611 	{ IP_FW_TABLES_XLIST,	0,	HDIR_GET,	list_tables },
3612 	{ IP_FW_TABLE_XLIST,	0,	HDIR_GET,	dump_table_v0 },
3613 	{ IP_FW_TABLE_XLIST,	1,	HDIR_GET,	dump_table_v1 },
3614 	{ IP_FW_TABLE_XADD,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3615 	{ IP_FW_TABLE_XADD,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3616 	{ IP_FW_TABLE_XDEL,	0,	HDIR_BOTH,	manage_table_ent_v0 },
3617 	{ IP_FW_TABLE_XDEL,	1,	HDIR_BOTH,	manage_table_ent_v1 },
3618 	{ IP_FW_TABLE_XFIND,	0,	HDIR_GET,	find_table_entry },
3619 	{ IP_FW_TABLE_XSWAP,	0,	HDIR_SET,	swap_table },
3620 	{ IP_FW_TABLES_ALIST,	0,	HDIR_GET,	list_table_algo },
3621 	{ IP_FW_TABLE_XGETSIZE,	0,	HDIR_GET,	get_table_size },
3622 };
3623 
3624 static void
3625 destroy_table_locked(struct namedobj_instance *ni, struct named_object *no,
3626     void *arg)
3627 {
3628 
3629 	unlink_table((struct ip_fw_chain *)arg, (struct table_config *)no);
3630 	if (ipfw_objhash_free_idx(ni, no->kidx) != 0)
3631 		printf("Error unlinking kidx %d from table %s\n",
3632 		    no->kidx, no->name);
3633 	free_table_config(ni, (struct table_config *)no);
3634 }
3635 
3636 /*
3637  * Shuts tables module down.
3638  */
3639 void
3640 ipfw_destroy_tables(struct ip_fw_chain *ch, int last)
3641 {
3642 
3643 	IPFW_DEL_SOPT_HANDLER(last, scodes);
3644 
3645 	/* Remove all tables from working set */
3646 	IPFW_UH_WLOCK(ch);
3647 	IPFW_WLOCK(ch);
3648 	ipfw_objhash_foreach(CHAIN_TO_NI(ch), destroy_table_locked, ch);
3649 	IPFW_WUNLOCK(ch);
3650 	IPFW_UH_WUNLOCK(ch);
3651 
3652 	/* Free the table state array itself */
3653 	free(ch->tablestate, M_IPFW);
3654 
3655 	ipfw_table_value_destroy(ch, last);
3656 	ipfw_table_algo_destroy(ch);
3657 
3658 	ipfw_objhash_destroy(CHAIN_TO_NI(ch));
3659 	free(CHAIN_TO_TCFG(ch), M_IPFW);
3660 }
3661 
3662 /*
3663  * Starts tables module.
3664  */
3665 int
3666 ipfw_init_tables(struct ip_fw_chain *ch, int first)
3667 {
3668 	struct tables_config *tcfg;
3669 
3670 	/* Allocate pointers */
3671 	ch->tablestate = malloc(V_fw_tables_max * sizeof(struct table_info),
3672 	    M_IPFW, M_WAITOK | M_ZERO);
3673 
3674 	tcfg = malloc(sizeof(struct tables_config), M_IPFW, M_WAITOK | M_ZERO);
3675 	tcfg->namehash = ipfw_objhash_create(V_fw_tables_max);
3676 	ch->tblcfg = tcfg;
3677 
3678 	ipfw_table_value_init(ch, first);
3679 	ipfw_table_algo_init(ch);
3680 
3681 	IPFW_ADD_SOPT_HANDLER(first, scodes);
3682 	return (0);
3683 }
3684 
3685 
3686 
3687