xref: /freebsd/sys/netpfil/ipfw/ip_fw_sockopt.c (revision aa24f48b361effe51163877d84f1b70d32b77e04)
1 /*-
2  * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
3  * Copyright (c) 2014 Yandex LLC
4  * Copyright (c) 2014 Alexander V. Chernikov
5  *
6  * Supported by: Valeria Paoli
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Control socket and rule management routines for ipfw.
35  * Control is currently implemented via IP_FW3 setsockopt() code.
36  */
37 
38 #include "opt_ipfw.h"
39 #include "opt_inet.h"
40 #ifndef INET
41 #error IPFIREWALL requires INET.
42 #endif /* INET */
43 #include "opt_inet6.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>	/* struct m_tag used by nested headers */
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/rmlock.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/fnv_hash.h>
60 #include <net/if.h>
61 #include <net/pfil.h>
62 #include <net/route.h>
63 #include <net/vnet.h>
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/ip_var.h> /* hooks */
69 #include <netinet/ip_fw.h>
70 
71 #include <netpfil/ipfw/ip_fw_private.h>
72 #include <netpfil/ipfw/ip_fw_table.h>
73 
74 #ifdef MAC
75 #include <security/mac/mac_framework.h>
76 #endif
77 
78 static int ipfw_ctl(struct sockopt *sopt);
79 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
80     struct rule_check_info *ci);
81 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
82     struct rule_check_info *ci);
83 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
84     struct rule_check_info *ci);
85 static int rewrite_rule_uidx(struct ip_fw_chain *chain,
86     struct rule_check_info *ci);
87 
88 #define	NAMEDOBJ_HASH_SIZE	32
89 
90 struct namedobj_instance {
91 	struct namedobjects_head	*names;
92 	struct namedobjects_head	*values;
93 	uint32_t nn_size;		/* names hash size */
94 	uint32_t nv_size;		/* number hash size */
95 	u_long *idx_mask;		/* used items bitmask */
96 	uint32_t max_blocks;		/* number of "long" blocks in bitmask */
97 	uint32_t count;			/* number of items */
98 	uint16_t free_off[IPFW_MAX_SETS];	/* first possible free offset */
99 	objhash_hash_f	*hash_f;
100 	objhash_cmp_f	*cmp_f;
101 };
102 #define	BLOCK_ITEMS	(8 * sizeof(u_long))	/* Number of items for ffsl() */
103 
104 static uint32_t objhash_hash_name(struct namedobj_instance *ni,
105     const void *key, uint32_t kopt);
106 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
107 static int objhash_cmp_name(struct named_object *no, const void *name,
108     uint32_t set);
109 
110 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
111 
112 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
113     struct sockopt_data *sd);
114 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
115     struct sockopt_data *sd);
116 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
117     struct sockopt_data *sd);
118 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
119     struct sockopt_data *sd);
120 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
121     struct sockopt_data *sd);
122 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
123     struct sockopt_data *sd);
124 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
125     struct sockopt_data *sd);
126 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
127     struct sockopt_data *sd);
128 
129 /* ctl3 handler data */
130 struct mtx ctl3_lock;
131 #define	CTL3_LOCK_INIT()	mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
132 #define	CTL3_LOCK_DESTROY()	mtx_destroy(&ctl3_lock)
133 #define	CTL3_LOCK()		mtx_lock(&ctl3_lock)
134 #define	CTL3_UNLOCK()		mtx_unlock(&ctl3_lock)
135 
136 static struct ipfw_sopt_handler *ctl3_handlers;
137 static size_t ctl3_hsize;
138 static uint64_t ctl3_refct, ctl3_gencnt;
139 #define	CTL3_SMALLBUF	4096			/* small page-size write buffer */
140 #define	CTL3_LARGEBUF	(16 * 1024 * 1024)	/* handle large rulesets */
141 
142 static int ipfw_flush_sopt_data(struct sockopt_data *sd);
143 
144 static struct ipfw_sopt_handler	scodes[] = {
145 	{ IP_FW_XGET,		0,	HDIR_GET,	dump_config },
146 	{ IP_FW_XADD,		0,	HDIR_BOTH,	add_rules },
147 	{ IP_FW_XDEL,		0,	HDIR_BOTH,	del_rules },
148 	{ IP_FW_XZERO,		0,	HDIR_SET,	clear_rules },
149 	{ IP_FW_XRESETLOG,	0,	HDIR_SET,	clear_rules },
150 	{ IP_FW_XMOVE,		0,	HDIR_SET,	move_rules },
151 	{ IP_FW_SET_SWAP,	0,	HDIR_SET,	manage_sets },
152 	{ IP_FW_SET_MOVE,	0,	HDIR_SET,	manage_sets },
153 	{ IP_FW_SET_ENABLE,	0,	HDIR_SET,	manage_sets },
154 	{ IP_FW_DUMP_SOPTCODES,	0,	HDIR_GET,	dump_soptcodes },
155 	{ IP_FW_DUMP_SRVOBJECTS,0,	HDIR_GET,	dump_srvobjects },
156 };
157 
158 static int
159 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule);
160 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd,
161     uint16_t *puidx, uint8_t *ptype);
162 static int mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
163     uint32_t *bmask);
164 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
165     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti);
166 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd,
167     struct tid_info *ti, struct obj_idx *pidx, int *unresolved);
168 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
169 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd,
170     struct obj_idx *oib, struct obj_idx *end);
171 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
172     struct sockopt_data *sd);
173 
174 /*
175  * Opcode object rewriter variables
176  */
177 struct opcode_obj_rewrite *ctl3_rewriters;
178 static size_t ctl3_rsize;
179 
180 /*
181  * static variables followed by global ones
182  */
183 
184 static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone);
185 #define	V_ipfw_cntr_zone		VNET(ipfw_cntr_zone)
186 
187 void
188 ipfw_init_counters()
189 {
190 
191 	V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
192 	    IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
193 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
194 }
195 
196 void
197 ipfw_destroy_counters()
198 {
199 
200 	uma_zdestroy(V_ipfw_cntr_zone);
201 }
202 
203 struct ip_fw *
204 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
205 {
206 	struct ip_fw *rule;
207 
208 	rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
209 	rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
210 
211 	return (rule);
212 }
213 
214 static void
215 free_rule(struct ip_fw *rule)
216 {
217 
218 	uma_zfree(V_ipfw_cntr_zone, rule->cntr);
219 	free(rule, M_IPFW);
220 }
221 
222 
223 /*
224  * Find the smallest rule >= key, id.
225  * We could use bsearch but it is so simple that we code it directly
226  */
227 int
228 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
229 {
230 	int i, lo, hi;
231 	struct ip_fw *r;
232 
233   	for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
234 		i = (lo + hi) / 2;
235 		r = chain->map[i];
236 		if (r->rulenum < key)
237 			lo = i + 1;	/* continue from the next one */
238 		else if (r->rulenum > key)
239 			hi = i;		/* this might be good */
240 		else if (r->id < id)
241 			lo = i + 1;	/* continue from the next one */
242 		else /* r->id >= id */
243 			hi = i;		/* this might be good */
244 	}
245 	return hi;
246 }
247 
248 /*
249  * Builds skipto cache on rule set @map.
250  */
251 static void
252 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
253 {
254 	int *smap, rulenum;
255 	int i, mi;
256 
257 	IPFW_UH_WLOCK_ASSERT(chain);
258 
259 	mi = 0;
260 	rulenum = map[mi]->rulenum;
261 	smap = chain->idxmap_back;
262 
263 	if (smap == NULL)
264 		return;
265 
266 	for (i = 0; i < 65536; i++) {
267 		smap[i] = mi;
268 		/* Use the same rule index until i < rulenum */
269 		/* Use the same rule index until i reaches rulenum */
270 			continue;
271 		/* Find next rule with num > i */
272 		rulenum = map[++mi]->rulenum;
273 		while (rulenum == i)
274 			rulenum = map[++mi]->rulenum;
275 	}
276 }
277 
278 /*
279  * Swaps prepared (backup) index with current one.
280  */
281 static void
282 swap_skipto_cache(struct ip_fw_chain *chain)
283 {
284 	int *map;
285 
286 	IPFW_UH_WLOCK_ASSERT(chain);
287 	IPFW_WLOCK_ASSERT(chain);
288 
289 	map = chain->idxmap;
290 	chain->idxmap = chain->idxmap_back;
291 	chain->idxmap_back = map;
292 }
293 
294 /*
295  * Allocate and initialize skipto cache.
296  */
297 void
298 ipfw_init_skipto_cache(struct ip_fw_chain *chain)
299 {
300 	int *idxmap, *idxmap_back;
301 
302 	idxmap = malloc(65536 * sizeof(int), M_IPFW,
303 	    M_WAITOK | M_ZERO);
304 	idxmap_back = malloc(65536 * sizeof(int), M_IPFW,
305 	    M_WAITOK | M_ZERO);
306 
307 	/*
308 	 * Note we may be called at any time after initialization,
309 	 * for example, on first skipto rule, so we need to
310 	 * provide valid chain->idxmap on return
311 	 */
312 
313 	IPFW_UH_WLOCK(chain);
314 	if (chain->idxmap != NULL) {
315 		IPFW_UH_WUNLOCK(chain);
316 		free(idxmap, M_IPFW);
317 		free(idxmap_back, M_IPFW);
318 		return;
319 	}
320 
321 	/* Set backup pointer first to permit building cache */
322 	chain->idxmap_back = idxmap_back;
323 	update_skipto_cache(chain, chain->map);
324 	IPFW_WLOCK(chain);
325 	/* It is now safe to set chain->idxmap ptr */
326 	chain->idxmap = idxmap;
327 	swap_skipto_cache(chain);
328 	IPFW_WUNLOCK(chain);
329 	IPFW_UH_WUNLOCK(chain);
330 }
331 
332 /*
333  * Destroys skipto cache.
334  */
335 void
336 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
337 {
338 
339 	if (chain->idxmap != NULL)
340 		free(chain->idxmap, M_IPFW);
341 	if (chain->idxmap_back != NULL)
342 		free(chain->idxmap_back, M_IPFW);
343 }
344 
345 
346 /*
347  * allocate a new map, returns the chain locked. extra is the number
348  * of entries to add or delete.
349  */
350 static struct ip_fw **
351 get_map(struct ip_fw_chain *chain, int extra, int locked)
352 {
353 
354 	for (;;) {
355 		struct ip_fw **map;
356 		int i, mflags;
357 
358 		mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
359 
360 		i = chain->n_rules + extra;
361 		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
362 		if (map == NULL) {
363 			printf("%s: cannot allocate map\n", __FUNCTION__);
364 			return NULL;
365 		}
366 		if (!locked)
367 			IPFW_UH_WLOCK(chain);
368 		if (i >= chain->n_rules + extra) /* good */
369 			return map;
370 		/* otherwise we lost the race, free and retry */
371 		if (!locked)
372 			IPFW_UH_WUNLOCK(chain);
373 		free(map, M_IPFW);
374 	}
375 }
376 
377 /*
378  * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
379  */
380 static struct ip_fw **
381 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
382 {
383 	struct ip_fw **old_map;
384 
385 	IPFW_WLOCK(chain);
386 	chain->id++;
387 	chain->n_rules = new_len;
388 	old_map = chain->map;
389 	chain->map = new_map;
390 	swap_skipto_cache(chain);
391 	IPFW_WUNLOCK(chain);
392 	return old_map;
393 }
394 
395 
396 static void
397 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
398 {
399 	struct timeval boottime;
400 
401 	cntr->size = sizeof(*cntr);
402 
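	/* krule->cntr is a counter(9) pair: [0] packets, [1] bytes */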
403 	if (krule->cntr != NULL) {
404 		cntr->pcnt = counter_u64_fetch(krule->cntr);
405 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
406 		cntr->timestamp = krule->timestamp;
407 	}
408 	if (cntr->timestamp > 0) {
409 		getboottime(&boottime);
410 		cntr->timestamp += boottime.tv_sec;
411 	}
412 }
413 
414 static void
415 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
416 {
417 	struct timeval boottime;
418 
419 	if (krule->cntr != NULL) {
420 		cntr->pcnt = counter_u64_fetch(krule->cntr);
421 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
422 		cntr->timestamp = krule->timestamp;
423 	}
424 	if (cntr->timestamp > 0) {
425 		getboottime(&boottime);
426 		cntr->timestamp += boottime.tv_sec;
427 	}
428 }
429 
430 /*
431  * Copies rule @urule from v1 userland format (current)
432  * to kernel @krule.
433  * Assume @krule is zeroed.
434  */
435 static void
436 import_rule1(struct rule_check_info *ci)
437 {
438 	struct ip_fw_rule *urule;
439 	struct ip_fw *krule;
440 
441 	urule = (struct ip_fw_rule *)ci->urule;
442 	krule = (struct ip_fw *)ci->krule;
443 
444 	/* copy header */
445 	krule->act_ofs = urule->act_ofs;
446 	krule->cmd_len = urule->cmd_len;
447 	krule->rulenum = urule->rulenum;
448 	krule->set = urule->set;
449 	krule->flags = urule->flags;
450 
451 	/* Save rulenum offset */
452 	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
453 
454 	/* Copy opcodes */
455 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
456 }
457 
458 /*
459  * Export rule into v1 format (Current).
460  * Layout:
461  * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
462  *     [ ip_fw_rule ] OR
463  *     [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
464  * ]
465  * Assume @data is zeroed.
466  */
467 static void
468 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
469 {
470 	struct ip_fw_bcounter *cntr;
471 	struct ip_fw_rule *urule;
472 	ipfw_obj_tlv *tlv;
473 
474 	/* Fill in TLV header */
475 	tlv = (ipfw_obj_tlv *)data;
476 	tlv->type = IPFW_TLV_RULE_ENT;
477 	tlv->length = len;
478 
479 	if (rcntrs != 0) {
480 		/* Copy counters */
481 		cntr = (struct ip_fw_bcounter *)(tlv + 1);
482 		urule = (struct ip_fw_rule *)(cntr + 1);
483 		export_cntr1_base(krule, cntr);
484 	} else
485 		urule = (struct ip_fw_rule *)(tlv + 1);
486 
487 	/* copy header */
488 	urule->act_ofs = krule->act_ofs;
489 	urule->cmd_len = krule->cmd_len;
490 	urule->rulenum = krule->rulenum;
491 	urule->set = krule->set;
492 	urule->flags = krule->flags;
493 	urule->id = krule->id;
494 
495 	/* Copy opcodes */
496 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
497 }
498 
499 
500 /*
501  * Copies rule @urule from FreeBSD8 userland format (v0)
502  * to kernel @krule.
503  * Assume @krule is zeroed.
504  */
505 static void
506 import_rule0(struct rule_check_info *ci)
507 {
508 	struct ip_fw_rule0 *urule;
509 	struct ip_fw *krule;
510 	int cmdlen, l;
511 	ipfw_insn *cmd;
512 	ipfw_insn_limit *lcmd;
513 	ipfw_insn_if *cmdif;
514 
515 	urule = (struct ip_fw_rule0 *)ci->urule;
516 	krule = (struct ip_fw *)ci->krule;
517 
518 	/* copy header */
519 	krule->act_ofs = urule->act_ofs;
520 	krule->cmd_len = urule->cmd_len;
521 	krule->rulenum = urule->rulenum;
522 	krule->set = urule->set;
523 	if ((urule->_pad & 1) != 0)
524 		krule->flags |= IPFW_RULE_NOOPT;
525 
526 	/* Save rulenum offset */
527 	ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
528 
529 	/* Copy opcodes */
530 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
531 
532 	/*
533 	 * Alter opcodes:
534 	 * 1) convert tablearg value from 65535 to 0
535 	 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room
536 	 *    for targ).
537 	 * 3) convert table number in iface opcodes to u16
538 	 * 4) convert old `nat global` into new 65535
539 	 */
540 	l = krule->cmd_len;
541 	cmd = krule->cmd;
542 	cmdlen = 0;
543 
544 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
545 		cmdlen = F_LEN(cmd);
546 
547 		switch (cmd->opcode) {
548 		/* Opcodes supporting tablearg */
549 		case O_TAG:
550 		case O_TAGGED:
551 		case O_PIPE:
552 		case O_QUEUE:
553 		case O_DIVERT:
554 		case O_TEE:
555 		case O_SKIPTO:
556 		case O_CALLRETURN:
557 		case O_NETGRAPH:
558 		case O_NGTEE:
559 		case O_NAT:
560 			if (cmd->arg1 == IP_FW_TABLEARG)
561 				cmd->arg1 = IP_FW_TARG;
562 			else if (cmd->arg1 == 0)
563 				cmd->arg1 = IP_FW_NAT44_GLOBAL;
564 			break;
565 		case O_SETFIB:
566 		case O_SETDSCP:
567 			if (cmd->arg1 == IP_FW_TABLEARG)
568 				cmd->arg1 = IP_FW_TARG;
569 			else
570 				cmd->arg1 |= 0x8000;
571 			break;
572 		case O_LIMIT:
573 			lcmd = (ipfw_insn_limit *)cmd;
574 			if (lcmd->conn_limit == IP_FW_TABLEARG)
575 				lcmd->conn_limit = IP_FW_TARG;
576 			break;
577 		/* Interface tables */
578 		case O_XMIT:
579 		case O_RECV:
580 		case O_VIA:
581 			/* Interface table, possibly */
582 			cmdif = (ipfw_insn_if *)cmd;
583 			if (cmdif->name[0] != '\1')
584 				break;
585 
586 			cmdif->p.kidx = (uint16_t)cmdif->p.glob;
587 			break;
588 		}
589 	}
590 }
591 
592 /*
593  * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
594  */
595 static void
596 export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
597 {
598 	int cmdlen, l;
599 	ipfw_insn *cmd;
600 	ipfw_insn_limit *lcmd;
601 	ipfw_insn_if *cmdif;
602 
603 	/* copy header */
604 	memset(urule, 0, len);
605 	urule->act_ofs = krule->act_ofs;
606 	urule->cmd_len = krule->cmd_len;
607 	urule->rulenum = krule->rulenum;
608 	urule->set = krule->set;
609 	if ((krule->flags & IPFW_RULE_NOOPT) != 0)
610 		urule->_pad |= 1;
611 
612 	/* Copy opcodes */
613 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
614 
615 	/* Export counters */
616 	export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
617 
618 	/*
619 	 * Alter opcodes:
620 	 * 1) convert tablearg value from 0 to 65535
621 	 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
622 	 * 3) convert table number in iface opcodes to int
623 	 */
624 	l = urule->cmd_len;
625 	cmd = urule->cmd;
626 	cmdlen = 0;
627 
628 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
629 		cmdlen = F_LEN(cmd);
630 
631 		switch (cmd->opcode) {
632 		/* Opcodes supporting tablearg */
633 		case O_TAG:
634 		case O_TAGGED:
635 		case O_PIPE:
636 		case O_QUEUE:
637 		case O_DIVERT:
638 		case O_TEE:
639 		case O_SKIPTO:
640 		case O_CALLRETURN:
641 		case O_NETGRAPH:
642 		case O_NGTEE:
643 		case O_NAT:
644 			if (cmd->arg1 == IP_FW_TARG)
645 				cmd->arg1 = IP_FW_TABLEARG;
646 			else if (cmd->arg1 == IP_FW_NAT44_GLOBAL)
647 				cmd->arg1 = 0;
648 			break;
649 		case O_SETFIB:
650 		case O_SETDSCP:
651 			if (cmd->arg1 == IP_FW_TARG)
652 				cmd->arg1 = IP_FW_TABLEARG;
653 			else
654 				cmd->arg1 &= ~0x8000;
655 			break;
656 		case O_LIMIT:
657 			lcmd = (ipfw_insn_limit *)cmd;
658 			if (lcmd->conn_limit == IP_FW_TARG)
659 				lcmd->conn_limit = IP_FW_TABLEARG;
660 			break;
661 		/* Interface tables */
662 		case O_XMIT:
663 		case O_RECV:
664 		case O_VIA:
665 			/* Interface table, possibly */
666 			cmdif = (ipfw_insn_if *)cmd;
667 			if (cmdif->name[0] != '\1')
668 				break;
669 
670 			cmdif->p.glob = cmdif->p.kidx;
671 			break;
672 		}
673 	}
674 }
675 
676 /*
677  * Add new rule(s) to the list, possibly creating a rule number for each.
678  * Update the rule_number in the input struct so the caller knows it as well.
679  * Must be called without IPFW_UH held
680  */
681 static int
682 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
683 {
684 	int error, i, insert_before, tcount;
685 	uint16_t rulenum, *pnum;
686 	struct rule_check_info *ci;
687 	struct ip_fw *krule;
688 	struct ip_fw **map;	/* the new array of pointers */
689 
690 	/* Check if we need to do table/obj index remap */
691 	tcount = 0;
692 	for (ci = rci, i = 0; i < count; ci++, i++) {
693 		if (ci->object_opcodes == 0)
694 			continue;
695 
696 		/*
697 		 * Rule has some object opcodes.
698 		 * We need to find (and create non-existing)
699 		 * kernel objects, and reference existing ones.
700 		 */
701 		error = rewrite_rule_uidx(chain, ci);
702 		if (error != 0) {
703 
704 			/*
705 			 * rewrite failed, state for current rule
706 			 * has been reverted. Check if we need to
707 			 * revert more.
708 			 */
709 			if (tcount > 0) {
710 
711 				/*
712 				 * We have some more table rules
713 				 * we need to rollback.
714 				 */
715 
716 				IPFW_UH_WLOCK(chain);
717 				while (ci != rci) {
718 					ci--;
719 					if (ci->object_opcodes == 0)
720 						continue;
721 					unref_rule_objects(chain, ci->krule);
722 
723 				}
724 				IPFW_UH_WUNLOCK(chain);
725 
726 			}
727 
728 			return (error);
729 		}
730 
731 		tcount++;
732 	}
733 
734 	/* get_map returns with IPFW_UH_WLOCK if successful */
735 	map = get_map(chain, count, 0 /* not locked */);
736 	if (map == NULL) {
737 		if (tcount > 0) {
738 			/* Unbind tables */
739 			IPFW_UH_WLOCK(chain);
740 			for (ci = rci, i = 0; i < count; ci++, i++) {
741 				if (ci->object_opcodes == 0)
742 					continue;
743 
744 				unref_rule_objects(chain, ci->krule);
745 			}
746 			IPFW_UH_WUNLOCK(chain);
747 		}
748 
749 		return (ENOSPC);
750 	}
751 
752 	if (V_autoinc_step < 1)
753 		V_autoinc_step = 1;
754 	else if (V_autoinc_step > 1000)
755 		V_autoinc_step = 1000;
756 
757 	/* FIXME: Handle count > 1 */
758 	ci = rci;
759 	krule = ci->krule;
760 	rulenum = krule->rulenum;
761 
762 	/* find the insertion point, we will insert before */
763 	insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
764 	i = ipfw_find_rule(chain, insert_before, 0);
765 	/* duplicate first part */
766 	if (i > 0)
767 		bcopy(chain->map, map, i * sizeof(struct ip_fw *));
768 	map[i] = krule;
769 	/* duplicate remaining part, we always have the default rule */
770 	bcopy(chain->map + i, map + i + 1,
771 		sizeof(struct ip_fw *) *(chain->n_rules - i));
772 	if (rulenum == 0) {
773 		/* Compute rule number and write it back */
774 		rulenum = i > 0 ? map[i-1]->rulenum : 0;
775 		if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
776 			rulenum += V_autoinc_step;
777 		krule->rulenum = rulenum;
778 		/* Save number to userland rule */
779 		pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
780 		*pnum = rulenum;
781 	}
782 
783 	krule->id = chain->id + 1;
784 	update_skipto_cache(chain, map);
785 	map = swap_map(chain, map, chain->n_rules + 1);
786 	chain->static_len += RULEUSIZE0(krule);
787 	IPFW_UH_WUNLOCK(chain);
788 	if (map)
789 		free(map, M_IPFW);
790 	return (0);
791 }
792 
793 /*
794  * Adds @rule to the list of rules to reap
795  */
796 void
797 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
798     struct ip_fw *rule)
799 {
800 
801 	IPFW_UH_WLOCK_ASSERT(chain);
802 
803 	/* Unlink rule from everywhere */
804 	unref_rule_objects(chain, rule);
805 
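	/* Reuse the first pointer-sized word of the rule as the list link */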
806 	*((struct ip_fw **)rule) = *head;
807 	*head = rule;
808 }
809 
810 /*
811  * Reclaim storage associated with a list of rules.  This is
812  * typically the list created using remove_rule.
813  * A NULL pointer on input is handled correctly.
814  */
815 void
816 ipfw_reap_rules(struct ip_fw *head)
817 {
818 	struct ip_fw *rule;
819 
820 	while ((rule = head) != NULL) {
821 		head = *((struct ip_fw **)head);
822 		free_rule(rule);
823 	}
824 }
825 
826 /*
827  * Rules to keep are
828  *	(default || reserved || !match_set || !match_number)
829  * where
830  *   default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
831  *	// the default rule is always protected
832  *
833  *   reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
834  *	// RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
835  *
836  *   match_set ::= (cmd == 0 || rule->set == set)
837  *	// set number is ignored for cmd == 0
838  *
839  *   match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
840  *	// number is ignored for cmd == 1 or n == 0
841  *
842  */
843 int
844 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
845 {
846 
847 	/* Don't match default rule for modification queries */
848 	if (rule->rulenum == IPFW_DEFAULT_RULE &&
849 	    (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
850 		return (0);
851 
852 	/* Don't match rules in reserved set for flush requests */
853 	if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
854 		return (0);
855 
856 	/* If we're filtering by set, don't match other sets */
857 	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
858 		return (0);
859 
860 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
861 	    (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
862 		return (0);
863 
864 	return (1);
865 }
866 
867 struct manage_sets_args {
868 	uint16_t	set;
869 	uint8_t		new_set;
870 };
871 
872 static int
873 swap_sets_cb(struct namedobj_instance *ni, struct named_object *no,
874     void *arg)
875 {
876 	struct manage_sets_args *args;
877 
878 	args = (struct manage_sets_args *)arg;
879 	if (no->set == (uint8_t)args->set)
880 		no->set = args->new_set;
881 	else if (no->set == args->new_set)
882 		no->set = (uint8_t)args->set;
883 	return (0);
884 }
885 
886 static int
887 move_sets_cb(struct namedobj_instance *ni, struct named_object *no,
888     void *arg)
889 {
890 	struct manage_sets_args *args;
891 
892 	args = (struct manage_sets_args *)arg;
893 	if (no->set == (uint8_t)args->set)
894 		no->set = args->new_set;
895 	return (0);
896 }
897 
898 static int
899 test_sets_cb(struct namedobj_instance *ni, struct named_object *no,
900     void *arg)
901 {
902 	struct manage_sets_args *args;
903 
904 	args = (struct manage_sets_args *)arg;
905 	if (no->set != (uint8_t)args->set)
906 		return (0);
907 	if (ipfw_objhash_lookup_name_type(ni, args->new_set,
908 	    no->etlv, no->name) != NULL)
909 		return (EEXIST);
910 	return (0);
911 }
912 
913 /*
914  * Generic function to handle moving and swapping sets.
915  */
916 int
917 ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type,
918     uint16_t set, uint8_t new_set, enum ipfw_sets_cmd cmd)
919 {
920 	struct manage_sets_args args;
921 	struct named_object *no;
922 
923 	args.set = set;
924 	args.new_set = new_set;
925 	switch (cmd) {
926 	case SWAP_ALL:
927 		return (ipfw_objhash_foreach_type(ni, swap_sets_cb,
928 		    &args, type));
929 	case TEST_ALL:
930 		return (ipfw_objhash_foreach_type(ni, test_sets_cb,
931 		    &args, type));
932 	case MOVE_ALL:
933 		return (ipfw_objhash_foreach_type(ni, move_sets_cb,
934 		    &args, type));
935 	case COUNT_ONE:
936 		/*
937 		 * @set used to pass kidx.
938 		 * When @new_set is zero - reset object counter,
939 		 * otherwise increment it.
940 		 */
941 		no = ipfw_objhash_lookup_kidx(ni, set);
942 		if (new_set != 0)
943 			no->ocnt++;
944 		else
945 			no->ocnt = 0;
946 		return (0);
947 	case TEST_ONE:
948 		/* @set used to pass kidx */
949 		no = ipfw_objhash_lookup_kidx(ni, set);
950 		/*
951 		 * First check the number of references:
952 		 * when it differs, this means other rules are holding
953 		 * a reference to the given object, so it is not possible
954 		 * to change its set. Note that refcnt may include references
955 		 * from rules that are about to be added. Since we don't know
956 		 * their numbers (or even whether they will be added) it is
957 		 * perfectly OK to return an error here.
958 		 */
959 		if (no->ocnt != no->refcnt)
960 			return (EBUSY);
961 		if (ipfw_objhash_lookup_name_type(ni, new_set, type,
962 		    no->name) != NULL)
963 			return (EEXIST);
964 		return (0);
965 	case MOVE_ONE:
966 		/* @set used to pass kidx */
967 		no = ipfw_objhash_lookup_kidx(ni, set);
968 		no->set = new_set;
969 		return (0);
970 	}
971 	return (EINVAL);
972 }
973 
974 /*
975  * Delete rules matching range @rt.
976  * Saves number of deleted rules in @ndel.
977  *
978  * Returns 0 on success.
979  */
980 static int
981 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
982 {
983 	struct ip_fw *reap, *rule, **map;
984 	int end, start;
985 	int i, n, ndyn, ofs;
986 
987 	reap = NULL;
988 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
989 
990 	/*
991 	 * Stage 1: Determine range to inspect.
992 	 * Range is half-open, i.e. [start, end).
993 	 */
994 	start = 0;
995 	end = chain->n_rules - 1;
996 
997 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
998 		start = ipfw_find_rule(chain, rt->start_rule, 0);
999 
1000 		end = ipfw_find_rule(chain, rt->end_rule, 0);
1001 		if (rt->end_rule != IPFW_DEFAULT_RULE)
1002 			while (chain->map[end]->rulenum == rt->end_rule)
1003 				end++;
1004 	}
1005 
1006 	/* Allocate new map of the same size */
1007 	map = get_map(chain, 0, 1 /* locked */);
1008 	if (map == NULL) {
1009 		IPFW_UH_WUNLOCK(chain);
1010 		return (ENOMEM);
1011 	}
1012 
1013 	n = 0;
1014 	ndyn = 0;
1015 	ofs = start;
1016 	/* 1. bcopy the initial part of the map */
1017 	if (start > 0)
1018 		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
1019 	/* 2. copy active rules between start and end */
1020 	for (i = start; i < end; i++) {
1021 		rule = chain->map[i];
1022 		if (ipfw_match_range(rule, rt) == 0) {
1023 			map[ofs++] = rule;
1024 			continue;
1025 		}
1026 
1027 		n++;
1028 		if (ipfw_is_dyn_rule(rule) != 0)
1029 			ndyn++;
1030 	}
1031 	/* 3. copy the final part of the map */
1032 	bcopy(chain->map + end, map + ofs,
1033 		(chain->n_rules - end) * sizeof(struct ip_fw *));
1034 	/* 4. recalculate skipto cache */
1035 	update_skipto_cache(chain, map);
1036 	/* 5. swap the maps (under UH_WLOCK + WLOCK) */
1037 	map = swap_map(chain, map, chain->n_rules - n);
1038 	/* 6. Remove all dynamic states originated by deleted rules */
1039 	if (ndyn > 0)
1040 		ipfw_expire_dyn_rules(chain, rt);
1041 	/* 7. now remove the rules deleted from the old map */
1042 	for (i = start; i < end; i++) {
1043 		rule = map[i];
1044 		if (ipfw_match_range(rule, rt) == 0)
1045 			continue;
1046 		chain->static_len -= RULEUSIZE0(rule);
1047 		ipfw_reap_add(chain, &reap, rule);
1048 	}
1049 	IPFW_UH_WUNLOCK(chain);
1050 
1051 	ipfw_reap_rules(reap);
1052 	if (map != NULL)
1053 		free(map, M_IPFW);
1054 	*ndel = n;
1055 	return (0);
1056 }
1057 
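/*
 * Moves named objects (e.g. tables) referenced by rules in range @rt
 * to the new set, but only if every such object is referenced solely
 * by rules from that range.  Returns 0 on success or the error
 * reported by the object's TEST_ONE handler.
 */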
1058 static int
1059 move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
1060 {
1061 	struct opcode_obj_rewrite *rw;
1062 	struct ip_fw *rule;
1063 	ipfw_insn *cmd;
1064 	int cmdlen, i, l, c;
1065 	uint16_t kidx;
1066 
1067 	IPFW_UH_WLOCK_ASSERT(ch);
1068 
1069 	/* Stage 1: count number of references by given rules */
1070 	for (c = 0, i = 0; i < ch->n_rules - 1; i++) {
1071 		rule = ch->map[i];
1072 		if (ipfw_match_range(rule, rt) == 0)
1073 			continue;
1074 		if (rule->set == rt->new_set) /* nothing to do */
1075 			continue;
1076 		/* Search opcodes with named objects */
1077 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1078 		    l > 0; l -= cmdlen, cmd += cmdlen) {
1079 			cmdlen = F_LEN(cmd);
1080 			rw = find_op_rw(cmd, &kidx, NULL);
1081 			if (rw == NULL || rw->manage_sets == NULL)
1082 				continue;
1083 			/*
1084 			 * When manage_sets() returns a non-zero value for
1085 			 * the COUNT_ONE command, treat it as if the object
1086 			 * doesn't support sets (e.g. disabled with sysctl).
1087 			 * So, skip checks for this object.
1088 			 */
1089 			if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
1090 				continue;
1091 			c++;
1092 		}
1093 	}
1094 	if (c == 0) /* No objects found */
1095 		return (0);
1096 	/* Stage 2: verify "ownership" */
1097 	for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
1098 		rule = ch->map[i];
1099 		if (ipfw_match_range(rule, rt) == 0)
1100 			continue;
1101 		if (rule->set == rt->new_set) /* nothing to do */
1102 			continue;
1103 		/* Search opcodes with named objects */
1104 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1105 		    l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
1106 			cmdlen = F_LEN(cmd);
1107 			rw = find_op_rw(cmd, &kidx, NULL);
1108 			if (rw == NULL || rw->manage_sets == NULL)
1109 				continue;
1110 			/* Test for ownership and conflicting names */
1111 			c = rw->manage_sets(ch, kidx,
1112 			    (uint8_t)rt->new_set, TEST_ONE);
1113 		}
1114 	}
1115 	/* Stage 3: change set and cleanup */
1116 	for (i = 0; i < ch->n_rules - 1; i++) {
1117 		rule = ch->map[i];
1118 		if (ipfw_match_range(rule, rt) == 0)
1119 			continue;
1120 		if (rule->set == rt->new_set) /* nothing to do */
1121 			continue;
1122 		/* Search opcodes with named objects */
1123 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1124 		    l > 0; l -= cmdlen, cmd += cmdlen) {
1125 			cmdlen = F_LEN(cmd);
1126 			rw = find_op_rw(cmd, &kidx, NULL);
1127 			if (rw == NULL || rw->manage_sets == NULL)
1128 				continue;
1129 			/* cleanup object counter */
1130 			rw->manage_sets(ch, kidx,
1131 			    0 /* reset counter */, COUNT_ONE);
1132 			if (c != 0)
1133 				continue;
1134 			/* change set */
1135 			rw->manage_sets(ch, kidx,
1136 			    (uint8_t)rt->new_set, MOVE_ONE);
1137 		}
1138 	}
1139 	return (c);
1140 }
1141 /*
1142  * Changes the set of each rule in range @rt to @rt->new_set.
1143  *
1144  * Returns 0 on success.
1145  */
1146 static int
1147 move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1148 {
1149 	struct ip_fw *rule;
1150 	int i;
1151 
1152 	IPFW_UH_WLOCK(chain);
1153 
1154 	/*
1155 	 * Move rules with matching parameters to a new set.
1156 	 * This one is much more complex. We have to ensure
1157 	 * that all referenced tables (if any) are referenced
1158 	 * by given rule subset only. Otherwise, we can't move
1159 	 * them to new set and have to return error.
1160 	 */
1161 	if ((i = move_objects(chain, rt)) != 0) {
1162 		IPFW_UH_WUNLOCK(chain);
1163 		return (i);
1164 	}
1165 
1166 	/* XXX: We have to do swap holding WLOCK */
1167 	for (i = 0; i < chain->n_rules; i++) {
1168 		rule = chain->map[i];
1169 		if (ipfw_match_range(rule, rt) == 0)
1170 			continue;
1171 		rule->set = rt->new_set;
1172 	}
1173 
1174 	IPFW_UH_WUNLOCK(chain);
1175 
1176 	return (0);
1177 }
1178 
1179 /*
1180  * Clear counters for a specific rule.
1181  * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
1182  * so we only care that rules do not disappear.
1183  */
1184 static void
1185 clear_counters(struct ip_fw *rule, int log_only)
1186 {
1187 	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
1188 
1189 	if (log_only == 0)
1190 		IPFW_ZERO_RULE_COUNTER(rule);
1191 	if (l->o.opcode == O_LOG)
1192 		l->log_left = l->max_log;
1193 }
1194 
1195 /*
1196  * Flushes rule counters and/or log values on matching range.
1197  *
1198  * Returns number of items cleared.
1199  */
1200 static int
1201 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
1202 {
1203 	struct ip_fw *rule;
1204 	int num;
1205 	int i;
1206 
1207 	num = 0;
1208 	rt->flags |= IPFW_RCFLAG_DEFAULT;
1209 
1210 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
1211 	for (i = 0; i < chain->n_rules; i++) {
1212 		rule = chain->map[i];
1213 		if (ipfw_match_range(rule, rt) == 0)
1214 			continue;
1215 		clear_counters(rule, log_only);
1216 		num++;
1217 	}
1218 	IPFW_UH_WUNLOCK(chain);
1219 
1220 	return (num);
1221 }
1222 
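/*
 * Validates an ipfw_range_tlv received from userland.
 * Returns 0 if the request is well-formed, non-zero otherwise.
 */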
1223 static int
1224 check_range_tlv(ipfw_range_tlv *rt)
1225 {
1226 
1227 	if (rt->head.length != sizeof(*rt))
1228 		return (1);
1229 	if (rt->start_rule > rt->end_rule)
1230 		return (1);
1231 	if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1232 		return (1);
1233 
1234 	if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1235 		return (1);
1236 
1237 	return (0);
1238 }
1239 
1240 /*
1241  * Delete rules matching specified parameters
1242  * Data layout (v0)(current):
1243  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1244  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1245  *
1246  * Saves number of deleted rules in ipfw_range_tlv->new_set.
1247  *
1248  * Returns 0 on success.
1249  */
1250 static int
1251 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1252     struct sockopt_data *sd)
1253 {
1254 	ipfw_range_header *rh;
1255 	int error, ndel;
1256 
1257 	if (sd->valsize != sizeof(*rh))
1258 		return (EINVAL);
1259 
1260 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1261 
1262 	if (check_range_tlv(&rh->range) != 0)
1263 		return (EINVAL);
1264 
1265 	ndel = 0;
1266 	if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1267 		return (error);
1268 
1269 	/* Save number of rules deleted */
1270 	rh->range.new_set = ndel;
1271 	return (0);
1272 }
1273 
1274 /*
1275  * Move rules/sets matching specified parameters
1276  * Data layout (v0)(current):
1277  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1278  *
1279  * Returns 0 on success.
1280  */
1281 static int
1282 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1283     struct sockopt_data *sd)
1284 {
1285 	ipfw_range_header *rh;
1286 
1287 	if (sd->valsize != sizeof(*rh))
1288 		return (EINVAL);
1289 
1290 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1291 
1292 	if (check_range_tlv(&rh->range) != 0)
1293 		return (EINVAL);
1294 
1295 	return (move_range(chain, &rh->range));
1296 }
1297 
1298 /*
1299  * Clear rule accounting data matching specified parameters
1300  * Data layout (v0)(current):
1301  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1302  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1303  *
1304  * Saves number of cleared rules in ipfw_range_tlv->new_set.
1305  *
1306  * Returns 0 on success.
1307  */
1308 static int
1309 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1310     struct sockopt_data *sd)
1311 {
1312 	ipfw_range_header *rh;
1313 	int log_only, num;
1314 	char *msg;
1315 
1316 	if (sd->valsize != sizeof(*rh))
1317 		return (EINVAL);
1318 
1319 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1320 
1321 	if (check_range_tlv(&rh->range) != 0)
1322 		return (EINVAL);
1323 
1324 	log_only = (op3->opcode == IP_FW_XRESETLOG);
1325 
1326 	num = clear_range(chain, &rh->range, log_only);
1327 
1328 	if (rh->range.flags & IPFW_RCFLAG_ALL)
1329 		msg = log_only ? "All logging counts reset" :
1330 		    "Accounting cleared";
1331 	else
1332 		msg = log_only ? "logging count reset" : "cleared";
1333 
1334 	if (V_fw_verbose) {
1335 		int lev = LOG_SECURITY | LOG_NOTICE;
1336 		log(lev, "ipfw: %s.\n", msg);
1337 	}
1338 
1339 	/* Save number of rules cleared */
1340 	rh->range.new_set = num;
1341 	return (0);
1342 }
1343 
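/*
 * Applies a set enable/disable request: @rt->set is a bitmask of sets
 * to disable, @rt->new_set a bitmask of sets to enable.  RESVD_SET is
 * always kept enabled.
 */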
1344 static void
1345 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1346 {
1347 	uint32_t v_set;
1348 
1349 	IPFW_UH_WLOCK_ASSERT(chain);
1350 
1351 	/* Change enabled/disabled sets mask */
1352 	v_set = (V_set_disable | rt->set) & ~rt->new_set;
1353 	v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1354 	IPFW_WLOCK(chain);
1355 	V_set_disable = v_set;
1356 	IPFW_WUNLOCK(chain);
1357 }
1358 
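/*
 * Swaps (@mv == 0) or moves (@mv != 0) all rules and named objects
 * between sets @rt->set and @rt->new_set.  For a move, first checks
 * that no conflicting object names exist in the destination set.
 */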
1359 static int
1360 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1361 {
1362 	struct opcode_obj_rewrite *rw;
1363 	struct ip_fw *rule;
1364 	int i;
1365 
1366 	IPFW_UH_WLOCK_ASSERT(chain);
1367 
1368 	if (rt->set == rt->new_set) /* nothing to do */
1369 		return (0);
1370 
1371 	if (mv != 0) {
1372 		/*
1373 		 * Before moving the rules we need to check that
1374 		 * there aren't any conflicting named objects.
1375 		 */
1376 		for (rw = ctl3_rewriters;
1377 		    rw < ctl3_rewriters + ctl3_rsize; rw++) {
1378 			if (rw->manage_sets == NULL)
1379 				continue;
1380 			i = rw->manage_sets(chain, (uint8_t)rt->set,
1381 			    (uint8_t)rt->new_set, TEST_ALL);
1382 			if (i != 0)
1383 				return (EEXIST);
1384 		}
1385 	}
1386 	/* Swap or move two sets */
1387 	for (i = 0; i < chain->n_rules - 1; i++) {
1388 		rule = chain->map[i];
1389 		if (rule->set == (uint8_t)rt->set)
1390 			rule->set = (uint8_t)rt->new_set;
1391 		else if (rule->set == (uint8_t)rt->new_set && mv == 0)
1392 			rule->set = (uint8_t)rt->set;
1393 	}
1394 	for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
1395 		if (rw->manage_sets == NULL)
1396 			continue;
1397 		rw->manage_sets(chain, (uint8_t)rt->set,
1398 		    (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL: SWAP_ALL);
1399 	}
1400 	return (0);
1401 }
1402 
1403 /*
1404  * Swaps or moves sets
1405  * Data layout (v0)(current):
1406  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1407  *
1408  * Returns 0 on success.
1409  */
1410 static int
1411 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1412     struct sockopt_data *sd)
1413 {
1414 	ipfw_range_header *rh;
1415 	int ret;
1416 
1417 	if (sd->valsize != sizeof(*rh))
1418 		return (EINVAL);
1419 
1420 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1421 
1422 	if (rh->range.head.length != sizeof(ipfw_range_tlv))
1423 		return (EINVAL);
1424 	/* enable_sets() expects bitmasks. */
1425 	if (op3->opcode != IP_FW_SET_ENABLE &&
1426 	    (rh->range.set >= IPFW_MAX_SETS ||
1427 	    rh->range.new_set >= IPFW_MAX_SETS))
1428 		return (EINVAL);
1429 
1430 	ret = 0;
1431 	IPFW_UH_WLOCK(chain);
1432 	switch (op3->opcode) {
1433 	case IP_FW_SET_SWAP:
1434 	case IP_FW_SET_MOVE:
1435 		ret = swap_sets(chain, &rh->range,
1436 		    op3->opcode == IP_FW_SET_MOVE);
1437 		break;
1438 	case IP_FW_SET_ENABLE:
1439 		enable_sets(chain, &rh->range);
1440 		break;
1441 	}
1442 	IPFW_UH_WUNLOCK(chain);
1443 
1444 	return (ret);
1445 }
1446 
1447 /**
1448  * Remove all rules with given number, or do set manipulation.
1449  * Assumes chain != NULL && *chain != NULL.
1450  *
1451  * The argument is a uint32_t. The low 16 bits are the rule or set number;
1452  * the next 8 bits are the new set; the top 8 bits indicate the command:
1453  *
1454  *	0	delete rules numbered "rulenum"
1455  *	1	delete rules in set "rulenum"
1456  *	2	move rules "rulenum" to set "new_set"
1457  *	3	move rules from set "rulenum" to set "new_set"
1458  *	4	swap sets "rulenum" and "new_set"
1459  *	5	delete rules "rulenum" and set "new_set"
1460  */
1461 static int
1462 del_entry(struct ip_fw_chain *chain, uint32_t arg)
1463 {
1464 	uint32_t num;	/* rule number or old_set */
1465 	uint8_t cmd, new_set;
1466 	int do_del, ndel;
1467 	int error = 0;
1468 	ipfw_range_tlv rt;
1469 
1470 	num = arg & 0xffff;
1471 	cmd = (arg >> 24) & 0xff;
1472 	new_set = (arg >> 16) & 0xff;
1473 
1474 	if (cmd > 5 || new_set > RESVD_SET)
1475 		return EINVAL;
1476 	if (cmd == 0 || cmd == 2 || cmd == 5) {
1477 		if (num >= IPFW_DEFAULT_RULE)
1478 			return EINVAL;
1479 	} else {
1480 		if (num > RESVD_SET)	/* old_set */
1481 			return EINVAL;
1482 	}
1483 
1484 	/* Convert old requests into new representation */
1485 	memset(&rt, 0, sizeof(rt));
1486 	rt.start_rule = num;
1487 	rt.end_rule = num;
1488 	rt.set = num;
1489 	rt.new_set = new_set;
1490 	do_del = 0;
1491 
1492 	switch (cmd) {
1493 	case 0: /* delete rules numbered "rulenum" */
1494 		if (num == 0)
1495 			rt.flags |= IPFW_RCFLAG_ALL;
1496 		else
1497 			rt.flags |= IPFW_RCFLAG_RANGE;
1498 		do_del = 1;
1499 		break;
1500 	case 1: /* delete rules in set "rulenum" */
1501 		rt.flags |= IPFW_RCFLAG_SET;
1502 		do_del = 1;
1503 		break;
1504 	case 5: /* delete rules "rulenum" and set "new_set" */
1505 		rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1506 		rt.set = new_set;
1507 		rt.new_set = 0;
1508 		do_del = 1;
1509 		break;
1510 	case 2: /* move rules "rulenum" to set "new_set" */
1511 		rt.flags |= IPFW_RCFLAG_RANGE;
1512 		break;
1513 	case 3: /* move rules from set "rulenum" to set "new_set" */
1514 		IPFW_UH_WLOCK(chain);
1515 		error = swap_sets(chain, &rt, 1);
1516 		IPFW_UH_WUNLOCK(chain);
1517 		return (error);
1518 	case 4: /* swap sets "rulenum" and "new_set" */
1519 		IPFW_UH_WLOCK(chain);
1520 		error = swap_sets(chain, &rt, 0);
1521 		IPFW_UH_WUNLOCK(chain);
1522 		return (error);
1523 	default:
1524 		return (ENOTSUP);
1525 	}
1526 
1527 	if (do_del != 0) {
1528 		if ((error = delete_range(chain, &rt, &ndel)) != 0)
1529 			return (error);
1530 
1531 		if (ndel == 0 && (cmd != 1 && num != 0))
1532 			return (EINVAL);
1533 
1534 		return (0);
1535 	}
1536 
1537 	return (move_range(chain, &rt));
1538 }
1539 
1540 /**
1541  * Reset some or all counters on firewall rules.
1542  * The argument `arg' is a u_int32_t. The low 16 bits are the rule number,
1543  * the next 8 bits are the set number, the top 8 bits are the command:
1544  *	0	work with rules from all sets;
1545  *	1	work with rules only from specified set.
1546  * Specified rule number is zero if we want to clear all entries.
1547  * log_only is 1 if we only want to reset logs, zero otherwise.
1548  */
1549 static int
1550 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1551 {
1552 	struct ip_fw *rule;
1553 	char *msg;
1554 	int i;
1555 
1556 	uint16_t rulenum = arg & 0xffff;
1557 	uint8_t set = (arg >> 16) & 0xff;
1558 	uint8_t cmd = (arg >> 24) & 0xff;
1559 
1560 	if (cmd > 1)
1561 		return (EINVAL);
1562 	if (cmd == 1 && set > RESVD_SET)
1563 		return (EINVAL);
1564 
1565 	IPFW_UH_RLOCK(chain);
1566 	if (rulenum == 0) {
1567 		V_norule_counter = 0;
1568 		for (i = 0; i < chain->n_rules; i++) {
1569 			rule = chain->map[i];
1570 			/* Skip rules not in our set. */
1571 			if (cmd == 1 && rule->set != set)
1572 				continue;
1573 			clear_counters(rule, log_only);
1574 		}
1575 		msg = log_only ? "All logging counts reset" :
1576 		    "Accounting cleared";
1577 	} else {
1578 		int cleared = 0;
1579 		for (i = 0; i < chain->n_rules; i++) {
1580 			rule = chain->map[i];
1581 			if (rule->rulenum == rulenum) {
1582 				if (cmd == 0 || rule->set == set)
1583 					clear_counters(rule, log_only);
1584 				cleared = 1;
1585 			}
1586 			if (rule->rulenum > rulenum)
1587 				break;
1588 		}
1589 		if (!cleared) {	/* we did not find any matching rules */
1590 			IPFW_UH_RUNLOCK(chain);
1591 			return (EINVAL);
1592 		}
1593 		msg = log_only ? "logging count reset" : "cleared";
1594 	}
1595 	IPFW_UH_RUNLOCK(chain);
1596 
1597 	if (V_fw_verbose) {
1598 		int lev = LOG_SECURITY | LOG_NOTICE;
1599 
1600 		if (rulenum)
1601 			log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1602 		else
1603 			log(lev, "ipfw: %s.\n", msg);
1604 	}
1605 	return (0);
1606 }
1607 
1608 
1609 /*
1610  * Check rule head in FreeBSD11 format
1611  *
1612  */
1613 static int
1614 check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1615     struct rule_check_info *ci)
1616 {
1617 	int l;
1618 
1619 	if (size < sizeof(*rule)) {
1620 		printf("ipfw: rule too short\n");
1621 		return (EINVAL);
1622 	}
1623 
1624 	/* Check for valid cmd_len */
1625 	l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1626 	if (l != size) {
1627 		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1628 		return (EINVAL);
1629 	}
1630 	if (rule->act_ofs >= rule->cmd_len) {
1631 		printf("ipfw: bogus action offset (%u > %u)\n",
1632 		    rule->act_ofs, rule->cmd_len - 1);
1633 		return (EINVAL);
1634 	}
1635 
1636 	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1637 		return (EINVAL);
1638 
1639 	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1640 }
1641 
1642 /*
1643  * Check rule head in FreeBSD8 format
1644  *
1645  */
1646 static int
1647 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1648     struct rule_check_info *ci)
1649 {
1650 	int l;
1651 
1652 	if (size < sizeof(*rule)) {
1653 		printf("ipfw: rule too short\n");
1654 		return (EINVAL);
1655 	}
1656 
1657 	/* Check for valid cmd_len */
1658 	l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1659 	if (l != size) {
1660 		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1661 		return (EINVAL);
1662 	}
1663 	if (rule->act_ofs >= rule->cmd_len) {
1664 		printf("ipfw: bogus action offset (%u > %u)\n",
1665 		    rule->act_ofs, rule->cmd_len - 1);
1666 		return (EINVAL);
1667 	}
1668 
1669 	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1670 		return (EINVAL);
1671 
1672 	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1673 }
1674 
1675 static int
1676 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1677 {
1678 	int cmdlen, l;
1679 	int have_action;
1680 
1681 	have_action = 0;
1682 
1683 	/*
1684 	 * Now go for the individual checks. Very simple ones, basically only
1685 	 * instruction sizes.
1686 	 */
1687 	for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1688 		cmdlen = F_LEN(cmd);
1689 		if (cmdlen > l) {
1690 			printf("ipfw: opcode %d size truncated\n",
1691 			    cmd->opcode);
1692 			return EINVAL;
1693 		}
1694 		switch (cmd->opcode) {
1695 		case O_PROBE_STATE:
1696 		case O_KEEP_STATE:
1697 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1698 				goto bad_size;
1699 			ci->object_opcodes++;
1700 			break;
1701 		case O_PROTO:
1702 		case O_IP_SRC_ME:
1703 		case O_IP_DST_ME:
1704 		case O_LAYER2:
1705 		case O_IN:
1706 		case O_FRAG:
1707 		case O_DIVERTED:
1708 		case O_IPOPT:
1709 		case O_IPTOS:
1710 		case O_IPPRECEDENCE:
1711 		case O_IPVER:
1712 		case O_SOCKARG:
1713 		case O_TCPFLAGS:
1714 		case O_TCPOPTS:
1715 		case O_ESTAB:
1716 		case O_VERREVPATH:
1717 		case O_VERSRCREACH:
1718 		case O_ANTISPOOF:
1719 		case O_IPSEC:
1720 #ifdef INET6
1721 		case O_IP6_SRC_ME:
1722 		case O_IP6_DST_ME:
1723 		case O_EXT_HDR:
1724 		case O_IP6:
1725 #endif
1726 		case O_IP4:
1727 		case O_TAG:
1728 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1729 				goto bad_size;
1730 			break;
1731 
1732 		case O_EXTERNAL_ACTION:
1733 			if (cmd->arg1 == 0 ||
1734 			    cmdlen != F_INSN_SIZE(ipfw_insn)) {
1735 				printf("ipfw: invalid external "
1736 				    "action opcode\n");
1737 				return (EINVAL);
1738 			}
1739 			ci->object_opcodes++;
1740 			/*
1741 			 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA
1742 			 * opcode?
1743 			 */
1744 			if (l != cmdlen) {
1745 				l -= cmdlen;
1746 				cmd += cmdlen;
1747 				cmdlen = F_LEN(cmd);
1748 				if (cmd->opcode == O_EXTERNAL_DATA)
1749 					goto check_action;
1750 				if (cmd->opcode != O_EXTERNAL_INSTANCE) {
1751 					printf("ipfw: invalid opcode "
1752 					    "next to external action %u\n",
1753 					    cmd->opcode);
1754 					return (EINVAL);
1755 				}
1756 				if (cmd->arg1 == 0 ||
1757 				    cmdlen != F_INSN_SIZE(ipfw_insn)) {
1758 					printf("ipfw: invalid external "
1759 					    "action instance opcode\n");
1760 					return (EINVAL);
1761 				}
1762 				ci->object_opcodes++;
1763 			}
1764 			goto check_action;
1765 
1766 		case O_FIB:
1767 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1768 				goto bad_size;
1769 			if (cmd->arg1 >= rt_numfibs) {
1770 				printf("ipfw: invalid fib number %d\n",
1771 					cmd->arg1);
1772 				return EINVAL;
1773 			}
1774 			break;
1775 
1776 		case O_SETFIB:
1777 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1778 				goto bad_size;
1779 			if ((cmd->arg1 != IP_FW_TARG) &&
1780 			    ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
1781 				printf("ipfw: invalid fib number %d\n",
1782 					cmd->arg1 & 0x7FFF);
1783 				return EINVAL;
1784 			}
1785 			goto check_action;
1786 
1787 		case O_UID:
1788 		case O_GID:
1789 		case O_JAIL:
1790 		case O_IP_SRC:
1791 		case O_IP_DST:
1792 		case O_TCPSEQ:
1793 		case O_TCPACK:
1794 		case O_PROB:
1795 		case O_ICMPTYPE:
1796 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1797 				goto bad_size;
1798 			break;
1799 
1800 		case O_LIMIT:
1801 			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1802 				goto bad_size;
1803 			ci->object_opcodes++;
1804 			break;
1805 
1806 		case O_LOG:
1807 			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
1808 				goto bad_size;
1809 
1810 			((ipfw_insn_log *)cmd)->log_left =
1811 			    ((ipfw_insn_log *)cmd)->max_log;
1812 
1813 			break;
1814 
1815 		case O_IP_SRC_MASK:
1816 		case O_IP_DST_MASK:
1817 			/* only odd command lengths */
1818 			if ((cmdlen & 1) == 0)
1819 				goto bad_size;
1820 			break;
1821 
1822 		case O_IP_SRC_SET:
1823 		case O_IP_DST_SET:
1824 			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1825 				printf("ipfw: invalid set size %d\n",
1826 					cmd->arg1);
1827 				return EINVAL;
1828 			}
1829 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1830 			    (cmd->arg1+31)/32 )
1831 				goto bad_size;
1832 			break;
1833 
1834 		case O_IP_SRC_LOOKUP:
1835 			if (cmdlen > F_INSN_SIZE(ipfw_insn_u32))
1836 				goto bad_size;
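			/* FALLTHROUGH */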
1837 		case O_IP_DST_LOOKUP:
1838 			if (cmd->arg1 >= V_fw_tables_max) {
1839 				printf("ipfw: invalid table number %d\n",
1840 				    cmd->arg1);
1841 				return (EINVAL);
1842 			}
1843 			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1844 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1845 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1846 				goto bad_size;
1847 			ci->object_opcodes++;
1848 			break;
1849 		case O_IP_FLOW_LOOKUP:
1850 			if (cmd->arg1 >= V_fw_tables_max) {
1851 				printf("ipfw: invalid table number %d\n",
1852 				    cmd->arg1);
1853 				return (EINVAL);
1854 			}
1855 			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1856 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1857 				goto bad_size;
1858 			ci->object_opcodes++;
1859 			break;
1860 		case O_MACADDR2:
1861 			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1862 				goto bad_size;
1863 			break;
1864 
1865 		case O_NOP:
1866 		case O_IPID:
1867 		case O_IPTTL:
1868 		case O_IPLEN:
1869 		case O_TCPDATALEN:
1870 		case O_TCPWIN:
1871 		case O_TAGGED:
1872 			if (cmdlen < 1 || cmdlen > 31)
1873 				goto bad_size;
1874 			break;
1875 
1876 		case O_DSCP:
1877 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1878 				goto bad_size;
1879 			break;
1880 
1881 		case O_MAC_TYPE:
1882 		case O_IP_SRCPORT:
1883 		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1884 			if (cmdlen < 2 || cmdlen > 31)
1885 				goto bad_size;
1886 			break;
1887 
1888 		case O_RECV:
1889 		case O_XMIT:
1890 		case O_VIA:
1891 			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1892 				goto bad_size;
1893 			ci->object_opcodes++;
1894 			break;
1895 
1896 		case O_ALTQ:
1897 			if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1898 				goto bad_size;
1899 			break;
1900 
1901 		case O_PIPE:
1902 		case O_QUEUE:
1903 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1904 				goto bad_size;
1905 			goto check_action;
1906 
1907 		case O_FORWARD_IP:
1908 			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1909 				goto bad_size;
1910 			goto check_action;
1911 #ifdef INET6
1912 		case O_FORWARD_IP6:
1913 			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
1914 				goto bad_size;
1915 			goto check_action;
1916 #endif /* INET6 */
1917 
1918 		case O_DIVERT:
1919 		case O_TEE:
1920 			if (ip_divert_ptr == NULL)
1921 				return EINVAL;
1922 			else
1923 				goto check_size;
1924 		case O_NETGRAPH:
1925 		case O_NGTEE:
1926 			if (ng_ipfw_input_p == NULL)
1927 				return EINVAL;
1928 			else
1929 				goto check_size;
1930 		case O_NAT:
1931 			if (!IPFW_NAT_LOADED)
1932 				return EINVAL;
1933 			if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
1934 				goto bad_size;
1935 			goto check_action;
1936 		case O_CHECK_STATE:
1937 			ci->object_opcodes++;
1938 			/* FALLTHROUGH */
1939 		case O_FORWARD_MAC: /* XXX not implemented yet */
1940 		case O_COUNT:
1941 		case O_ACCEPT:
1942 		case O_DENY:
1943 		case O_REJECT:
1944 		case O_SETDSCP:
1945 #ifdef INET6
1946 		case O_UNREACH6:
1947 #endif
1948 		case O_SKIPTO:
1949 		case O_REASS:
1950 		case O_CALLRETURN:
1951 check_size:
1952 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1953 				goto bad_size;
1954 check_action:
1955 			if (have_action) {
1956 				printf("ipfw: opcode %d, multiple actions"
1957 					" not allowed\n",
1958 					cmd->opcode);
1959 				return (EINVAL);
1960 			}
1961 			have_action = 1;
1962 			if (l != cmdlen) {
1963 				printf("ipfw: opcode %d, action must be"
1964 					" last opcode\n",
1965 					cmd->opcode);
1966 				return (EINVAL);
1967 			}
1968 			break;
1969 #ifdef INET6
1970 		case O_IP6_SRC:
1971 		case O_IP6_DST:
1972 			if (cmdlen != F_INSN_SIZE(struct in6_addr) +
1973 			    F_INSN_SIZE(ipfw_insn))
1974 				goto bad_size;
1975 			break;
1976 
1977 		case O_FLOW6ID:
1978 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1979 			    ((ipfw_insn_u32 *)cmd)->o.arg1)
1980 				goto bad_size;
1981 			break;
1982 
1983 		case O_IP6_SRC_MASK:
1984 		case O_IP6_DST_MASK:
1985 			if (!(cmdlen & 1) || cmdlen > 127)
1986 				goto bad_size;
1987 			break;
1988 		case O_ICMP6TYPE:
1989 			if (cmdlen != F_INSN_SIZE(ipfw_insn_icmp6))
1990 				goto bad_size;
1991 			break;
1992 #endif
1993 
1994 		default:
1995 			switch (cmd->opcode) {
1996 #ifndef INET6
1997 			case O_IP6_SRC_ME:
1998 			case O_IP6_DST_ME:
1999 			case O_EXT_HDR:
2000 			case O_IP6:
2001 			case O_UNREACH6:
2002 			case O_IP6_SRC:
2003 			case O_IP6_DST:
2004 			case O_FLOW6ID:
2005 			case O_IP6_SRC_MASK:
2006 			case O_IP6_DST_MASK:
2007 			case O_ICMP6TYPE:
2008 				printf("ipfw: no IPv6 support in kernel\n");
2009 				return (EPROTONOSUPPORT);
2010 #endif
2011 			default:
2012 				printf("ipfw: opcode %d, unknown opcode\n",
2013 					cmd->opcode);
2014 				return (EINVAL);
2015 			}
2016 		}
2017 	}
2018 	if (have_action == 0) {
2019 		printf("ipfw: missing action\n");
2020 		return (EINVAL);
2021 	}
2022 	return 0;
2023 
2024 bad_size:
2025 	printf("ipfw: opcode %d size %d wrong\n",
2026 		cmd->opcode, cmdlen);
2027 	return (EINVAL);
2028 }
2029 
2030 
2031 /*
2032  * Translation of requests for compatibility with FreeBSD 7.2/8.
2033  * A static variable tells us if we have an old client from userland,
2034  * and if necessary we translate requests and responses between the
2035  * two formats.
2036  */
2037 static int is7 = 0;
2038 
2039 struct ip_fw7 {
2040 	struct ip_fw7	*next;		/* linked list of rules     */
2041 	struct ip_fw7	*next_rule;	/* ptr to next [skipto] rule    */
2042 	/* 'next_rule' is used to pass up 'set_disable' status      */
2043 
2044 	uint16_t	act_ofs;	/* offset of action in 32-bit units */
2045 	uint16_t	cmd_len;	/* # of 32-bit words in cmd */
2046 	uint16_t	rulenum;	/* rule number          */
2047 	uint8_t		set;		/* rule set (0..31)     */
2048 	// #define RESVD_SET   31  /* set for default and persistent rules */
2049 	uint8_t		_pad;		/* padding          */
2050 	// uint32_t        id;             /* rule id, only in v.8 */
2051 	/* These fields are present in all rules.           */
2052 	uint64_t	pcnt;		/* Packet counter       */
2053 	uint64_t	bcnt;		/* Byte counter         */
2054 	uint32_t	timestamp;	/* tv_sec of last match     */
2055 
2056 	ipfw_insn	cmd[1];		/* storage for commands     */
2057 };
2058 
2059 static int convert_rule_to_7(struct ip_fw_rule0 *rule);
2060 static int convert_rule_to_8(struct ip_fw_rule0 *rule);
2061 
2062 #ifndef RULESIZE7
2063 #define RULESIZE7(rule)  (sizeof(struct ip_fw7) + \
2064 	((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
2065 #endif
2066 
2067 
2068 /*
2069  * Copy the static and dynamic rules to the supplied buffer
2070  * and return the amount of space actually used.
2071  * Must be run under IPFW_UH_RLOCK
2072  */
2073 static size_t
2074 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
2075 {
2076 	char *bp = buf;
2077 	char *ep = bp + space;
2078 	struct ip_fw *rule;
2079 	struct ip_fw_rule0 *dst;
2080 	struct timeval boottime;
2081 	int error, i, l, warnflag;
2082 	time_t	boot_seconds;
2083 
2084 	warnflag = 0;
2085 
2086 	getboottime(&boottime);
2087 	boot_seconds = boottime.tv_sec;
2088 	for (i = 0; i < chain->n_rules; i++) {
2089 		rule = chain->map[i];
2090 
2091 		if (is7) {
2092 		    /* Convert rule to FreeBSD 7.2 format */
2093 		    l = RULESIZE7(rule);
2094 		    if (bp + l + sizeof(uint32_t) <= ep) {
2095 			bcopy(rule, bp, l + sizeof(uint32_t));
2096 			error = set_legacy_obj_kidx(chain,
2097 			    (struct ip_fw_rule0 *)bp);
2098 			if (error != 0)
2099 				return (0);
2100 			error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
2101 			if (error)
2102 				return 0; /*XXX correct? */
2103 			/*
2104 			 * XXX HACK. Store the disable mask in the "next"
2105 			 * pointer in a wild attempt to keep the ABI the same.
2106 			 * Why do we do this on EVERY rule?
2107 			 */
2108 			bcopy(&V_set_disable,
2109 				&(((struct ip_fw7 *)bp)->next_rule),
2110 				sizeof(V_set_disable));
2111 			if (((struct ip_fw7 *)bp)->timestamp)
2112 			    ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
2113 			bp += l;
2114 		    }
2115 		    continue; /* go to next rule */
2116 		}
2117 
2118 		l = RULEUSIZE0(rule);
2119 		if (bp + l > ep) { /* should not happen */
2120 			printf("overflow dumping static rules\n");
2121 			break;
2122 		}
2123 		dst = (struct ip_fw_rule0 *)bp;
2124 		export_rule0(rule, dst, l);
2125 		error = set_legacy_obj_kidx(chain, dst);
2126 
2127 		/*
2128 		 * XXX HACK. Store the disable mask in the "next"
2129 		 * pointer in a wild attempt to keep the ABI the same.
2130 		 * Why do we do this on EVERY rule?
2131 		 *
2132 		 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
2133 		 * so we need to fail _after_ saving at least one mask.
2134 		 */
2135 		bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
2136 		if (dst->timestamp)
2137 			dst->timestamp += boot_seconds;
2138 		bp += l;
2139 
2140 		if (error != 0) {
2141 			if (error == 2) {
2142 				/* Non-fatal table rewrite error. */
2143 				warnflag = 1;
2144 				continue;
2145 			}
2146 			printf("Stop on rule %d. Failed to convert table\n",
2147 			    rule->rulenum);
2148 			break;
2149 		}
2150 	}
2151 	if (warnflag != 0)
2152 		printf("ipfw: process %s is using legacy interfaces,"
2153 		    " consider rebuilding\n", "");
2154 	ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
2155 	return (bp - (char *)buf);
2156 }
2157 
2158 
2159 struct dump_args {
2160 	uint32_t	b;	/* start rule */
2161 	uint32_t	e;	/* end rule */
2162 	uint32_t	rcount;	/* number of rules */
2163 	uint32_t	rsize;	/* rules size */
2164 	uint32_t	tcount;	/* number of tables */
2165 	int		rcounters;	/* counters */
2166 };
2167 
2168 void
2169 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
2170 {
2171 
2172 	ntlv->head.type = no->etlv;
2173 	ntlv->head.length = sizeof(*ntlv);
2174 	ntlv->idx = no->kidx;
2175 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
2176 }
2177 
2178 /*
2179  * Export named object info in instance @ni, identified by @kidx
2180  * to ipfw_obj_ntlv. TLV is allocated from @sd space.
2181  *
2182  * Returns 0 on success.
2183  */
2184 static int
2185 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
2186     struct sockopt_data *sd)
2187 {
2188 	struct named_object *no;
2189 	ipfw_obj_ntlv *ntlv;
2190 
2191 	no = ipfw_objhash_lookup_kidx(ni, kidx);
2192 	KASSERT(no != NULL, ("invalid object kernel index passed"));
2193 
2194 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2195 	if (ntlv == NULL)
2196 		return (ENOMEM);
2197 
2198 	ipfw_export_obj_ntlv(no, ntlv);
2199 	return (0);
2200 }
2201 
2202 /*
2203  * Dumps static rules with table TLVs in buffer @sd.
2204  *
2205  * Returns 0 on success.
2206  */
2207 static int
2208 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
2209     uint32_t *bmask, struct sockopt_data *sd)
2210 {
2211 	int error;
2212 	int i, l;
2213 	uint32_t tcount;
2214 	ipfw_obj_ctlv *ctlv;
2215 	struct ip_fw *krule;
2216 	struct namedobj_instance *ni;
2217 	caddr_t dst;
2218 
2219 	/* Dump table names first (if any) */
2220 	if (da->tcount > 0) {
2221 		/* Header first */
2222 		ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2223 		if (ctlv == NULL)
2224 			return (ENOMEM);
2225 		ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
2226 		ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
2227 		    sizeof(*ctlv);
2228 		ctlv->count = da->tcount;
2229 		ctlv->objsize = sizeof(ipfw_obj_ntlv);
2230 	}
2231 
2232 	i = 0;
2233 	tcount = da->tcount;
2234 	ni = ipfw_get_table_objhash(chain);
2235 	while (tcount > 0) {
2236 		if ((bmask[i / 32] & (1 << (i % 32))) == 0) {
2237 			i++;
2238 			continue;
2239 		}
2240 
2241 		/* Jump to shared named object bitmask */
2242 		if (i >= IPFW_TABLES_MAX) {
2243 			ni = CHAIN_TO_SRV(chain);
2244 			i -= IPFW_TABLES_MAX;
2245 			bmask += IPFW_TABLES_MAX / 32;
2246 		}
2247 
2248 		if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
2249 			return (error);
2250 
2251 		i++;
2252 		tcount--;
2253 	}
2254 
2255 	/* Dump rules */
2256 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2257 	if (ctlv == NULL)
2258 		return (ENOMEM);
2259 	ctlv->head.type = IPFW_TLV_RULE_LIST;
2260 	ctlv->head.length = da->rsize + sizeof(*ctlv);
2261 	ctlv->count = da->rcount;
2262 
2263 	for (i = da->b; i < da->e; i++) {
2264 		krule = chain->map[i];
2265 
2266 		l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
2267 		if (da->rcounters != 0)
2268 			l += sizeof(struct ip_fw_bcounter);
2269 		dst = (caddr_t)ipfw_get_sopt_space(sd, l);
2270 		if (dst == NULL)
2271 			return (ENOMEM);
2272 
2273 		export_rule1(krule, dst, l, da->rcounters);
2274 	}
2275 
2276 	return (0);
2277 }
2278 
2279 /*
2280  * Marks every object index used in @rule with bit in @bmask.
2281  * Used to generate bitmask of referenced tables/objects for given ruleset
2282  * or its part.
2283  *
2284  * Returns number of newly-referenced objects.
2285  */
2286 static int
2287 mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
2288     uint32_t *bmask)
2289 {
2290 	struct opcode_obj_rewrite *rw;
2291 	ipfw_insn *cmd;
2292 	int bidx, cmdlen, l, count;
2293 	uint16_t kidx;
2294 	uint8_t subtype;
2295 
2296 	l = rule->cmd_len;
2297 	cmd = rule->cmd;
2298 	cmdlen = 0;
2299 	count = 0;
2300 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2301 		cmdlen = F_LEN(cmd);
2302 
2303 		rw = find_op_rw(cmd, &kidx, &subtype);
2304 		if (rw == NULL)
2305 			continue;
2306 
2307 		bidx = kidx / 32;
2308 		/*
2309 		 * Maintain separate bitmasks for table and
2310 		 * non-table objects.
2311 		 */
2312 		if (rw->etlv != IPFW_TLV_TBL_NAME)
2313 			bidx += IPFW_TABLES_MAX / 32;
2314 
2315 		if ((bmask[bidx] & (1 << (kidx % 32))) == 0)
2316 			count++;
2317 
2318 		bmask[bidx] |= 1 << (kidx % 32);
2319 	}
2320 
2321 	return (count);
2322 }
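
/*
 * Illustrative sketch (excluded from the build): how the combined bitmask
 * filled by mark_object_kidx() is laid out.  The first IPFW_TABLES_MAX bits
 * track table kidx values; the following bits track other named objects,
 * hence the "bidx += IPFW_TABLES_MAX / 32" offset above.  The allocation
 * size matches the 2x-sized mask allocated by dump_config() below.
 */
#if 0
	uint32_t *bmask;
	uint16_t kidx = 5;

	/* One half of the mask for tables, one half for other objects */
	bmask = malloc(2 * IPFW_TABLES_MAX / 8, M_TEMP, M_WAITOK | M_ZERO);

	/* Mark table kidx 5 as referenced */
	bmask[kidx / 32] |= 1 << (kidx % 32);

	/* Mark non-table object kidx 5 as referenced */
	bmask[kidx / 32 + IPFW_TABLES_MAX / 32] |= 1 << (kidx % 32);
#endif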
2323 
2324 /*
2325  * Dumps requested objects data
2326  * Data layout (version 0)(current):
2327  * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
2328  *   size = ipfw_cfg_lheader.size
2329  * Reply: [ ipfw_cfg_lheader
2330  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2331  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
2332  *     ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
2333  *   ] (optional)
2334  *   [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
2335  * ]
2336  * NOTE: IPFW_TLV_STATE_LIST has a single valid field: objsize.
2337  * The rest (size, count) are set to zero and need to be ignored.
2338  *
2339  * Returns 0 on success.
2340  */
2341 static int
2342 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2343     struct sockopt_data *sd)
2344 {
2345 	ipfw_cfg_lheader *hdr;
2346 	struct ip_fw *rule;
2347 	size_t sz, rnum;
2348 	uint32_t hdr_flags;
2349 	int error, i;
2350 	struct dump_args da;
2351 	uint32_t *bmask;
2352 
2353 	hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2354 	if (hdr == NULL)
2355 		return (EINVAL);
2356 
2357 	error = 0;
2358 	bmask = NULL;
2359 	/* Allocate needed state. Note the mask is 2x-sized, for tables & srv. */
2360 	if (hdr->flags & IPFW_CFG_GET_STATIC)
2361 		bmask = malloc(IPFW_TABLES_MAX / 4, M_TEMP, M_WAITOK | M_ZERO);
2362 
2363 	IPFW_UH_RLOCK(chain);
2364 
2365 	/*
2366 	 * STAGE 1: Determine size/count for objects in range.
2367 	 * Prepare used tables bitmask.
2368 	 */
2369 	sz = sizeof(ipfw_cfg_lheader);
2370 	memset(&da, 0, sizeof(da));
2371 
2372 	da.b = 0;
2373 	da.e = chain->n_rules;
2374 
2375 	if (hdr->end_rule != 0) {
2376 		/* Handle custom range */
2377 		if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
2378 			rnum = IPFW_DEFAULT_RULE;
2379 		da.b = ipfw_find_rule(chain, rnum, 0);
2380 		rnum = hdr->end_rule;
2381 		rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE;
2382 		da.e = ipfw_find_rule(chain, rnum, 0) + 1;
2383 	}
2384 
2385 	if (hdr->flags & IPFW_CFG_GET_STATIC) {
2386 		for (i = da.b; i < da.e; i++) {
2387 			rule = chain->map[i];
2388 			da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
2389 			da.rcount++;
2390 			/* Update bitmask of used objects for given range */
2391 			da.tcount += mark_object_kidx(chain, rule, bmask);
2392 		}
2393 		/* Add counters if requested */
2394 		if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
2395 			da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
2396 			da.rcounters = 1;
2397 		}
2398 
2399 		if (da.tcount > 0)
2400 			sz += da.tcount * sizeof(ipfw_obj_ntlv) +
2401 			    sizeof(ipfw_obj_ctlv);
2402 		sz += da.rsize + sizeof(ipfw_obj_ctlv);
2403 	}
2404 
2405 	if (hdr->flags & IPFW_CFG_GET_STATES)
2406 		sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) +
2407 		     sizeof(ipfw_obj_ctlv);
2408 
2409 
2410 	/*
2411 	 * Fill in the header anyway.
2412 	 * Note we have to save header fields to stable storage, because
2413 	 * the buffer inside @sd can be flushed after dumping rules.
2414 	 */
2415 	hdr->size = sz;
2416 	hdr->set_mask = ~V_set_disable;
2417 	hdr_flags = hdr->flags;
2418 	hdr = NULL;
2419 
2420 	if (sd->valsize < sz) {
2421 		error = ENOMEM;
2422 		goto cleanup;
2423 	}
2424 
2425 	/* STAGE2: Store actual data */
2426 	if (hdr_flags & IPFW_CFG_GET_STATIC) {
2427 		error = dump_static_rules(chain, &da, bmask, sd);
2428 		if (error != 0)
2429 			goto cleanup;
2430 	}
2431 
2432 	if (hdr_flags & IPFW_CFG_GET_STATES)
2433 		error = ipfw_dump_states(chain, sd);
2434 
2435 cleanup:
2436 	IPFW_UH_RUNLOCK(chain);
2437 
2438 	if (bmask != NULL)
2439 		free(bmask, M_TEMP);
2440 
2441 	return (error);
2442 }
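
/*
 * Illustrative userland sketch (excluded from the build): issuing the
 * configuration dump handled by dump_config() above via the IP_FW3
 * socket option.  The IP_FW_XGET opcode name, the opheader framing of
 * ipfw_cfg_lheader and the raw socket "s" are assumptions to be checked
 * against ip_fw.h; the point is only to show the "probe size, then
 * re-read" use of hdr->size described in the layout comment.
 */
#if 0
	char buf[4096];
	ipfw_cfg_lheader *cfg = (ipfw_cfg_lheader *)buf;
	socklen_t len = sizeof(buf);

	memset(buf, 0, sizeof(buf));
	cfg->opheader.opcode = IP_FW_XGET;	/* assumed constant name */
	cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS;
	/* start_rule == end_rule == 0 requests the whole ruleset */

	if (getsockopt(s, IPPROTO_IP, IP_FW3, buf, &len) != 0)
		err(1, "getsockopt(IP_FW3)");
	/* If the buffer was too small, cfg->size holds the required size. */
#endif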
2443 
2444 int
2445 ipfw_check_object_name_generic(const char *name)
2446 {
2447 	int nsize;
2448 
2449 	nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
2450 	if (strnlen(name, nsize) == nsize)
2451 		return (EINVAL);
2452 	if (name[0] == '\0')
2453 		return (EINVAL);
2454 	return (0);
2455 }
2456 
2457 /*
2458  * Creates non-existent objects referenced by the rule.
2459  *
2460  * Return 0 on success.
2461  */
2462 int
2463 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
2464     struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
2465 {
2466 	struct opcode_obj_rewrite *rw;
2467 	struct obj_idx *p;
2468 	uint16_t kidx;
2469 	int error;
2470 
2471 	/*
2472 	 * Compatibility stuff: do actual creation for non-existing,
2473 	 * but referenced objects.
2474 	 */
2475 	for (p = oib; p < pidx; p++) {
2476 		if (p->kidx != 0)
2477 			continue;
2478 
2479 		ti->uidx = p->uidx;
2480 		ti->type = p->type;
2481 		ti->atype = 0;
2482 
2483 		rw = find_op_rw(cmd + p->off, NULL, NULL);
2484 		KASSERT(rw != NULL, ("Unable to find handler for op %d",
2485 		    (cmd + p->off)->opcode));
2486 
2487 		if (rw->create_object == NULL)
2488 			error = EOPNOTSUPP;
2489 		else
2490 			error = rw->create_object(ch, ti, &kidx);
2491 		if (error == 0) {
2492 			p->kidx = kidx;
2493 			continue;
2494 		}
2495 
2496 		/*
2497 		 * An error happened. We have to roll back everything.
2498 		 * Drop all already acquired references.
2499 		 */
2500 		IPFW_UH_WLOCK(ch);
2501 		unref_oib_objects(ch, cmd, oib, pidx);
2502 		IPFW_UH_WUNLOCK(ch);
2503 
2504 		return (error);
2505 	}
2506 
2507 	return (0);
2508 }
2509 
2510 /*
2511  * Compatibility function for old ipfw(8) binaries.
2512  * Rewrites table/nat kernel indices with userland ones.
2513  * Convert tables matching '/^\d+$/' to their atoi() value.
2514  * Use number 65535 for other tables.
2515  *
2516  * Returns 0 on success.
2517  */
2518 static int
2519 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule)
2520 {
2521 	struct opcode_obj_rewrite *rw;
2522 	struct named_object *no;
2523 	ipfw_insn *cmd;
2524 	char *end;
2525 	long val;
2526 	int cmdlen, error, l;
2527 	uint16_t kidx, uidx;
2528 	uint8_t subtype;
2529 
2530 	error = 0;
2531 
2532 	l = rule->cmd_len;
2533 	cmd = rule->cmd;
2534 	cmdlen = 0;
2535 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2536 		cmdlen = F_LEN(cmd);
2537 
2538 		/* Check if there is an index in the given opcode */
2539 		rw = find_op_rw(cmd, &kidx, &subtype);
2540 		if (rw == NULL)
2541 			continue;
2542 
2543 		/* Try to find referenced kernel object */
2544 		no = rw->find_bykidx(ch, kidx);
2545 		if (no == NULL)
2546 			continue;
2547 
2548 		val = strtol(no->name, &end, 10);
2549 		if (*end == '\0' && val < 65535) {
2550 			uidx = val;
2551 		} else {
2552 
2553 			/*
2554 			 * We are called via legacy opcode.
2555 			 * Save the error and show the table as a fake number
2556 			 * so as not to make ipfw(8) hang.
2557 			 */
2558 			uidx = 65535;
2559 			error = 2;
2560 		}
2561 
2562 		rw->update(cmd, uidx);
2563 	}
2564 
2565 	return (error);
2566 }
2567 
2568 
2569 /*
2570  * Unreferences all already-referenced objects in given @cmd rule,
2571  * using information in @oib.
2572  *
2573  * Used to rollback partially converted rule on error.
2574  */
2575 static void
2576 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
2577     struct obj_idx *end)
2578 {
2579 	struct opcode_obj_rewrite *rw;
2580 	struct named_object *no;
2581 	struct obj_idx *p;
2582 
2583 	IPFW_UH_WLOCK_ASSERT(ch);
2584 
2585 	for (p = oib; p < end; p++) {
2586 		if (p->kidx == 0)
2587 			continue;
2588 
2589 		rw = find_op_rw(cmd + p->off, NULL, NULL);
2590 		KASSERT(rw != NULL, ("Unable to find handler for op %d",
2591 		    (cmd + p->off)->opcode));
2592 
2593 		/* Find & unref by existing idx */
2594 		no = rw->find_bykidx(ch, p->kidx);
2595 		KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
2596 		no->refcnt--;
2597 	}
2598 }
2599 
2600 /*
2601  * Remove references from every object used in @rule.
2602  * Used by the rule removal code.
2603  */
2604 static void
2605 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
2606 {
2607 	struct opcode_obj_rewrite *rw;
2608 	struct named_object *no;
2609 	ipfw_insn *cmd;
2610 	int cmdlen, l;
2611 	uint16_t kidx;
2612 	uint8_t subtype;
2613 
2614 	IPFW_UH_WLOCK_ASSERT(ch);
2615 
2616 	l = rule->cmd_len;
2617 	cmd = rule->cmd;
2618 	cmdlen = 0;
2619 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2620 		cmdlen = F_LEN(cmd);
2621 
2622 		rw = find_op_rw(cmd, &kidx, &subtype);
2623 		if (rw == NULL)
2624 			continue;
2625 		no = rw->find_bykidx(ch, kidx);
2626 
2627 		KASSERT(no != NULL, ("object id %d not found", kidx));
2628 		KASSERT(no->subtype == subtype,
2629 		    ("wrong type %d (%d) for object id %d",
2630 		    no->subtype, subtype, kidx));
2631 		KASSERT(no->refcnt > 0, ("refcount for object %d is %d",
2632 		    kidx, no->refcnt));
2633 
2634 		if (no->refcnt == 1 && rw->destroy_object != NULL)
2635 			rw->destroy_object(ch, no);
2636 		else
2637 			no->refcnt--;
2638 	}
2639 }
2640 
2641 
2642 /*
2643  * Find and reference object (if any) stored in instruction @cmd.
2644  *
2645  * Saves object info in @pidx and sets
2646  *  - @unresolved to 1 if the object should exist but was not found
2647  *
2648  * Returns non-zero value in case of error.
2649  */
2650 static int
2651 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
2652     struct obj_idx *pidx, int *unresolved)
2653 {
2654 	struct named_object *no;
2655 	struct opcode_obj_rewrite *rw;
2656 	int error;
2657 
2658 	/* Check if this opcode is candidate for rewrite */
2659 	rw = find_op_rw(cmd, &ti->uidx, &ti->type);
2660 	if (rw == NULL)
2661 		return (0);
2662 
2663 	/* Need to rewrite. Save necessary fields */
2664 	pidx->uidx = ti->uidx;
2665 	pidx->type = ti->type;
2666 
2667 	/* Try to find referenced kernel object */
2668 	error = rw->find_byname(ch, ti, &no);
2669 	if (error != 0)
2670 		return (error);
2671 	if (no == NULL) {
2672 		/*
2673 		 * Report the unresolved object for automatic
2674 		 * creation.
2675 		 */
2676 		*unresolved = 1;
2677 		return (0);
2678 	}
2679 
2680 	/*
2681 	 * The object already exists.
2682 	 * Its subtype should match the expected value.
2683 	 */
2684 	if (ti->type != no->subtype)
2685 		return (EINVAL);
2686 
2687 	/* Bump refcount and update kidx. */
2688 	no->refcnt++;
2689 	rw->update(cmd, no->kidx);
2690 	return (0);
2691 }
2692 
2693 /*
2694  * Finds and bumps refcount for objects referenced by given @rule.
2695  * Auto-creates non-existing tables.
2696  * Fills in @oib array with userland/kernel indexes.
2697  *
2698  * Returns 0 on success.
2699  */
2700 static int
2701 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
2702     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
2703 {
2704 	struct obj_idx *pidx;
2705 	ipfw_insn *cmd;
2706 	int cmdlen, error, l, unresolved;
2707 
2708 	pidx = oib;
2709 	l = rule->cmd_len;
2710 	cmd = rule->cmd;
2711 	cmdlen = 0;
2712 	error = 0;
2713 
2714 	IPFW_UH_WLOCK(ch);
2715 
2716 	/* Increase refcount on each existing referenced table. */
2717 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2718 		cmdlen = F_LEN(cmd);
2719 		unresolved = 0;
2720 
2721 		error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved);
2722 		if (error != 0)
2723 			break;
2724 		/*
2725 		 * Compatibility stuff for old clients:
2726 		 * prepare to automatically create non-existing objects.
2727 		 */
2728 		if (unresolved != 0) {
2729 			pidx->off = rule->cmd_len - l;
2730 			pidx++;
2731 		}
2732 	}
2733 
2734 	if (error != 0) {
2735 		/* Unref everything we have already done */
2736 		unref_oib_objects(ch, rule->cmd, oib, pidx);
2737 		IPFW_UH_WUNLOCK(ch);
2738 		return (error);
2739 	}
2740 	IPFW_UH_WUNLOCK(ch);
2741 
2742 	/* Perform auto-creation for non-existing objects */
2743 	if (pidx != oib)
2744 		error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);
2745 
2746 	/* Calculate real number of dynamic objects */
2747 	ci->object_opcodes = (uint16_t)(pidx - oib);
2748 
2749 	return (error);
2750 }
2751 
2752 /*
2753  * Checks if an opcode references a table of the appropriate type.
2754  * Increments the reference count of the found table if so.
2755  * Rewrites user-supplied opcode values with kernel ones.
2756  *
2757  * Returns 0 on success and appropriate error code otherwise.
2758  */
2759 static int
2760 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci)
2761 {
2762 	int error;
2763 	ipfw_insn *cmd;
2764 	uint8_t type;
2765 	struct obj_idx *p, *pidx_first, *pidx_last;
2766 	struct tid_info ti;
2767 
2768 	/*
2769 	 * Prepare an array for storing opcode indices.
2770 	 * Use stack allocation by default.
2771 	 */
2772 	if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
2773 		/* Stack */
2774 		pidx_first = ci->obuf;
2775 	} else
2776 		pidx_first = malloc(
2777 		    ci->object_opcodes * sizeof(struct obj_idx),
2778 		    M_IPFW, M_WAITOK | M_ZERO);
2779 
2780 	error = 0;
2781 	type = 0;
2782 	memset(&ti, 0, sizeof(ti));
2783 
2784 	/* Use the set the rule is assigned to. */
2785 	ti.set = ci->krule->set;
2786 	if (ci->ctlv != NULL) {
2787 		ti.tlvs = (void *)(ci->ctlv + 1);
2788 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
2789 	}
2790 
2791 	/* Reference all used tables and other objects */
2792 	error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
2793 	if (error != 0)
2794 		goto free;
2795 	/*
2796 	 * Note that ref_rule_objects() might have updated ci->object_opcodes
2797 	 * to reflect actual number of object opcodes.
2798 	 */
2799 
2800 	/* Perform rewrite of remaining opcodes */
2801 	p = pidx_first;
2802 	pidx_last = pidx_first + ci->object_opcodes;
2803 	for (p = pidx_first; p < pidx_last; p++) {
2804 		cmd = ci->krule->cmd + p->off;
2805 		update_opcode_kidx(cmd, p->kidx);
2806 	}
2807 
2808 free:
2809 	if (pidx_first != ci->obuf)
2810 		free(pidx_first, M_IPFW);
2811 
2812 	return (error);
2813 }
2814 
2815 /*
2816  * Adds one or more rules to ipfw @chain.
2817  * Data layout (version 0)(current):
2818  * Request:
2819  * [
2820  *   ip_fw3_opheader
2821  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2822  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2823  * ]
2824  * Reply:
2825  * [
2826  *   ip_fw3_opheader
2827  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2828  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2829  * ]
2830  *
2831  * Rules in reply are modified to store their actual ruleset number.
2832  *
2833  * (*1) TLVs inside IPFW_TLV_TBL_LIST need to be sorted ascending
2834  * according to their idx field and there must be no duplicates.
2835  * (*2) Numbered rules inside IPFW_TLV_RULE_LIST need to be sorted ascending.
2836  * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
2837  *
2838  * Returns 0 on success.
2839  */
2840 static int
2841 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2842     struct sockopt_data *sd)
2843 {
2844 	ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
2845 	ipfw_obj_ntlv *ntlv;
2846 	int clen, error, idx;
2847 	uint32_t count, read;
2848 	struct ip_fw_rule *r;
2849 	struct rule_check_info rci, *ci, *cbuf;
2850 	int i, rsize;
2851 
2852 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
2853 	ctlv = (ipfw_obj_ctlv *)(op3 + 1);
2854 
2855 	read = sizeof(ip_fw3_opheader);
2856 	rtlv = NULL;
2857 	tstate = NULL;
2858 	cbuf = NULL;
2859 	memset(&rci, 0, sizeof(struct rule_check_info));
2860 
2861 	if (read + sizeof(*ctlv) > sd->valsize)
2862 		return (EINVAL);
2863 
2864 	if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
2865 		clen = ctlv->head.length;
2866 		/* Check size and alignment */
2867 		if (clen > sd->valsize || clen < sizeof(*ctlv))
2868 			return (EINVAL);
2869 		if ((clen % sizeof(uint64_t)) != 0)
2870 			return (EINVAL);
2871 
2872 		/*
2873 		 * Some table names or other named objects.
2874 		 * Check them for validity.
2875 		 */
2876 		count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
2877 		if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
2878 			return (EINVAL);
2879 
2880 		/*
2881 		 * Check each TLV.
2882 		 * Ensure TLVs are sorted ascending and
2883 		 * there are no duplicates.
2884 		 */
2885 		idx = -1;
2886 		ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
2887 		while (count > 0) {
2888 			if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
2889 				return (EINVAL);
2890 
2891 			error = ipfw_check_object_name_generic(ntlv->name);
2892 			if (error != 0)
2893 				return (error);
2894 
2895 			if (ntlv->idx <= idx)
2896 				return (EINVAL);
2897 
2898 			idx = ntlv->idx;
2899 			count--;
2900 			ntlv++;
2901 		}
2902 
2903 		tstate = ctlv;
2904 		read += ctlv->head.length;
2905 		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2906 	}
2907 
2908 	if (read + sizeof(*ctlv) > sd->valsize)
2909 		return (EINVAL);
2910 
2911 	if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2912 		clen = ctlv->head.length;
2913 		if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2914 			return (EINVAL);
2915 		if ((clen % sizeof(uint64_t)) != 0)
2916 			return (EINVAL);
2917 
2918 		/*
2919 		 * TODO: Permit adding multiple rules at once
2920 		 */
2921 		if (ctlv->count != 1)
2922 			return (ENOTSUP);
2923 
2924 		clen -= sizeof(*ctlv);
2925 
2926 		if (ctlv->count > clen / sizeof(struct ip_fw_rule))
2927 			return (EINVAL);
2928 
2929 		/* Allocate state for each rule or use stack */
2930 		if (ctlv->count == 1) {
2931 			memset(&rci, 0, sizeof(struct rule_check_info));
2932 			cbuf = &rci;
2933 		} else
2934 			cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
2935 			    M_WAITOK | M_ZERO);
2936 		ci = cbuf;
2937 
2938 		/*
2939 		 * Check each rule for validity.
2940 		 * Ensure numbered rules are sorted ascending
2941 		 * and properly aligned.
2942 		 */
2943 		idx = 0;
2944 		r = (struct ip_fw_rule *)(ctlv + 1);
2945 		count = 0;
2946 		error = 0;
2947 		while (clen > 0) {
2948 			rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
2949 			if (rsize > clen || ctlv->count <= count) {
2950 				error = EINVAL;
2951 				break;
2952 			}
2953 
2954 			ci->ctlv = tstate;
2955 			error = check_ipfw_rule1(r, rsize, ci);
2956 			if (error != 0)
2957 				break;
2958 
2959 			/* Check sorting */
2960 			if (r->rulenum != 0 && r->rulenum < idx) {
2961 				printf("rulenum %d idx %d\n", r->rulenum, idx);
2962 				error = EINVAL;
2963 				break;
2964 			}
2965 			idx = r->rulenum;
2966 
2967 			ci->urule = (caddr_t)r;
2968 
2969 			rsize = roundup2(rsize, sizeof(uint64_t));
2970 			clen -= rsize;
2971 			r = (struct ip_fw_rule *)((caddr_t)r + rsize);
2972 			count++;
2973 			ci++;
2974 		}
2975 
2976 		if (ctlv->count != count || error != 0) {
2977 			if (cbuf != &rci)
2978 				free(cbuf, M_TEMP);
2979 			return (EINVAL);
2980 		}
2981 
2982 		rtlv = ctlv;
2983 		read += ctlv->head.length;
2984 		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2985 	}
2986 
2987 	if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
2988 		if (cbuf != NULL && cbuf != &rci)
2989 			free(cbuf, M_TEMP);
2990 		return (EINVAL);
2991 	}
2992 
2993 	/*
2994 	 * The passed rules seem to be valid.
2995 	 * Allocate storage and try to add them to chain.
2996 	 */
2997 	for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
2998 		clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
2999 		ci->krule = ipfw_alloc_rule(chain, clen);
3000 		import_rule1(ci);
3001 	}
3002 
3003 	if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
3004 		/* Free allocated krules */
3005 		for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
3006 			free_rule(ci->krule);
3007 	}
3008 
3009 	if (cbuf != NULL && cbuf != &rci)
3010 		free(cbuf, M_TEMP);
3011 
3012 	return (error);
3013 }
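
/*
 * Illustrative userland sketch (excluded from the build): building a
 * minimal request for add_rules() following the layout comment above.
 * A single rule and no table-name TLVs; "s" and "my_rule" are assumed to
 * exist and IP_FW_XADD is the assumed opcode name, to be checked against
 * ip_fw.h.  The essential points are the IPFW_TLV_RULE_LIST framing and
 * the 64-bit alignment of each ip_fw_rule.
 */
#if 0
	size_t rlen, blen;
	char *buf;
	ip_fw3_opheader *op3;
	ipfw_obj_ctlv *ctlv;

	rlen = roundup2(RULESIZE(my_rule), sizeof(uint64_t));
	blen = sizeof(*op3) + sizeof(*ctlv) + rlen;
	buf = calloc(1, blen);

	op3 = (ip_fw3_opheader *)buf;
	op3->opcode = IP_FW_XADD;		/* assumed constant name */

	ctlv = (ipfw_obj_ctlv *)(op3 + 1);
	ctlv->head.type = IPFW_TLV_RULE_LIST;
	ctlv->head.length = sizeof(*ctlv) + rlen;
	ctlv->count = 1;			/* add_rules() accepts only 1 */

	memcpy(ctlv + 1, my_rule, RULESIZE(my_rule));
	setsockopt(s, IPPROTO_IP, IP_FW3, buf, blen);
#endif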
3014 
3015 /*
3016  * Lists all sopts currently registered.
3017  * Data layout (v0)(current):
3018  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
3019  * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
3020  *
3021  * Returns 0 on success
3022  */
3023 static int
3024 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3025     struct sockopt_data *sd)
3026 {
3027 	struct _ipfw_obj_lheader *olh;
3028 	ipfw_sopt_info *i;
3029 	struct ipfw_sopt_handler *sh;
3030 	uint32_t count, n, size;
3031 
3032 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
3033 	if (olh == NULL)
3034 		return (EINVAL);
3035 	if (sd->valsize < olh->size)
3036 		return (EINVAL);
3037 
3038 	CTL3_LOCK();
3039 	count = ctl3_hsize;
3040 	size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
3041 
3042 	/* Fill in header regardless of buffer size */
3043 	olh->count = count;
3044 	olh->objsize = sizeof(ipfw_sopt_info);
3045 
3046 	if (size > olh->size) {
3047 		olh->size = size;
3048 		CTL3_UNLOCK();
3049 		return (ENOMEM);
3050 	}
3051 	olh->size = size;
3052 
3053 	for (n = 1; n <= count; n++) {
3054 		i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
3055 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
3056 		sh = &ctl3_handlers[n];
3057 		i->opcode = sh->opcode;
3058 		i->version = sh->version;
3059 		i->refcnt = sh->refcnt;
3060 	}
3061 	CTL3_UNLOCK();
3062 
3063 	return (0);
3064 }
3065 
3066 /*
3067  * Compares two opcodes.
3068  * Used both in qsort() and bsearch().
3069  *
3070  * Returns 0 if match is found.
3071  */
3072 static int
3073 compare_opcodes(const void *_a, const void *_b)
3074 {
3075 	const struct opcode_obj_rewrite *a, *b;
3076 
3077 	a = (const struct opcode_obj_rewrite *)_a;
3078 	b = (const struct opcode_obj_rewrite *)_b;
3079 
3080 	if (a->opcode < b->opcode)
3081 		return (-1);
3082 	else if (a->opcode > b->opcode)
3083 		return (1);
3084 
3085 	return (0);
3086 }
3087 
3088 /*
3089  * XXX: Rewrite bsearch()
3090  */
3091 static int
3092 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo,
3093     struct opcode_obj_rewrite **phi)
3094 {
3095 	struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw;
3096 
3097 	memset(&h, 0, sizeof(h));
3098 	h.opcode = op;
3099 
3100 	rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
3101 	    ctl3_rsize, sizeof(h), compare_opcodes);
3102 	if (rw == NULL)
3103 		return (1);
3104 
3105 	/* Find the first element matching the same opcode */
3106 	lo = rw;
3107 	for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--)
3108 		;
3109 
3110 	/* Find the last element matching the same opcode */
3111 	hi = rw;
3112 	ctl3_max = ctl3_rewriters + ctl3_rsize;
3113 	for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++)
3114 		;
3115 
3116 	*plo = lo;
3117 	*phi = hi;
3118 
3119 	return (0);
3120 }
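
/*
 * Illustrative sketch (excluded from the build): the "expand around the
 * bsearch() hit" technique used by find_op_rw_range() above, shown on a
 * plain int array.  bsearch() may land on any element of an equal run,
 * so the range is widened in both directions until the neighbours stop
 * matching.  cmp_int is an assumed trivial integer comparator.
 */
#if 0
	int keys[] = { 1, 2, 2, 2, 5 };		/* sorted, with duplicates */
	int op = 2, *hit, *lo, *hi, *max;

	hit = bsearch(&op, keys, nitems(keys), sizeof(keys[0]), cmp_int);
	lo = hi = hit;
	for ( ; lo > keys && *(lo - 1) == op; lo--)
		;
	max = keys + nitems(keys);
	for ( ; (hi + 1) < max && *(hi + 1) == op; hi++)
		;
	/* [lo, hi] now covers every entry equal to 2 */
#endif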
3121 
3122 /*
3123  * Finds opcode object rewriter based on @code.
3124  *
3125  * Returns pointer to handler or NULL.
3126  */
3127 static struct opcode_obj_rewrite *
3128 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
3129 {
3130 	struct opcode_obj_rewrite *rw, *lo, *hi;
3131 	uint16_t uidx;
3132 	uint8_t subtype;
3133 
3134 	if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0)
3135 		return (NULL);
3136 
3137 	for (rw = lo; rw <= hi; rw++) {
3138 		if (rw->classifier(cmd, &uidx, &subtype) == 0) {
3139 			if (puidx != NULL)
3140 				*puidx = uidx;
3141 			if (ptype != NULL)
3142 				*ptype = subtype;
3143 			return (rw);
3144 		}
3145 	}
3146 
3147 	return (NULL);
3148 }
3149 int
3150 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx)
3151 {
3152 
3153 	if (find_op_rw(cmd, puidx, NULL) == NULL)
3154 		return (1);
3155 	return (0);
3156 }
3157 
3158 void
3159 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx)
3160 {
3161 	struct opcode_obj_rewrite *rw;
3162 
3163 	rw = find_op_rw(cmd, NULL, NULL);
3164 	KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
3165 	rw->update(cmd, idx);
3166 }
3167 
3168 void
3169 ipfw_init_obj_rewriter()
3170 {
3171 
3172 	ctl3_rewriters = NULL;
3173 	ctl3_rsize = 0;
3174 }
3175 
3176 void
3177 ipfw_destroy_obj_rewriter()
3178 {
3179 
3180 	if (ctl3_rewriters != NULL)
3181 		free(ctl3_rewriters, M_IPFW);
3182 	ctl3_rewriters = NULL;
3183 	ctl3_rsize = 0;
3184 }
3185 
3186 /*
3187  * Adds one or more opcode object rewrite handlers to the global array.
3188  * Function may sleep.
3189  */
3190 void
3191 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3192 {
3193 	size_t sz;
3194 	struct opcode_obj_rewrite *tmp;
3195 
3196 	CTL3_LOCK();
3197 
3198 	for (;;) {
3199 		sz = ctl3_rsize + count;
3200 		CTL3_UNLOCK();
3201 		tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
3202 		CTL3_LOCK();
3203 		if (ctl3_rsize + count <= sz)
3204 			break;
3205 
3206 		/* Retry */
3207 		free(tmp, M_IPFW);
3208 	}
3209 
3210 	/* Merge old & new arrays */
3211 	sz = ctl3_rsize + count;
3212 	memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
3213 	memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
3214 	qsort(tmp, sz, sizeof(*rw), compare_opcodes);
3215 	/* Switch new and free old */
3216 	if (ctl3_rewriters != NULL)
3217 		free(ctl3_rewriters, M_IPFW);
3218 	ctl3_rewriters = tmp;
3219 	ctl3_rsize = sz;
3220 
3221 	CTL3_UNLOCK();
3222 }
3223 
3224 /*
3225  * Removes one or more object rewrite handlers from the global array.
3226  */
3227 int
3228 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3229 {
3230 	size_t sz;
3231 	struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi;
3232 	int i;
3233 
3234 	CTL3_LOCK();
3235 
3236 	for (i = 0; i < count; i++) {
3237 		if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0)
3238 			continue;
3239 
3240 		for (ktmp = lo; ktmp <= hi; ktmp++) {
3241 			if (ktmp->classifier != rw[i].classifier)
3242 				continue;
3243 
3244 			ctl3_max = ctl3_rewriters + ctl3_rsize;
3245 			sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp);
3246 			memmove(ktmp, ktmp + 1, sz);
3247 			ctl3_rsize--;
3248 			break;
3249 		}
3250 
3251 	}
3252 
3253 	if (ctl3_rsize == 0) {
3254 		if (ctl3_rewriters != NULL)
3255 			free(ctl3_rewriters, M_IPFW);
3256 		ctl3_rewriters = NULL;
3257 	}
3258 
3259 	CTL3_UNLOCK();
3260 
3261 	return (0);
3262 }
3263 
3264 static int
3265 export_objhash_ntlv_internal(struct namedobj_instance *ni,
3266     struct named_object *no, void *arg)
3267 {
3268 	struct sockopt_data *sd;
3269 	ipfw_obj_ntlv *ntlv;
3270 
3271 	sd = (struct sockopt_data *)arg;
3272 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
3273 	if (ntlv == NULL)
3274 		return (ENOMEM);
3275 	ipfw_export_obj_ntlv(no, ntlv);
3276 	return (0);
3277 }
3278 
3279 /*
3280  * Lists all service objects.
3281  * Data layout (v0)(current):
3282  * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
3283  * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
3284  * Returns 0 on success
3285  */
3286 static int
3287 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3288     struct sockopt_data *sd)
3289 {
3290 	ipfw_obj_lheader *hdr;
3291 	int count;
3292 
3293 	hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
3294 	if (hdr == NULL)
3295 		return (EINVAL);
3296 
3297 	IPFW_UH_RLOCK(chain);
3298 	count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
3299 	hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
3300 	if (sd->valsize < hdr->size) {
3301 		IPFW_UH_RUNLOCK(chain);
3302 		return (ENOMEM);
3303 	}
3304 	hdr->count = count;
3305 	hdr->objsize = sizeof(ipfw_obj_ntlv);
3306 	if (count > 0)
3307 		ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
3308 		    export_objhash_ntlv_internal, sd);
3309 	IPFW_UH_RUNLOCK(chain);
3310 	return (0);
3311 }
3312 
3313 /*
3314  * Compares two sopt handlers (code, version and handler ptr).
3315  * Used both by qsort() and bsearch().
3316  * The handler is not compared in the latter case.
3317  *
3318  * Returns 0 if match is found.
3319  */
3320 static int
3321 compare_sh(const void *_a, const void *_b)
3322 {
3323 	const struct ipfw_sopt_handler *a, *b;
3324 
3325 	a = (const struct ipfw_sopt_handler *)_a;
3326 	b = (const struct ipfw_sopt_handler *)_b;
3327 
3328 	if (a->opcode < b->opcode)
3329 		return (-1);
3330 	else if (a->opcode > b->opcode)
3331 		return (1);
3332 
3333 	if (a->version < b->version)
3334 		return (-1);
3335 	else if (a->version > b->version)
3336 		return (1);
3337 
3338 	/* bsearch helper */
3339 	if (a->handler == NULL)
3340 		return (0);
3341 
3342 	if ((uintptr_t)a->handler < (uintptr_t)b->handler)
3343 		return (-1);
3344 	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
3345 		return (1);
3346 
3347 	return (0);
3348 }
3349 
3350 /*
3351  * Finds sopt handler based on @code and @version.
3352  *
3353  * Returns pointer to handler or NULL.
3354  */
3355 static struct ipfw_sopt_handler *
3356 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
3357 {
3358 	struct ipfw_sopt_handler *sh, h;
3359 
3360 	memset(&h, 0, sizeof(h));
3361 	h.opcode = code;
3362 	h.version = version;
3363 	h.handler = handler;
3364 
3365 	sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
3366 	    ctl3_hsize, sizeof(h), compare_sh);
3367 
3368 	return (sh);
3369 }
3370 
3371 static int
3372 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
3373 {
3374 	struct ipfw_sopt_handler *sh;
3375 
3376 	CTL3_LOCK();
3377 	if ((sh = find_sh(opcode, version, NULL)) == NULL) {
3378 		CTL3_UNLOCK();
3379 		printf("ipfw: ipfw_ctl3 invalid option %dv%d\n",
3380 		    opcode, version);
3381 		return (EINVAL);
3382 	}
3383 	sh->refcnt++;
3384 	ctl3_refct++;
3385 	/* Copy handler data to requested buffer */
3386 	*psh = *sh;
3387 	CTL3_UNLOCK();
3388 
3389 	return (0);
3390 }
3391 
3392 static void
3393 find_unref_sh(struct ipfw_sopt_handler *psh)
3394 {
3395 	struct ipfw_sopt_handler *sh;
3396 
3397 	CTL3_LOCK();
3398 	sh = find_sh(psh->opcode, psh->version, NULL);
3399 	KASSERT(sh != NULL, ("ctl3 handler disappeared"));
3400 	sh->refcnt--;
3401 	ctl3_refct--;
3402 	CTL3_UNLOCK();
3403 }
3404 
3405 void
3406 ipfw_init_sopt_handler()
3407 {
3408 
3409 	CTL3_LOCK_INIT();
3410 	IPFW_ADD_SOPT_HANDLER(1, scodes);
3411 }
3412 
3413 void
3414 ipfw_destroy_sopt_handler()
3415 {
3416 
3417 	IPFW_DEL_SOPT_HANDLER(1, scodes);
3418 	CTL3_LOCK_DESTROY();
3419 }
3420 
3421 /*
3422  * Adds one or more sockopt handlers to the global array.
3423  * Function may sleep.
3424  */
3425 void
3426 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3427 {
3428 	size_t sz;
3429 	struct ipfw_sopt_handler *tmp;
3430 
3431 	CTL3_LOCK();
3432 
3433 	for (;;) {
3434 		sz = ctl3_hsize + count;
3435 		CTL3_UNLOCK();
3436 		tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
3437 		CTL3_LOCK();
3438 		if (ctl3_hsize + count <= sz)
3439 			break;
3440 
3441 		/* Retry */
3442 		free(tmp, M_IPFW);
3443 	}
3444 
3445 	/* Merge old & new arrays */
3446 	sz = ctl3_hsize + count;
3447 	memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
3448 	memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
3449 	qsort(tmp, sz, sizeof(*sh), compare_sh);
3450 	/* Switch new and free old */
3451 	if (ctl3_handlers != NULL)
3452 		free(ctl3_handlers, M_IPFW);
3453 	ctl3_handlers = tmp;
3454 	ctl3_hsize = sz;
3455 	ctl3_gencnt++;
3456 
3457 	CTL3_UNLOCK();
3458 }
3459 
3460 /*
3461  * Removes one or more sockopt handlers from the global array.
3462  */
3463 int
3464 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3465 {
3466 	size_t sz;
3467 	struct ipfw_sopt_handler *tmp, *h;
3468 	int i;
3469 
3470 	CTL3_LOCK();
3471 
3472 	for (i = 0; i < count; i++) {
3473 		tmp = &sh[i];
3474 		h = find_sh(tmp->opcode, tmp->version, tmp->handler);
3475 		if (h == NULL)
3476 			continue;
3477 
3478 		sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
3479 		memmove(h, h + 1, sz);
3480 		ctl3_hsize--;
3481 	}
3482 
3483 	if (ctl3_hsize == 0) {
3484 		if (ctl3_handlers != NULL)
3485 			free(ctl3_handlers, M_IPFW);
3486 		ctl3_handlers = NULL;
3487 	}
3488 
3489 	ctl3_gencnt++;
3490 
3491 	CTL3_UNLOCK();
3492 
3493 	return (0);
3494 }
3495 
3496 /*
3497  * Writes data accumulated in @sd to sockopt buffer.
3498  * Zeroes internal @sd buffer.
3499  */
3500 static int
3501 ipfw_flush_sopt_data(struct sockopt_data *sd)
3502 {
3503 	struct sockopt *sopt;
3504 	int error;
3505 	size_t sz;
3506 
3507 	sz = sd->koff;
3508 	if (sz == 0)
3509 		return (0);
3510 
3511 	sopt = sd->sopt;
3512 
3513 	if (sopt->sopt_dir == SOPT_GET) {
3514 		error = copyout(sd->kbuf, sopt->sopt_val, sz);
3515 		if (error != 0)
3516 			return (error);
3517 	}
3518 
3519 	memset(sd->kbuf, 0, sd->ksize);
3520 	sd->ktotal += sz;
3521 	sd->koff = 0;
3522 	if (sd->ktotal + sd->ksize < sd->valsize)
3523 		sd->kavail = sd->ksize;
3524 	else
3525 		sd->kavail = sd->valsize - sd->ktotal;
3526 
3527 	/* Update sopt buffer data */
3528 	sopt->sopt_valsize = sd->ktotal;
3529 	sopt->sopt_val = sd->sopt_val + sd->ktotal;
3530 
3531 	return (0);
3532 }
3533 
3534 /*
3535  * Ensures that the @sd buffer has a contiguous block of
3536  * @needed bytes.
3537  *
3538  * Returns pointer to requested space or NULL.
3539  */
3540 caddr_t
3541 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
3542 {
3543 	int error;
3544 	caddr_t addr;
3545 
3546 	if (sd->kavail < needed) {
3547 		/*
3548 		 * Flush data and try again.
3549 		 */
3550 		error = ipfw_flush_sopt_data(sd);
3551 
3552 		if (sd->kavail < needed || error != 0)
3553 			return (NULL);
3554 	}
3555 
3556 	addr = sd->kbuf + sd->koff;
3557 	sd->koff += needed;
3558 	sd->kavail -= needed;
3559 	return (addr);
3560 }
3561 
3562 /*
3563  * Requests @needed contiguous bytes from @sd buffer.
3564  * The function is used to notify the subsystem that we are
3565  * interested in the first @needed bytes (the request header)
3566  * and the rest of the buffer can be safely zeroed.
3567  *
3568  * Returns pointer to requested space or NULL.
3569  */
3570 caddr_t
3571 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3572 {
3573 	caddr_t addr;
3574 
3575 	if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3576 		return (NULL);
3577 
3578 	if (sd->kavail > 0)
3579 		memset(sd->kbuf + sd->koff, 0, sd->kavail);
3580 
3581 	return (addr);
3582 }
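
/*
 * Illustrative sketch (excluded from the build): the typical export
 * pattern built on top of ipfw_get_sopt_header()/ipfw_get_sopt_space(),
 * as used by the dump handlers in this file.  Because the buffer is a
 * sliding window, a returned pointer is only valid until the next
 * allocation, which may trigger a flush of already written data.
 */
#if 0
static int
dump_example(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *olh;
	ipfw_obj_ntlv *ntlv;

	/* Reserve the reply header first; the rest of kbuf is zeroed. */
	olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
	if (olh == NULL)
		return (EINVAL);

	/* Append fixed-size records one by one. */
	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
	if (ntlv == NULL)
		return (ENOMEM);
	/* ... fill *ntlv ... */

	return (0);
}
#endif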
3583 
3584 /*
3585  * New sockopt handler.
3586  */
3587 int
3588 ipfw_ctl3(struct sockopt *sopt)
3589 {
3590 	int error, locked;
3591 	size_t size, valsize;
3592 	struct ip_fw_chain *chain;
3593 	char xbuf[256];
3594 	struct sockopt_data sdata;
3595 	struct ipfw_sopt_handler h;
3596 	ip_fw3_opheader *op3 = NULL;
3597 
3598 	error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3599 	if (error != 0)
3600 		return (error);
3601 
3602 	if (sopt->sopt_name != IP_FW3)
3603 		return (ipfw_ctl(sopt));
3604 
3605 	chain = &V_layer3_chain;
3606 	error = 0;
3607 
3608 	/* Save original valsize before it is altered via sooptcopyin() */
3609 	valsize = sopt->sopt_valsize;
3610 	memset(&sdata, 0, sizeof(sdata));
3611 	/* Read op3 header first to determine actual operation */
3612 	op3 = (ip_fw3_opheader *)xbuf;
3613 	error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3614 	if (error != 0)
3615 		return (error);
3616 	sopt->sopt_valsize = valsize;
3617 
3618 	/*
3619 	 * Find and reference command.
3620 	 */
3621 	error = find_ref_sh(op3->opcode, op3->version, &h);
3622 	if (error != 0)
3623 		return (error);
3624 
3625 	/*
3626 	 * Disallow modifications in really-really secure mode, but still allow
3627 	 * the logging counters to be reset.
3628 	 */
3629 	if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3630 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3631 		if (error != 0) {
3632 			find_unref_sh(&h);
3633 			return (error);
3634 		}
3635 	}
3636 
3637 	/*
3638 	 * Fill in sockopt_data structure that may be useful for
3639 	 * IP_FW3 get requests.
3640 	 */
3641 	locked = 0;
3642 	if (valsize <= sizeof(xbuf)) {
3643 		/* use on-stack buffer */
3644 		sdata.kbuf = xbuf;
3645 		sdata.ksize = sizeof(xbuf);
3646 		sdata.kavail = valsize;
3647 	} else {
3648 
3649 		/*
3650 		 * Determine opcode type/buffer size:
3651 		 * allocate sliding-window buf for data export or
3652 		 * contiguous buffer for special ops.
3653 		 */
3654 		if ((h.dir & HDIR_SET) != 0) {
3655 			/* Set request. Allocate contiguous buffer. */
3656 			if (valsize > CTL3_LARGEBUF) {
3657 				find_unref_sh(&h);
3658 				return (EFBIG);
3659 			}
3660 
3661 			size = valsize;
3662 		} else {
3663 			/* Get request. Allocate sliding window buffer */
3664 			size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF;
3665 
3666 			if (size < valsize) {
3667 				/* We have to wire user buffer */
3668 				error = vslock(sopt->sopt_val, valsize);
3669 				if (error != 0)
3670 					return (error);
3671 				locked = 1;
3672 			}
3673 		}
3674 
3675 		sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3676 		sdata.ksize = size;
3677 		sdata.kavail = size;
3678 	}
3679 
3680 	sdata.sopt = sopt;
3681 	sdata.sopt_val = sopt->sopt_val;
3682 	sdata.valsize = valsize;
3683 
3684 	/*
3685 	 * Copy either the whole request (if valsize < bsize_max)
3686 	 * or the first bsize_max bytes, to guarantee to most consumers
3687 	 * that all necessary data has been copied.
3688 	 * In any case, copy not less than sizeof(ip_fw3_opheader).
3689 	 */
3690 	if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3691 	    sizeof(ip_fw3_opheader))) != 0)
3692 		return (error);
3693 	op3 = (ip_fw3_opheader *)sdata.kbuf;
3694 
3695 	/* Finally, run handler */
3696 	error = h.handler(chain, op3, &sdata);
3697 	find_unref_sh(&h);
3698 
3699 	/* Flush state and free buffers */
3700 	if (error == 0)
3701 		error = ipfw_flush_sopt_data(&sdata);
3702 	else
3703 		ipfw_flush_sopt_data(&sdata);
3704 
3705 	if (locked != 0)
3706 		vsunlock(sdata.sopt_val, valsize);
3707 
3708 	/* Restore original pointer and set number of bytes written */
3709 	sopt->sopt_val = sdata.sopt_val;
3710 	sopt->sopt_valsize = sdata.ktotal;
3711 	if (sdata.kbuf != xbuf)
3712 		free(sdata.kbuf, M_TEMP);
3713 
3714 	return (error);
3715 }
3716 
3717 /**
3718  * {set|get}sockopt parser.
3719  */
3720 int
3721 ipfw_ctl(struct sockopt *sopt)
3722 {
3723 #define	RULE_MAXSIZE	(512*sizeof(u_int32_t))
3724 	int error;
3725 	size_t size, valsize;
3726 	struct ip_fw *buf;
3727 	struct ip_fw_rule0 *rule;
3728 	struct ip_fw_chain *chain;
3729 	u_int32_t rulenum[2];
3730 	uint32_t opt;
3731 	struct rule_check_info ci;
3732 	IPFW_RLOCK_TRACKER;
3733 
3734 	chain = &V_layer3_chain;
3735 	error = 0;
3736 
3737 	/* Save original valsize before it is altered via sooptcopyin() */
3738 	valsize = sopt->sopt_valsize;
3739 	opt = sopt->sopt_name;
3740 
3741 	/*
3742 	 * Disallow modifications in really-really secure mode, but still allow
3743 	 * the logging counters to be reset.
3744 	 */
3745 	if (opt == IP_FW_ADD ||
3746 	    (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
3747 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3748 		if (error != 0)
3749 			return (error);
3750 	}
3751 
3752 	switch (opt) {
3753 	case IP_FW_GET:
3754 		/*
3755 		 * Pass up a copy of the current rules. Static rules
3756 		 * come first (the last of which has number IPFW_DEFAULT_RULE),
3757 		 * followed by a possibly empty list of dynamic rules.
3758 		 * The last dynamic rule has NULL in the "next" field.
3759 		 *
3760 		 * Note that the calculated size is used to bound the
3761 		 * amount of data returned to the user.  The rule set may
3762 		 * change between calculating the size and returning the
3763 		 * data in which case we'll just return what fits.
3764 		 */
3765 		for (;;) {
3766 			int len = 0, want;
3767 
3768 			size = chain->static_len;
3769 			size += ipfw_dyn_len();
3770 			if (size >= sopt->sopt_valsize)
3771 				break;
3772 			buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3773 			IPFW_UH_RLOCK(chain);
3774 			/* check again how much space we need */
3775 			want = chain->static_len + ipfw_dyn_len();
3776 			if (size >= want)
3777 				len = ipfw_getrules(chain, buf, size);
3778 			IPFW_UH_RUNLOCK(chain);
3779 			if (size >= want)
3780 				error = sooptcopyout(sopt, buf, len);
3781 			free(buf, M_TEMP);
3782 			if (size >= want)
3783 				break;
3784 		}
3785 		break;
3786 
3787 	case IP_FW_FLUSH:
3788 		/* locking is done within del_entry() */
3789 		error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
3790 		break;
3791 
3792 	case IP_FW_ADD:
3793 		rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3794 		error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3795 			sizeof(struct ip_fw7) );
3796 
3797 		memset(&ci, 0, sizeof(struct rule_check_info));
3798 
3799 		/*
3800 		 * If the size of commands equals RULESIZE7 then we assume
3801 		 * a FreeBSD7.2 binary is talking to us (set is7=1).
3802 		 * is7 is persistent so the next 'ipfw list' command
3803 		 * will use this format.
3804 		 * NOTE: If wrong version is guessed (this can happen if
3805 		 *       the first ipfw command is 'ipfw [pipe] list')
3806 		 *       the ipfw binary may crash or loop infinitely...
3807 		 */
3808 		size = sopt->sopt_valsize;
3809 		if (size == RULESIZE7(rule)) {
3810 		    is7 = 1;
3811 		    error = convert_rule_to_8(rule);
3812 		    if (error) {
3813 			free(rule, M_TEMP);
3814 			return error;
3815 		    }
3816 		    size = RULESIZE(rule);
3817 		} else
3818 		    is7 = 0;
3819 		if (error == 0)
3820 			error = check_ipfw_rule0(rule, size, &ci);
3821 		if (error == 0) {
3822 			/* locking is done within add_rule() */
3823 			struct ip_fw *krule;
3824 			krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
3825 			ci.urule = (caddr_t)rule;
3826 			ci.krule = krule;
3827 			import_rule0(&ci);
3828 			error = commit_rules(chain, &ci, 1);
3829 			if (error != 0)
3830 				free_rule(ci.krule);
3831 			else if (sopt->sopt_dir == SOPT_GET) {
3832 				if (is7) {
3833 					error = convert_rule_to_7(rule);
3834 					size = RULESIZE7(rule);
3835 					if (error) {
3836 						free(rule, M_TEMP);
3837 						return error;
3838 					}
3839 				}
3840 				error = sooptcopyout(sopt, rule, size);
3841 			}
3842 		}
3843 		free(rule, M_TEMP);
3844 		break;
3845 
3846 	case IP_FW_DEL:
3847 		/*
3848 		 * IP_FW_DEL is used for deleting single rules or sets,
3849 		 * and (ab)used to atomically manipulate sets. Argument size
3850 		 * is used to distinguish between the two:
3851 		 *    sizeof(u_int32_t)
3852 		 *	delete single rule or set of rules,
3853 		 *	or reassign rules (or sets) to a different set.
3854 		 *    2*sizeof(u_int32_t)
3855 		 *	atomic disable/enable sets.
3856 		 *	first u_int32_t contains sets to be disabled,
3857 		 *	second u_int32_t contains sets to be enabled.
3858 		 */
3859 		error = sooptcopyin(sopt, rulenum,
3860 			2*sizeof(u_int32_t), sizeof(u_int32_t));
3861 		if (error)
3862 			break;
3863 		size = sopt->sopt_valsize;
3864 		if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
3865 			/* delete or reassign, locking done in del_entry() */
3866 			error = del_entry(chain, rulenum[0]);
3867 		} else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
3868 			IPFW_UH_WLOCK(chain);
3869 			V_set_disable =
3870 			    (V_set_disable | rulenum[0]) & ~rulenum[1] &
3871 			    ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3872 			IPFW_UH_WUNLOCK(chain);
3873 		} else
3874 			error = EINVAL;
3875 		break;
3876 
3877 	case IP_FW_ZERO:
3878 	case IP_FW_RESETLOG: /* argument is a u_int32_t, the rule number */
3879 		rulenum[0] = 0;
3880 		if (sopt->sopt_val != 0) {
3881 		    error = sooptcopyin(sopt, rulenum,
3882 			    sizeof(u_int32_t), sizeof(u_int32_t));
3883 		    if (error)
3884 			break;
3885 		}
3886 		error = zero_entry(chain, rulenum[0],
3887 			sopt->sopt_name == IP_FW_RESETLOG);
3888 		break;
3889 
3890 	/*--- TABLE opcodes ---*/
3891 	case IP_FW_TABLE_ADD:
3892 	case IP_FW_TABLE_DEL:
3893 		{
3894 			ipfw_table_entry ent;
3895 			struct tentry_info tei;
3896 			struct tid_info ti;
3897 			struct table_value v;
3898 
3899 			error = sooptcopyin(sopt, &ent,
3900 			    sizeof(ent), sizeof(ent));
3901 			if (error)
3902 				break;
3903 
3904 			memset(&tei, 0, sizeof(tei));
3905 			tei.paddr = &ent.addr;
3906 			tei.subtype = AF_INET;
3907 			tei.masklen = ent.masklen;
3908 			ipfw_import_table_value_legacy(ent.value, &v);
3909 			tei.pvalue = &v;
3910 			memset(&ti, 0, sizeof(ti));
3911 			ti.uidx = ent.tbl;
3912 			ti.type = IPFW_TABLE_CIDR;
3913 
3914 			error = (opt == IP_FW_TABLE_ADD) ?
3915 			    add_table_entry(chain, &ti, &tei, 0, 1) :
3916 			    del_table_entry(chain, &ti, &tei, 0, 1);
3917 		}
3918 		break;
3919 
3920 
3921 	case IP_FW_TABLE_FLUSH:
3922 		{
3923 			u_int16_t tbl;
3924 			struct tid_info ti;
3925 
3926 			error = sooptcopyin(sopt, &tbl,
3927 			    sizeof(tbl), sizeof(tbl));
3928 			if (error)
3929 				break;
3930 			memset(&ti, 0, sizeof(ti));
3931 			ti.uidx = tbl;
3932 			error = flush_table(chain, &ti);
3933 		}
3934 		break;
3935 
3936 	case IP_FW_TABLE_GETSIZE:
3937 		{
3938 			u_int32_t tbl, cnt;
3939 			struct tid_info ti;
3940 
3941 			if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
3942 			    sizeof(tbl))))
3943 				break;
3944 			memset(&ti, 0, sizeof(ti));
3945 			ti.uidx = tbl;
3946 			IPFW_RLOCK(chain);
3947 			error = ipfw_count_table(chain, &ti, &cnt);
3948 			IPFW_RUNLOCK(chain);
3949 			if (error)
3950 				break;
3951 			error = sooptcopyout(sopt, &cnt, sizeof(cnt));
3952 		}
3953 		break;
3954 
3955 	case IP_FW_TABLE_LIST:
3956 		{
3957 			ipfw_table *tbl;
3958 			struct tid_info ti;
3959 
3960 			if (sopt->sopt_valsize < sizeof(*tbl)) {
3961 				error = EINVAL;
3962 				break;
3963 			}
3964 			size = sopt->sopt_valsize;
3965 			tbl = malloc(size, M_TEMP, M_WAITOK);
3966 			error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
3967 			if (error) {
3968 				free(tbl, M_TEMP);
3969 				break;
3970 			}
3971 			tbl->size = (size - sizeof(*tbl)) /
3972 			    sizeof(ipfw_table_entry);
3973 			memset(&ti, 0, sizeof(ti));
3974 			ti.uidx = tbl->tbl;
3975 			IPFW_RLOCK(chain);
3976 			error = ipfw_dump_table_legacy(chain, &ti, tbl);
3977 			IPFW_RUNLOCK(chain);
3978 			if (error) {
3979 				free(tbl, M_TEMP);
3980 				break;
3981 			}
3982 			error = sooptcopyout(sopt, tbl, size);
3983 			free(tbl, M_TEMP);
3984 		}
3985 		break;
3986 
3987 	/*--- NAT operations are protected by the IPFW_LOCK ---*/
3988 	case IP_FW_NAT_CFG:
3989 		if (IPFW_NAT_LOADED)
3990 			error = ipfw_nat_cfg_ptr(sopt);
3991 		else {
3992 			printf("IP_FW_NAT_CFG: %s\n",
3993 			    "ipfw_nat not present, please load it");
3994 			error = EINVAL;
3995 		}
3996 		break;
3997 
3998 	case IP_FW_NAT_DEL:
3999 		if (IPFW_NAT_LOADED)
4000 			error = ipfw_nat_del_ptr(sopt);
4001 		else {
4002 			printf("IP_FW_NAT_DEL: %s\n",
4003 			    "ipfw_nat not present, please load it");
4004 			error = EINVAL;
4005 		}
4006 		break;
4007 
4008 	case IP_FW_NAT_GET_CONFIG:
4009 		if (IPFW_NAT_LOADED)
4010 			error = ipfw_nat_get_cfg_ptr(sopt);
4011 		else {
4012 			printf("IP_FW_NAT_GET_CFG: %s\n",
4013 			    "ipfw_nat not present, please load it");
4014 			error = EINVAL;
4015 		}
4016 		break;
4017 
4018 	case IP_FW_NAT_GET_LOG:
4019 		if (IPFW_NAT_LOADED)
4020 			error = ipfw_nat_get_log_ptr(sopt);
4021 		else {
4022 			printf("IP_FW_NAT_GET_LOG: %s\n",
4023 			    "ipfw_nat not present, please load it");
4024 			error = EINVAL;
4025 		}
4026 		break;
4027 
4028 	default:
4029 		printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
4030 		error = EINVAL;
4031 	}
4032 
4033 	return (error);
4034 #undef RULE_MAXSIZE
4035 }
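
/*
 * Illustrative sketch: how a userland program could drive the legacy
 * IP_FW_TABLE_ADD and IP_FW_TABLE_GETSIZE opcodes handled in the switch
 * above.  The raw-socket setup and the ipfw_table_entry field layout are
 * assumptions based on the fields this handler reads; error handling and
 * socket cleanup are trimmed for brevity, and modern consumers should
 * prefer the IP_FW3 interface.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>

static int
example_legacy_table_add(void)
{
	ipfw_table_entry ent;
	u_int32_t val;
	socklen_t len;
	int s;

	/* ipfw(8) traditionally talks to the kernel over a raw IP socket. */
	s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (s < 0)
		return (-1);

	/* Add 192.0.2.0/24 with value 42 to table 1. */
	memset(&ent, 0, sizeof(ent));
	inet_pton(AF_INET, "192.0.2.0", &ent.addr);
	ent.masklen = 24;
	ent.value = 42;
	ent.tbl = 1;
	if (setsockopt(s, IPPROTO_IP, IP_FW_TABLE_ADD, &ent, sizeof(ent)) != 0)
		return (-1);

	/* GETSIZE is value-result: table number in, entry count out. */
	val = 1;
	len = sizeof(val);
	if (getsockopt(s, IPPROTO_IP, IP_FW_TABLE_GETSIZE, &val, &len) != 0)
		return (-1);
	printf("table 1 holds %u entries\n", (unsigned)val);
	return (0);
}
#endif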
4036 #define	RULE_MAXSIZE	(256*sizeof(u_int32_t))
4037 
4038 /* Functions to convert rules 7.2 <==> 8.0 */
4039 static int
4040 convert_rule_to_7(struct ip_fw_rule0 *rule)
4041 {
4042 	/* Used to modify original rule */
4043 	struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
4044 	/* copy of original rule, version 8 */
4045 	struct ip_fw_rule0 *tmp;
4046 
4047 	/* Used to copy commands */
4048 	ipfw_insn *ccmd, *dst;
4049 	int ll = 0, ccmdlen = 0;
4050 
4051 	tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4052 	if (tmp == NULL) {
4053 		return (ENOMEM);
4054 	}
4055 	bcopy(rule, tmp, RULE_MAXSIZE);
4056 
4057 	/* Copy fields */
4058 	/* rule7->_pad = tmp->_pad; */
4059 	rule7->set = tmp->set;
4060 	rule7->rulenum = tmp->rulenum;
4061 	rule7->cmd_len = tmp->cmd_len;
4062 	rule7->act_ofs = tmp->act_ofs;
4063 	rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
4064 	rule7->cmd_len = tmp->cmd_len;
4065 	rule7->pcnt = tmp->pcnt;
4066 	rule7->bcnt = tmp->bcnt;
4067 	rule7->timestamp = tmp->timestamp;
4068 
4069 	/* Copy commands */
4070 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
4071 			ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4072 		ccmdlen = F_LEN(ccmd);
4073 
4074 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4075 
4076 		if (dst->opcode > O_NAT)
4077 			/* O_REASS doesn't exist in the 7.2 version, so
4078 			 * decrement any opcode that comes after it.
4079 			 */
4080 			dst->opcode--;
4081 
4082 		if (ccmdlen > ll) {
4083 			printf("ipfw: opcode %d size truncated\n",
4084 			    ccmd->opcode);
			free(tmp, M_TEMP);
4085 			return (EINVAL);
4086 		}
4087 	}
4088 	free(tmp, M_TEMP);
4089 
4090 	return 0;
4091 }
4092 
4093 static int
4094 convert_rule_to_8(struct ip_fw_rule0 *rule)
4095 {
4096 	/* Used to modify original rule */
4097 	struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
4098 
4099 	/* Used to copy commands */
4100 	ipfw_insn *ccmd, *dst;
4101 	int ll = 0, ccmdlen = 0;
4102 
4103 	/* Copy of original rule */
4104 	struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4105 	if (tmp == NULL) {
4106 		return (ENOMEM);
4107 	}
4108 
4109 	bcopy(rule7, tmp, RULE_MAXSIZE);
4110 
4111 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
4112 			ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4113 		ccmdlen = F_LEN(ccmd);
4114 
4115 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4116 
4117 		if (dst->opcode > O_NAT)
4118 			/* O_REASS doesn't exist in the 7.2 version, so
4119 			 * increment any opcode that comes after it.
4120 			 */
4121 			dst->opcode++;
4122 
4123 		if (ccmdlen > ll) {
4124 			printf("ipfw: opcode %d size truncated\n",
4125 			    ccmd->opcode);
			free(tmp, M_TEMP);
4126 			return (EINVAL);
4127 		}
4128 	}
4129 
4130 	rule->_pad = tmp->_pad;
4131 	rule->set = tmp->set;
4132 	rule->rulenum = tmp->rulenum;
4133 	rule->cmd_len = tmp->cmd_len;
4134 	rule->act_ofs = tmp->act_ofs;
4135 	rule->next_rule = (struct ip_fw *)tmp->next_rule;
4136 	rule->cmd_len = tmp->cmd_len;
4137 	rule->id = 0; /* XXX check whether 0 is OK here */
4138 	rule->pcnt = tmp->pcnt;
4139 	rule->bcnt = tmp->bcnt;
4140 	rule->timestamp = tmp->timestamp;
4141 
4142 	free(tmp, M_TEMP);
4143 	return 0;
4144 }
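
/*
 * Illustrative sketch: the instruction-walk pattern the two converters
 * above rely on.  Each ipfw_insn is F_LEN() 32-bit words long and
 * rule->cmd_len counts the total number of words, so the stream can be
 * scanned without knowing individual opcode layouts.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_count_insns(struct ip_fw_rule0 *rule)
{
	ipfw_insn *cmd;
	int l, cmdlen, count;

	count = 0;
	for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
	    l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		if (cmdlen == 0 || cmdlen > l)
			return (-1);	/* malformed instruction stream */
		count++;
	}
	return (count);
}
#endif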
4145 
4146 /*
4147  * Named object API.
4148  */
4150 
4151 void
4152 ipfw_init_srv(struct ip_fw_chain *ch)
4153 {
4154 
4155 	ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
4156 	ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
4157 	    M_IPFW, M_WAITOK | M_ZERO);
4158 }
4159 
4160 void
4161 ipfw_destroy_srv(struct ip_fw_chain *ch)
4162 {
4163 
4164 	free(ch->srvstate, M_IPFW);
4165 	ipfw_objhash_destroy(ch->srvmap);
4166 }
4167 
4168 /*
4169  * Allocates a new bitmask which can be used to enlarge/shrink
4170  * the named-instance index.
4171  */
4172 void
4173 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
4174 {
4175 	size_t size;
4176 	int max_blocks;
4177 	u_long *idx_mask;
4178 
4179 	KASSERT((items % BLOCK_ITEMS) == 0,
4180 	    ("bitmask size needs to be a multiple of %zu", BLOCK_ITEMS));
4182 
4183 	max_blocks = items / BLOCK_ITEMS;
4184 	size = items / 8;
4185 	idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
4186 	/* Mark all as free */
4187 	memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
4188 	*idx_mask &= ~(u_long)1; /* Skip index 0 */
4189 
4190 	*idx = idx_mask;
4191 	*pblocks = max_blocks;
4192 }
4193 
4194 /*
4195  * Copy current bitmask index to new one.
4196  */
4197 void
4198 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
4199 {
4200 	int old_blocks, new_blocks;
4201 	u_long *old_idx, *new_idx;
4202 	int i;
4203 
4204 	old_idx = ni->idx_mask;
4205 	old_blocks = ni->max_blocks;
4206 	new_idx = *idx;
4207 	new_blocks = *blocks;
4208 
4209 	for (i = 0; i < IPFW_MAX_SETS; i++) {
4210 		memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
4211 		    old_blocks * sizeof(u_long));
4212 	}
4213 }
4214 
4215 /*
4216  * Swaps current @ni index with new one.
4217  */
4218 void
4219 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
4220 {
4221 	int old_blocks;
4222 	u_long *old_idx;
4223 
4224 	old_idx = ni->idx_mask;
4225 	old_blocks = ni->max_blocks;
4226 
4227 	ni->idx_mask = *idx;
4228 	ni->max_blocks = *blocks;
4229 
4230 	/* Save old values */
4231 	*idx = old_idx;
4232 	*blocks = old_blocks;
4233 }
4234 
4235 void
4236 ipfw_objhash_bitmap_free(void *idx, int blocks)
4237 {
4238 
4239 	free(idx, M_IPFW);
4240 }
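
/*
 * Illustrative sketch: the intended grow sequence for the index bitmask.
 * A larger bitmask is allocated without any lock held, the live contents
 * are merged into it, the instance is switched over, and the old mask
 * (handed back by the swap) is released.  The helper, @new_items and the
 * use of the UH write lock are assumptions about the caller's context;
 * @new_items must stay a multiple of BLOCK_ITEMS, as the KASSERT above
 * requires.
 */
#if 0
static void
example_grow_bitmap(struct ip_fw_chain *ch, struct namedobj_instance *ni,
    uint32_t new_items)
{
	void *new_idx;
	int new_blocks;

	/* Sleepable allocation: do it before taking the lock. */
	ipfw_objhash_bitmap_alloc(new_items, &new_idx, &new_blocks);

	IPFW_UH_WLOCK(ch);
	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);
	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);
	IPFW_UH_WUNLOCK(ch);

	/* @new_idx/@new_blocks now describe the old mask; free it. */
	ipfw_objhash_bitmap_free(new_idx, new_blocks);
}
#endif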
4241 
4242 /*
4243  * Creates named hash instance.
4244  * Must be called without holding any locks.
4245  * Returns a pointer to the new instance.
4246  */
4247 struct namedobj_instance *
4248 ipfw_objhash_create(uint32_t items)
4249 {
4250 	struct namedobj_instance *ni;
4251 	int i;
4252 	size_t size;
4253 
4254 	size = sizeof(struct namedobj_instance) +
4255 	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
4256 	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
4257 
4258 	ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
4259 	ni->nn_size = NAMEDOBJ_HASH_SIZE;
4260 	ni->nv_size = NAMEDOBJ_HASH_SIZE;
4261 
4262 	ni->names = (struct namedobjects_head *)(ni + 1);
4263 	ni->values = &ni->names[ni->nn_size];
4264 
4265 	for (i = 0; i < ni->nn_size; i++)
4266 		TAILQ_INIT(&ni->names[i]);
4267 
4268 	for (i = 0; i < ni->nv_size; i++)
4269 		TAILQ_INIT(&ni->values[i]);
4270 
4271 	/* Set default hashing/comparison functions */
4272 	ni->hash_f = objhash_hash_name;
4273 	ni->cmp_f = objhash_cmp_name;
4274 
4275 	/* Allocate bitmask separately due to possible resize */
4276 	ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
4277 
4278 	return (ni);
4279 }
4280 
4281 void
4282 ipfw_objhash_destroy(struct namedobj_instance *ni)
4283 {
4284 
4285 	free(ni->idx_mask, M_IPFW);
4286 	free(ni, M_IPFW);
4287 }
4288 
4289 void
4290 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
4291     objhash_cmp_f *cmp_f)
4292 {
4293 
4294 	ni->hash_f = hash_f;
4295 	ni->cmp_f = cmp_f;
4296 }
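
/*
 * Illustrative sketch: creating a private namedobj instance and
 * overriding the default name-based hashing with set-aware variants.
 * The callback signatures mirror objhash_hash_name()/objhash_cmp_name()
 * below; folding @set into the hash is a hypothetical choice and the
 * example_* names are not part of this file.
 */
#if 0
static uint32_t
example_hash_name_set(struct namedobj_instance *ni, const void *name,
    uint32_t set)
{

	/* Fold the set number into the FNV hash of the name. */
	return (fnv_32_str((const char *)name, FNV1_32_INIT) ^ set);
}

static int
example_cmp_name_set(struct named_object *no, const void *name, uint32_t set)
{

	return (strcmp(no->name, (const char *)name) != 0 || no->set != set);
}

static struct namedobj_instance *
example_create_instance(void)
{
	struct namedobj_instance *ni;

	ni = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
	ipfw_objhash_set_funcs(ni, example_hash_name_set,
	    example_cmp_name_set);
	return (ni);
}
#endif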
4297 
4298 static uint32_t
4299 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set)
4300 {
4301 
4302 	return (fnv_32_str((const char *)name, FNV1_32_INIT));
4303 }
4304 
4305 static int
4306 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set)
4307 {
4308 
4309 	if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set))
4310 		return (0);
4311 
4312 	return (1);
4313 }
4314 
4315 static uint32_t
4316 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
4317 {
4318 	uint32_t v;
4319 
4320 	v = val % (ni->nv_size - 1);
4321 
4322 	return (v);
4323 }
4324 
4325 struct named_object *
4326 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
4327 {
4328 	struct named_object *no;
4329 	uint32_t hash;
4330 
4331 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
4332 
4333 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4334 		if (ni->cmp_f(no, name, set) == 0)
4335 			return (no);
4336 	}
4337 
4338 	return (NULL);
4339 }
4340 
4341 /*
4342  * Finds the name TLV matching @uidx (and @etlv, when non-zero) in @tlvs.
4343  * Checks @tlvs for valid data inside.
4344  *
4345  * Returns pointer to found TLV or NULL.
4346  */
4347 ipfw_obj_ntlv *
4348 ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv)
4349 {
4350 	ipfw_obj_ntlv *ntlv;
4351 	uintptr_t pa, pe;
4352 	int l;
4353 
4354 	pa = (uintptr_t)tlvs;
4355 	pe = pa + len;
4356 	l = 0;
4357 	for (; pa < pe; pa += l) {
4358 		ntlv = (ipfw_obj_ntlv *)pa;
4359 		l = ntlv->head.length;
4360 
4361 		if (l != sizeof(*ntlv))
4362 			return (NULL);
4363 
4364 		if (ntlv->idx != uidx)
4365 			continue;
4366 		/*
4367 		 * When userland has specified a zero TLV type, do
4368 		 * not compare it with @etlv. In some cases userland
4369 		 * doesn't know what type the object should have, so
4370 		 * only uidx and name are used to find the named_object.
4371 		 */
4372 		if (ntlv->head.type != 0 &&
4373 		    ntlv->head.type != (uint16_t)etlv)
4374 			continue;
4375 
4376 		if (ipfw_check_object_name_generic(ntlv->name) != 0)
4377 			return (NULL);
4378 
4379 		return (ntlv);
4380 	}
4381 
4382 	return (NULL);
4383 }
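
/*
 * Illustrative sketch: filling one entry of the contiguous ipfw_obj_ntlv
 * array that ipfw_find_name_tlv_type() walks.  It only touches the
 * fields the walker above relies on (head.length, head.type, idx, name);
 * the helper itself is hypothetical.
 */
#if 0
static void
example_fill_ntlv(ipfw_obj_ntlv *ntlv, uint16_t uidx, uint16_t type,
    const char *name)
{

	memset(ntlv, 0, sizeof(*ntlv));
	/*
	 * Each entry must be exactly sizeof(*ntlv); the walker bails out
	 * on any other length.
	 */
	ntlv->head.length = sizeof(*ntlv);
	ntlv->head.type = type;		/* 0 means "match any type" */
	ntlv->idx = uidx;
	strlcpy(ntlv->name, name, sizeof(ntlv->name));
}
#endif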
4384 
4385 /*
4386  * Finds object config based on either legacy index
4387  * or name in ntlv.
4388  * Note: the @ti structure contains unchecked data from userland.
4389  *
4390  * Returns 0 on success and fills in @pno with the found config.
4391  */
4392 int
4393 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
4394     uint32_t etlv, struct named_object **pno)
4395 {
4396 	char *name;
4397 	ipfw_obj_ntlv *ntlv;
4398 	uint32_t set;
4399 
4400 	if (ti->tlvs == NULL)
4401 		return (EINVAL);
4402 
4403 	ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
4404 	if (ntlv == NULL)
4405 		return (EINVAL);
4406 	name = ntlv->name;
4407 
4408 	/*
4409 	 * Use the set provided by @ti instead of the @ntlv one.
4410 	 * This is needed because set handling behaves differently
4411 	 * depending on V_fw_tables_sets.
4412 	 */
4413 	set = ti->set;
4414 	*pno = ipfw_objhash_lookup_name(ni, set, name);
4415 	if (*pno == NULL)
4416 		return (ESRCH);
4417 	return (0);
4418 }
4419 
4420 /*
4421  * Find named object by name, considering also its TLV type.
4422  */
4423 struct named_object *
4424 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
4425     uint32_t type, const char *name)
4426 {
4427 	struct named_object *no;
4428 	uint32_t hash;
4429 
4430 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
4431 
4432 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4433 		if (ni->cmp_f(no, name, set) == 0 &&
4434 		    no->etlv == (uint16_t)type)
4435 			return (no);
4436 	}
4437 
4438 	return (NULL);
4439 }
4440 
4441 struct named_object *
4442 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
4443 {
4444 	struct named_object *no;
4445 	uint32_t hash;
4446 
4447 	hash = objhash_hash_idx(ni, kidx);
4448 
4449 	TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
4450 		if (no->kidx == kidx)
4451 			return (no);
4452 	}
4453 
4454 	return (NULL);
4455 }
4456 
4457 int
4458 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
4459     struct named_object *b)
4460 {
4461 
4462 	if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
4463 		return (1);
4464 
4465 	return (0);
4466 }
4467 
4468 void
4469 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
4470 {
4471 	uint32_t hash;
4472 
4473 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4474 	TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
4475 
4476 	hash = objhash_hash_idx(ni, no->kidx);
4477 	TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
4478 
4479 	ni->count++;
4480 }
4481 
4482 void
4483 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
4484 {
4485 	uint32_t hash;
4486 
4487 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4488 	TAILQ_REMOVE(&ni->names[hash], no, nn_next);
4489 
4490 	hash = objhash_hash_idx(ni, no->kidx);
4491 	TAILQ_REMOVE(&ni->values[hash], no, nv_next);
4492 
4493 	ni->count--;
4494 }
4495 
4496 uint32_t
4497 ipfw_objhash_count(struct namedobj_instance *ni)
4498 {
4499 
4500 	return (ni->count);
4501 }
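
/*
 * Illustrative sketch: the typical add/lookup/remove lifecycle.  Real
 * consumers embed struct named_object at the start of their own config
 * structure and keep the name storage alive elsewhere; the standalone
 * object, the assumption that @no->name stores a caller-owned pointer,
 * and the example_* name below are hypothetical.
 */
#if 0
static int
example_object_lifecycle(struct namedobj_instance *ni,
    struct named_object *no, char *name, uint32_t set)
{
	uint16_t kidx;

	/* Reserve a kernel index from the instance bitmask. */
	if (ipfw_objhash_alloc_idx(ni, &kidx) != 0)
		return (ENOSPC);

	no->name = name;
	no->set = set;
	no->kidx = kidx;
	ipfw_objhash_add(ni, no);

	/* Both lookup paths now find the object. */
	KASSERT(ipfw_objhash_lookup_name(ni, set, name) == no,
	    ("name lookup failed"));
	KASSERT(ipfw_objhash_lookup_kidx(ni, kidx) == no,
	    ("kidx lookup failed"));

	ipfw_objhash_del(ni, no);
	ipfw_objhash_free_idx(ni, kidx);
	return (0);
}
#endif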
4502 
4503 uint32_t
4504 ipfw_objhash_count_type(struct namedobj_instance *ni, uint16_t type)
4505 {
4506 	struct named_object *no;
4507 	uint32_t count;
4508 	int i;
4509 
4510 	count = 0;
4511 	for (i = 0; i < ni->nn_size; i++) {
4512 		TAILQ_FOREACH(no, &ni->names[i], nn_next) {
4513 			if (no->etlv == type)
4514 				count++;
4515 		}
4516 	}
4517 	return (count);
4518 }
4519 
4520 /*
4521  * Runs @f for each named object found.
4522  * It is safe to delete objects from the callback.
4523  */
4524 int
4525 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
4526 {
4527 	struct named_object *no, *no_tmp;
4528 	int i, ret;
4529 
4530 	for (i = 0; i < ni->nn_size; i++) {
4531 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4532 			ret = f(ni, no, arg);
4533 			if (ret != 0)
4534 				return (ret);
4535 		}
4536 	}
4537 	return (0);
4538 }
4539 
4540 /*
4541  * Runs @f for each found named object with type @type.
4542  * It is safe to delete objects from the callback.
4543  */
4544 int
4545 ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f,
4546     void *arg, uint16_t type)
4547 {
4548 	struct named_object *no, *no_tmp;
4549 	int i, ret;
4550 
4551 	for (i = 0; i < ni->nn_size; i++) {
4552 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4553 			if (no->etlv != type)
4554 				continue;
4555 			ret = f(ni, no, arg);
4556 			if (ret != 0)
4557 				return (ret);
4558 		}
4559 	}
4560 	return (0);
4561 }
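
/*
 * Illustrative sketch: an objhash_cb_t callback that unlinks every
 * object belonging to a given set.  Deleting from the callback is safe
 * because the iterators above use TAILQ_FOREACH_SAFE; the example_*
 * helpers are hypothetical, and returning non-zero from the callback
 * would abort the walk.
 */
#if 0
static int
example_flush_set_cb(struct namedobj_instance *ni, struct named_object *no,
    void *arg)
{
	uint32_t set;

	set = *(uint32_t *)arg;
	if (no->set == set)
		ipfw_objhash_del(ni, no);
	return (0);		/* keep iterating */
}

static void
example_flush_set(struct namedobj_instance *ni, uint32_t set)
{

	ipfw_objhash_foreach(ni, example_flush_set_cb, &set);
}
#endif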
4562 
4563 /*
4564  * Marks index @idx in the given instance as free.
4565  * Returns 0 on success.
4566  */
4567 int
4568 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
4569 {
4570 	u_long *mask;
4571 	int i, v;
4572 
4573 	i = idx / BLOCK_ITEMS;
4574 	v = idx % BLOCK_ITEMS;
4575 
4576 	if (i >= ni->max_blocks)
4577 		return (1);
4578 
4579 	mask = &ni->idx_mask[i];
4580 
4581 	if ((*mask & ((u_long)1 << v)) != 0)
4582 		return (1);
4583 
4584 	/* Mark as free */
4585 	*mask |= (u_long)1 << v;
4586 
4587 	/* Update free offset */
4588 	if (ni->free_off[0] > i)
4589 		ni->free_off[0] = i;
4590 
4591 	return (0);
4592 }
4593 
4594 /*
4595  * Allocates a new index in the given instance and stores it in @pidx.
4596  * Returns 0 on success.
4597  */
4598 int
4599 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
4600 {
4601 	struct namedobj_instance *ni;
4602 	u_long *mask;
4603 	int i, off, v;
4604 
4605 	ni = (struct namedobj_instance *)n;
4606 
4607 	off = ni->free_off[0];
4608 	mask = &ni->idx_mask[off];
4609 
4610 	for (i = off; i < ni->max_blocks; i++, mask++) {
4611 		if ((v = ffsl(*mask)) == 0)
4612 			continue;
4613 
4614 		/* Mark as busy */
4615 		*mask &= ~((u_long)1 << (v - 1));
4616 
4617 		ni->free_off[0] = i;
4618 
4619 		v = BLOCK_ITEMS * i + v - 1;
4620 
4621 		*pidx = v;
4622 		return (0);
4623 	}
4624 
4625 	return (1);
4626 }
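
/*
 * Illustrative sketch: indices map to bitmask positions as
 * idx = BLOCK_ITEMS * block + bit, with a set bit meaning "free".  The
 * hypothetical helper below reserves @count indices and rolls back on a
 * partial failure, pairing ipfw_objhash_alloc_idx() with
 * ipfw_objhash_free_idx().
 */
#if 0
static int
example_alloc_indices(struct namedobj_instance *ni, uint16_t *idx, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (ipfw_objhash_alloc_idx(ni, &idx[i]) != 0) {
			/* Roll back the indices we already took. */
			while (--i >= 0)
				ipfw_objhash_free_idx(ni, idx[i]);
			return (ENOSPC);
		}
	}
	return (0);
}
#endif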
4627 
4628 /* end of file */
4629