xref: /freebsd/sys/netpfil/ipfw/ip_fw_sockopt.c (revision cc426dd31990b8b50b210efc450e404596548ca1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
5  * Copyright (c) 2014 Yandex LLC
6  * Copyright (c) 2014 Alexander V. Chernikov
7  *
8  * Supported by: Valeria Paoli
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /*
36  * Control socket and rule management routines for ipfw.
37  * Control is currently implemented via IP_FW3 setsockopt() code.
38  */
39 
40 #include "opt_ipfw.h"
41 #include "opt_inet.h"
42 #ifndef INET
43 #error IPFIREWALL requires INET.
44 #endif /* INET */
45 #include "opt_inet6.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>	/* struct m_tag used by nested headers */
51 #include <sys/kernel.h>
52 #include <sys/lock.h>
53 #include <sys/priv.h>
54 #include <sys/proc.h>
55 #include <sys/rwlock.h>
56 #include <sys/rmlock.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
59 #include <sys/sysctl.h>
60 #include <sys/syslog.h>
61 #include <sys/fnv_hash.h>
62 #include <net/if.h>
63 #include <net/pfil.h>
64 #include <net/route.h>
65 #include <net/vnet.h>
66 #include <vm/vm.h>
67 #include <vm/vm_extern.h>
68 
69 #include <netinet/in.h>
70 #include <netinet/ip_var.h> /* hooks */
71 #include <netinet/ip_fw.h>
72 
73 #include <netpfil/ipfw/ip_fw_private.h>
74 #include <netpfil/ipfw/ip_fw_table.h>
75 
76 #ifdef MAC
77 #include <security/mac/mac_framework.h>
78 #endif
79 
80 static int ipfw_ctl(struct sockopt *sopt);
81 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
82     struct rule_check_info *ci);
83 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
84     struct rule_check_info *ci);
85 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
86     struct rule_check_info *ci);
87 static int rewrite_rule_uidx(struct ip_fw_chain *chain,
88     struct rule_check_info *ci);
89 
90 #define	NAMEDOBJ_HASH_SIZE	32
91 
92 struct namedobj_instance {
93 	struct namedobjects_head	*names;
94 	struct namedobjects_head	*values;
95 	uint32_t nn_size;		/* names hash size */
96 	uint32_t nv_size;		/* number hash size */
97 	u_long *idx_mask;		/* used items bitmask */
98 	uint32_t max_blocks;		/* number of "long" blocks in bitmask */
99 	uint32_t count;			/* number of items */
100 	uint16_t free_off[IPFW_MAX_SETS];	/* first possible free offset */
101 	objhash_hash_f	*hash_f;
102 	objhash_cmp_f	*cmp_f;
103 };
104 #define	BLOCK_ITEMS	(8 * sizeof(u_long))	/* Number of items for ffsl() */
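/*
 * Brief overview (descriptive): namedobj_instance keeps a two-way mapping
 * for named kernel objects.  @names hashes objects by name (and set),
 * while @values hashes them by kernel index (kidx).  @idx_mask is a
 * bitmask of used kidx values, scanned with ffsl() in BLOCK_ITEMS-sized
 * chunks when a new index is needed, with @free_off caching the first
 * possibly free offset.
 */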
105 
106 static uint32_t objhash_hash_name(struct namedobj_instance *ni,
107     const void *key, uint32_t kopt);
108 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
109 static int objhash_cmp_name(struct named_object *no, const void *name,
110     uint32_t set);
111 
112 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chains");
113 
114 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
115     struct sockopt_data *sd);
116 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
117     struct sockopt_data *sd);
118 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
119     struct sockopt_data *sd);
120 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
121     struct sockopt_data *sd);
122 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
123     struct sockopt_data *sd);
124 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
125     struct sockopt_data *sd);
126 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
127     struct sockopt_data *sd);
128 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
129     struct sockopt_data *sd);
130 
131 /* ctl3 handler data */
132 struct mtx ctl3_lock;
133 #define	CTL3_LOCK_INIT()	mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
134 #define	CTL3_LOCK_DESTROY()	mtx_destroy(&ctl3_lock)
135 #define	CTL3_LOCK()		mtx_lock(&ctl3_lock)
136 #define	CTL3_UNLOCK()		mtx_unlock(&ctl3_lock)
137 
138 static struct ipfw_sopt_handler *ctl3_handlers;
139 static size_t ctl3_hsize;
140 static uint64_t ctl3_refct, ctl3_gencnt;
141 #define	CTL3_SMALLBUF	4096			/* small page-size write buffer */
142 #define	CTL3_LARGEBUF	(16 * 1024 * 1024)	/* handle large rulesets */
143 
144 static int ipfw_flush_sopt_data(struct sockopt_data *sd);
145 
146 static struct ipfw_sopt_handler	scodes[] = {
147 	{ IP_FW_XGET,		0,	HDIR_GET,	dump_config },
148 	{ IP_FW_XADD,		0,	HDIR_BOTH,	add_rules },
149 	{ IP_FW_XDEL,		0,	HDIR_BOTH,	del_rules },
150 	{ IP_FW_XZERO,		0,	HDIR_SET,	clear_rules },
151 	{ IP_FW_XRESETLOG,	0,	HDIR_SET,	clear_rules },
152 	{ IP_FW_XMOVE,		0,	HDIR_SET,	move_rules },
153 	{ IP_FW_SET_SWAP,	0,	HDIR_SET,	manage_sets },
154 	{ IP_FW_SET_MOVE,	0,	HDIR_SET,	manage_sets },
155 	{ IP_FW_SET_ENABLE,	0,	HDIR_SET,	manage_sets },
156 	{ IP_FW_DUMP_SOPTCODES,	0,	HDIR_GET,	dump_soptcodes },
157 	{ IP_FW_DUMP_SRVOBJECTS,0,	HDIR_GET,	dump_srvobjects },
158 };
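/*
 * Each entry above is { opcode, version, direction, handler }; IP_FW3
 * requests are dispatched by ipfw_ctl3() to the handler registered for
 * the matching opcode and allowed transfer direction.
 */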
159 
160 static int
161 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule);
162 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd,
163     uint16_t *puidx, uint8_t *ptype);
164 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
165     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti);
166 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd,
167     struct tid_info *ti, struct obj_idx *pidx, int *unresolved);
168 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
169 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd,
170     struct obj_idx *oib, struct obj_idx *end);
171 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
172     struct sockopt_data *sd);
173 
174 /*
175  * Opcode object rewriter variables
176  */
177 struct opcode_obj_rewrite *ctl3_rewriters;
178 static size_t ctl3_rsize;
179 
180 /*
181  * static variables followed by global ones
182  */
183 
184 VNET_DEFINE_STATIC(uma_zone_t, ipfw_cntr_zone);
185 #define	V_ipfw_cntr_zone		VNET(ipfw_cntr_zone)
186 
187 void
188 ipfw_init_counters()
189 {
190 
191 	V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
192 	    IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
193 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
194 }
195 
196 void
197 ipfw_destroy_counters()
198 {
199 
200 	uma_zdestroy(V_ipfw_cntr_zone);
201 }
202 
203 struct ip_fw *
204 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
205 {
206 	struct ip_fw *rule;
207 
208 	rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
209 	rule->cntr = uma_zalloc_pcpu(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
210 	rule->refcnt = 1;
211 
212 	return (rule);
213 }
214 
215 void
216 ipfw_free_rule(struct ip_fw *rule)
217 {
218 
219 	/*
220 	 * We don't release refcnt here, since this function
221 	 * can be called without any locks held. The caller
222 	 * must release reference under IPFW_UH_WLOCK, and then
223 	 * call this function if refcount becomes 1.
224 	 */
225 	if (rule->refcnt > 1)
226 		return;
227 	uma_zfree_pcpu(V_ipfw_cntr_zone, rule->cntr);
228 	free(rule, M_IPFW);
229 }
230 
231 
232 /*
233  * Find the smallest rule >= key, id.
234  * We could use bsearch but it is so simple that we code it directly
235  */
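/*
 * For example, with rules numbered 100, 200 and 65535 (the default rule),
 * ipfw_find_rule(chain, 200, 0) returns the index of the first rule 200,
 * and ipfw_find_rule(chain, 150, 0) returns the same index, since 200 is
 * the smallest rule number >= 150.
 */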
236 int
237 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
238 {
239 	int i, lo, hi;
240 	struct ip_fw *r;
241 
242   	for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
243 		i = (lo + hi) / 2;
244 		r = chain->map[i];
245 		if (r->rulenum < key)
246 			lo = i + 1;	/* continue from the next one */
247 		else if (r->rulenum > key)
248 			hi = i;		/* this might be good */
249 		else if (r->id < id)
250 			lo = i + 1;	/* continue from the next one */
251 		else /* r->id >= id */
252 			hi = i;		/* this might be good */
253 	}
254 	return hi;
255 }
256 
257 /*
258  * Builds skipto cache on rule set @map.
259  */
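/*
 * The cache (chain->idxmap) maps every possible rule number 0..65535 to
 * the index in @map of the first rule with an equal or greater number,
 * which lets O_SKIPTO locate its target in constant time.
 */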
260 static void
261 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
262 {
263 	int *smap, rulenum;
264 	int i, mi;
265 
266 	IPFW_UH_WLOCK_ASSERT(chain);
267 
268 	mi = 0;
269 	rulenum = map[mi]->rulenum;
270 	smap = chain->idxmap_back;
271 
272 	if (smap == NULL)
273 		return;
274 
275 	for (i = 0; i < 65536; i++) {
276 		smap[i] = mi;
277 		/* Use the same rule index while i < rulenum */
278 		if (i != rulenum || i == 65535)
279 			continue;
280 		/* Find next rule with num > i */
281 		rulenum = map[++mi]->rulenum;
282 		while (rulenum == i)
283 			rulenum = map[++mi]->rulenum;
284 	}
285 }
286 
287 /*
288  * Swaps prepared (backup) index with current one.
289  */
290 static void
291 swap_skipto_cache(struct ip_fw_chain *chain)
292 {
293 	int *map;
294 
295 	IPFW_UH_WLOCK_ASSERT(chain);
296 	IPFW_WLOCK_ASSERT(chain);
297 
298 	map = chain->idxmap;
299 	chain->idxmap = chain->idxmap_back;
300 	chain->idxmap_back = map;
301 }
302 
303 /*
304  * Allocate and initialize skipto cache.
305  */
306 void
307 ipfw_init_skipto_cache(struct ip_fw_chain *chain)
308 {
309 	int *idxmap, *idxmap_back;
310 
311 	idxmap = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK | M_ZERO);
312 	idxmap_back = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK);
313 
314 	/*
315 	 * Note we may be called at any time after initialization,
316 	 * for example, on first skipto rule, so we need to
317 	 * provide valid chain->idxmap on return
318 	 */
319 
320 	IPFW_UH_WLOCK(chain);
321 	if (chain->idxmap != NULL) {
322 		IPFW_UH_WUNLOCK(chain);
323 		free(idxmap, M_IPFW);
324 		free(idxmap_back, M_IPFW);
325 		return;
326 	}
327 
328 	/* Set backup pointer first to permit building cache */
329 	chain->idxmap_back = idxmap_back;
330 	update_skipto_cache(chain, chain->map);
331 	IPFW_WLOCK(chain);
332 	/* It is now safe to set chain->idxmap ptr */
333 	chain->idxmap = idxmap;
334 	swap_skipto_cache(chain);
335 	IPFW_WUNLOCK(chain);
336 	IPFW_UH_WUNLOCK(chain);
337 }
338 
339 /*
340  * Destroys skipto cache.
341  */
342 void
343 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
344 {
345 
346 	if (chain->idxmap != NULL)
347 		free(chain->idxmap, M_IPFW);
348 	if (chain->idxmap_back != NULL)
349 		free(chain->idxmap_back, M_IPFW);
350 }
351 
352 
353 /*
354  * Allocate a new rule map, returning with the chain UH-wlocked.
355  * @extra is the number of entries to add or delete.
356  */
357 static struct ip_fw **
358 get_map(struct ip_fw_chain *chain, int extra, int locked)
359 {
360 
361 	for (;;) {
362 		struct ip_fw **map;
363 		u_int i, mflags;
364 
365 		mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
366 
367 		i = chain->n_rules + extra;
368 		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
369 		if (map == NULL) {
370 			printf("%s: cannot allocate map\n", __FUNCTION__);
371 			return NULL;
372 		}
373 		if (!locked)
374 			IPFW_UH_WLOCK(chain);
375 		if (i >= chain->n_rules + extra) /* good */
376 			return map;
377 		/* otherwise we lost the race, free and retry */
378 		if (!locked)
379 			IPFW_UH_WUNLOCK(chain);
380 		free(map, M_IPFW);
381 	}
382 }
383 
384 /*
385  * Swap the rule maps.  Must be called with IPFW_UH_WLOCK held.
386  */
387 static struct ip_fw **
388 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
389 {
390 	struct ip_fw **old_map;
391 
392 	IPFW_WLOCK(chain);
393 	chain->id++;
394 	chain->n_rules = new_len;
395 	old_map = chain->map;
396 	chain->map = new_map;
397 	swap_skipto_cache(chain);
398 	IPFW_WUNLOCK(chain);
399 	return old_map;
400 }
401 
402 
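/*
 * Export helpers for rule accounting (v1 below, v0 further down): copy
 * packet/byte counters from the per-CPU counter(9) storage and convert
 * the timestamp, kept relative to boot time internally, to wall-clock
 * time by adding boottime.
 */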
403 static void
404 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
405 {
406 	struct timeval boottime;
407 
408 	cntr->size = sizeof(*cntr);
409 
410 	if (krule->cntr != NULL) {
411 		cntr->pcnt = counter_u64_fetch(krule->cntr);
412 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
413 		cntr->timestamp = krule->timestamp;
414 	}
415 	if (cntr->timestamp > 0) {
416 		getboottime(&boottime);
417 		cntr->timestamp += boottime.tv_sec;
418 	}
419 }
420 
421 static void
422 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
423 {
424 	struct timeval boottime;
425 
426 	if (krule->cntr != NULL) {
427 		cntr->pcnt = counter_u64_fetch(krule->cntr);
428 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
429 		cntr->timestamp = krule->timestamp;
430 	}
431 	if (cntr->timestamp > 0) {
432 		getboottime(&boottime);
433 		cntr->timestamp += boottime.tv_sec;
434 	}
435 }
436 
437 /*
438  * Copies rule @urule from v1 userland format (current)
439  * to kernel @krule.
440  * Assume @krule is zeroed.
441  */
442 static void
443 import_rule1(struct rule_check_info *ci)
444 {
445 	struct ip_fw_rule *urule;
446 	struct ip_fw *krule;
447 
448 	urule = (struct ip_fw_rule *)ci->urule;
449 	krule = (struct ip_fw *)ci->krule;
450 
451 	/* copy header */
452 	krule->act_ofs = urule->act_ofs;
453 	krule->cmd_len = urule->cmd_len;
454 	krule->rulenum = urule->rulenum;
455 	krule->set = urule->set;
456 	krule->flags = urule->flags;
457 
458 	/* Save rulenum offset */
459 	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
460 
461 	/* Copy opcodes */
462 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
463 }
464 
465 /*
466  * Export rule into v1 format (Current).
467  * Layout:
468  * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
469  *     [ ip_fw_rule ] OR
470  *     [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
471  * ]
472  * Assume @data is zeroed.
473  */
474 static void
475 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
476 {
477 	struct ip_fw_bcounter *cntr;
478 	struct ip_fw_rule *urule;
479 	ipfw_obj_tlv *tlv;
480 
481 	/* Fill in TLV header */
482 	tlv = (ipfw_obj_tlv *)data;
483 	tlv->type = IPFW_TLV_RULE_ENT;
484 	tlv->length = len;
485 
486 	if (rcntrs != 0) {
487 		/* Copy counters */
488 		cntr = (struct ip_fw_bcounter *)(tlv + 1);
489 		urule = (struct ip_fw_rule *)(cntr + 1);
490 		export_cntr1_base(krule, cntr);
491 	} else
492 		urule = (struct ip_fw_rule *)(tlv + 1);
493 
494 	/* copy header */
495 	urule->act_ofs = krule->act_ofs;
496 	urule->cmd_len = krule->cmd_len;
497 	urule->rulenum = krule->rulenum;
498 	urule->set = krule->set;
499 	urule->flags = krule->flags;
500 	urule->id = krule->id;
501 
502 	/* Copy opcodes */
503 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
504 }
505 
506 
507 /*
508  * Copies rule @urule from FreeBSD8 userland format (v0)
509  * to kernel @krule.
510  * Assume @krule is zeroed.
511  */
512 static void
513 import_rule0(struct rule_check_info *ci)
514 {
515 	struct ip_fw_rule0 *urule;
516 	struct ip_fw *krule;
517 	int cmdlen, l;
518 	ipfw_insn *cmd;
519 	ipfw_insn_limit *lcmd;
520 	ipfw_insn_if *cmdif;
521 
522 	urule = (struct ip_fw_rule0 *)ci->urule;
523 	krule = (struct ip_fw *)ci->krule;
524 
525 	/* copy header */
526 	krule->act_ofs = urule->act_ofs;
527 	krule->cmd_len = urule->cmd_len;
528 	krule->rulenum = urule->rulenum;
529 	krule->set = urule->set;
530 	if ((urule->_pad & 1) != 0)
531 		krule->flags |= IPFW_RULE_NOOPT;
532 
533 	/* Save rulenum offset */
534 	ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
535 
536 	/* Copy opcodes */
537 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
538 
539 	/*
540 	 * Alter opcodes:
541 	 * 1) convert tablearg value from 65535 to 0
542 	 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room
543 	 *    for targ).
544 	 * 3) convert table number in iface opcodes to u16
545 	 * 4) convert old `nat global` into new 65535
546 	 */
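	/*
	 * For example (illustrative), a v0 "skipto tablearg" opcode enters
	 * this loop with arg1 == IP_FW_TABLEARG (65535) and leaves it with
	 * arg1 == IP_FW_TARG (0), the current kernel representation.
	 */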
547 	l = krule->cmd_len;
548 	cmd = krule->cmd;
549 	cmdlen = 0;
550 
551 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
552 		cmdlen = F_LEN(cmd);
553 
554 		switch (cmd->opcode) {
555 		/* Opcodes supporting tablearg */
556 		case O_TAG:
557 		case O_TAGGED:
558 		case O_PIPE:
559 		case O_QUEUE:
560 		case O_DIVERT:
561 		case O_TEE:
562 		case O_SKIPTO:
563 		case O_CALLRETURN:
564 		case O_NETGRAPH:
565 		case O_NGTEE:
566 		case O_NAT:
567 			if (cmd->arg1 == IP_FW_TABLEARG)
568 				cmd->arg1 = IP_FW_TARG;
569 			else if (cmd->arg1 == 0)
570 				cmd->arg1 = IP_FW_NAT44_GLOBAL;
571 			break;
572 		case O_SETFIB:
573 		case O_SETDSCP:
574 			if (cmd->arg1 == IP_FW_TABLEARG)
575 				cmd->arg1 = IP_FW_TARG;
576 			else
577 				cmd->arg1 |= 0x8000;
578 			break;
579 		case O_LIMIT:
580 			lcmd = (ipfw_insn_limit *)cmd;
581 			if (lcmd->conn_limit == IP_FW_TABLEARG)
582 				lcmd->conn_limit = IP_FW_TARG;
583 			break;
584 		/* Interface tables */
585 		case O_XMIT:
586 		case O_RECV:
587 		case O_VIA:
588 			/* Interface table, possibly */
589 			cmdif = (ipfw_insn_if *)cmd;
590 			if (cmdif->name[0] != '\1')
591 				break;
592 
593 			cmdif->p.kidx = (uint16_t)cmdif->p.glob;
594 			break;
595 		}
596 	}
597 }
598 
599 /*
600  * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
601  */
602 static void
603 export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
604 {
605 	int cmdlen, l;
606 	ipfw_insn *cmd;
607 	ipfw_insn_limit *lcmd;
608 	ipfw_insn_if *cmdif;
609 
610 	/* copy header */
611 	memset(urule, 0, len);
612 	urule->act_ofs = krule->act_ofs;
613 	urule->cmd_len = krule->cmd_len;
614 	urule->rulenum = krule->rulenum;
615 	urule->set = krule->set;
616 	if ((krule->flags & IPFW_RULE_NOOPT) != 0)
617 		urule->_pad |= 1;
618 
619 	/* Copy opcodes */
620 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
621 
622 	/* Export counters */
623 	export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
624 
625 	/*
626 	 * Alter opcodes:
627 	 * 1) convert tablearg value from 0 to 65535
628 	 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
629 	 * 3) convert table number in iface opcodes to int
630 	 */
631 	l = urule->cmd_len;
632 	cmd = urule->cmd;
633 	cmdlen = 0;
634 
635 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
636 		cmdlen = F_LEN(cmd);
637 
638 		switch (cmd->opcode) {
639 		/* Opcodes supporting tablearg */
640 		case O_TAG:
641 		case O_TAGGED:
642 		case O_PIPE:
643 		case O_QUEUE:
644 		case O_DIVERT:
645 		case O_TEE:
646 		case O_SKIPTO:
647 		case O_CALLRETURN:
648 		case O_NETGRAPH:
649 		case O_NGTEE:
650 		case O_NAT:
651 			if (cmd->arg1 == IP_FW_TARG)
652 				cmd->arg1 = IP_FW_TABLEARG;
653 			else if (cmd->arg1 == IP_FW_NAT44_GLOBAL)
654 				cmd->arg1 = 0;
655 			break;
656 		case O_SETFIB:
657 		case O_SETDSCP:
658 			if (cmd->arg1 == IP_FW_TARG)
659 				cmd->arg1 = IP_FW_TABLEARG;
660 			else
661 				cmd->arg1 &= ~0x8000;
662 			break;
663 		case O_LIMIT:
664 			lcmd = (ipfw_insn_limit *)cmd;
665 			if (lcmd->conn_limit == IP_FW_TARG)
666 				lcmd->conn_limit = IP_FW_TABLEARG;
667 			break;
668 		/* Interface tables */
669 		case O_XMIT:
670 		case O_RECV:
671 		case O_VIA:
672 			/* Interface table, possibly */
673 			cmdif = (ipfw_insn_if *)cmd;
674 			if (cmdif->name[0] != '\1')
675 				break;
676 
677 			cmdif->p.glob = cmdif->p.kidx;
678 			break;
679 		}
680 	}
681 }
682 
683 /*
684  * Add new rule(s) to the list, possibly creating a rule number for each.
685  * Update the rule number in the input struct so the caller knows it as well.
686  * Must be called without the IPFW_UH lock held.
687  */
688 static int
689 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
690 {
691 	int error, i, insert_before, tcount;
692 	uint16_t rulenum, *pnum;
693 	struct rule_check_info *ci;
694 	struct ip_fw *krule;
695 	struct ip_fw **map;	/* the new array of pointers */
696 
697 	/* Check if we need to do table/obj index remap */
698 	tcount = 0;
699 	for (ci = rci, i = 0; i < count; ci++, i++) {
700 		if (ci->object_opcodes == 0)
701 			continue;
702 
703 		/*
704 		 * Rule has some object opcodes.
705 		 * We need to find (and create non-existing)
706 		 * kernel objects, and reference existing ones.
707 		 */
708 		error = rewrite_rule_uidx(chain, ci);
709 		if (error != 0) {
710 
711 			/*
712 			 * rewrite failed, state for current rule
713 			 * has been reverted. Check if we need to
714 			 * revert more.
715 			 */
716 			if (tcount > 0) {
717 
718 				/*
719 				 * We have some more table rules
720 				 * we need to rollback.
721 				 */
722 
723 				IPFW_UH_WLOCK(chain);
724 				while (ci != rci) {
725 					ci--;
726 					if (ci->object_opcodes == 0)
727 						continue;
728 					unref_rule_objects(chain, ci->krule);
729 
730 				}
731 				IPFW_UH_WUNLOCK(chain);
732 
733 			}
734 
735 			return (error);
736 		}
737 
738 		tcount++;
739 	}
740 
741 	/* get_map returns with IPFW_UH_WLOCK if successful */
742 	map = get_map(chain, count, 0 /* not locked */);
743 	if (map == NULL) {
744 		if (tcount > 0) {
745 			/* Unbind tables */
746 			IPFW_UH_WLOCK(chain);
747 			for (ci = rci, i = 0; i < count; ci++, i++) {
748 				if (ci->object_opcodes == 0)
749 					continue;
750 
751 				unref_rule_objects(chain, ci->krule);
752 			}
753 			IPFW_UH_WUNLOCK(chain);
754 		}
755 
756 		return (ENOSPC);
757 	}
758 
759 	if (V_autoinc_step < 1)
760 		V_autoinc_step = 1;
761 	else if (V_autoinc_step > 1000)
762 		V_autoinc_step = 1000;
763 
764 	/* FIXME: Handle count > 1 */
765 	ci = rci;
766 	krule = ci->krule;
767 	rulenum = krule->rulenum;
768 
769 	/* find the insertion point, we will insert before */
770 	insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
771 	i = ipfw_find_rule(chain, insert_before, 0);
772 	/* duplicate first part */
773 	if (i > 0)
774 		bcopy(chain->map, map, i * sizeof(struct ip_fw *));
775 	map[i] = krule;
776 	/* duplicate remaining part, we always have the default rule */
777 	bcopy(chain->map + i, map + i + 1,
778 		sizeof(struct ip_fw *) *(chain->n_rules - i));
779 	if (rulenum == 0) {
780 		/* Compute rule number and write it back */
781 		rulenum = i > 0 ? map[i-1]->rulenum : 0;
782 		if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
783 			rulenum += V_autoinc_step;
784 		krule->rulenum = rulenum;
785 		/* Save number to userland rule */
786 		pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
787 		*pnum = rulenum;
788 	}
789 
790 	krule->id = chain->id + 1;
791 	update_skipto_cache(chain, map);
792 	map = swap_map(chain, map, chain->n_rules + 1);
793 	chain->static_len += RULEUSIZE0(krule);
794 	IPFW_UH_WUNLOCK(chain);
795 	if (map)
796 		free(map, M_IPFW);
797 	return (0);
798 }
799 
800 int
801 ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule,
802     int locked)
803 {
804 	struct ip_fw **map;
805 
806 	map = get_map(chain, 1, locked);
807 	if (map == NULL)
808 		return (ENOMEM);
809 	if (chain->n_rules > 0)
810 		bcopy(chain->map, map,
811 		    chain->n_rules * sizeof(struct ip_fw *));
812 	map[chain->n_rules] = rule;
813 	rule->rulenum = IPFW_DEFAULT_RULE;
814 	rule->set = RESVD_SET;
815 	rule->id = chain->id + 1;
816 	/* We add the rule at the end of the chain; no skipto cache update needed */
817 	map = swap_map(chain, map, chain->n_rules + 1);
818 	chain->static_len += RULEUSIZE0(rule);
819 	IPFW_UH_WUNLOCK(chain);
820 	free(map, M_IPFW);
821 	return (0);
822 }
823 
824 /*
825  * Adds @rule to the list of rules to reap
826  */
827 void
828 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
829     struct ip_fw *rule)
830 {
831 
832 	IPFW_UH_WLOCK_ASSERT(chain);
833 
834 	/* Unlink rule from everywhere */
835 	unref_rule_objects(chain, rule);
836 
837 	rule->next = *head;
838 	*head = rule;
839 }
840 
841 /*
842  * Reclaim storage associated with a list of rules.  This is
843  * typically the list created using remove_rule.
844  * A NULL pointer on input is handled correctly.
845  */
846 void
847 ipfw_reap_rules(struct ip_fw *head)
848 {
849 	struct ip_fw *rule;
850 
851 	while ((rule = head) != NULL) {
852 		head = head->next;
853 		ipfw_free_rule(rule);
854 	}
855 }
856 
857 /*
858  * Rules to keep are
859  *	(default || reserved || !match_set || !match_number)
860  * where
861  *   default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
862  *	// the default rule is always protected
863  *
864  *   reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
865  *	// RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
866  *
867  *   match_set ::= (cmd == 0 || rule->set == set)
868  *	// set number is ignored for cmd == 0
869  *
870  *   match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
871  *	// number is ignored for cmd == 1 or n == 0
872  *
873  */
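/*
 * With the current ipfw_range_tlv interface the same conditions are
 * expressed via rt->flags.  For example, a legacy delete request for rule
 * 100 is converted by del_entry() below into a range with
 * IPFW_RCFLAG_RANGE and start_rule == end_rule == 100, so only rules
 * numbered 100 match; the default rule never matches unless
 * IPFW_RCFLAG_DEFAULT is set.
 */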
874 int
875 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
876 {
877 
878 	/* Don't match default rule for modification queries */
879 	if (rule->rulenum == IPFW_DEFAULT_RULE &&
880 	    (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
881 		return (0);
882 
883 	/* Don't match rules in reserved set for flush requests */
884 	if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
885 		return (0);
886 
887 	/* If we're filtering by set, don't match other sets */
888 	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
889 		return (0);
890 
891 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
892 	    (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
893 		return (0);
894 
895 	return (1);
896 }
897 
898 struct manage_sets_args {
899 	uint16_t	set;
900 	uint8_t		new_set;
901 };
902 
903 static int
904 swap_sets_cb(struct namedobj_instance *ni, struct named_object *no,
905     void *arg)
906 {
907 	struct manage_sets_args *args;
908 
909 	args = (struct manage_sets_args *)arg;
910 	if (no->set == (uint8_t)args->set)
911 		no->set = args->new_set;
912 	else if (no->set == args->new_set)
913 		no->set = (uint8_t)args->set;
914 	return (0);
915 }
916 
917 static int
918 move_sets_cb(struct namedobj_instance *ni, struct named_object *no,
919     void *arg)
920 {
921 	struct manage_sets_args *args;
922 
923 	args = (struct manage_sets_args *)arg;
924 	if (no->set == (uint8_t)args->set)
925 		no->set = args->new_set;
926 	return (0);
927 }
928 
929 static int
930 test_sets_cb(struct namedobj_instance *ni, struct named_object *no,
931     void *arg)
932 {
933 	struct manage_sets_args *args;
934 
935 	args = (struct manage_sets_args *)arg;
936 	if (no->set != (uint8_t)args->set)
937 		return (0);
938 	if (ipfw_objhash_lookup_name_type(ni, args->new_set,
939 	    no->etlv, no->name) != NULL)
940 		return (EEXIST);
941 	return (0);
942 }
943 
944 /*
945  * Generic function to handle moving and swapping sets.
946  */
947 int
948 ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type,
949     uint16_t set, uint8_t new_set, enum ipfw_sets_cmd cmd)
950 {
951 	struct manage_sets_args args;
952 	struct named_object *no;
953 
954 	args.set = set;
955 	args.new_set = new_set;
956 	switch (cmd) {
957 	case SWAP_ALL:
958 		return (ipfw_objhash_foreach_type(ni, swap_sets_cb,
959 		    &args, type));
960 	case TEST_ALL:
961 		return (ipfw_objhash_foreach_type(ni, test_sets_cb,
962 		    &args, type));
963 	case MOVE_ALL:
964 		return (ipfw_objhash_foreach_type(ni, move_sets_cb,
965 		    &args, type));
966 	case COUNT_ONE:
967 		/*
968 		 * @set used to pass kidx.
969 		 * When @new_set is zero - reset object counter,
970 		 * otherwise increment it.
971 		 */
972 		no = ipfw_objhash_lookup_kidx(ni, set);
973 		if (new_set != 0)
974 			no->ocnt++;
975 		else
976 			no->ocnt = 0;
977 		return (0);
978 	case TEST_ONE:
979 		/* @set used to pass kidx */
980 		no = ipfw_objhash_lookup_kidx(ni, set);
981 		/*
982 		 * First check number of references:
983 		 * when it differs, this means other rules are holding
984 		 * a reference to the given object, so it is not possible to
985 		 * change its set. Note that refcnt may account for references
986 		 * from rules that are about to be added. Since we don't know
987 		 * their numbers (or even whether they will be added), it is
988 		 * perfectly OK to return an error here.
989 		 */
990 		if (no->ocnt != no->refcnt)
991 			return (EBUSY);
992 		if (ipfw_objhash_lookup_name_type(ni, new_set, type,
993 		    no->name) != NULL)
994 			return (EEXIST);
995 		return (0);
996 	case MOVE_ONE:
997 		/* @set used to pass kidx */
998 		no = ipfw_objhash_lookup_kidx(ni, set);
999 		no->set = new_set;
1000 		return (0);
1001 	}
1002 	return (EINVAL);
1003 }
1004 
1005 /*
1006  * Delete rules matching range @rt.
1007  * Saves number of deleted rules in @ndel.
1008  *
1009  * Returns 0 on success.
1010  */
1011 static int
1012 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
1013 {
1014 	struct ip_fw *reap, *rule, **map;
1015 	int end, start;
1016 	int i, n, ndyn, ofs;
1017 
1018 	reap = NULL;
1019 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
1020 
1021 	/*
1022 	 * Stage 1: Determine range to inspect.
1023 	 * The range is half-open, i.e. [start, end).
1024 	 */
1025 	start = 0;
1026 	end = chain->n_rules - 1;
1027 
1028 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
1029 		start = ipfw_find_rule(chain, rt->start_rule, 0);
1030 
1031 		if (rt->end_rule >= IPFW_DEFAULT_RULE)
1032 			rt->end_rule = IPFW_DEFAULT_RULE - 1;
1033 		end = ipfw_find_rule(chain, rt->end_rule, UINT32_MAX);
1034 	}
1035 
1036 	if (rt->flags & IPFW_RCFLAG_DYNAMIC) {
1037 		/*
1038 		 * Requested deleting only for dynamic states.
1039 		 */
1040 		*ndel = 0;
1041 		ipfw_expire_dyn_states(chain, rt);
1042 		IPFW_UH_WUNLOCK(chain);
1043 		return (0);
1044 	}
1045 
1046 	/* Allocate new map of the same size */
1047 	map = get_map(chain, 0, 1 /* locked */);
1048 	if (map == NULL) {
1049 		IPFW_UH_WUNLOCK(chain);
1050 		return (ENOMEM);
1051 	}
1052 
1053 	n = 0;
1054 	ndyn = 0;
1055 	ofs = start;
1056 	/* 1. bcopy the initial part of the map */
1057 	if (start > 0)
1058 		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
1059 	/* 2. copy active rules between start and end */
1060 	for (i = start; i < end; i++) {
1061 		rule = chain->map[i];
1062 		if (ipfw_match_range(rule, rt) == 0) {
1063 			map[ofs++] = rule;
1064 			continue;
1065 		}
1066 
1067 		n++;
1068 		if (ipfw_is_dyn_rule(rule) != 0)
1069 			ndyn++;
1070 	}
1071 	/* 3. copy the final part of the map */
1072 	bcopy(chain->map + end, map + ofs,
1073 		(chain->n_rules - end) * sizeof(struct ip_fw *));
1074 	/* 4. recalculate skipto cache */
1075 	update_skipto_cache(chain, map);
1076 	/* 5. swap the maps (under UH_WLOCK + WHLOCK) */
1077 	map = swap_map(chain, map, chain->n_rules - n);
1078 	/* 6. Remove all dynamic states originated by deleted rules */
1079 	if (ndyn > 0)
1080 		ipfw_expire_dyn_states(chain, rt);
1081 	/* 7. now remove the rules deleted from the old map */
1082 	for (i = start; i < end; i++) {
1083 		rule = map[i];
1084 		if (ipfw_match_range(rule, rt) == 0)
1085 			continue;
1086 		chain->static_len -= RULEUSIZE0(rule);
1087 		ipfw_reap_add(chain, &reap, rule);
1088 	}
1089 	IPFW_UH_WUNLOCK(chain);
1090 
1091 	ipfw_reap_rules(reap);
1092 	if (map != NULL)
1093 		free(map, M_IPFW);
1094 	*ndel = n;
1095 	return (0);
1096 }
1097 
1098 static int
1099 move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
1100 {
1101 	struct opcode_obj_rewrite *rw;
1102 	struct ip_fw *rule;
1103 	ipfw_insn *cmd;
1104 	int cmdlen, i, l, c;
1105 	uint16_t kidx;
1106 
1107 	IPFW_UH_WLOCK_ASSERT(ch);
1108 
1109 	/* Stage 1: count number of references by given rules */
1110 	for (c = 0, i = 0; i < ch->n_rules - 1; i++) {
1111 		rule = ch->map[i];
1112 		if (ipfw_match_range(rule, rt) == 0)
1113 			continue;
1114 		if (rule->set == rt->new_set) /* nothing to do */
1115 			continue;
1116 		/* Search opcodes with named objects */
1117 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1118 		    l > 0; l -= cmdlen, cmd += cmdlen) {
1119 			cmdlen = F_LEN(cmd);
1120 			rw = find_op_rw(cmd, &kidx, NULL);
1121 			if (rw == NULL || rw->manage_sets == NULL)
1122 				continue;
1123 			/*
1124 			 * When manage_sets() returns a non-zero value for the
1125 			 * COUNT_ONE command, the object does not support sets
1126 			 * (e.g. it is disabled with a sysctl), so skip the
1127 			 * checks for this object.
1128 			 */
1129 			if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
1130 				continue;
1131 			c++;
1132 		}
1133 	}
1134 	if (c == 0) /* No objects found */
1135 		return (0);
1136 	/* Stage 2: verify "ownership" */
1137 	for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
1138 		rule = ch->map[i];
1139 		if (ipfw_match_range(rule, rt) == 0)
1140 			continue;
1141 		if (rule->set == rt->new_set) /* nothing to do */
1142 			continue;
1143 		/* Search opcodes with named objects */
1144 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1145 		    l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
1146 			cmdlen = F_LEN(cmd);
1147 			rw = find_op_rw(cmd, &kidx, NULL);
1148 			if (rw == NULL || rw->manage_sets == NULL)
1149 				continue;
1150 			/* Test for ownership and conflicting names */
1151 			c = rw->manage_sets(ch, kidx,
1152 			    (uint8_t)rt->new_set, TEST_ONE);
1153 		}
1154 	}
1155 	/* Stage 3: change set and cleanup */
1156 	for (i = 0; i < ch->n_rules - 1; i++) {
1157 		rule = ch->map[i];
1158 		if (ipfw_match_range(rule, rt) == 0)
1159 			continue;
1160 		if (rule->set == rt->new_set) /* nothing to do */
1161 			continue;
1162 		/* Search opcodes with named objects */
1163 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1164 		    l > 0; l -= cmdlen, cmd += cmdlen) {
1165 			cmdlen = F_LEN(cmd);
1166 			rw = find_op_rw(cmd, &kidx, NULL);
1167 			if (rw == NULL || rw->manage_sets == NULL)
1168 				continue;
1169 			/* cleanup object counter */
1170 			rw->manage_sets(ch, kidx,
1171 			    0 /* reset counter */, COUNT_ONE);
1172 			if (c != 0)
1173 				continue;
1174 			/* change set */
1175 			rw->manage_sets(ch, kidx,
1176 			    (uint8_t)rt->new_set, MOVE_ONE);
1177 		}
1178 	}
1179 	return (c);
1180 }
1181 
1182 /*
1183  * Moves rules matching range @rt (and the named objects they
1184  * reference) to set @rt->new_set.  Returns 0 on success.
1185  */
1186 static int
1187 move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1188 {
1189 	struct ip_fw *rule;
1190 	int i;
1191 
1192 	IPFW_UH_WLOCK(chain);
1193 
1194 	/*
1195 	 * Move rules with matching parameters to a new set.
1196 	 * This one is much more complex. We have to ensure
1197 	 * that all referenced tables (if any) are referenced
1198 	 * by given rule subset only. Otherwise, we can't move
1199 	 * them to new set and have to return error.
1200 	 */
1201 	if ((i = move_objects(chain, rt)) != 0) {
1202 		IPFW_UH_WUNLOCK(chain);
1203 		return (i);
1204 	}
1205 
1206 	/* XXX: We have to do swap holding WLOCK */
1207 	for (i = 0; i < chain->n_rules; i++) {
1208 		rule = chain->map[i];
1209 		if (ipfw_match_range(rule, rt) == 0)
1210 			continue;
1211 		rule->set = rt->new_set;
1212 	}
1213 
1214 	IPFW_UH_WUNLOCK(chain);
1215 
1216 	return (0);
1217 }
1218 
1219 /*
1220  * Clear counters for a specific rule.
1221  * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
1222  * so we only care that rules do not disappear.
1223  */
1224 static void
1225 clear_counters(struct ip_fw *rule, int log_only)
1226 {
1227 	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
1228 
1229 	if (log_only == 0)
1230 		IPFW_ZERO_RULE_COUNTER(rule);
1231 	if (l->o.opcode == O_LOG)
1232 		l->log_left = l->max_log;
1233 }
1234 
1235 /*
1236  * Flushes rules counters and/or log values on matching range.
1237  *
1238  * Returns number of items cleared.
1239  */
1240 static int
1241 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
1242 {
1243 	struct ip_fw *rule;
1244 	int num;
1245 	int i;
1246 
1247 	num = 0;
1248 	rt->flags |= IPFW_RCFLAG_DEFAULT;
1249 
1250 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
1251 	for (i = 0; i < chain->n_rules; i++) {
1252 		rule = chain->map[i];
1253 		if (ipfw_match_range(rule, rt) == 0)
1254 			continue;
1255 		clear_counters(rule, log_only);
1256 		num++;
1257 	}
1258 	IPFW_UH_WUNLOCK(chain);
1259 
1260 	return (num);
1261 }
1262 
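/*
 * Sanity-checks an ipfw_range_tlv received from userland: correct TLV
 * length, start_rule <= end_rule, set numbers below IPFW_MAX_SETS and
 * no flags outside the user-settable IPFW_RCFLAG_USER mask.
 */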
1263 static int
1264 check_range_tlv(ipfw_range_tlv *rt)
1265 {
1266 
1267 	if (rt->head.length != sizeof(*rt))
1268 		return (1);
1269 	if (rt->start_rule > rt->end_rule)
1270 		return (1);
1271 	if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1272 		return (1);
1273 
1274 	if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1275 		return (1);
1276 
1277 	return (0);
1278 }
1279 
1280 /*
1281  * Delete rules matching specified parameters
1282  * Data layout (v0)(current):
1283  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1284  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1285  *
1286  * Saves number of deleted rules in ipfw_range_tlv->new_set.
1287  *
1288  * Returns 0 on success.
1289  */
1290 static int
1291 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1292     struct sockopt_data *sd)
1293 {
1294 	ipfw_range_header *rh;
1295 	int error, ndel;
1296 
1297 	if (sd->valsize != sizeof(*rh))
1298 		return (EINVAL);
1299 
1300 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1301 
1302 	if (check_range_tlv(&rh->range) != 0)
1303 		return (EINVAL);
1304 
1305 	ndel = 0;
1306 	if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1307 		return (error);
1308 
1309 	/* Save number of rules deleted */
1310 	rh->range.new_set = ndel;
1311 	return (0);
1312 }
1313 
1314 /*
1315  * Move rules/sets matching specified parameters
1316  * Data layout (v0)(current):
1317  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1318  *
1319  * Returns 0 on success.
1320  */
1321 static int
1322 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1323     struct sockopt_data *sd)
1324 {
1325 	ipfw_range_header *rh;
1326 
1327 	if (sd->valsize != sizeof(*rh))
1328 		return (EINVAL);
1329 
1330 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1331 
1332 	if (check_range_tlv(&rh->range) != 0)
1333 		return (EINVAL);
1334 
1335 	return (move_range(chain, &rh->range));
1336 }
1337 
1338 /*
1339  * Clear rule accounting data matching specified parameters
1340  * Data layout (v0)(current):
1341  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1342  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1343  *
1344  * Saves number of cleared rules in ipfw_range_tlv->new_set.
1345  *
1346  * Returns 0 on success.
1347  */
1348 static int
1349 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1350     struct sockopt_data *sd)
1351 {
1352 	ipfw_range_header *rh;
1353 	int log_only, num;
1354 	char *msg;
1355 
1356 	if (sd->valsize != sizeof(*rh))
1357 		return (EINVAL);
1358 
1359 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1360 
1361 	if (check_range_tlv(&rh->range) != 0)
1362 		return (EINVAL);
1363 
1364 	log_only = (op3->opcode == IP_FW_XRESETLOG);
1365 
1366 	num = clear_range(chain, &rh->range, log_only);
1367 
1368 	if (rh->range.flags & IPFW_RCFLAG_ALL)
1369 		msg = log_only ? "All logging counts reset" :
1370 		    "Accounting cleared";
1371 	else
1372 		msg = log_only ? "logging count reset" : "cleared";
1373 
1374 	if (V_fw_verbose) {
1375 		int lev = LOG_SECURITY | LOG_NOTICE;
1376 		log(lev, "ipfw: %s.\n", msg);
1377 	}
1378 
1379 	/* Save number of rules cleared */
1380 	rh->range.new_set = num;
1381 	return (0);
1382 }
1383 
1384 static void
1385 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1386 {
1387 	uint32_t v_set;
1388 
1389 	IPFW_UH_WLOCK_ASSERT(chain);
1390 
1391 	/* Change enabled/disabled sets mask */
1392 	v_set = (V_set_disable | rt->set) & ~rt->new_set;
1393 	v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1394 	IPFW_WLOCK(chain);
1395 	V_set_disable = v_set;
1396 	IPFW_WUNLOCK(chain);
1397 }
1398 
1399 static int
1400 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1401 {
1402 	struct opcode_obj_rewrite *rw;
1403 	struct ip_fw *rule;
1404 	int i;
1405 
1406 	IPFW_UH_WLOCK_ASSERT(chain);
1407 
1408 	if (rt->set == rt->new_set) /* nothing to do */
1409 		return (0);
1410 
1411 	if (mv != 0) {
1412 		/*
1413 		 * Before moving the rules we need to check that
1414 		 * there aren't any conflicting named objects.
1415 		 */
1416 		for (rw = ctl3_rewriters;
1417 		    rw < ctl3_rewriters + ctl3_rsize; rw++) {
1418 			if (rw->manage_sets == NULL)
1419 				continue;
1420 			i = rw->manage_sets(chain, (uint8_t)rt->set,
1421 			    (uint8_t)rt->new_set, TEST_ALL);
1422 			if (i != 0)
1423 				return (EEXIST);
1424 		}
1425 	}
1426 	/* Swap or move two sets */
1427 	for (i = 0; i < chain->n_rules - 1; i++) {
1428 		rule = chain->map[i];
1429 		if (rule->set == (uint8_t)rt->set)
1430 			rule->set = (uint8_t)rt->new_set;
1431 		else if (rule->set == (uint8_t)rt->new_set && mv == 0)
1432 			rule->set = (uint8_t)rt->set;
1433 	}
1434 	for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
1435 		if (rw->manage_sets == NULL)
1436 			continue;
1437 		rw->manage_sets(chain, (uint8_t)rt->set,
1438 		    (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL: SWAP_ALL);
1439 	}
1440 	return (0);
1441 }
1442 
1443 /*
1444  * Swaps or moves set
1445  * Data layout (v0)(current):
1446  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1447  *
1448  * Returns 0 on success.
1449  */
1450 static int
1451 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1452     struct sockopt_data *sd)
1453 {
1454 	ipfw_range_header *rh;
1455 	int ret;
1456 
1457 	if (sd->valsize != sizeof(*rh))
1458 		return (EINVAL);
1459 
1460 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1461 
1462 	if (rh->range.head.length != sizeof(ipfw_range_tlv))
1463 		return (EINVAL);
1464 	/* enable_sets() expects bitmasks. */
1465 	if (op3->opcode != IP_FW_SET_ENABLE &&
1466 	    (rh->range.set >= IPFW_MAX_SETS ||
1467 	    rh->range.new_set >= IPFW_MAX_SETS))
1468 		return (EINVAL);
1469 
1470 	ret = 0;
1471 	IPFW_UH_WLOCK(chain);
1472 	switch (op3->opcode) {
1473 	case IP_FW_SET_SWAP:
1474 	case IP_FW_SET_MOVE:
1475 		ret = swap_sets(chain, &rh->range,
1476 		    op3->opcode == IP_FW_SET_MOVE);
1477 		break;
1478 	case IP_FW_SET_ENABLE:
1479 		enable_sets(chain, &rh->range);
1480 		break;
1481 	}
1482 	IPFW_UH_WUNLOCK(chain);
1483 
1484 	return (ret);
1485 }
1486 
1487 /**
1488  * Remove all rules with given number, or do set manipulation.
1489  * Assumes chain != NULL && *chain != NULL.
1490  *
1491  * The argument is a uint32_t. The low 16 bits are the rule or set number;
1492  * the next 8 bits are the new set; the top 8 bits indicate the command:
1493  *
1494  *	0	delete rules numbered "rulenum"
1495  *	1	delete rules in set "rulenum"
1496  *	2	move rules "rulenum" to set "new_set"
1497  *	3	move rules from set "rulenum" to set "new_set"
1498  *	4	swap sets "rulenum" and "new_set"
1499  *	5	delete rules "rulenum" and set "new_set"
1500  */
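/*
 * For example (illustrative), a legacy "move rule 100 to set 5" request
 * is encoded as arg = (2 << 24) | (5 << 16) | 100.
 */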
1501 static int
1502 del_entry(struct ip_fw_chain *chain, uint32_t arg)
1503 {
1504 	uint32_t num;	/* rule number or old_set */
1505 	uint8_t cmd, new_set;
1506 	int do_del, ndel;
1507 	int error = 0;
1508 	ipfw_range_tlv rt;
1509 
1510 	num = arg & 0xffff;
1511 	cmd = (arg >> 24) & 0xff;
1512 	new_set = (arg >> 16) & 0xff;
1513 
1514 	if (cmd > 5 || new_set > RESVD_SET)
1515 		return EINVAL;
1516 	if (cmd == 0 || cmd == 2 || cmd == 5) {
1517 		if (num >= IPFW_DEFAULT_RULE)
1518 			return EINVAL;
1519 	} else {
1520 		if (num > RESVD_SET)	/* old_set */
1521 			return EINVAL;
1522 	}
1523 
1524 	/* Convert old requests into new representation */
1525 	memset(&rt, 0, sizeof(rt));
1526 	rt.start_rule = num;
1527 	rt.end_rule = num;
1528 	rt.set = num;
1529 	rt.new_set = new_set;
1530 	do_del = 0;
1531 
1532 	switch (cmd) {
1533 	case 0: /* delete rules numbered "rulenum" */
1534 		if (num == 0)
1535 			rt.flags |= IPFW_RCFLAG_ALL;
1536 		else
1537 			rt.flags |= IPFW_RCFLAG_RANGE;
1538 		do_del = 1;
1539 		break;
1540 	case 1: /* delete rules in set "rulenum" */
1541 		rt.flags |= IPFW_RCFLAG_SET;
1542 		do_del = 1;
1543 		break;
1544 	case 5: /* delete rules "rulenum" and set "new_set" */
1545 		rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1546 		rt.set = new_set;
1547 		rt.new_set = 0;
1548 		do_del = 1;
1549 		break;
1550 	case 2: /* move rules "rulenum" to set "new_set" */
1551 		rt.flags |= IPFW_RCFLAG_RANGE;
1552 		break;
1553 	case 3: /* move rules from set "rulenum" to set "new_set" */
1554 		IPFW_UH_WLOCK(chain);
1555 		error = swap_sets(chain, &rt, 1);
1556 		IPFW_UH_WUNLOCK(chain);
1557 		return (error);
1558 	case 4: /* swap sets "rulenum" and "new_set" */
1559 		IPFW_UH_WLOCK(chain);
1560 		error = swap_sets(chain, &rt, 0);
1561 		IPFW_UH_WUNLOCK(chain);
1562 		return (error);
1563 	default:
1564 		return (ENOTSUP);
1565 	}
1566 
1567 	if (do_del != 0) {
1568 		if ((error = delete_range(chain, &rt, &ndel)) != 0)
1569 			return (error);
1570 
1571 		if (ndel == 0 && (cmd != 1 && num != 0))
1572 			return (EINVAL);
1573 
1574 		return (0);
1575 	}
1576 
1577 	return (move_range(chain, &rt));
1578 }
1579 
1580 /**
1581  * Reset some or all counters on firewall rules.
1582  * The argument `arg' is a u_int32_t. The low 16 bits are the rule number,
1583  * the next 8 bits are the set number, the top 8 bits are the command:
1584  *	0	work with rules from all sets;
1585  *	1	work with rules only from the specified set.
1586  * Specified rule number is zero if we want to clear all entries.
1587  * log_only is 1 if we only want to reset logs, zero otherwise.
1588  */
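/*
 * Example (illustrative): clearing the counters of rule 200, but only in
 * set 3, corresponds to arg = (1 << 24) | (3 << 16) | 200 with log_only
 * set to zero.
 */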
1589 static int
1590 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1591 {
1592 	struct ip_fw *rule;
1593 	char *msg;
1594 	int i;
1595 
1596 	uint16_t rulenum = arg & 0xffff;
1597 	uint8_t set = (arg >> 16) & 0xff;
1598 	uint8_t cmd = (arg >> 24) & 0xff;
1599 
1600 	if (cmd > 1)
1601 		return (EINVAL);
1602 	if (cmd == 1 && set > RESVD_SET)
1603 		return (EINVAL);
1604 
1605 	IPFW_UH_RLOCK(chain);
1606 	if (rulenum == 0) {
1607 		V_norule_counter = 0;
1608 		for (i = 0; i < chain->n_rules; i++) {
1609 			rule = chain->map[i];
1610 			/* Skip rules not in our set. */
1611 			if (cmd == 1 && rule->set != set)
1612 				continue;
1613 			clear_counters(rule, log_only);
1614 		}
1615 		msg = log_only ? "All logging counts reset" :
1616 		    "Accounting cleared";
1617 	} else {
1618 		int cleared = 0;
1619 		for (i = 0; i < chain->n_rules; i++) {
1620 			rule = chain->map[i];
1621 			if (rule->rulenum == rulenum) {
1622 				if (cmd == 0 || rule->set == set)
1623 					clear_counters(rule, log_only);
1624 				cleared = 1;
1625 			}
1626 			if (rule->rulenum > rulenum)
1627 				break;
1628 		}
1629 		if (!cleared) {	/* we did not find any matching rules */
1630 			IPFW_UH_RUNLOCK(chain);
1631 			return (EINVAL);
1632 		}
1633 		msg = log_only ? "logging count reset" : "cleared";
1634 	}
1635 	IPFW_UH_RUNLOCK(chain);
1636 
1637 	if (V_fw_verbose) {
1638 		int lev = LOG_SECURITY | LOG_NOTICE;
1639 
1640 		if (rulenum)
1641 			log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1642 		else
1643 			log(lev, "ipfw: %s.\n", msg);
1644 	}
1645 	return (0);
1646 }
1647 
1648 
1649 /*
1650  * Check rule head in FreeBSD11 format
1651  *
1652  */
1653 static int
1654 check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1655     struct rule_check_info *ci)
1656 {
1657 	int l;
1658 
1659 	if (size < sizeof(*rule)) {
1660 		printf("ipfw: rule too short\n");
1661 		return (EINVAL);
1662 	}
1663 
1664 	/* Check for valid cmd_len */
1665 	l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1666 	if (l != size) {
1667 		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1668 		return (EINVAL);
1669 	}
1670 	if (rule->act_ofs >= rule->cmd_len) {
1671 		printf("ipfw: bogus action offset (%u > %u)\n",
1672 		    rule->act_ofs, rule->cmd_len - 1);
1673 		return (EINVAL);
1674 	}
1675 
1676 	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1677 		return (EINVAL);
1678 
1679 	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1680 }
1681 
1682 /*
1683  * Check rule head in FreeBSD8 format
1684  *
1685  */
1686 static int
1687 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1688     struct rule_check_info *ci)
1689 {
1690 	int l;
1691 
1692 	if (size < sizeof(*rule)) {
1693 		printf("ipfw: rule too short\n");
1694 		return (EINVAL);
1695 	}
1696 
1697 	/* Check for valid cmd_len */
1698 	l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1699 	if (l != size) {
1700 		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1701 		return (EINVAL);
1702 	}
1703 	if (rule->act_ofs >= rule->cmd_len) {
1704 		printf("ipfw: bogus action offset (%u > %u)\n",
1705 		    rule->act_ofs, rule->cmd_len - 1);
1706 		return (EINVAL);
1707 	}
1708 
1709 	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1710 		return (EINVAL);
1711 
1712 	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1713 }
1714 
1715 static int
1716 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1717 {
1718 	int cmdlen, l;
1719 	int have_action;
1720 
1721 	have_action = 0;
1722 
1723 	/*
1724 	 * Now go for the individual checks. Very simple ones, basically only
1725 	 * instruction sizes.
1726 	 */
1727 	for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1728 		cmdlen = F_LEN(cmd);
1729 		if (cmdlen > l) {
1730 			printf("ipfw: opcode %d size truncated\n",
1731 			    cmd->opcode);
1732 			return EINVAL;
1733 		}
1734 		switch (cmd->opcode) {
1735 		case O_PROBE_STATE:
1736 		case O_KEEP_STATE:
1737 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1738 				goto bad_size;
1739 			ci->object_opcodes++;
1740 			break;
1741 		case O_PROTO:
1742 		case O_IP_SRC_ME:
1743 		case O_IP_DST_ME:
1744 		case O_LAYER2:
1745 		case O_IN:
1746 		case O_FRAG:
1747 		case O_DIVERTED:
1748 		case O_IPOPT:
1749 		case O_IPTOS:
1750 		case O_IPPRECEDENCE:
1751 		case O_IPVER:
1752 		case O_SOCKARG:
1753 		case O_TCPFLAGS:
1754 		case O_TCPOPTS:
1755 		case O_ESTAB:
1756 		case O_VERREVPATH:
1757 		case O_VERSRCREACH:
1758 		case O_ANTISPOOF:
1759 		case O_IPSEC:
1760 #ifdef INET6
1761 		case O_IP6_SRC_ME:
1762 		case O_IP6_DST_ME:
1763 		case O_EXT_HDR:
1764 		case O_IP6:
1765 #endif
1766 		case O_IP4:
1767 		case O_TAG:
1768 		case O_SKIP_ACTION:
1769 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1770 				goto bad_size;
1771 			break;
1772 
1773 		case O_EXTERNAL_ACTION:
1774 			if (cmd->arg1 == 0 ||
1775 			    cmdlen != F_INSN_SIZE(ipfw_insn)) {
1776 				printf("ipfw: invalid external "
1777 				    "action opcode\n");
1778 				return (EINVAL);
1779 			}
1780 			ci->object_opcodes++;
1781 			/*
1782 			 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA
1783 			 * opcode?
1784 			 */
1785 			if (l != cmdlen) {
1786 				l -= cmdlen;
1787 				cmd += cmdlen;
1788 				cmdlen = F_LEN(cmd);
1789 				if (cmd->opcode == O_EXTERNAL_DATA)
1790 					goto check_action;
1791 				if (cmd->opcode != O_EXTERNAL_INSTANCE) {
1792 					printf("ipfw: invalid opcode "
1793 					    "next to external action %u\n",
1794 					    cmd->opcode);
1795 					return (EINVAL);
1796 				}
1797 				if (cmd->arg1 == 0 ||
1798 				    cmdlen != F_INSN_SIZE(ipfw_insn)) {
1799 					printf("ipfw: invalid external "
1800 					    "action instance opcode\n");
1801 					return (EINVAL);
1802 				}
1803 				ci->object_opcodes++;
1804 			}
1805 			goto check_action;
1806 
1807 		case O_FIB:
1808 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1809 				goto bad_size;
1810 			if (cmd->arg1 >= rt_numfibs) {
1811 				printf("ipfw: invalid fib number %d\n",
1812 					cmd->arg1);
1813 				return EINVAL;
1814 			}
1815 			break;
1816 
1817 		case O_SETFIB:
1818 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1819 				goto bad_size;
1820 			if ((cmd->arg1 != IP_FW_TARG) &&
1821 			    ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
1822 				printf("ipfw: invalid fib number %d\n",
1823 					cmd->arg1 & 0x7FFF);
1824 				return EINVAL;
1825 			}
1826 			goto check_action;
1827 
1828 		case O_UID:
1829 		case O_GID:
1830 		case O_JAIL:
1831 		case O_IP_SRC:
1832 		case O_IP_DST:
1833 		case O_TCPSEQ:
1834 		case O_TCPACK:
1835 		case O_PROB:
1836 		case O_ICMPTYPE:
1837 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1838 				goto bad_size;
1839 			break;
1840 
1841 		case O_LIMIT:
1842 			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1843 				goto bad_size;
1844 			ci->object_opcodes++;
1845 			break;
1846 
1847 		case O_LOG:
1848 			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
1849 				goto bad_size;
1850 
1851 			((ipfw_insn_log *)cmd)->log_left =
1852 			    ((ipfw_insn_log *)cmd)->max_log;
1853 
1854 			break;
1855 
1856 		case O_IP_SRC_MASK:
1857 		case O_IP_DST_MASK:
1858 			/* only odd command lengths */
1859 			if ((cmdlen & 1) == 0)
1860 				goto bad_size;
1861 			break;
1862 
1863 		case O_IP_SRC_SET:
1864 		case O_IP_DST_SET:
1865 			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1866 				printf("ipfw: invalid set size %d\n",
1867 					cmd->arg1);
1868 				return EINVAL;
1869 			}
1870 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1871 			    (cmd->arg1+31)/32 )
1872 				goto bad_size;
1873 			break;
1874 
1875 		case O_IP_SRC_LOOKUP:
1876 			if (cmdlen > F_INSN_SIZE(ipfw_insn_u32))
1877 				goto bad_size;
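			/* FALLTHROUGH */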
1878 		case O_IP_DST_LOOKUP:
1879 			if (cmd->arg1 >= V_fw_tables_max) {
1880 				printf("ipfw: invalid table number %d\n",
1881 				    cmd->arg1);
1882 				return (EINVAL);
1883 			}
1884 			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1885 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1886 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1887 				goto bad_size;
1888 			ci->object_opcodes++;
1889 			break;
1890 		case O_IP_FLOW_LOOKUP:
1891 			if (cmd->arg1 >= V_fw_tables_max) {
1892 				printf("ipfw: invalid table number %d\n",
1893 				    cmd->arg1);
1894 				return (EINVAL);
1895 			}
1896 			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1897 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1898 				goto bad_size;
1899 			ci->object_opcodes++;
1900 			break;
1901 		case O_MACADDR2:
1902 			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1903 				goto bad_size;
1904 			break;
1905 
1906 		case O_NOP:
1907 		case O_IPID:
1908 		case O_IPTTL:
1909 		case O_IPLEN:
1910 		case O_TCPDATALEN:
1911 		case O_TCPWIN:
1912 		case O_TAGGED:
1913 			if (cmdlen < 1 || cmdlen > 31)
1914 				goto bad_size;
1915 			break;
1916 
1917 		case O_DSCP:
1918 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1919 				goto bad_size;
1920 			break;
1921 
1922 		case O_MAC_TYPE:
1923 		case O_IP_SRCPORT:
1924 		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1925 			if (cmdlen < 2 || cmdlen > 31)
1926 				goto bad_size;
1927 			break;
1928 
1929 		case O_RECV:
1930 		case O_XMIT:
1931 		case O_VIA:
1932 			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1933 				goto bad_size;
1934 			ci->object_opcodes++;
1935 			break;
1936 
1937 		case O_ALTQ:
1938 			if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1939 				goto bad_size;
1940 			break;
1941 
1942 		case O_PIPE:
1943 		case O_QUEUE:
1944 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1945 				goto bad_size;
1946 			goto check_action;
1947 
1948 		case O_FORWARD_IP:
1949 			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1950 				goto bad_size;
1951 			goto check_action;
1952 #ifdef INET6
1953 		case O_FORWARD_IP6:
1954 			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
1955 				goto bad_size;
1956 			goto check_action;
1957 #endif /* INET6 */
1958 
1959 		case O_DIVERT:
1960 		case O_TEE:
1961 			if (ip_divert_ptr == NULL)
1962 				return EINVAL;
1963 			else
1964 				goto check_size;
1965 		case O_NETGRAPH:
1966 		case O_NGTEE:
1967 			if (ng_ipfw_input_p == NULL)
1968 				return EINVAL;
1969 			else
1970 				goto check_size;
1971 		case O_NAT:
1972 			if (!IPFW_NAT_LOADED)
1973 				return EINVAL;
1974 			if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
1975  				goto bad_size;
1976  			goto check_action;
1977 		case O_CHECK_STATE:
1978 			ci->object_opcodes++;
1979 			/* FALLTHROUGH */
1980 		case O_FORWARD_MAC: /* XXX not implemented yet */
1981 		case O_COUNT:
1982 		case O_ACCEPT:
1983 		case O_DENY:
1984 		case O_REJECT:
1985 		case O_SETDSCP:
1986 #ifdef INET6
1987 		case O_UNREACH6:
1988 #endif
1989 		case O_SKIPTO:
1990 		case O_REASS:
1991 		case O_CALLRETURN:
1992 check_size:
1993 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1994 				goto bad_size;
1995 check_action:
1996 			if (have_action) {
1997 				printf("ipfw: opcode %d, multiple actions"
1998 					" not allowed\n",
1999 					cmd->opcode);
2000 				return (EINVAL);
2001 			}
2002 			have_action = 1;
2003 			if (l != cmdlen) {
2004 				printf("ipfw: opcode %d, action must be"
2005 					" last opcode\n",
2006 					cmd->opcode);
2007 				return (EINVAL);
2008 			}
2009 			break;
2010 #ifdef INET6
2011 		case O_IP6_SRC:
2012 		case O_IP6_DST:
2013 			if (cmdlen != F_INSN_SIZE(struct in6_addr) +
2014 			    F_INSN_SIZE(ipfw_insn))
2015 				goto bad_size;
2016 			break;
2017 
2018 		case O_FLOW6ID:
2019 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
2020 			    ((ipfw_insn_u32 *)cmd)->o.arg1)
2021 				goto bad_size;
2022 			break;
2023 
2024 		case O_IP6_SRC_MASK:
2025 		case O_IP6_DST_MASK:
2026 			if (!(cmdlen & 1) || cmdlen > 127)
2027 				goto bad_size;
2028 			break;
2029 		case O_ICMP6TYPE:
2030 			if (cmdlen != F_INSN_SIZE(ipfw_insn_icmp6))
2031 				goto bad_size;
2032 			break;
2033 #endif
2034 
2035 		default:
2036 			switch (cmd->opcode) {
2037 #ifndef INET6
2038 			case O_IP6_SRC_ME:
2039 			case O_IP6_DST_ME:
2040 			case O_EXT_HDR:
2041 			case O_IP6:
2042 			case O_UNREACH6:
2043 			case O_IP6_SRC:
2044 			case O_IP6_DST:
2045 			case O_FLOW6ID:
2046 			case O_IP6_SRC_MASK:
2047 			case O_IP6_DST_MASK:
2048 			case O_ICMP6TYPE:
2049 				printf("ipfw: no IPv6 support in kernel\n");
2050 				return (EPROTONOSUPPORT);
2051 #endif
2052 			default:
2053 				printf("ipfw: opcode %d, unknown opcode\n",
2054 					cmd->opcode);
2055 				return (EINVAL);
2056 			}
2057 		}
2058 	}
2059 	if (have_action == 0) {
2060 		printf("ipfw: missing action\n");
2061 		return (EINVAL);
2062 	}
2063 	return 0;
2064 
2065 bad_size:
2066 	printf("ipfw: opcode %d size %d wrong\n",
2067 		cmd->opcode, cmdlen);
2068 	return (EINVAL);
2069 }
2070 
2071 
2072 /*
2073  * Translation of requests for compatibility with FreeBSD 7.2/8.
2074  * A static variable tells us if we have an old client from userland,
2075  * and if necessary we translate requests and responses between the
2076  * two formats.
2077  */
2078 static int is7 = 0;
2079 
2080 struct ip_fw7 {
2081 	struct ip_fw7	*next;		/* linked list of rules     */
2082 	struct ip_fw7	*next_rule;	/* ptr to next [skipto] rule    */
2083 	/* 'next_rule' is used to pass up 'set_disable' status      */
2084 
2085 	uint16_t	act_ofs;	/* offset of action in 32-bit units */
2086 	uint16_t	cmd_len;	/* # of 32-bit words in cmd */
2087 	uint16_t	rulenum;	/* rule number          */
2088 	uint8_t		set;		/* rule set (0..31)     */
2089 	// #define RESVD_SET   31  /* set for default and persistent rules */
2090 	uint8_t		_pad;		/* padding          */
2091 	// uint32_t        id;             /* rule id, only in v.8 */
2092 	/* These fields are present in all rules.           */
2093 	uint64_t	pcnt;		/* Packet counter       */
2094 	uint64_t	bcnt;		/* Byte counter         */
2095 	uint32_t	timestamp;	/* tv_sec of last match     */
2096 
2097 	ipfw_insn	cmd[1];		/* storage for commands     */
2098 };
2099 
2100 static int convert_rule_to_7(struct ip_fw_rule0 *rule);
2101 static int convert_rule_to_8(struct ip_fw_rule0 *rule);
2102 
2103 #ifndef RULESIZE7
2104 #define RULESIZE7(rule)  (sizeof(struct ip_fw7) + \
2105 	((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
2106 #endif
2107 
2108 
2109 /*
2110  * Copy the static and dynamic rules to the supplied buffer
2111  * and return the amount of space actually used.
2112  * Must be run under IPFW_UH_RLOCK
2113  */
2114 static size_t
2115 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
2116 {
2117 	char *bp = buf;
2118 	char *ep = bp + space;
2119 	struct ip_fw *rule;
2120 	struct ip_fw_rule0 *dst;
2121 	struct timeval boottime;
2122 	int error, i, l, warnflag;
2123 	time_t	boot_seconds;
2124 
2125 	warnflag = 0;
2126 
2127 	getboottime(&boottime);
2128 	boot_seconds = boottime.tv_sec;
2129 	for (i = 0; i < chain->n_rules; i++) {
2130 		rule = chain->map[i];
2131 
2132 		if (is7) {
2133 		    /* Convert rule to FreeBSD 7.2 format */
2134 		    l = RULESIZE7(rule);
2135 		    if (bp + l + sizeof(uint32_t) <= ep) {
2136 			bcopy(rule, bp, l + sizeof(uint32_t));
2137 			error = set_legacy_obj_kidx(chain,
2138 			    (struct ip_fw_rule0 *)bp);
2139 			if (error != 0)
2140 				return (0);
2141 			error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
2142 			if (error)
2143 				return 0; /*XXX correct? */
2144 			/*
2145 			 * XXX HACK. Store the disable mask in the "next"
2146 			 * pointer in a wild attempt to keep the ABI the same.
2147 			 * Why do we do this on EVERY rule?
2148 			 */
2149 			bcopy(&V_set_disable,
2150 				&(((struct ip_fw7 *)bp)->next_rule),
2151 				sizeof(V_set_disable));
2152 			if (((struct ip_fw7 *)bp)->timestamp)
2153 			    ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
2154 			bp += l;
2155 		    }
2156 		    continue; /* go to next rule */
2157 		}
2158 
2159 		l = RULEUSIZE0(rule);
2160 		if (bp + l > ep) { /* should not happen */
2161 			printf("overflow dumping static rules\n");
2162 			break;
2163 		}
2164 		dst = (struct ip_fw_rule0 *)bp;
2165 		export_rule0(rule, dst, l);
2166 		error = set_legacy_obj_kidx(chain, dst);
2167 
2168 		/*
2169 		 * XXX HACK. Store the disable mask in the "next"
2170 		 * pointer in a wild attempt to keep the ABI the same.
2171 		 * Why do we do this on EVERY rule?
2172 		 *
2173 		 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
2174 		 * so we need to fail _after_ saving at least one mask.
2175 		 */
2176 		bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
2177 		if (dst->timestamp)
2178 			dst->timestamp += boot_seconds;
2179 		bp += l;
2180 
2181 		if (error != 0) {
2182 			if (error == 2) {
2183 				/* Non-fatal table rewrite error. */
2184 				warnflag = 1;
2185 				continue;
2186 			}
2187 			printf("Stop on rule %d. Failed to convert table\n",
2188 			    rule->rulenum);
2189 			break;
2190 		}
2191 	}
2192 	if (warnflag != 0)
2193 		printf("ipfw: process %s is using legacy interfaces,"
2194 		    " consider rebuilding\n", "");
2195 	ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
2196 	return (bp - (char *)buf);
2197 }
2198 
2199 
2200 struct dump_args {
2201 	uint32_t	b;	/* start rule */
2202 	uint32_t	e;	/* end rule */
2203 	uint32_t	rcount;	/* number of rules */
2204 	uint32_t	rsize;	/* rules size */
2205 	uint32_t	tcount;	/* number of tables */
2206 	int		rcounters;	/* counters */
2207 	uint32_t	*bmask;	/* index bitmask of used named objects */
2208 };
2209 
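/*
 * Exports named object @no into user-visible ipfw_obj_ntlv:
 * fills in export TLV type, kernel index and object name.
 */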
2210 void
2211 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
2212 {
2213 
2214 	ntlv->head.type = no->etlv;
2215 	ntlv->head.length = sizeof(*ntlv);
2216 	ntlv->idx = no->kidx;
2217 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
2218 }
2219 
2220 /*
2221  * Export named object info in instance @ni, identified by @kidx
2222  * to ipfw_obj_ntlv. TLV is allocated from @sd space.
2223  *
2224  * Returns 0 on success.
2225  */
2226 static int
2227 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
2228     struct sockopt_data *sd)
2229 {
2230 	struct named_object *no;
2231 	ipfw_obj_ntlv *ntlv;
2232 
2233 	no = ipfw_objhash_lookup_kidx(ni, kidx);
2234 	KASSERT(no != NULL, ("invalid object kernel index passed"));
2235 
2236 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2237 	if (ntlv == NULL)
2238 		return (ENOMEM);
2239 
2240 	ipfw_export_obj_ntlv(no, ntlv);
2241 	return (0);
2242 }
2243 
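/*
 * Exports name TLVs for every object of instance @ni whose kernel index
 * is marked in @da->bmask, stopping once @da->tcount objects are written.
 *
 * Returns 0 on success.
 */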
2244 static int
2245 export_named_objects(struct namedobj_instance *ni, struct dump_args *da,
2246     struct sockopt_data *sd)
2247 {
2248 	int error, i;
2249 
2250 	for (i = 0; i < IPFW_TABLES_MAX && da->tcount > 0; i++) {
2251 		if ((da->bmask[i / 32] & (1 << (i % 32))) == 0)
2252 			continue;
2253 		if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
2254 			return (error);
2255 		da->tcount--;
2256 	}
2257 	return (0);
2258 }
2259 
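/*
 * Exports IPFW_TLV_TBLNAME_LIST header followed by name TLVs for all
 * tables and other named objects referenced by the dumped rules.
 *
 * Returns 0 on success.
 */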
2260 static int
2261 dump_named_objects(struct ip_fw_chain *ch, struct dump_args *da,
2262     struct sockopt_data *sd)
2263 {
2264 	ipfw_obj_ctlv *ctlv;
2265 	int error;
2266 
2267 	MPASS(da->tcount > 0);
2268 	/* Header first */
2269 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2270 	if (ctlv == NULL)
2271 		return (ENOMEM);
2272 	ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
2273 	ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
2274 	    sizeof(*ctlv);
2275 	ctlv->count = da->tcount;
2276 	ctlv->objsize = sizeof(ipfw_obj_ntlv);
2277 
2278 	/* Dump table names first (if any) */
2279 	error = export_named_objects(ipfw_get_table_objhash(ch), da, sd);
2280 	if (error != 0)
2281 		return (error);
2282 	/* Then dump other named objects */
2283 	da->bmask += IPFW_TABLES_MAX / 32;
2284 	return (export_named_objects(CHAIN_TO_SRV(ch), da, sd));
2285 }
2286 
2287 /*
2288  * Dumps static rules with table TLVs in buffer @sd.
2289  *
2290  * Returns 0 on success.
2291  */
2292 static int
2293 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
2294     struct sockopt_data *sd)
2295 {
2296 	ipfw_obj_ctlv *ctlv;
2297 	struct ip_fw *krule;
2298 	caddr_t dst;
2299 	int i, l;
2300 
2301 	/* Dump rules */
2302 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2303 	if (ctlv == NULL)
2304 		return (ENOMEM);
2305 	ctlv->head.type = IPFW_TLV_RULE_LIST;
2306 	ctlv->head.length = da->rsize + sizeof(*ctlv);
2307 	ctlv->count = da->rcount;
2308 
2309 	for (i = da->b; i < da->e; i++) {
2310 		krule = chain->map[i];
2311 
2312 		l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
2313 		if (da->rcounters != 0)
2314 			l += sizeof(struct ip_fw_bcounter);
2315 		dst = (caddr_t)ipfw_get_sopt_space(sd, l);
2316 		if (dst == NULL)
2317 			return (ENOMEM);
2318 
2319 		export_rule1(krule, dst, l, da->rcounters);
2320 	}
2321 
2322 	return (0);
2323 }
2324 
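/*
 * Marks object with kernel index @kidx of type @etlv in @bmask.
 *
 * Returns 1 if the object was not marked before, 0 otherwise.
 */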
2325 int
2326 ipfw_mark_object_kidx(uint32_t *bmask, uint16_t etlv, uint16_t kidx)
2327 {
2328 	uint32_t bidx;
2329 
2330 	/*
2331 	 * Maintain separate bitmasks for table and non-table objects.
2332 	 */
2333 	bidx = (etlv == IPFW_TLV_TBL_NAME) ? 0: IPFW_TABLES_MAX / 32;
2334 	bidx += kidx / 32;
2335 	if ((bmask[bidx] & (1 << (kidx % 32))) != 0)
2336 		return (0);
2337 
2338 	bmask[bidx] |= 1 << (kidx % 32);
2339 	return (1);
2340 }
2341 
2342 /*
2343  * Marks every object index used in @rule with bit in @bmask.
2344  * Used to generate bitmask of referenced tables/objects for given ruleset
2345  * or its part.
2346  */
2347 static void
2348 mark_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
2349     struct dump_args *da)
2350 {
2351 	struct opcode_obj_rewrite *rw;
2352 	ipfw_insn *cmd;
2353 	int cmdlen, l;
2354 	uint16_t kidx;
2355 	uint8_t subtype;
2356 
2357 	l = rule->cmd_len;
2358 	cmd = rule->cmd;
2359 	cmdlen = 0;
2360 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2361 		cmdlen = F_LEN(cmd);
2362 
2363 		rw = find_op_rw(cmd, &kidx, &subtype);
2364 		if (rw == NULL)
2365 			continue;
2366 
2367 		if (ipfw_mark_object_kidx(da->bmask, rw->etlv, kidx))
2368 			da->tcount++;
2369 	}
2370 }
2371 
2372 /*
2373  * Dumps requested objects data
2374  * Data layout (version 0)(current):
2375  * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
2376  *   size = ipfw_cfg_lheader.size
2377  * Reply: [ ipfw_cfg_lheader
2378  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2379  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
2380  *     ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
2381  *   ] (optional)
2382  *   [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
2383  * ]
2384  * * NOTE IPFW_TLV_STATE_LIST has a single valid field: objsize.
2385  * The rest (size, count) are set to zero and need to be ignored.
2386  *
2387  * Returns 0 on success.
2388  */
2389 static int
2390 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2391     struct sockopt_data *sd)
2392 {
2393 	struct dump_args da;
2394 	ipfw_cfg_lheader *hdr;
2395 	struct ip_fw *rule;
2396 	size_t sz, rnum;
2397 	uint32_t hdr_flags, *bmask;
2398 	int error, i;
2399 
2400 	hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2401 	if (hdr == NULL)
2402 		return (EINVAL);
2403 
2404 	error = 0;
2405 	bmask = NULL;
2406 	memset(&da, 0, sizeof(da));
2407 	/*
2408 	 * Allocate needed state.
2409 	 * Note we allocate a 2x-sized mask, for tables & srv objects
2410 	 */
2411 	if (hdr->flags & (IPFW_CFG_GET_STATIC | IPFW_CFG_GET_STATES))
2412 		da.bmask = bmask = malloc(
2413 		    sizeof(uint32_t) * IPFW_TABLES_MAX * 2 / 32, M_TEMP,
2414 		    M_WAITOK | M_ZERO);
2415 	IPFW_UH_RLOCK(chain);
2416 
2417 	/*
2418 	 * STAGE 1: Determine size/count for objects in range.
2419 	 * Prepare used tables bitmask.
2420 	 */
2421 	sz = sizeof(ipfw_cfg_lheader);
2422 	da.e = chain->n_rules;
2423 
2424 	if (hdr->end_rule != 0) {
2425 		/* Handle custom range */
2426 		if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
2427 			rnum = IPFW_DEFAULT_RULE;
2428 		da.b = ipfw_find_rule(chain, rnum, 0);
2429 		rnum = (hdr->end_rule < IPFW_DEFAULT_RULE) ?
2430 		    hdr->end_rule + 1: IPFW_DEFAULT_RULE;
2431 		da.e = ipfw_find_rule(chain, rnum, UINT32_MAX) + 1;
2432 	}
2433 
2434 	if (hdr->flags & IPFW_CFG_GET_STATIC) {
2435 		for (i = da.b; i < da.e; i++) {
2436 			rule = chain->map[i];
2437 			da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
2438 			da.rcount++;
2439 			/* Update bitmask of used objects for given range */
2440 			mark_rule_objects(chain, rule, &da);
2441 		}
2442 		/* Add counters if requested */
2443 		if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
2444 			da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
2445 			da.rcounters = 1;
2446 		}
2447 		sz += da.rsize + sizeof(ipfw_obj_ctlv);
2448 	}
2449 
2450 	if (hdr->flags & IPFW_CFG_GET_STATES) {
2451 		sz += sizeof(ipfw_obj_ctlv) +
2452 		    ipfw_dyn_get_count(bmask, &i) * sizeof(ipfw_obj_dyntlv);
2453 		da.tcount += i;
2454 	}
2455 
2456 	if (da.tcount > 0)
2457 		sz += da.tcount * sizeof(ipfw_obj_ntlv) +
2458 		    sizeof(ipfw_obj_ctlv);
2459 
2460 	/*
2461 	 * Fill header anyway.
2462 	 * Note we have to save header fields to stable storage since the
2463 	 * buffer inside @sd can be flushed after dumping rules.
2464 	 */
2465 	hdr->size = sz;
2466 	hdr->set_mask = ~V_set_disable;
2467 	hdr_flags = hdr->flags;
2468 	hdr = NULL;
2469 
2470 	if (sd->valsize < sz) {
2471 		error = ENOMEM;
2472 		goto cleanup;
2473 	}
2474 
2475 	/* STAGE2: Store actual data */
2476 	if (da.tcount > 0) {
2477 		error = dump_named_objects(chain, &da, sd);
2478 		if (error != 0)
2479 			goto cleanup;
2480 	}
2481 
2482 	if (hdr_flags & IPFW_CFG_GET_STATIC) {
2483 		error = dump_static_rules(chain, &da, sd);
2484 		if (error != 0)
2485 			goto cleanup;
2486 	}
2487 
2488 	if (hdr_flags & IPFW_CFG_GET_STATES)
2489 		error = ipfw_dump_states(chain, sd);
2490 
2491 cleanup:
2492 	IPFW_UH_RUNLOCK(chain);
2493 
2494 	if (bmask != NULL)
2495 		free(bmask, M_TEMP);
2496 
2497 	return (error);
2498 }
2499 
2500 int
2501 ipfw_check_object_name_generic(const char *name)
2502 {
2503 	int nsize;
2504 
2505 	nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
2506 	if (strnlen(name, nsize) == nsize)
2507 		return (EINVAL);
2508 	if (name[0] == '\0')
2509 		return (EINVAL);
2510 	return (0);
2511 }
2512 
2513 /*
2514  * Creates non-existent objects referenced by rule.
2515  *
2516  * Return 0 on success.
2517  */
2518 int
2519 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
2520     struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
2521 {
2522 	struct opcode_obj_rewrite *rw;
2523 	struct obj_idx *p;
2524 	uint16_t kidx;
2525 	int error;
2526 
2527 	/*
2528 	 * Compatibility stuff: do actual creation for non-existing,
2529 	 * but referenced objects.
2530 	 */
2531 	for (p = oib; p < pidx; p++) {
2532 		if (p->kidx != 0)
2533 			continue;
2534 
2535 		ti->uidx = p->uidx;
2536 		ti->type = p->type;
2537 		ti->atype = 0;
2538 
2539 		rw = find_op_rw(cmd + p->off, NULL, NULL);
2540 		KASSERT(rw != NULL, ("Unable to find handler for op %d",
2541 		    (cmd + p->off)->opcode));
2542 
2543 		if (rw->create_object == NULL)
2544 			error = EOPNOTSUPP;
2545 		else
2546 			error = rw->create_object(ch, ti, &kidx);
2547 		if (error == 0) {
2548 			p->kidx = kidx;
2549 			continue;
2550 		}
2551 
2552 		/*
2553 		 * Error happened. We have to rollback everything.
2554 		 * Drop all already acquired references.
2555 		 */
2556 		IPFW_UH_WLOCK(ch);
2557 		unref_oib_objects(ch, cmd, oib, pidx);
2558 		IPFW_UH_WUNLOCK(ch);
2559 
2560 		return (error);
2561 	}
2562 
2563 	return (0);
2564 }
2565 
2566 /*
2567  * Compatibility function for old ipfw(8) binaries.
2568  * Rewrites table/nat kernel indices with userland ones.
2569  * Convert tables matching '/^\d+$/' to their atoi() value.
2570  * Use number 65535 for other tables.
2571  *
2572  * Returns 0 on success.
2573  */
2574 static int
2575 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule)
2576 {
2577 	struct opcode_obj_rewrite *rw;
2578 	struct named_object *no;
2579 	ipfw_insn *cmd;
2580 	char *end;
2581 	long val;
2582 	int cmdlen, error, l;
2583 	uint16_t kidx, uidx;
2584 	uint8_t subtype;
2585 
2586 	error = 0;
2587 
2588 	l = rule->cmd_len;
2589 	cmd = rule->cmd;
2590 	cmdlen = 0;
2591 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2592 		cmdlen = F_LEN(cmd);
2593 
2594 		/* Check if there is an index in the given opcode */
2595 		rw = find_op_rw(cmd, &kidx, &subtype);
2596 		if (rw == NULL)
2597 			continue;
2598 
2599 		/* Try to find referenced kernel object */
2600 		no = rw->find_bykidx(ch, kidx);
2601 		if (no == NULL)
2602 			continue;
2603 
2604 		val = strtol(no->name, &end, 10);
2605 		if (*end == '\0' && val < 65535) {
2606 			uidx = val;
2607 		} else {
2609 			/*
2610 			 * We are called via legacy opcode.
2611 			 * Save the error and show the table as a fake
2612 			 * number so as not to make ipfw(8) hang.
2613 			 */
2614 			uidx = 65535;
2615 			error = 2;
2616 		}
2617 
2618 		rw->update(cmd, uidx);
2619 	}
2620 
2621 	return (error);
2622 }
2623 
2624 
2625 /*
2626  * Unreferences all already-referenced objects in given @cmd rule,
2627  * using information in @oib.
2628  *
2629  * Used to rollback partially converted rule on error.
2630  */
2631 static void
2632 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
2633     struct obj_idx *end)
2634 {
2635 	struct opcode_obj_rewrite *rw;
2636 	struct named_object *no;
2637 	struct obj_idx *p;
2638 
2639 	IPFW_UH_WLOCK_ASSERT(ch);
2640 
2641 	for (p = oib; p < end; p++) {
2642 		if (p->kidx == 0)
2643 			continue;
2644 
2645 		rw = find_op_rw(cmd + p->off, NULL, NULL);
2646 		KASSERT(rw != NULL, ("Unable to find handler for op %d",
2647 		    (cmd + p->off)->opcode));
2648 
2649 		/* Find & unref by existing idx */
2650 		no = rw->find_bykidx(ch, p->kidx);
2651 		KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
2652 		no->refcnt--;
2653 	}
2654 }
2655 
2656 /*
2657  * Remove references from every object used in @rule.
2658  * Used at rule removal code.
2659  */
2660 static void
2661 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
2662 {
2663 	struct opcode_obj_rewrite *rw;
2664 	struct named_object *no;
2665 	ipfw_insn *cmd;
2666 	int cmdlen, l;
2667 	uint16_t kidx;
2668 	uint8_t subtype;
2669 
2670 	IPFW_UH_WLOCK_ASSERT(ch);
2671 
2672 	l = rule->cmd_len;
2673 	cmd = rule->cmd;
2674 	cmdlen = 0;
2675 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2676 		cmdlen = F_LEN(cmd);
2677 
2678 		rw = find_op_rw(cmd, &kidx, &subtype);
2679 		if (rw == NULL)
2680 			continue;
2681 		no = rw->find_bykidx(ch, kidx);
2682 
2683 		KASSERT(no != NULL, ("object id %d not found", kidx));
2684 		KASSERT(no->subtype == subtype,
2685 		    ("wrong type %d (%d) for object id %d",
2686 		    no->subtype, subtype, kidx));
2687 		KASSERT(no->refcnt > 0, ("refcount for object %d is %d",
2688 		    kidx, no->refcnt));
2689 
2690 		if (no->refcnt == 1 && rw->destroy_object != NULL)
2691 			rw->destroy_object(ch, no);
2692 		else
2693 			no->refcnt--;
2694 	}
2695 }
2696 
2697 
2698 /*
2699  * Find and reference object (if any) stored in instruction @cmd.
2700  *
2701  * Saves object info in @pidx, sets
2702  *  - @unresolved to 1 if object should exist but was not found
2703  *
2704  * Returns non-zero value in case of error.
2705  */
2706 static int
2707 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
2708     struct obj_idx *pidx, int *unresolved)
2709 {
2710 	struct named_object *no;
2711 	struct opcode_obj_rewrite *rw;
2712 	int error;
2713 
2714 	/* Check if this opcode is candidate for rewrite */
2715 	rw = find_op_rw(cmd, &ti->uidx, &ti->type);
2716 	if (rw == NULL)
2717 		return (0);
2718 
2719 	/* Need to rewrite. Save necessary fields */
2720 	pidx->uidx = ti->uidx;
2721 	pidx->type = ti->type;
2722 
2723 	/* Try to find referenced kernel object */
2724 	error = rw->find_byname(ch, ti, &no);
2725 	if (error != 0)
2726 		return (error);
2727 	if (no == NULL) {
2728 		/*
2729 		 * Report the unresolved object for automatic
2730 		 * creation.
2731 		 */
2732 		*unresolved = 1;
2733 		return (0);
2734 	}
2735 
2736 	/*
2737 	 * Object already exists.
2738 	 * Its subtype should match the expected value.
2739 	 */
2740 	if (ti->type != no->subtype)
2741 		return (EINVAL);
2742 
2743 	/* Bump refcount and update kidx. */
2744 	no->refcnt++;
2745 	rw->update(cmd, no->kidx);
2746 	return (0);
2747 }
2748 
2749 /*
2750  * Finds and bumps refcount for objects referenced by given @rule.
2751  * Auto-creates non-existing tables.
2752  * Fills in @oib array with userland/kernel indexes.
2753  *
2754  * Returns 0 on success.
2755  */
2756 static int
2757 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
2758     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
2759 {
2760 	struct obj_idx *pidx;
2761 	ipfw_insn *cmd;
2762 	int cmdlen, error, l, unresolved;
2763 
2764 	pidx = oib;
2765 	l = rule->cmd_len;
2766 	cmd = rule->cmd;
2767 	cmdlen = 0;
2768 	error = 0;
2769 
2770 	IPFW_UH_WLOCK(ch);
2771 
2772 	/* Increase refcount on each existing referenced table. */
2773 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2774 		cmdlen = F_LEN(cmd);
2775 		unresolved = 0;
2776 
2777 		error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved);
2778 		if (error != 0)
2779 			break;
2780 		/*
2781 		 * Compatibility stuff for old clients:
2782 		 * prepare to automatically create non-existing objects.
2783 		 */
2784 		if (unresolved != 0) {
2785 			pidx->off = rule->cmd_len - l;
2786 			pidx++;
2787 		}
2788 	}
2789 
2790 	if (error != 0) {
2791 		/* Unref everything we have already done */
2792 		unref_oib_objects(ch, rule->cmd, oib, pidx);
2793 		IPFW_UH_WUNLOCK(ch);
2794 		return (error);
2795 	}
2796 	IPFW_UH_WUNLOCK(ch);
2797 
2798 	/* Perform auto-creation for non-existing objects */
2799 	if (pidx != oib)
2800 		error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);
2801 
2802 	/* Calculate real number of dynamic objects */
2803 	ci->object_opcodes = (uint16_t)(pidx - oib);
2804 
2805 	return (error);
2806 }
2807 
2808 /*
2809  * Checks if an opcode references a table of the appropriate type.
2810  * Bumps the reference count of the found table if so.
2811  * Rewrites user-supplied opcode values with kernel ones.
2812  *
2813  * Returns 0 on success and appropriate error code otherwise.
2814  */
2815 static int
2816 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci)
2817 {
2818 	int error;
2819 	ipfw_insn *cmd;
2820 	uint8_t type;
2821 	struct obj_idx *p, *pidx_first, *pidx_last;
2822 	struct tid_info ti;
2823 
2824 	/*
2825 	 * Prepare an array for storing opcode indices.
2826 	 * Use stack allocation by default.
2827 	 */
2828 	if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
2829 		/* Stack */
2830 		pidx_first = ci->obuf;
2831 	} else
2832 		pidx_first = malloc(
2833 		    ci->object_opcodes * sizeof(struct obj_idx),
2834 		    M_IPFW, M_WAITOK | M_ZERO);
2835 
2836 	error = 0;
2837 	type = 0;
2838 	memset(&ti, 0, sizeof(ti));
2839 
2840 	/* Use the set the rule is assigned to. */
2841 	ti.set = ci->krule->set;
2842 	if (ci->ctlv != NULL) {
2843 		ti.tlvs = (void *)(ci->ctlv + 1);
2844 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
2845 	}
2846 
2847 	/* Reference all used tables and other objects */
2848 	error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
2849 	if (error != 0)
2850 		goto free;
2851 	/*
2852 	 * Note that ref_rule_objects() might have updated ci->object_opcodes
2853 	 * to reflect actual number of object opcodes.
2854 	 */
2855 
2856 	/* Perform rewrite of remaining opcodes */
2857 	p = pidx_first;
2858 	pidx_last = pidx_first + ci->object_opcodes;
2859 	for (p = pidx_first; p < pidx_last; p++) {
2860 		cmd = ci->krule->cmd + p->off;
2861 		update_opcode_kidx(cmd, p->kidx);
2862 	}
2863 
2864 free:
2865 	if (pidx_first != ci->obuf)
2866 		free(pidx_first, M_IPFW);
2867 
2868 	return (error);
2869 }
2870 
2871 /*
2872  * Adds one or more rules to ipfw @chain.
2873  * Data layout (version 0)(current):
2874  * Request:
2875  * [
2876  *   ip_fw3_opheader
2877  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2878  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2879  * ]
2880  * Reply:
2881  * [
2882  *   ip_fw3_opheader
2883  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2884  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2885  * ]
2886  *
2887  * Rules in reply are modified to store their actual ruleset number.
2888  *
2889  * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending
2890  * according to their idx field and there has to be no duplicates.
2891  * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending.
2892  * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
2893  *
2894  * Returns 0 on success.
2895  */
2896 static int
2897 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2898     struct sockopt_data *sd)
2899 {
2900 	ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
2901 	ipfw_obj_ntlv *ntlv;
2902 	int clen, error, idx;
2903 	uint32_t count, read;
2904 	struct ip_fw_rule *r;
2905 	struct rule_check_info rci, *ci, *cbuf;
2906 	int i, rsize;
2907 
2908 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
2909 	ctlv = (ipfw_obj_ctlv *)(op3 + 1);
2910 
2911 	read = sizeof(ip_fw3_opheader);
2912 	rtlv = NULL;
2913 	tstate = NULL;
2914 	cbuf = NULL;
2915 	memset(&rci, 0, sizeof(struct rule_check_info));
2916 
2917 	if (read + sizeof(*ctlv) > sd->valsize)
2918 		return (EINVAL);
2919 
2920 	if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
2921 		clen = ctlv->head.length;
2922 		/* Check size and alignment */
2923 		if (clen > sd->valsize || clen < sizeof(*ctlv))
2924 			return (EINVAL);
2925 		if ((clen % sizeof(uint64_t)) != 0)
2926 			return (EINVAL);
2927 
2928 		/*
2929 		 * Some table names or other named objects.
2930 		 * Check for validness.
2931 		 * Check for validity.
2932 		count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
2933 		if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
2934 			return (EINVAL);
2935 
2936 		/*
2937 		 * Check each TLV.
2938 		 * Ensure TLVs are sorted ascending and
2939 		 * there are no duplicates.
2940 		 */
2941 		idx = -1;
2942 		ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
2943 		while (count > 0) {
2944 			if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
2945 				return (EINVAL);
2946 
2947 			error = ipfw_check_object_name_generic(ntlv->name);
2948 			if (error != 0)
2949 				return (error);
2950 
2951 			if (ntlv->idx <= idx)
2952 				return (EINVAL);
2953 
2954 			idx = ntlv->idx;
2955 			count--;
2956 			ntlv++;
2957 		}
2958 
2959 		tstate = ctlv;
2960 		read += ctlv->head.length;
2961 		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2962 	}
2963 
2964 	if (read + sizeof(*ctlv) > sd->valsize)
2965 		return (EINVAL);
2966 
2967 	if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2968 		clen = ctlv->head.length;
2969 		if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2970 			return (EINVAL);
2971 		if ((clen % sizeof(uint64_t)) != 0)
2972 			return (EINVAL);
2973 
2974 		/*
2975 		 * TODO: Permit adding multiple rules at once
2976 		 */
2977 		if (ctlv->count != 1)
2978 			return (ENOTSUP);
2979 
2980 		clen -= sizeof(*ctlv);
2981 
2982 		if (ctlv->count > clen / sizeof(struct ip_fw_rule))
2983 			return (EINVAL);
2984 
2985 		/* Allocate state for each rule or use stack */
2986 		if (ctlv->count == 1) {
2987 			memset(&rci, 0, sizeof(struct rule_check_info));
2988 			cbuf = &rci;
2989 		} else
2990 			cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
2991 			    M_WAITOK | M_ZERO);
2992 		ci = cbuf;
2993 
2994 		/*
2995 		 * Check each rule for validity.
2996 		 * Ensure numbered rules are sorted ascending
2997 		 * and properly aligned
2998 		 */
2999 		idx = 0;
3000 		r = (struct ip_fw_rule *)(ctlv + 1);
3001 		count = 0;
3002 		error = 0;
3003 		while (clen > 0) {
3004 			rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
3005 			if (rsize > clen || ctlv->count <= count) {
3006 				error = EINVAL;
3007 				break;
3008 			}
3009 
3010 			ci->ctlv = tstate;
3011 			error = check_ipfw_rule1(r, rsize, ci);
3012 			if (error != 0)
3013 				break;
3014 
3015 			/* Check sorting */
3016 			if (r->rulenum != 0 && r->rulenum < idx) {
3017 				printf("rulenum %d idx %d\n", r->rulenum, idx);
3018 				error = EINVAL;
3019 				break;
3020 			}
3021 			idx = r->rulenum;
3022 
3023 			ci->urule = (caddr_t)r;
3024 
3025 			rsize = roundup2(rsize, sizeof(uint64_t));
3026 			clen -= rsize;
3027 			r = (struct ip_fw_rule *)((caddr_t)r + rsize);
3028 			count++;
3029 			ci++;
3030 		}
3031 
3032 		if (ctlv->count != count || error != 0) {
3033 			if (cbuf != &rci)
3034 				free(cbuf, M_TEMP);
3035 			return (EINVAL);
3036 		}
3037 
3038 		rtlv = ctlv;
3039 		read += ctlv->head.length;
3040 		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
3041 	}
3042 
3043 	if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
3044 		if (cbuf != NULL && cbuf != &rci)
3045 			free(cbuf, M_TEMP);
3046 		return (EINVAL);
3047 	}
3048 
3049 	/*
3050 	 * Passed rules seem to be valid.
3051 	 * Allocate storage and try to add them to chain.
3052 	 */
3053 	for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
3054 		clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
3055 		ci->krule = ipfw_alloc_rule(chain, clen);
3056 		import_rule1(ci);
3057 	}
3058 
3059 	if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
3060 		/* Free allocated krules */
3061 		for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
3062 			ipfw_free_rule(ci->krule);
3063 	}
3064 
3065 	if (cbuf != NULL && cbuf != &rci)
3066 		free(cbuf, M_TEMP);
3067 
3068 	return (error);
3069 }
3070 
3071 /*
3072  * Lists all sopts currently registered.
3073  * Data layout (v0)(current):
3074  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
3075  * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
3076  *
3077  * Returns 0 on success
3078  */
3079 static int
3080 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3081     struct sockopt_data *sd)
3082 {
3083 	struct _ipfw_obj_lheader *olh;
3084 	ipfw_sopt_info *i;
3085 	struct ipfw_sopt_handler *sh;
3086 	uint32_t count, n, size;
3087 
3088 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
3089 	if (olh == NULL)
3090 		return (EINVAL);
3091 	if (sd->valsize < olh->size)
3092 		return (EINVAL);
3093 
3094 	CTL3_LOCK();
3095 	count = ctl3_hsize;
3096 	size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
3097 
3098 	/* Fill in header regardless of buffer size */
3099 	olh->count = count;
3100 	olh->objsize = sizeof(ipfw_sopt_info);
3101 
3102 	if (size > olh->size) {
3103 		olh->size = size;
3104 		CTL3_UNLOCK();
3105 		return (ENOMEM);
3106 	}
3107 	olh->size = size;
3108 
3109 	for (n = 0; n < count; n++) {
3110 		i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
3111 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
3112 		sh = &ctl3_handlers[n];
3113 		i->opcode = sh->opcode;
3114 		i->version = sh->version;
3115 		i->refcnt = sh->refcnt;
3116 	}
3117 	CTL3_UNLOCK();
3118 
3119 	return (0);
3120 }
3121 
3122 /*
3123  * Compares two opcodes.
3124  * Used both in qsort() and bsearch().
3125  *
3126  * Returns 0 if match is found.
3127  */
3128 static int
3129 compare_opcodes(const void *_a, const void *_b)
3130 {
3131 	const struct opcode_obj_rewrite *a, *b;
3132 
3133 	a = (const struct opcode_obj_rewrite *)_a;
3134 	b = (const struct opcode_obj_rewrite *)_b;
3135 
3136 	if (a->opcode < b->opcode)
3137 		return (-1);
3138 	else if (a->opcode > b->opcode)
3139 		return (1);
3140 
3141 	return (0);
3142 }
3143 
3144 /*
3145  * XXX: Rewrite bsearch()
3146  */
3147 static int
3148 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo,
3149     struct opcode_obj_rewrite **phi)
3150 {
3151 	struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw;
3152 
3153 	memset(&h, 0, sizeof(h));
3154 	h.opcode = op;
3155 
3156 	rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
3157 	    ctl3_rsize, sizeof(h), compare_opcodes);
3158 	if (rw == NULL)
3159 		return (1);
3160 
3161 	/* Find the first element matching the same opcode */
3162 	lo = rw;
3163 	for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--)
3164 		;
3165 
3166 	/* Find the last element matching the same opcode */
3167 	hi = rw;
3168 	ctl3_max = ctl3_rewriters + ctl3_rsize;
3169 	for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++)
3170 		;
3171 
3172 	*plo = lo;
3173 	*phi = hi;
3174 
3175 	return (0);
3176 }
3177 
3178 /*
3179  * Finds opcode object rewriter based on @code.
3180  *
3181  * Returns pointer to handler or NULL.
3182  */
3183 static struct opcode_obj_rewrite *
3184 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
3185 {
3186 	struct opcode_obj_rewrite *rw, *lo, *hi;
3187 	uint16_t uidx;
3188 	uint8_t subtype;
3189 
3190 	if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0)
3191 		return (NULL);
3192 
3193 	for (rw = lo; rw <= hi; rw++) {
3194 		if (rw->classifier(cmd, &uidx, &subtype) == 0) {
3195 			if (puidx != NULL)
3196 				*puidx = uidx;
3197 			if (ptype != NULL)
3198 				*ptype = subtype;
3199 			return (rw);
3200 		}
3201 	}
3202 
3203 	return (NULL);
3204 }
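
/*
 * Classifies object opcode @cmd and stores its index into @puidx.
 *
 * Returns 0 if a matching rewriter was found, 1 otherwise.
 */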
3205 int
3206 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx)
3207 {
3208 
3209 	if (find_op_rw(cmd, puidx, NULL) == NULL)
3210 		return (1);
3211 	return (0);
3212 }
3213 
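/*
 * Stores index @idx into object opcode @cmd via its registered
 * update callback.
 */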
3214 void
3215 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx)
3216 {
3217 	struct opcode_obj_rewrite *rw;
3218 
3219 	rw = find_op_rw(cmd, NULL, NULL);
3220 	KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
3221 	rw->update(cmd, idx);
3222 }
3223 
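/*
 * Initializes the global opcode object rewriter array.
 */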
3224 void
3225 ipfw_init_obj_rewriter()
3226 {
3227 
3228 	ctl3_rewriters = NULL;
3229 	ctl3_rsize = 0;
3230 }
3231 
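/*
 * Frees the global opcode object rewriter array.
 */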
3232 void
3233 ipfw_destroy_obj_rewriter()
3234 {
3235 
3236 	if (ctl3_rewriters != NULL)
3237 		free(ctl3_rewriters, M_IPFW);
3238 	ctl3_rewriters = NULL;
3239 	ctl3_rsize = 0;
3240 }
3241 
3242 /*
3243  * Adds one or more opcode object rewrite handlers to the global array.
3244  * Function may sleep.
3245  */
3246 void
3247 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3248 {
3249 	size_t sz;
3250 	struct opcode_obj_rewrite *tmp;
3251 
3252 	CTL3_LOCK();
3253 
3254 	for (;;) {
3255 		sz = ctl3_rsize + count;
3256 		CTL3_UNLOCK();
3257 		tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
3258 		CTL3_LOCK();
3259 		if (ctl3_rsize + count <= sz)
3260 			break;
3261 
3262 		/* Retry */
3263 		free(tmp, M_IPFW);
3264 	}
3265 
3266 	/* Merge old & new arrays */
3267 	sz = ctl3_rsize + count;
3268 	memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
3269 	memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
3270 	qsort(tmp, sz, sizeof(*rw), compare_opcodes);
3271 	/* Switch new and free old */
3272 	if (ctl3_rewriters != NULL)
3273 		free(ctl3_rewriters, M_IPFW);
3274 	ctl3_rewriters = tmp;
3275 	ctl3_rsize = sz;
3276 
3277 	CTL3_UNLOCK();
3278 }
3279 
3280 /*
3281  * Removes one or more object rewrite handlers from the global array.
3282  */
3283 int
3284 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3285 {
3286 	size_t sz;
3287 	struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi;
3288 	int i;
3289 
3290 	CTL3_LOCK();
3291 
3292 	for (i = 0; i < count; i++) {
3293 		if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0)
3294 			continue;
3295 
3296 		for (ktmp = lo; ktmp <= hi; ktmp++) {
3297 			if (ktmp->classifier != rw[i].classifier)
3298 				continue;
3299 
3300 			ctl3_max = ctl3_rewriters + ctl3_rsize;
3301 			sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp);
3302 			memmove(ktmp, ktmp + 1, sz);
3303 			ctl3_rsize--;
3304 			break;
3305 		}
3306 
3307 	}
3308 
3309 	if (ctl3_rsize == 0) {
3310 		if (ctl3_rewriters != NULL)
3311 			free(ctl3_rewriters, M_IPFW);
3312 		ctl3_rewriters = NULL;
3313 	}
3314 
3315 	CTL3_UNLOCK();
3316 
3317 	return (0);
3318 }
3319 
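/*
 * ipfw_objhash_foreach() callback: exports single named object @no
 * into the sockopt buffer passed via @arg.
 */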
3320 static int
3321 export_objhash_ntlv_internal(struct namedobj_instance *ni,
3322     struct named_object *no, void *arg)
3323 {
3324 	struct sockopt_data *sd;
3325 	ipfw_obj_ntlv *ntlv;
3326 
3327 	sd = (struct sockopt_data *)arg;
3328 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
3329 	if (ntlv == NULL)
3330 		return (ENOMEM);
3331 	ipfw_export_obj_ntlv(no, ntlv);
3332 	return (0);
3333 }
3334 
3335 /*
3336  * Lists all service objects.
3337  * Data layout (v0)(current):
3338  * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
3339  * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
3340  * Returns 0 on success
3341  */
3342 static int
3343 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3344     struct sockopt_data *sd)
3345 {
3346 	ipfw_obj_lheader *hdr;
3347 	int count;
3348 
3349 	hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
3350 	if (hdr == NULL)
3351 		return (EINVAL);
3352 
3353 	IPFW_UH_RLOCK(chain);
3354 	count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
3355 	hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
3356 	if (sd->valsize < hdr->size) {
3357 		IPFW_UH_RUNLOCK(chain);
3358 		return (ENOMEM);
3359 	}
3360 	hdr->count = count;
3361 	hdr->objsize = sizeof(ipfw_obj_ntlv);
3362 	if (count > 0)
3363 		ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
3364 		    export_objhash_ntlv_internal, sd);
3365 	IPFW_UH_RUNLOCK(chain);
3366 	return (0);
3367 }
3368 
3369 /*
3370  * Compares two sopt handlers (code, version and handler ptr).
3371  * Used both in qsort() and bsearch().
3372  * Does not compare the handler in the latter case.
3373  *
3374  * Returns 0 if match is found.
3375  */
3376 static int
3377 compare_sh(const void *_a, const void *_b)
3378 {
3379 	const struct ipfw_sopt_handler *a, *b;
3380 
3381 	a = (const struct ipfw_sopt_handler *)_a;
3382 	b = (const struct ipfw_sopt_handler *)_b;
3383 
3384 	if (a->opcode < b->opcode)
3385 		return (-1);
3386 	else if (a->opcode > b->opcode)
3387 		return (1);
3388 
3389 	if (a->version < b->version)
3390 		return (-1);
3391 	else if (a->version > b->version)
3392 		return (1);
3393 
3394 	/* bsearch helper */
3395 	if (a->handler == NULL)
3396 		return (0);
3397 
3398 	if ((uintptr_t)a->handler < (uintptr_t)b->handler)
3399 		return (-1);
3400 	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
3401 		return (1);
3402 
3403 	return (0);
3404 }
3405 
3406 /*
3407  * Finds sopt handler based on @code and @version.
3408  *
3409  * Returns pointer to handler or NULL.
3410  */
3411 static struct ipfw_sopt_handler *
3412 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
3413 {
3414 	struct ipfw_sopt_handler *sh, h;
3415 
3416 	memset(&h, 0, sizeof(h));
3417 	h.opcode = code;
3418 	h.version = version;
3419 	h.handler = handler;
3420 
3421 	sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
3422 	    ctl3_hsize, sizeof(h), compare_sh);
3423 
3424 	return (sh);
3425 }
3426 
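/*
 * Finds sopt handler for @opcode/@version, bumps its reference count
 * and copies handler data into @psh.
 *
 * Returns 0 on success.
 */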
3427 static int
3428 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
3429 {
3430 	struct ipfw_sopt_handler *sh;
3431 
3432 	CTL3_LOCK();
3433 	if ((sh = find_sh(opcode, version, NULL)) == NULL) {
3434 		CTL3_UNLOCK();
3435 		printf("ipfw: ipfw_ctl3 invalid option %dv%d\n",
3436 		    opcode, version);
3437 		return (EINVAL);
3438 	}
3439 	sh->refcnt++;
3440 	ctl3_refct++;
3441 	/* Copy handler data to requested buffer */
3442 	*psh = *sh;
3443 	CTL3_UNLOCK();
3444 
3445 	return (0);
3446 }
3447 
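/*
 * Drops the handler reference previously acquired by find_ref_sh().
 */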
3448 static void
3449 find_unref_sh(struct ipfw_sopt_handler *psh)
3450 {
3451 	struct ipfw_sopt_handler *sh;
3452 
3453 	CTL3_LOCK();
3454 	sh = find_sh(psh->opcode, psh->version, NULL);
3455 	KASSERT(sh != NULL, ("ctl3 handler disappeared"));
3456 	sh->refcnt--;
3457 	ctl3_refct--;
3458 	CTL3_UNLOCK();
3459 }
3460 
3461 void
3462 ipfw_init_sopt_handler()
3463 {
3464 
3465 	CTL3_LOCK_INIT();
3466 	IPFW_ADD_SOPT_HANDLER(1, scodes);
3467 }
3468 
3469 void
3470 ipfw_destroy_sopt_handler()
3471 {
3472 
3473 	IPFW_DEL_SOPT_HANDLER(1, scodes);
3474 	CTL3_LOCK_DESTROY();
3475 }
3476 
3477 /*
3478  * Adds one or more sockopt handlers to the global array.
3479  * Function may sleep.
3480  */
3481 void
3482 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3483 {
3484 	size_t sz;
3485 	struct ipfw_sopt_handler *tmp;
3486 
3487 	CTL3_LOCK();
3488 
3489 	for (;;) {
3490 		sz = ctl3_hsize + count;
3491 		CTL3_UNLOCK();
3492 		tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
3493 		CTL3_LOCK();
3494 		if (ctl3_hsize + count <= sz)
3495 			break;
3496 
3497 		/* Retry */
3498 		free(tmp, M_IPFW);
3499 	}
3500 
3501 	/* Merge old & new arrays */
3502 	sz = ctl3_hsize + count;
3503 	memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
3504 	memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
3505 	qsort(tmp, sz, sizeof(*sh), compare_sh);
3506 	/* Switch new and free old */
3507 	if (ctl3_handlers != NULL)
3508 		free(ctl3_handlers, M_IPFW);
3509 	ctl3_handlers = tmp;
3510 	ctl3_hsize = sz;
3511 	ctl3_gencnt++;
3512 
3513 	CTL3_UNLOCK();
3514 }
3515 
3516 /*
3517  * Removes one or more sockopt handlers from the global array.
3518  */
3519 int
3520 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3521 {
3522 	size_t sz;
3523 	struct ipfw_sopt_handler *tmp, *h;
3524 	int i;
3525 
3526 	CTL3_LOCK();
3527 
3528 	for (i = 0; i < count; i++) {
3529 		tmp = &sh[i];
3530 		h = find_sh(tmp->opcode, tmp->version, tmp->handler);
3531 		if (h == NULL)
3532 			continue;
3533 
3534 		sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
3535 		memmove(h, h + 1, sz);
3536 		ctl3_hsize--;
3537 	}
3538 
3539 	if (ctl3_hsize == 0) {
3540 		if (ctl3_handlers != NULL)
3541 			free(ctl3_handlers, M_IPFW);
3542 		ctl3_handlers = NULL;
3543 	}
3544 
3545 	ctl3_gencnt++;
3546 
3547 	CTL3_UNLOCK();
3548 
3549 	return (0);
3550 }
3551 
3552 /*
3553  * Writes data accumulated in @sd to sockopt buffer.
3554  * Zeroes internal @sd buffer.
3555  */
3556 static int
3557 ipfw_flush_sopt_data(struct sockopt_data *sd)
3558 {
3559 	struct sockopt *sopt;
3560 	int error;
3561 	size_t sz;
3562 
3563 	sz = sd->koff;
3564 	if (sz == 0)
3565 		return (0);
3566 
3567 	sopt = sd->sopt;
3568 
3569 	if (sopt->sopt_dir == SOPT_GET) {
3570 		error = copyout(sd->kbuf, sopt->sopt_val, sz);
3571 		if (error != 0)
3572 			return (error);
3573 	}
3574 
3575 	memset(sd->kbuf, 0, sd->ksize);
3576 	sd->ktotal += sz;
3577 	sd->koff = 0;
3578 	if (sd->ktotal + sd->ksize < sd->valsize)
3579 		sd->kavail = sd->ksize;
3580 	else
3581 		sd->kavail = sd->valsize - sd->ktotal;
3582 
3583 	/* Update sopt buffer data */
3584 	sopt->sopt_valsize = sd->ktotal;
3585 	sopt->sopt_val = sd->sopt_val + sd->ktotal;
3586 
3587 	return (0);
3588 }
3589 
3590 /*
3591  * Ensures that @sd buffer has a contiguous block of @needed
3592  * bytes.
3593  *
3594  * Returns pointer to requested space or NULL.
3595  */
3596 caddr_t
3597 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
3598 {
3599 	int error;
3600 	caddr_t addr;
3601 
3602 	if (sd->kavail < needed) {
3603 		/*
3604 		 * Flush data and try another time.
3605 		 */
3606 		error = ipfw_flush_sopt_data(sd);
3607 
3608 		if (sd->kavail < needed || error != 0)
3609 			return (NULL);
3610 	}
3611 
3612 	addr = sd->kbuf + sd->koff;
3613 	sd->koff += needed;
3614 	sd->kavail -= needed;
3615 	return (addr);
3616 }
3617 
3618 /*
3619  * Requests @needed contiguous bytes from @sd buffer.
3620  * Function is used to notify the subsystem that we are
3621  * interested in the first @needed bytes (request header)
3622  * and the rest of the buffer can be safely zeroed.
3623  *
3624  * Returns pointer to requested space or NULL.
3625  */
3626 caddr_t
3627 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3628 {
3629 	caddr_t addr;
3630 
3631 	if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3632 		return (NULL);
3633 
3634 	if (sd->kavail > 0)
3635 		memset(sd->kbuf + sd->koff, 0, sd->kavail);
3636 
3637 	return (addr);
3638 }
3639 
3640 /*
3641  * New sockopt handler.
3642  */
3643 int
3644 ipfw_ctl3(struct sockopt *sopt)
3645 {
3646 	int error, locked;
3647 	size_t size, valsize;
3648 	struct ip_fw_chain *chain;
3649 	char xbuf[256];
3650 	struct sockopt_data sdata;
3651 	struct ipfw_sopt_handler h;
3652 	ip_fw3_opheader *op3 = NULL;
3653 
3654 	error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3655 	if (error != 0)
3656 		return (error);
3657 
3658 	if (sopt->sopt_name != IP_FW3)
3659 		return (ipfw_ctl(sopt));
3660 
3661 	chain = &V_layer3_chain;
3662 	error = 0;
3663 
3664 	/* Save original valsize before it is altered via sooptcopyin() */
3665 	valsize = sopt->sopt_valsize;
3666 	memset(&sdata, 0, sizeof(sdata));
3667 	/* Read op3 header first to determine actual operation */
3668 	op3 = (ip_fw3_opheader *)xbuf;
3669 	error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3670 	if (error != 0)
3671 		return (error);
3672 	sopt->sopt_valsize = valsize;
3673 
3674 	/*
3675 	 * Find and reference command.
3676 	 */
3677 	error = find_ref_sh(op3->opcode, op3->version, &h);
3678 	if (error != 0)
3679 		return (error);
3680 
3681 	/*
3682 	 * Disallow modifications in really-really secure mode, but still allow
3683 	 * the logging counters to be reset.
3684 	 */
3685 	if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3686 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3687 		if (error != 0) {
3688 			find_unref_sh(&h);
3689 			return (error);
3690 		}
3691 	}
3692 
3693 	/*
3694 	 * Fill in sockopt_data structure that may be useful for
3695 	 * IP_FW3 get requests.
3696 	 */
3697 	locked = 0;
3698 	if (valsize <= sizeof(xbuf)) {
3699 		/* use on-stack buffer */
3700 		sdata.kbuf = xbuf;
3701 		sdata.ksize = sizeof(xbuf);
3702 		sdata.kavail = valsize;
3703 	} else {
3705 		/*
3706 		 * Determine opcode type/buffer size:
3707 		 * allocate sliding-window buf for data export or
3708 		 * contiguous buffer for special ops.
3709 		 */
3710 		if ((h.dir & HDIR_SET) != 0) {
3711 			/* Set request. Allocate contiguous buffer. */
3712 			if (valsize > CTL3_LARGEBUF) {
3713 				find_unref_sh(&h);
3714 				return (EFBIG);
3715 			}
3716 
3717 			size = valsize;
3718 		} else {
3719 			/* Get request. Allocate sliding window buffer */
3720 			size = (valsize < CTL3_SMALLBUF) ? valsize : CTL3_SMALLBUF;
3721 
3722 			if (size < valsize) {
3723 				/* We have to wire user buffer */
3724 				error = vslock(sopt->sopt_val, valsize);
3725 				if (error != 0)
3726 					return (error);
3727 				locked = 1;
3728 			}
3729 		}
3730 
3731 		sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3732 		sdata.ksize = size;
3733 		sdata.kavail = size;
3734 	}
3735 
3736 	sdata.sopt = sopt;
3737 	sdata.sopt_val = sopt->sopt_val;
3738 	sdata.valsize = valsize;
3739 
3740 	/*
3741 	 * Copy either the entire request (if valsize < bsize_max)
3742 	 * or the first bsize_max bytes, to guarantee to most consumers
3743 	 * that all necessary data has been copied.
3744 	 * In any case, copy not less than sizeof(ip_fw3_opheader).
3745 	 */
3746 	if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3747 	    sizeof(ip_fw3_opheader))) != 0)
3748 		return (error);
3749 	op3 = (ip_fw3_opheader *)sdata.kbuf;
3750 
3751 	/* Finally, run handler */
3752 	error = h.handler(chain, op3, &sdata);
3753 	find_unref_sh(&h);
3754 
3755 	/* Flush state and free buffers */
3756 	if (error == 0)
3757 		error = ipfw_flush_sopt_data(&sdata);
3758 	else
3759 		ipfw_flush_sopt_data(&sdata);
3760 
3761 	if (locked != 0)
3762 		vsunlock(sdata.sopt_val, valsize);
3763 
3764 	/* Restore original pointer and set number of bytes written */
3765 	sopt->sopt_val = sdata.sopt_val;
3766 	sopt->sopt_valsize = sdata.ktotal;
3767 	if (sdata.kbuf != xbuf)
3768 		free(sdata.kbuf, M_TEMP);
3769 
3770 	return (error);
3771 }
3772 
3773 /**
3774  * {set|get}sockopt parser.
3775  */
3776 int
3777 ipfw_ctl(struct sockopt *sopt)
3778 {
3779 #define	RULE_MAXSIZE	(512*sizeof(u_int32_t))
3780 	int error;
3781 	size_t size, valsize;
3782 	struct ip_fw *buf;
3783 	struct ip_fw_rule0 *rule;
3784 	struct ip_fw_chain *chain;
3785 	u_int32_t rulenum[2];
3786 	uint32_t opt;
3787 	struct rule_check_info ci;
3788 	IPFW_RLOCK_TRACKER;
3789 
3790 	chain = &V_layer3_chain;
3791 	error = 0;
3792 
3793 	/* Save original valsize before it is altered via sooptcopyin() */
3794 	valsize = sopt->sopt_valsize;
3795 	opt = sopt->sopt_name;
3796 
3797 	/*
3798 	 * Disallow modifications in really-really secure mode, but still allow
3799 	 * the logging counters to be reset.
3800 	 */
3801 	if (opt == IP_FW_ADD ||
3802 	    (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
3803 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3804 		if (error != 0)
3805 			return (error);
3806 	}
3807 
3808 	switch (opt) {
3809 	case IP_FW_GET:
3810 		/*
3811 		 * pass up a copy of the current rules. Static rules
3812 		 * come first (the last of which has number IPFW_DEFAULT_RULE),
3813 		 * followed by a possibly empty list of dynamic rules.
3814 		 * The last dynamic rule has NULL in the "next" field.
3815 		 *
3816 		 * Note that the calculated size is used to bound the
3817 		 * amount of data returned to the user.  The rule set may
3818 		 * change between calculating the size and returning the
3819 		 * data in which case we'll just return what fits.
3820 		 */
3821 		for (;;) {
3822 			int len = 0, want;
3823 
3824 			size = chain->static_len;
3825 			size += ipfw_dyn_len();
3826 			if (size >= sopt->sopt_valsize)
3827 				break;
3828 			buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3829 			IPFW_UH_RLOCK(chain);
3830 			/* check again how much space we need */
3831 			want = chain->static_len + ipfw_dyn_len();
3832 			if (size >= want)
3833 				len = ipfw_getrules(chain, buf, size);
3834 			IPFW_UH_RUNLOCK(chain);
3835 			if (size >= want)
3836 				error = sooptcopyout(sopt, buf, len);
3837 			free(buf, M_TEMP);
3838 			if (size >= want)
3839 				break;
3840 		}
3841 		break;
3842 
3843 	case IP_FW_FLUSH:
3844 		/* locking is done within del_entry() */
3845 		error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
3846 		break;
3847 
3848 	case IP_FW_ADD:
3849 		rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3850 		error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3851 			sizeof(struct ip_fw7) );
3852 
3853 		memset(&ci, 0, sizeof(struct rule_check_info));
3854 
3855 		/*
3856 		 * If the size of commands equals RULESIZE7 then we assume
3857 		 * a FreeBSD7.2 binary is talking to us (set is7=1).
3858 		 * is7 is persistent so the next 'ipfw list' command
3859 		 * will use this format.
3860 		 * NOTE: If the wrong version is guessed (this can happen if
3861 		 *       the first ipfw command is 'ipfw [pipe] list')
3862 		 *       the ipfw binary may crash or loop infinitely...
3863 		 */
3864 		size = sopt->sopt_valsize;
3865 		if (size == RULESIZE7(rule)) {
3866 		    is7 = 1;
3867 		    error = convert_rule_to_8(rule);
3868 		    if (error) {
3869 			free(rule, M_TEMP);
3870 			return error;
3871 		    }
3872 		    size = RULESIZE(rule);
3873 		} else
3874 		    is7 = 0;
3875 		if (error == 0)
3876 			error = check_ipfw_rule0(rule, size, &ci);
3877 		if (error == 0) {
3878 			/* locking is done within add_rule() */
3879 			struct ip_fw *krule;
3880 			krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
3881 			ci.urule = (caddr_t)rule;
3882 			ci.krule = krule;
3883 			import_rule0(&ci);
3884 			error = commit_rules(chain, &ci, 1);
3885 			if (error != 0)
3886 				ipfw_free_rule(ci.krule);
3887 			else if (sopt->sopt_dir == SOPT_GET) {
3888 				if (is7) {
3889 					error = convert_rule_to_7(rule);
3890 					size = RULESIZE7(rule);
3891 					if (error) {
3892 						free(rule, M_TEMP);
3893 						return error;
3894 					}
3895 				}
3896 				error = sooptcopyout(sopt, rule, size);
3897 			}
3898 		}
3899 		free(rule, M_TEMP);
3900 		break;
3901 
3902 	case IP_FW_DEL:
3903 		/*
3904 		 * IP_FW_DEL is used for deleting single rules or sets,
3905 		 * and (ab)used to atomically manipulate sets. Argument size
3906 		 * is used to distinguish between the two:
3907 		 *    sizeof(u_int32_t)
3908 		 *	delete single rule or set of rules,
3909 		 *	or reassign rules (or sets) to a different set.
3910 		 *    2*sizeof(u_int32_t)
3911 		 *	atomic disable/enable sets.
3912 		 *	first u_int32_t contains sets to be disabled,
3913 		 *	second u_int32_t contains sets to be enabled.
3914 		 */
3915 		error = sooptcopyin(sopt, rulenum,
3916 			2*sizeof(u_int32_t), sizeof(u_int32_t));
3917 		if (error)
3918 			break;
3919 		size = sopt->sopt_valsize;
3920 		if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
3921 			/* delete or reassign, locking done in del_entry() */
3922 			error = del_entry(chain, rulenum[0]);
3923 		} else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
3924 			IPFW_UH_WLOCK(chain);
3925 			V_set_disable =
3926 			    (V_set_disable | rulenum[0]) & ~rulenum[1] &
3927 			    ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3928 			IPFW_UH_WUNLOCK(chain);
3929 		} else
3930 			error = EINVAL;
3931 		break;
3932 
3933 	case IP_FW_ZERO:
3934 	case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */
3935 		rulenum[0] = 0;
3936 		if (sopt->sopt_val != 0) {
3937 		    error = sooptcopyin(sopt, rulenum,
3938 			    sizeof(u_int32_t), sizeof(u_int32_t));
3939 		    if (error)
3940 			break;
3941 		}
3942 		error = zero_entry(chain, rulenum[0],
3943 			sopt->sopt_name == IP_FW_RESETLOG);
3944 		break;
3945 
3946 	/*--- TABLE opcodes ---*/
3947 	case IP_FW_TABLE_ADD:
3948 	case IP_FW_TABLE_DEL:
3949 		{
3950 			ipfw_table_entry ent;
3951 			struct tentry_info tei;
3952 			struct tid_info ti;
3953 			struct table_value v;
3954 
3955 			error = sooptcopyin(sopt, &ent,
3956 			    sizeof(ent), sizeof(ent));
3957 			if (error)
3958 				break;
3959 
3960 			memset(&tei, 0, sizeof(tei));
3961 			tei.paddr = &ent.addr;
3962 			tei.subtype = AF_INET;
3963 			tei.masklen = ent.masklen;
3964 			ipfw_import_table_value_legacy(ent.value, &v);
3965 			tei.pvalue = &v;
3966 			memset(&ti, 0, sizeof(ti));
3967 			ti.uidx = ent.tbl;
3968 			ti.type = IPFW_TABLE_CIDR;
3969 
3970 			error = (opt == IP_FW_TABLE_ADD) ?
3971 			    add_table_entry(chain, &ti, &tei, 0, 1) :
3972 			    del_table_entry(chain, &ti, &tei, 0, 1);
3973 		}
3974 		break;
3975 
3976 
3977 	case IP_FW_TABLE_FLUSH:
3978 		{
3979 			u_int16_t tbl;
3980 			struct tid_info ti;
3981 
3982 			error = sooptcopyin(sopt, &tbl,
3983 			    sizeof(tbl), sizeof(tbl));
3984 			if (error)
3985 				break;
3986 			memset(&ti, 0, sizeof(ti));
3987 			ti.uidx = tbl;
3988 			error = flush_table(chain, &ti);
3989 		}
3990 		break;
3991 
3992 	case IP_FW_TABLE_GETSIZE:
3993 		{
3994 			u_int32_t tbl, cnt;
3995 			struct tid_info ti;
3996 
3997 			if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
3998 			    sizeof(tbl))))
3999 				break;
4000 			memset(&ti, 0, sizeof(ti));
4001 			ti.uidx = tbl;
4002 			IPFW_RLOCK(chain);
4003 			error = ipfw_count_table(chain, &ti, &cnt);
4004 			IPFW_RUNLOCK(chain);
4005 			if (error)
4006 				break;
4007 			error = sooptcopyout(sopt, &cnt, sizeof(cnt));
4008 		}
4009 		break;
4010 
4011 	case IP_FW_TABLE_LIST:
4012 		{
4013 			ipfw_table *tbl;
4014 			struct tid_info ti;
4015 
4016 			if (sopt->sopt_valsize < sizeof(*tbl)) {
4017 				error = EINVAL;
4018 				break;
4019 			}
4020 			size = sopt->sopt_valsize;
4021 			tbl = malloc(size, M_TEMP, M_WAITOK);
4022 			error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
4023 			if (error) {
4024 				free(tbl, M_TEMP);
4025 				break;
4026 			}
4027 			tbl->size = (size - sizeof(*tbl)) /
4028 			    sizeof(ipfw_table_entry);
4029 			memset(&ti, 0, sizeof(ti));
4030 			ti.uidx = tbl->tbl;
4031 			IPFW_RLOCK(chain);
4032 			error = ipfw_dump_table_legacy(chain, &ti, tbl);
4033 			IPFW_RUNLOCK(chain);
4034 			if (error) {
4035 				free(tbl, M_TEMP);
4036 				break;
4037 			}
4038 			error = sooptcopyout(sopt, tbl, size);
4039 			free(tbl, M_TEMP);
4040 		}
4041 		break;
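	/*
	 * Illustrative userland sketch for the legacy table opcodes above
	 * (an assumption based on the traditional raw-socket interface;
	 * 's' is a raw IP socket as in the IP_FW_DEL sketch above, and only
	 * fields referenced in this file are shown):
	 *
	 *	ipfw_table_entry ent;
	 *
	 *	memset(&ent, 0, sizeof(ent));
	 *	ent.tbl = 1;				// table number
	 *	ent.addr = inet_addr("192.0.2.0");	// IPv4 prefix
	 *	ent.masklen = 24;
	 *	ent.value = 0;
	 *	setsockopt(s, IPPROTO_IP, IP_FW_TABLE_ADD, &ent, sizeof(ent));
	 *
	 *	// IP_FW_TABLE_GETSIZE is a read: the same u_int32_t buffer
	 *	// carries the table number in and the entry count out.
	 *	u_int32_t arg = 1;
	 *	socklen_t len = sizeof(arg);
	 *	getsockopt(s, IPPROTO_IP, IP_FW_TABLE_GETSIZE, &arg, &len);
	 */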
4042 
4043 	/*--- NAT operations are protected by the IPFW_LOCK ---*/
4044 	case IP_FW_NAT_CFG:
4045 		if (IPFW_NAT_LOADED)
4046 			error = ipfw_nat_cfg_ptr(sopt);
4047 		else {
4048 			printf("IP_FW_NAT_CFG: %s\n",
4049 			    "ipfw_nat not present, please load it");
4050 			error = EINVAL;
4051 		}
4052 		break;
4053 
4054 	case IP_FW_NAT_DEL:
4055 		if (IPFW_NAT_LOADED)
4056 			error = ipfw_nat_del_ptr(sopt);
4057 		else {
4058 			printf("IP_FW_NAT_DEL: %s\n",
4059 			    "ipfw_nat not present, please load it");
4060 			error = EINVAL;
4061 		}
4062 		break;
4063 
4064 	case IP_FW_NAT_GET_CONFIG:
4065 		if (IPFW_NAT_LOADED)
4066 			error = ipfw_nat_get_cfg_ptr(sopt);
4067 		else {
4068 			printf("IP_FW_NAT_GET_CFG: %s\n",
4069 			    "ipfw_nat not present, please load it");
4070 			error = EINVAL;
4071 		}
4072 		break;
4073 
4074 	case IP_FW_NAT_GET_LOG:
4075 		if (IPFW_NAT_LOADED)
4076 			error = ipfw_nat_get_log_ptr(sopt);
4077 		else {
4078 			printf("IP_FW_NAT_GET_LOG: %s\n",
4079 			    "ipfw_nat not present, please load it");
4080 			error = EINVAL;
4081 		}
4082 		break;
4083 
4084 	default:
4085 		printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
4086 		error = EINVAL;
4087 	}
4088 
4089 	return (error);
4090 #undef RULE_MAXSIZE
4091 }
4092 #define	RULE_MAXSIZE	(256*sizeof(u_int32_t))
4093 
4094 /* Functions to convert rules 7.2 <==> 8.0 */
4095 static int
4096 convert_rule_to_7(struct ip_fw_rule0 *rule)
4097 {
4098 	/* Used to modify original rule */
4099 	struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
4100 	/* copy of original rule, version 8 */
4101 	struct ip_fw_rule0 *tmp;
4102 
4103 	/* Used to copy commands */
4104 	ipfw_insn *ccmd, *dst;
4105 	int ll = 0, ccmdlen = 0;
4106 
4107 	tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4108 	if (tmp == NULL) {
4109 		return (ENOMEM);
4110 	}
4111 	bcopy(rule, tmp, RULE_MAXSIZE);
4112 
4113 	/* Copy fields */
4114 	//rule7->_pad = tmp->_pad;
4115 	rule7->set = tmp->set;
4116 	rule7->rulenum = tmp->rulenum;
4117 	rule7->cmd_len = tmp->cmd_len;
4118 	rule7->act_ofs = tmp->act_ofs;
4119 	rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
4121 	rule7->pcnt = tmp->pcnt;
4122 	rule7->bcnt = tmp->bcnt;
4123 	rule7->timestamp = tmp->timestamp;
4124 
4125 	/* Copy commands */
4126 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
4127 			ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4128 		ccmdlen = F_LEN(ccmd);
4129 
4130 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4131 
4132 		if (dst->opcode > O_NAT)
4133 			/* O_REASS does not exist in the 7.2 version, so
4134 			 * decrement any opcode numbered above O_NAT.
4135 			 */
4136 			dst->opcode--;
4137 
4138 		if (ccmdlen > ll) {
4139 			printf("ipfw: opcode %d size truncated\n",
4140 				ccmd->opcode);
			free(tmp, M_TEMP);	/* do not leak the temporary copy */
4141 			return (EINVAL);
4142 		}
4143 	}
4144 	free(tmp, M_TEMP);
4145 
4146 	return 0;
4147 }
4148 
4149 static int
4150 convert_rule_to_8(struct ip_fw_rule0 *rule)
4151 {
4152 	/* Used to modify original rule */
4153 	struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
4154 
4155 	/* Used to copy commands */
4156 	ipfw_insn *ccmd, *dst;
4157 	int ll = 0, ccmdlen = 0;
4158 
4159 	/* Copy of original rule */
4160 	struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4161 	if (tmp == NULL) {
4162 		return (ENOMEM);
4163 	}
4164 
4165 	bcopy(rule7, tmp, RULE_MAXSIZE);
4166 
4167 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
4168 			ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4169 		ccmdlen = F_LEN(ccmd);
4170 
4171 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4172 
4173 		if (dst->opcode > O_NAT)
4174 			/* O_REASS does not exist in the 7.2 version, so
4175 			 * increment any opcode numbered above O_NAT.
4176 			 */
4177 			dst->opcode++;
4178 
4179 		if (ccmdlen > ll) {
4180 			printf("ipfw: opcode %d size truncated\n",
4181 			    ccmd->opcode);
			free(tmp, M_TEMP);	/* do not leak the temporary copy */
4182 			return (EINVAL);
4183 		}
4184 	}
4185 
4186 	rule->_pad = tmp->_pad;
4187 	rule->set = tmp->set;
4188 	rule->rulenum = tmp->rulenum;
4189 	rule->cmd_len = tmp->cmd_len;
4190 	rule->act_ofs = tmp->act_ofs;
4191 	rule->next_rule = (struct ip_fw *)tmp->next_rule;
4193 	rule->id = 0; /* XXX check whether 0 is a safe value here */
4194 	rule->pcnt = tmp->pcnt;
4195 	rule->bcnt = tmp->bcnt;
4196 	rule->timestamp = tmp->timestamp;
4197 
4198 	free(tmp, M_TEMP);
4199 	return 0;
4200 }
4201 
4202 /*
4203  * Named object API: maps object names and kernel indices (kidx)
4204  * to named_object structures kept in two hash tables.
4205  */
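/*
 * Typical (kernel-side) usage sketch; a minimal illustration assuming a
 * caller-initialized 'struct named_object *no' with name, set, etlv and
 * kidx already filled in:
 *
 *	struct namedobj_instance *ni;
 *	struct named_object *found;
 *
 *	ni = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
 *	ipfw_objhash_add(ni, no);		// indexed by name and by kidx
 *	found = ipfw_objhash_lookup_name(ni, no->set, no->name);
 *	if (found != NULL && found == ipfw_objhash_lookup_kidx(ni, no->kidx))
 *		...				// same object, both indexes
 *	ipfw_objhash_del(ni, no);
 *	ipfw_objhash_destroy(ni);
 */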
4206 
4207 void
4208 ipfw_init_srv(struct ip_fw_chain *ch)
4209 {
4210 
4211 	ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
4212 	ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
4213 	    M_IPFW, M_WAITOK | M_ZERO);
4214 }
4215 
4216 void
4217 ipfw_destroy_srv(struct ip_fw_chain *ch)
4218 {
4219 
4220 	free(ch->srvstate, M_IPFW);
4221 	ipfw_objhash_destroy(ch->srvmap);
4222 }
4223 
4224 /*
4225  * Allocate new bitmask which can be used to enlarge/shrink
4226  * Allocate a new bitmask which can be used to enlarge/shrink
4227  * the named instance index.
4228 void
4229 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
4230 {
4231 	size_t size;
4232 	int max_blocks;
4233 	u_long *idx_mask;
4234 
4235 	KASSERT((items % BLOCK_ITEMS) == 0,
4236 	   ("bitmask size must be a multiple of %zu",
4237 	    BLOCK_ITEMS));
4238 
4239 	max_blocks = items / BLOCK_ITEMS;
4240 	size = items / 8;
4241 	idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
4242 	/* Mark all as free */
4243 	memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
4244 	*idx_mask &= ~(u_long)1; /* Skip index 0 */
4245 
4246 	*idx = idx_mask;
4247 	*pblocks = max_blocks;
4248 }
4249 
4250 /*
4251  * Copy current bitmask index to new one.
4252  */
4253 void
4254 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
4255 {
4256 	int old_blocks, new_blocks;
4257 	u_long *old_idx, *new_idx;
4258 	int i;
4259 
4260 	old_idx = ni->idx_mask;
4261 	old_blocks = ni->max_blocks;
4262 	new_idx = *idx;
4263 	new_blocks = *blocks;
4264 
4265 	for (i = 0; i < IPFW_MAX_SETS; i++) {
4266 		memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
4267 		    old_blocks * sizeof(u_long));
4268 	}
4269 }
4270 
4271 /*
4272  * Swaps current @ni index with new one.
4273  */
4274 void
4275 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
4276 {
4277 	int old_blocks;
4278 	u_long *old_idx;
4279 
4280 	old_idx = ni->idx_mask;
4281 	old_blocks = ni->max_blocks;
4282 
4283 	ni->idx_mask = *idx;
4284 	ni->max_blocks = *blocks;
4285 
4286 	/* Save old values */
4287 	*idx = old_idx;
4288 	*blocks = old_blocks;
4289 }
4290 
4291 void
4292 ipfw_objhash_bitmap_free(void *idx, int blocks)
4293 {
4294 
4295 	free(idx, M_IPFW);
4296 }
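/*
 * The bitmap helpers above are normally used together when growing the
 * index space of an existing instance.  A minimal sketch (the lock and
 * the 'ch' chain pointer are shown only as hints; the exact locking is
 * up to the caller):
 *
 *	void *new_idx;
 *	int new_blocks;
 *
 *	ipfw_objhash_bitmap_alloc(new_items, &new_idx, &new_blocks);
 *	IPFW_UH_WLOCK(ch);
 *	ipfw_objhash_bitmap_merge(ni, &new_idx, &new_blocks);	// copy state
 *	ipfw_objhash_bitmap_swap(ni, &new_idx, &new_blocks);	// install new
 *	IPFW_UH_WUNLOCK(ch);
 *	ipfw_objhash_bitmap_free(new_idx, new_blocks);		// old bitmask
 */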
4297 
4298 /*
4299  * Creates named hash instance.
4300  * Must be called without holding any locks.
4301  * Returns a pointer to the new instance.
4302  */
4303 struct namedobj_instance *
4304 ipfw_objhash_create(uint32_t items)
4305 {
4306 	struct namedobj_instance *ni;
4307 	int i;
4308 	size_t size;
4309 
4310 	size = sizeof(struct namedobj_instance) +
4311 	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
4312 	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
4313 
4314 	ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
4315 	ni->nn_size = NAMEDOBJ_HASH_SIZE;
4316 	ni->nv_size = NAMEDOBJ_HASH_SIZE;
4317 
4318 	ni->names = (struct namedobjects_head *)(ni + 1);
4319 	ni->values = &ni->names[ni->nn_size];
4320 
4321 	for (i = 0; i < ni->nn_size; i++)
4322 		TAILQ_INIT(&ni->names[i]);
4323 
4324 	for (i = 0; i < ni->nv_size; i++)
4325 		TAILQ_INIT(&ni->values[i]);
4326 
4327 	/* Set default hashing/comparison functions */
4328 	ni->hash_f = objhash_hash_name;
4329 	ni->cmp_f = objhash_cmp_name;
4330 
4331 	/* Allocate bitmask separately due to possible resize */
4332 	ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
4333 
4334 	return (ni);
4335 }
4336 
4337 void
4338 ipfw_objhash_destroy(struct namedobj_instance *ni)
4339 {
4340 
4341 	free(ni->idx_mask, M_IPFW);
4342 	free(ni, M_IPFW);
4343 }
4344 
4345 void
4346 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
4347     objhash_cmp_f *cmp_f)
4348 {
4349 
4350 	ni->hash_f = hash_f;
4351 	ni->cmp_f = cmp_f;
4352 }
4353 
4354 static uint32_t
4355 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set)
4356 {
4357 
4358 	return (fnv_32_str((const char *)name, FNV1_32_INIT));
4359 }
4360 
4361 static int
4362 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set)
4363 {
4364 
4365 	if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set))
4366 		return (0);
4367 
4368 	return (1);
4369 }
4370 
4371 static uint32_t
4372 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
4373 {
4374 	uint32_t v;
4375 
4376 	v = val % (ni->nv_size - 1);
4377 
4378 	return (v);
4379 }
4380 
4381 struct named_object *
4382 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
4383 {
4384 	struct named_object *no;
4385 	uint32_t hash;
4386 
4387 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
4388 
4389 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4390 		if (ni->cmp_f(no, name, set) == 0)
4391 			return (no);
4392 	}
4393 
4394 	return (NULL);
4395 }
4396 
4397 /*
4398  * Finds the name TLV matching @uidx (and @etlv, when non-zero).
4399  * Checks @tlvs for valid data inside.
4400  *
4401  * Returns a pointer to the matching TLV or NULL.
4402  */
4403 ipfw_obj_ntlv *
4404 ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv)
4405 {
4406 	ipfw_obj_ntlv *ntlv;
4407 	uintptr_t pa, pe;
4408 	int l;
4409 
4410 	pa = (uintptr_t)tlvs;
4411 	pe = pa + len;
4412 	l = 0;
4413 	for (; pa < pe; pa += l) {
4414 		ntlv = (ipfw_obj_ntlv *)pa;
4415 		l = ntlv->head.length;
4416 
4417 		if (l != sizeof(*ntlv))
4418 			return (NULL);
4419 
4420 		if (ntlv->idx != uidx)
4421 			continue;
4422 		/*
4423 		 * When userland has specified a zero TLV type, do
4424 		 * not compare it with @etlv: in some cases userland
4425 		 * does not know which type the object should have.
4426 		 * Use only uidx and name to search for the named object.
4427 		 */
4428 		if (ntlv->head.type != 0 &&
4429 		    ntlv->head.type != (uint16_t)etlv)
4430 			continue;
4431 
4432 		if (ipfw_check_object_name_generic(ntlv->name) != 0)
4433 			return (NULL);
4434 
4435 		return (ntlv);
4436 	}
4437 
4438 	return (NULL);
4439 }
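/*
 * Userland-side sketch of a name TLV as consumed above.  'etlv_type' and
 * 'uidx' are placeholders, and only the fields this function reads are
 * shown; the full structure layout lives in ip_fw.h:
 *
 *	ipfw_obj_ntlv ntlv;
 *
 *	memset(&ntlv, 0, sizeof(ntlv));
 *	ntlv.head.type = etlv_type;		// object type, or 0 for "any"
 *	ntlv.head.length = sizeof(ntlv);	// must be exactly sizeof(ntlv)
 *	ntlv.idx = uidx;			// index referenced by the rule
 *	strlcpy(ntlv.name, "mytable", sizeof(ntlv.name));
 */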
4440 
4441 /*
4442  * Finds object config based on either legacy index
4443  * or name in ntlv.
4444  * Note that the @ti structure contains unchecked data from userland.
4445  *
4446  * Returns 0 on success and fills in @pno with the found config.
4447  */
4448 int
4449 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
4450     uint32_t etlv, struct named_object **pno)
4451 {
4452 	char *name;
4453 	ipfw_obj_ntlv *ntlv;
4454 	uint32_t set;
4455 
4456 	if (ti->tlvs == NULL)
4457 		return (EINVAL);
4458 
4459 	ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
4460 	if (ntlv == NULL)
4461 		return (EINVAL);
4462 	name = ntlv->name;
4463 
4464 	/*
4465 	 * Use set provided by @ti instead of @ntlv one.
4466 	 * This is needed due to different sets behavior
4467 	 * controlled by V_fw_tables_sets.
4468 	 */
4469 	set = ti->set;
4470 	*pno = ipfw_objhash_lookup_name(ni, set, name);
4471 	if (*pno == NULL)
4472 		return (ESRCH);
4473 	return (0);
4474 }
4475 
4476 /*
4477  * Find named object by name, considering also its TLV type.
4478  */
4479 struct named_object *
4480 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
4481     uint32_t type, const char *name)
4482 {
4483 	struct named_object *no;
4484 	uint32_t hash;
4485 
4486 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
4487 
4488 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4489 		if (ni->cmp_f(no, name, set) == 0 &&
4490 		    no->etlv == (uint16_t)type)
4491 			return (no);
4492 	}
4493 
4494 	return (NULL);
4495 }
4496 
4497 struct named_object *
4498 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
4499 {
4500 	struct named_object *no;
4501 	uint32_t hash;
4502 
4503 	hash = objhash_hash_idx(ni, kidx);
4504 
4505 	TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
4506 		if (no->kidx == kidx)
4507 			return (no);
4508 	}
4509 
4510 	return (NULL);
4511 }
4512 
4513 int
4514 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
4515     struct named_object *b)
4516 {
4517 
4518 	if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
4519 		return (1);
4520 
4521 	return (0);
4522 }
4523 
4524 void
4525 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
4526 {
4527 	uint32_t hash;
4528 
4529 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4530 	TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
4531 
4532 	hash = objhash_hash_idx(ni, no->kidx);
4533 	TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
4534 
4535 	ni->count++;
4536 }
4537 
4538 void
4539 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
4540 {
4541 	uint32_t hash;
4542 
4543 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4544 	TAILQ_REMOVE(&ni->names[hash], no, nn_next);
4545 
4546 	hash = objhash_hash_idx(ni, no->kidx);
4547 	TAILQ_REMOVE(&ni->values[hash], no, nv_next);
4548 
4549 	ni->count--;
4550 }
4551 
4552 uint32_t
4553 ipfw_objhash_count(struct namedobj_instance *ni)
4554 {
4555 
4556 	return (ni->count);
4557 }
4558 
4559 uint32_t
4560 ipfw_objhash_count_type(struct namedobj_instance *ni, uint16_t type)
4561 {
4562 	struct named_object *no;
4563 	uint32_t count;
4564 	int i;
4565 
4566 	count = 0;
4567 	for (i = 0; i < ni->nn_size; i++) {
4568 		TAILQ_FOREACH(no, &ni->names[i], nn_next) {
4569 			if (no->etlv == type)
4570 				count++;
4571 		}
4572 	}
4573 	return (count);
4574 }
4575 
4576 /*
4577  * Runs @f for each named object found.
4578  * It is safe to delete objects from within the callback.
4579  */
4580 int
4581 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
4582 {
4583 	struct named_object *no, *no_tmp;
4584 	int i, ret;
4585 
4586 	for (i = 0; i < ni->nn_size; i++) {
4587 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4588 			ret = f(ni, no, arg);
4589 			if (ret != 0)
4590 				return (ret);
4591 		}
4592 	}
4593 	return (0);
4594 }
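/*
 * Callback sketch: because the walk uses TAILQ_FOREACH_SAFE, a callback
 * may remove the object it is handed.  'flush_cb' below is illustrative
 * only (freeing the object itself remains the caller's responsibility):
 *
 *	static int
 *	flush_cb(struct namedobj_instance *ni, struct named_object *no,
 *	    void *arg)
 *	{
 *
 *		ipfw_objhash_del(ni, no);	// safe inside the walk
 *		return (0);			// nonzero stops the walk
 *	}
 *
 *	ipfw_objhash_foreach(ni, flush_cb, NULL);
 */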
4595 
4596 /*
4597  * Runs @f for each named object found with type @type.
4598  * It is safe to delete objects from within the callback.
4599  */
4600 int
4601 ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f,
4602     void *arg, uint16_t type)
4603 {
4604 	struct named_object *no, *no_tmp;
4605 	int i, ret;
4606 
4607 	for (i = 0; i < ni->nn_size; i++) {
4608 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4609 			if (no->etlv != type)
4610 				continue;
4611 			ret = f(ni, no, arg);
4612 			if (ret != 0)
4613 				return (ret);
4614 		}
4615 	}
4616 	return (0);
4617 }
4618 
4619 /*
4620  * Marks the given index as free in the allocation bitmask.
4621  * Returns 0 on success.
4622  */
4623 int
4624 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
4625 {
4626 	u_long *mask;
4627 	int i, v;
4628 
4629 	i = idx / BLOCK_ITEMS;
4630 	v = idx % BLOCK_ITEMS;
4631 
4632 	if (i >= ni->max_blocks)
4633 		return (1);
4634 
4635 	mask = &ni->idx_mask[i];
4636 
4637 	if ((*mask & ((u_long)1 << v)) != 0)
4638 		return (1);
4639 
4640 	/* Mark as free */
4641 	*mask |= (u_long)1 << v;
4642 
4643 	/* Update free offset */
4644 	if (ni->free_off[0] > i)
4645 		ni->free_off[0] = i;
4646 
4647 	return (0);
4648 }
4649 
4650 /*
4651  * Allocates a new index in the given instance and stores it in @pidx.
4652  * Returns 0 on success.
4653  */
4654 int
4655 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
4656 {
4657 	struct namedobj_instance *ni;
4658 	u_long *mask;
4659 	int i, off, v;
4660 
4661 	ni = (struct namedobj_instance *)n;
4662 
4663 	off = ni->free_off[0];
4664 	mask = &ni->idx_mask[off];
4665 
4666 	for (i = off; i < ni->max_blocks; i++, mask++) {
4667 		if ((v = ffsl(*mask)) == 0)
4668 			continue;
4669 
4670 		/* Mark as busy */
4671 		*mask &= ~ ((u_long)1 << (v - 1));
4672 
4673 		ni->free_off[0] = i;
4674 
4675 		v = BLOCK_ITEMS * i + v - 1;
4676 
4677 		*pidx = v;
4678 		return (0);
4679 	}
4680 
4681 	return (1);
4682 }
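/*
 * Index allocation sketch: kidx values come from the per-instance bitmap
 * (index 0 is reserved at allocation time) and are returned to it when
 * the object goes away.  ENOSPC below is an illustrative error choice:
 *
 *	uint16_t kidx;
 *
 *	if (ipfw_objhash_alloc_idx(ni, &kidx) != 0)
 *		return (ENOSPC);		// bitmap exhausted
 *	no->kidx = kidx;
 *	...
 *	ipfw_objhash_free_idx(ni, no->kidx);	// mark index free again
 */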
4683 
4684 /* end of file */
4685