xref: /freebsd/sys/netpfil/ipfw/ip_fw_sockopt.c (revision b405250c77e6841a8159a4081d4e0f61e49dfbf8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
5  * Copyright (c) 2014-2025 Yandex LLC
6  * Copyright (c) 2014 Alexander V. Chernikov
7  *
8  * Supported by: Valeria Paoli
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 /*
34  * Control socket and rule management routines for ipfw.
35  * Control is currently implemented via IP_FW3 setsockopt() code.
36  */
37 
38 #include "opt_ipfw.h"
39 #include "opt_inet.h"
40 #ifndef INET
41 #error IPFIREWALL requires INET.
42 #endif /* INET */
43 #include "opt_inet6.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>	/* struct m_tag used by nested headers */
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/rmlock.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/fnv_hash.h>
60 #include <net/if.h>
61 #include <net/route.h>
62 #include <net/vnet.h>
63 #include <vm/vm.h>
64 #include <vm/vm_extern.h>
65 
66 #include <netinet/in.h>
67 #include <netinet/ip_var.h> /* hooks */
68 #include <netinet/ip_fw.h>
69 
70 #include <netpfil/ipfw/ip_fw_private.h>
71 #include <netpfil/ipfw/ip_fw_table.h>
72 
73 #ifdef MAC
74 #include <security/mac/mac_framework.h>
75 #endif
76 
/*
 * Default opcode-check handler, used while no compatibility module is
 * registered: unconditionally rejects the opcode.
 */
static enum ipfw_opcheck_result
check_opcode_compat_nop(ipfw_insn **pcmd, int *plen,
    struct rule_check_info *ci)
{
	/* Compatibility code is not registered */
	return (FAILED);
}
84 
85 static ipfw_check_opcode_t check_opcode_f = check_opcode_compat_nop;
86 
87 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
88     struct rule_check_info *ci);
89 static int rewrite_rule_uidx(struct ip_fw_chain *chain,
90     struct rule_check_info *ci);
91 
/*
 * Storage for named kernel objects: two hashes (by name and by kernel
 * index) plus a bitmask used to allocate free index slots.
 */
struct namedobj_instance {
	struct namedobjects_head	*names;		/* hash by name */
	struct namedobjects_head	*values;	/* hash by kidx */
	uint32_t nn_size;		/* names hash size */
	uint32_t nv_size;		/* number hash size */
	u_long *idx_mask;		/* used items bitmask */
	uint32_t max_blocks;		/* number of "long" blocks in bitmask */
	uint32_t count;			/* number of items */
	uint16_t free_off[IPFW_MAX_SETS];	/* first possible free offset */
	objhash_hash_f	*hash_f;	/* key hash function */
	objhash_cmp_f	*cmp_f;		/* key comparison function */
};
#define	BLOCK_ITEMS	(8 * sizeof(u_long))	/* Number of items for ffsl() */
105 
106 static uint32_t objhash_hash_name(struct namedobj_instance *ni,
107     const void *key, uint32_t kopt);
108 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
109 static int objhash_cmp_name(struct named_object *no, const void *name,
110     uint32_t set);
111 
112 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
113 
114 /* ctl3 handler data */
115 static struct mtx ctl3_lock;
116 #define	CTL3_LOCK_INIT()	mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
117 #define	CTL3_LOCK_DESTROY()	mtx_destroy(&ctl3_lock)
118 #define	CTL3_LOCK()		mtx_lock(&ctl3_lock)
119 #define	CTL3_UNLOCK()		mtx_unlock(&ctl3_lock)
120 
121 static struct ipfw_sopt_handler *ctl3_handlers;
122 static size_t ctl3_hsize;
123 static uint64_t ctl3_refct, ctl3_gencnt;
124 #define	CTL3_SMALLBUF	4096			/* small page-size write buffer */
125 #define	CTL3_LARGEBUF	(16 * 1024 * 1024)	/* handle large rulesets */
126 
127 static int ipfw_flush_sopt_data(struct sockopt_data *sd);
128 
129 static sopt_handler_f dump_config, add_rules, del_rules, clear_rules,
130     move_rules, manage_sets, dump_soptcodes, dump_srvobjects,
131     manage_skiptocache;
132 
/* IP_FW3 sockopt opcodes served by handlers in this file. */
static struct ipfw_sopt_handler scodes[] = {
    { IP_FW_XGET,		IP_FW3_OPVER, HDIR_GET, dump_config },
    { IP_FW_XADD,		IP_FW3_OPVER, HDIR_BOTH, add_rules },
    { IP_FW_XDEL,		IP_FW3_OPVER, HDIR_BOTH, del_rules },
    { IP_FW_XZERO,		IP_FW3_OPVER, HDIR_SET, clear_rules },
    { IP_FW_XRESETLOG,		IP_FW3_OPVER, HDIR_SET, clear_rules },
    { IP_FW_XMOVE,		IP_FW3_OPVER, HDIR_SET, move_rules },
    { IP_FW_SET_SWAP,		IP_FW3_OPVER, HDIR_SET, manage_sets },
    { IP_FW_SET_MOVE,		IP_FW3_OPVER, HDIR_SET, manage_sets },
    { IP_FW_SET_ENABLE,		IP_FW3_OPVER, HDIR_SET, manage_sets },
    { IP_FW_DUMP_SOPTCODES,	IP_FW3_OPVER, HDIR_GET, dump_soptcodes },
    { IP_FW_DUMP_SRVOBJECTS,	IP_FW3_OPVER, HDIR_GET, dump_srvobjects },
    { IP_FW_SKIPTO_CACHE,	IP_FW3_OPVER, HDIR_BOTH, manage_skiptocache },
};
147 
148 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd,
149     uint32_t *puidx, uint8_t *ptype);
150 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
151     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti);
152 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd,
153     struct tid_info *ti, struct obj_idx *pidx, int *unresolved);
154 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
155 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd,
156     struct obj_idx *oib, struct obj_idx *end);
157 static int export_objhash_ntlv(struct namedobj_instance *ni, uint32_t kidx,
158     struct sockopt_data *sd);
159 
160 /*
161  * Opcode object rewriter variables
162  */
163 struct opcode_obj_rewrite *ctl3_rewriters;
164 static size_t ctl3_rsize;
165 
166 /*
167  * static variables followed by global ones
168  */
169 
170 VNET_DEFINE_STATIC(uma_zone_t, ipfw_cntr_zone);
171 #define	V_ipfw_cntr_zone		VNET(ipfw_cntr_zone)
172 
/*
 * Creates the per-VNET UMA zone used to allocate per-CPU rule counters.
 */
void
ipfw_init_counters(void)
{

	V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
	    IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
}
181 
/*
 * Destroys the per-VNET counter zone created by ipfw_init_counters().
 */
void
ipfw_destroy_counters(void)
{

	uma_zdestroy(V_ipfw_cntr_zone);
}
188 
189 struct ip_fw *
ipfw_alloc_rule(struct ip_fw_chain * chain,size_t rulesize)190 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
191 {
192 	struct ip_fw *rule;
193 
194 	rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
195 	rule->cntr = uma_zalloc_pcpu(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
196 	rule->refcnt = 1;
197 
198 	return (rule);
199 }
200 
201 void
ipfw_free_rule(struct ip_fw * rule)202 ipfw_free_rule(struct ip_fw *rule)
203 {
204 
205 	/*
206 	 * We don't release refcnt here, since this function
207 	 * can be called without any locks held. The caller
208 	 * must release reference under IPFW_UH_WLOCK, and then
209 	 * call this function if refcount becomes 1.
210 	 */
211 	if (rule->refcnt > 1)
212 		return;
213 	uma_zfree_pcpu(V_ipfw_cntr_zone, rule->cntr);
214 	free(rule, M_IPFW);
215 }
216 
217 /*
218  * Find the smallest rule >= key, id.
219  * We could use bsearch but it is so simple that we code it directly
220  */
221 int
ipfw_find_rule(struct ip_fw_chain * chain,uint32_t key,uint32_t id)222 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
223 {
224 	int i, lo, hi;
225 	struct ip_fw *r;
226 
227   	for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
228 		i = (lo + hi) / 2;
229 		r = chain->map[i];
230 		if (r->rulenum < key)
231 			lo = i + 1;	/* continue from the next one */
232 		else if (r->rulenum > key)
233 			hi = i;		/* this might be good */
234 		else if (r->id < id)
235 			lo = i + 1;	/* continue from the next one */
236 		else /* r->id >= id */
237 			hi = i;		/* this might be good */
238 	}
239 	return hi;
240 }
241 
/*
 * Builds skipto cache on rule set @map: for each rule number i in
 * [0, IPFW_DEFAULT_RULE], idxmap_back[i] becomes the index in @map of
 * the first rule whose rulenum is >= i.
 */
static void
update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
{
	uint32_t *smap, rulenum;
	int i, mi;

	IPFW_UH_WLOCK_ASSERT(chain);

	mi = 0;		/* index in @map of the current candidate rule */
	rulenum = map[mi]->rulenum;
	smap = chain->idxmap_back;

	/* Cache may not be allocated yet; nothing to build then. */
	if (smap == NULL)
		return;

	for (i = 0; i <= IPFW_DEFAULT_RULE; i++) {
		smap[i] = mi;
		/* Use the same rule index until i < rulenum */
		if (i != rulenum || i == IPFW_DEFAULT_RULE)
			continue;
		/* Find next rule with num > i */
		rulenum = map[++mi]->rulenum;
		/* Skip over all rules sharing the same number */
		while (rulenum == i)
			rulenum = map[++mi]->rulenum;
	}
}
271 
272 /*
273  * Swaps prepared (backup) index with current one.
274  */
275 static void
swap_skipto_cache(struct ip_fw_chain * chain)276 swap_skipto_cache(struct ip_fw_chain *chain)
277 {
278 	uint32_t *map;
279 
280 	IPFW_UH_WLOCK_ASSERT(chain);
281 	IPFW_WLOCK_ASSERT(chain);
282 
283 	map = chain->idxmap;
284 	chain->idxmap = chain->idxmap_back;
285 	chain->idxmap_back = map;
286 }
287 
/*
 * Allocate and initialize skipto cache (two index maps: the active one
 * and a backup used to prepare updates).
 */
void
ipfw_init_skipto_cache(struct ip_fw_chain *chain)
{
	uint32_t *idxmap, *idxmap_back;

	/* One slot per possible rule number, 0..IPFW_DEFAULT_RULE. */
	idxmap = malloc((IPFW_DEFAULT_RULE + 1) * sizeof(uint32_t),
	    M_IPFW, M_WAITOK | M_ZERO);
	idxmap_back = malloc((IPFW_DEFAULT_RULE + 1) * sizeof(uint32_t),
	    M_IPFW, M_WAITOK | M_ZERO);

	/*
	 * Note we may be called at any time after initialization,
	 * for example, on first skipto rule, so we need to
	 * provide valid chain->idxmap on return
	 */

	IPFW_UH_WLOCK(chain);
	/* Lost the race with a concurrent initializer: keep its cache. */
	if (chain->idxmap != NULL) {
		IPFW_UH_WUNLOCK(chain);
		free(idxmap, M_IPFW);
		free(idxmap_back, M_IPFW);
		return;
	}

	/* Set backup pointer first to permit building cache */
	chain->idxmap_back = idxmap_back;
	if (V_skipto_cache != 0)
		update_skipto_cache(chain, chain->map);
	IPFW_WLOCK(chain);
	/* It is now safe to set chain->idxmap ptr */
	chain->idxmap = idxmap;
	/* Publish the freshly built backup map as the active one. */
	swap_skipto_cache(chain);
	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);
}
326 
327 /*
328  * Destroys skipto cache.
329  */
330 void
ipfw_destroy_skipto_cache(struct ip_fw_chain * chain)331 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
332 {
333 	free(chain->idxmap, M_IPFW);
334 	free(chain->idxmap_back, M_IPFW);
335 }
336 
/*
 * allocate a new map, returns the chain locked. extra is the number
 * of entries to add or delete.
 *
 * When @locked is non-zero the UH lock is already held by the caller
 * (and the allocation must not sleep); otherwise the lock is taken
 * here and held on successful return.
 */
static struct ip_fw **
get_map(struct ip_fw_chain *chain, int extra, int locked)
{

	for (;;) {
		struct ip_fw **map;
		u_int i, mflags;

		/* Can't sleep while the chain lock is held. */
		mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);

		i = chain->n_rules + extra;
		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
		if (map == NULL) {
			printf("%s: cannot allocate map\n", __FUNCTION__);
			return NULL;
		}
		if (!locked)
			IPFW_UH_WLOCK(chain);
		/*
		 * The size was sampled before (possibly) sleeping in
		 * malloc(); re-check it now that the lock is held.
		 */
		if (i >= chain->n_rules + extra) /* good */
			return map;
		/* otherwise we lost the race, free and retry */
		if (!locked)
			IPFW_UH_WUNLOCK(chain);
		free(map, M_IPFW);
	}
}
367 
368 /*
369  * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
370  */
371 static struct ip_fw **
swap_map(struct ip_fw_chain * chain,struct ip_fw ** new_map,int new_len)372 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
373 {
374 	struct ip_fw **old_map;
375 
376 	IPFW_WLOCK(chain);
377 	chain->id++;
378 	chain->n_rules = new_len;
379 	old_map = chain->map;
380 	chain->map = new_map;
381 	swap_skipto_cache(chain);
382 	IPFW_WUNLOCK(chain);
383 	return old_map;
384 }
385 
386 static void
export_cntr1_base(struct ip_fw * krule,struct ip_fw_bcounter * cntr)387 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
388 {
389 	struct timeval boottime;
390 
391 	cntr->size = sizeof(*cntr);
392 
393 	if (krule->cntr != NULL) {
394 		cntr->pcnt = counter_u64_fetch(krule->cntr);
395 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
396 		cntr->timestamp = krule->timestamp;
397 	}
398 	if (cntr->timestamp > 0) {
399 		getboottime(&boottime);
400 		cntr->timestamp += boottime.tv_sec;
401 	}
402 }
403 
404 /*
405  * Export rule into v1 format (Current).
406  * Layout:
407  * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
408  *     [ ip_fw_rule ] OR
409  *     [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
410  * ]
411  * Assume @data is zeroed.
412  */
413 static void
export_rule1(struct ip_fw * krule,caddr_t data,int len,int rcntrs)414 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
415 {
416 	struct ip_fw_bcounter *cntr;
417 	struct ip_fw_rule *urule;
418 	ipfw_obj_tlv *tlv;
419 
420 	/* Fill in TLV header */
421 	tlv = (ipfw_obj_tlv *)data;
422 	tlv->type = IPFW_TLV_RULE_ENT;
423 	tlv->length = len;
424 
425 	if (rcntrs != 0) {
426 		/* Copy counters */
427 		cntr = (struct ip_fw_bcounter *)(tlv + 1);
428 		urule = (struct ip_fw_rule *)(cntr + 1);
429 		export_cntr1_base(krule, cntr);
430 	} else
431 		urule = (struct ip_fw_rule *)(tlv + 1);
432 
433 	/* copy header */
434 	urule->act_ofs = krule->act_ofs;
435 	urule->cmd_len = krule->cmd_len;
436 	urule->rulenum = krule->rulenum;
437 	urule->set = krule->set;
438 	urule->flags = krule->flags;
439 	urule->id = krule->id;
440 
441 	/* Copy opcodes */
442 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
443 }
444 
/*
 * Add new rule(s) to the list possibly creating rule number for each.
 * Update the rule_number in the input struct so the caller knows it as well.
 * Must be called without IPFW_UH held
 */
int
ipfw_commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci,
    int count)
{
	int error, i, insert_before, tcount, rule_idx, last_rule_idx;
	uint32_t rulenum;
	struct rule_check_info *ci;
	struct ip_fw *krule;
	struct ip_fw **map;	/* the new array of pointers */

	/* Check if we need to do table/obj index remap */
	tcount = 0;	/* rules whose object references were rewritten */
	for (ci = rci, i = 0; i < count; ci++, i++) {
		if (ci->object_opcodes == 0)
			continue;

		/*
		 * Rule has some object opcodes.
		 * We need to find (and create non-existing)
		 * kernel objects, and reference existing ones.
		 */
		error = rewrite_rule_uidx(chain, ci);
		if (error != 0) {
			/*
			 * rewrite failed, state for current rule
			 * has been reverted. Check if we need to
			 * revert more.
			 */
			if (tcount > 0) {
				/*
				 * We have some more table rules
				 * we need to rollback.
				 */

				IPFW_UH_WLOCK(chain);
				/* Walk backwards over already-rewritten rules */
				while (ci != rci) {
					ci--;
					if (ci->object_opcodes == 0)
						continue;
					unref_rule_objects(chain,ci->krule);

				}
				IPFW_UH_WUNLOCK(chain);
			}

			return (error);
		}

		tcount++;
	}

	/* get_map returns with IPFW_UH_WLOCK if successful */
	map = get_map(chain, count, 0 /* not locked */);
	if (map == NULL) {
		if (tcount > 0) {
			/* Unbind tables */
			IPFW_UH_WLOCK(chain);
			for (ci = rci, i = 0; i < count; ci++, i++) {
				if (ci->object_opcodes == 0)
					continue;

				unref_rule_objects(chain, ci->krule);
			}
			IPFW_UH_WUNLOCK(chain);
		}

		return (ENOSPC);
	}

	/* Clamp auto-numbering step to a sane range. */
	if (V_autoinc_step < 1)
		V_autoinc_step = 1;
	else if (V_autoinc_step > 1000)
		V_autoinc_step = 1000;

	/*
	 * Merge the new rules into a copy of the map.  After i
	 * insertions, indices in the new map are shifted by i relative
	 * to chain->map.  Input rules are assumed sorted by rulenum
	 * (insertion points are found left-to-right).
	 */
	last_rule_idx = 0;
	for (ci = rci, i = 0; i < count; ci++, i++) {
		krule = ci->krule;
		rulenum = krule->rulenum;

		/* Id the chain will carry after swap_map() below. */
		krule->id = chain->id + 1;

		/* find the insertion point, we will insert before */
		insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
		rule_idx = ipfw_find_rule(chain, insert_before, 0);
		/* duplicate the previous part */
		if (last_rule_idx < rule_idx)
			bcopy(chain->map + last_rule_idx, map + last_rule_idx + i,
			    (rule_idx - last_rule_idx) * sizeof(struct ip_fw *));
		last_rule_idx = rule_idx;
		map[rule_idx + i] = krule;
		if (rulenum == 0) {
			/* Compute rule number and write it back */
			rulenum = rule_idx + i > 0 ? map[rule_idx + i - 1]->rulenum : 0;
			if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
				rulenum += V_autoinc_step;
			krule->rulenum = rulenum;
			/* Save number to userland rule */
			memcpy((char *)ci->urule + ci->urule_numoff, &rulenum,
			    sizeof(rulenum));
		}
	}

	/* duplicate the remaining part, we always have the default rule */
	bcopy(chain->map + last_rule_idx, map + last_rule_idx + count,
	    (chain->n_rules - last_rule_idx) * sizeof(struct ip_fw *));

	if (V_skipto_cache != 0)
		update_skipto_cache(chain, map);
	/* swap_map() returns the old map, which we free below. */
	map = swap_map(chain, map, chain->n_rules + count);
	IPFW_UH_WUNLOCK(chain);
	if (map)
		free(map, M_IPFW);
	return (0);
}
567 
568 int
ipfw_add_protected_rule(struct ip_fw_chain * chain,struct ip_fw * rule,int locked)569 ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule,
570     int locked)
571 {
572 	struct ip_fw **map;
573 
574 	map = get_map(chain, 1, locked);
575 	if (map == NULL)
576 		return (ENOMEM);
577 	if (chain->n_rules > 0)
578 		bcopy(chain->map, map,
579 		    chain->n_rules * sizeof(struct ip_fw *));
580 	map[chain->n_rules] = rule;
581 	rule->rulenum = IPFW_DEFAULT_RULE;
582 	rule->set = RESVD_SET;
583 	rule->id = chain->id + 1;
584 	/* We add rule in the end of chain, no need to update skipto cache */
585 	map = swap_map(chain, map, chain->n_rules + 1);
586 	IPFW_UH_WUNLOCK(chain);
587 	free(map, M_IPFW);
588 	return (0);
589 }
590 
591 /*
592  * Adds @rule to the list of rules to reap
593  */
594 void
ipfw_reap_add(struct ip_fw_chain * chain,struct ip_fw ** head,struct ip_fw * rule)595 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
596     struct ip_fw *rule)
597 {
598 
599 	IPFW_UH_WLOCK_ASSERT(chain);
600 
601 	/* Unlink rule from everywhere */
602 	unref_rule_objects(chain, rule);
603 
604 	rule->next = *head;
605 	*head = rule;
606 }
607 
608 /*
609  * Reclaim storage associated with a list of rules.  This is
610  * typically the list created using remove_rule.
611  * A NULL pointer on input is handled correctly.
612  */
613 void
ipfw_reap_rules(struct ip_fw * head)614 ipfw_reap_rules(struct ip_fw *head)
615 {
616 	struct ip_fw *rule;
617 
618 	while ((rule = head) != NULL) {
619 		head = head->next;
620 		ipfw_free_rule(rule);
621 	}
622 }
623 
624 /*
625  * Rules to keep are
626  *	(default || reserved || !match_set || !match_number)
627  * where
628  *   default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
629  *	// the default rule is always protected
630  *
631  *   reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
632  *	// RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
633  *
634  *   match_set ::= (cmd == 0 || rule->set == set)
635  *	// set number is ignored for cmd == 0
636  *
637  *   match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
638  *	// number is ignored for cmd == 1 or n == 0
639  *
640  */
641 int
ipfw_match_range(struct ip_fw * rule,ipfw_range_tlv * rt)642 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
643 {
644 
645 	/* Don't match default rule for modification queries */
646 	if (rule->rulenum == IPFW_DEFAULT_RULE &&
647 	    (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
648 		return (0);
649 
650 	/* Don't match rules in reserved set for flush requests */
651 	if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
652 		return (0);
653 
654 	/* If we're filtering by set, don't match other sets */
655 	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
656 		return (0);
657 
658 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
659 	    (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
660 		return (0);
661 
662 	return (1);
663 }
664 
/* Argument bundle for the objhash set-management callbacks below. */
struct manage_sets_args {
	uint32_t	set;		/* source set (or kidx, cmd-dependent) */
	uint8_t		new_set;	/* destination set */
};
669 
670 static int
swap_sets_cb(struct namedobj_instance * ni,struct named_object * no,void * arg)671 swap_sets_cb(struct namedobj_instance *ni, struct named_object *no,
672     void *arg)
673 {
674 	struct manage_sets_args *args;
675 
676 	args = (struct manage_sets_args *)arg;
677 	if (no->set == (uint8_t)args->set)
678 		no->set = args->new_set;
679 	else if (no->set == args->new_set)
680 		no->set = (uint8_t)args->set;
681 	return (0);
682 }
683 
684 static int
move_sets_cb(struct namedobj_instance * ni,struct named_object * no,void * arg)685 move_sets_cb(struct namedobj_instance *ni, struct named_object *no,
686     void *arg)
687 {
688 	struct manage_sets_args *args;
689 
690 	args = (struct manage_sets_args *)arg;
691 	if (no->set == (uint8_t)args->set)
692 		no->set = args->new_set;
693 	return (0);
694 }
695 
696 static int
test_sets_cb(struct namedobj_instance * ni,struct named_object * no,void * arg)697 test_sets_cb(struct namedobj_instance *ni, struct named_object *no,
698     void *arg)
699 {
700 	struct manage_sets_args *args;
701 
702 	args = (struct manage_sets_args *)arg;
703 	if (no->set != (uint8_t)args->set)
704 		return (0);
705 	if (ipfw_objhash_lookup_name_type(ni, args->new_set,
706 	    no->etlv, no->name) != NULL)
707 		return (EEXIST);
708 	return (0);
709 }
710 
/*
 * Generic function to handle moving and swapping sets for objects of
 * the given @type.  For the *_ONE commands @set carries a kernel
 * object index (kidx) rather than a set number.
 */
int
ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type,
    uint32_t set, uint8_t new_set, enum ipfw_sets_cmd cmd)
{
	struct manage_sets_args args;
	struct named_object *no;

	args.set = set;
	args.new_set = new_set;
	switch (cmd) {
	case SWAP_ALL:
		return (ipfw_objhash_foreach_type(ni, swap_sets_cb,
		    &args, type));
	case TEST_ALL:
		return (ipfw_objhash_foreach_type(ni, test_sets_cb,
		    &args, type));
	case MOVE_ALL:
		return (ipfw_objhash_foreach_type(ni, move_sets_cb,
		    &args, type));
	case COUNT_ONE:
		/*
		 * @set used to pass kidx.
		 * When @new_set is zero - reset object counter,
		 * otherwise increment it.
		 */
		/* NOTE(review): assumes @set is a valid kidx; lookup result is not NULL-checked. */
		no = ipfw_objhash_lookup_kidx(ni, set);
		if (new_set != 0)
			no->ocnt++;
		else
			no->ocnt = 0;
		return (0);
	case TEST_ONE:
		/* @set used to pass kidx */
		no = ipfw_objhash_lookup_kidx(ni, set);
		/*
		 * First check number of references:
		 * when it differs, this mean other rules are holding
		 * reference to given object, so it is not possible to
		 * change its set. Note that refcnt may account references
		 * to some going-to-be-added rules. Since we don't know
		 * their numbers (and even if they will be added) it is
		 * perfectly OK to return error here.
		 */
		if (no->ocnt != no->refcnt)
			return (EBUSY);
		if (ipfw_objhash_lookup_name_type(ni, new_set, type,
		    no->name) != NULL)
			return (EEXIST);
		return (0);
	case MOVE_ONE:
		/* @set used to pass kidx */
		no = ipfw_objhash_lookup_kidx(ni, set);
		no->set = new_set;
		return (0);
	}
	return (EINVAL);
}
771 
/*
 * Delete rules matching range @rt.
 * Saves number of deleted rules in @ndel.
 *
 * Returns 0 on success.
 */
int
delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
{
	struct ip_fw *reap, *rule, **map;
	uint32_t end, start;
	int i, n, ndyn, ofs;

	reap = NULL;	/* rules to free once the lock is dropped */
	IPFW_UH_WLOCK(chain);	/* arbitrate writers */

	/*
	 * Stage 1: Determine range to inspect.
	 * Range is half-inclusive, e.g [start, end).
	 */
	start = 0;
	end = chain->n_rules - 1;	/* keeps the default rule out */

	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
		start = ipfw_find_rule(chain, rt->start_rule, 0);

		if (rt->end_rule >= IPFW_DEFAULT_RULE)
			rt->end_rule = IPFW_DEFAULT_RULE - 1;
		/* UINT32_MAX id: @end lands past all rules numbered end_rule */
		end = ipfw_find_rule(chain, rt->end_rule, UINT32_MAX);
	}

	if (rt->flags & IPFW_RCFLAG_DYNAMIC) {
		/*
		 * Requested deleting only for dynamic states.
		 */
		*ndel = 0;
		ipfw_expire_dyn_states(chain, rt);
		IPFW_UH_WUNLOCK(chain);
		return (0);
	}

	/* Allocate new map of the same size */
	map = get_map(chain, 0, 1 /* locked */);
	if (map == NULL) {
		IPFW_UH_WUNLOCK(chain);
		return (ENOMEM);
	}

	n = 0;		/* number of rules removed */
	ndyn = 0;	/* removed rules that keep dynamic state */
	ofs = start;	/* write position in the new map */
	/* 1. bcopy the initial part of the map */
	if (start > 0)
		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
	/* 2. copy active rules between start and end */
	for (i = start; i < end; i++) {
		rule = chain->map[i];
		if (ipfw_match_range(rule, rt) == 0) {
			map[ofs++] = rule;
			continue;
		}

		n++;
		if (ipfw_is_dyn_rule(rule) != 0)
			ndyn++;
	}
	/* 3. copy the final part of the map */
	bcopy(chain->map + end, map + ofs,
		(chain->n_rules - end) * sizeof(struct ip_fw *));
	/* 4. recalculate skipto cache */
	update_skipto_cache(chain, map);
	/* 5. swap the maps (under UH_WLOCK + WHLOCK) */
	map = swap_map(chain, map, chain->n_rules - n);
	/* 6. Remove all dynamic states originated by deleted rules */
	if (ndyn > 0)
		ipfw_expire_dyn_states(chain, rt);
	/* 7. now remove the rules deleted from the old map */
	for (i = start; i < end; i++) {
		rule = map[i];	/* @map is the OLD map after the swap */
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		ipfw_reap_add(chain, &reap, rule);
	}
	IPFW_UH_WUNLOCK(chain);

	ipfw_reap_rules(reap);
	if (map != NULL)
		free(map, M_IPFW);
	*ndel = n;
	return (0);
}
863 
/*
 * Tries to move the named objects referenced by rules matching @rt
 * into set @rt->new_set, via the per-opcode manage_sets() callbacks.
 * Returns 0 on success, or the ownership/name-clash error otherwise.
 */
static int
move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
{
	struct opcode_obj_rewrite *rw;
	struct ip_fw *rule;
	ipfw_insn *cmd;
	uint32_t kidx;
	int cmdlen, i, l, c;

	IPFW_UH_WLOCK_ASSERT(ch);

	/* Stage 1: count number of references by given rules */
	for (c = 0, i = 0; i < ch->n_rules - 1; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/*
			 * When manage_sets() returns non-zero value to
			 * COUNT_ONE command, consider this as an object
			 * doesn't support sets (e.g. disabled with sysctl).
			 * So, skip checks for this object.
			 */
			if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
				continue;
			c++;
		}
	}
	if (c == 0) /* No objects found */
		return (0);
	/* Stage 2: verify "ownership" */
	for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/* Test for ownership and conflicting names */
			c = rw->manage_sets(ch, kidx,
			    (uint8_t)rt->new_set, TEST_ONE);
		}
	}
	/*
	 * Stage 3: change set and cleanup.
	 * The counters bumped in stage 1 are always reset here; the
	 * actual move happens only when stage 2 found no conflict
	 * (c == 0).
	 */
	for (i = 0; i < ch->n_rules - 1; i++) {
		rule = ch->map[i];
		if (ipfw_match_range(rule, rt) == 0)
			continue;
		if (rule->set == rt->new_set) /* nothing to do */
			continue;
		/* Search opcodes with named objects */
		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
		    l > 0; l -= cmdlen, cmd += cmdlen) {
			cmdlen = F_LEN(cmd);
			rw = find_op_rw(cmd, &kidx, NULL);
			if (rw == NULL || rw->manage_sets == NULL)
				continue;
			/* cleanup object counter */
			rw->manage_sets(ch, kidx,
			    0 /* reset counter */, COUNT_ONE);
			if (c != 0)
				continue;
			/* change set */
			rw->manage_sets(ch, kidx,
			    (uint8_t)rt->new_set, MOVE_ONE);
		}
	}
	return (c);
}
947 
948 /*
949  * Changes set of given rule rannge @rt
950  * with each other.
951  *
952  * Returns 0 on success.
953  */
954 static int
move_range(struct ip_fw_chain * chain,ipfw_range_tlv * rt)955 move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
956 {
957 	struct ip_fw *rule;
958 	int i;
959 
960 	IPFW_UH_WLOCK(chain);
961 
962 	/*
963 	 * Move rules with matching paramenerts to a new set.
964 	 * This one is much more complex. We have to ensure
965 	 * that all referenced tables (if any) are referenced
966 	 * by given rule subset only. Otherwise, we can't move
967 	 * them to new set and have to return error.
968 	 */
969 	if ((i = move_objects(chain, rt)) != 0) {
970 		IPFW_UH_WUNLOCK(chain);
971 		return (i);
972 	}
973 
974 	/* XXX: We have to do swap holding WLOCK */
975 	for (i = 0; i < chain->n_rules; i++) {
976 		rule = chain->map[i];
977 		if (ipfw_match_range(rule, rt) == 0)
978 			continue;
979 		rule->set = rt->new_set;
980 	}
981 
982 	IPFW_UH_WUNLOCK(chain);
983 
984 	return (0);
985 }
986 
987 /*
988  * Returns pointer to action instruction, skips all possible rule
989  * modifiers like O_LOG, O_TAG, O_ALTQ.
990  */
991 ipfw_insn *
ipfw_get_action(struct ip_fw * rule)992 ipfw_get_action(struct ip_fw *rule)
993 {
994 	ipfw_insn *cmd;
995 	int l, cmdlen;
996 
997 	cmd = ACTION_PTR(rule);
998 	l = rule->cmd_len - rule->act_ofs;
999 	while (l > 0) {
1000 		switch (cmd->opcode) {
1001 		case O_ALTQ:
1002 		case O_LOG:
1003 		case O_TAG:
1004 			break;
1005 		default:
1006 			return (cmd);
1007 		}
1008 		cmdlen = F_LEN(cmd);
1009 		l -= cmdlen;
1010 		cmd += cmdlen;
1011 	}
1012 	panic("%s: rule (%p) has not action opcode", __func__, rule);
1013 	return (NULL);
1014 }
1015 
1016 /*
1017  * Clear counters for a specific rule.
1018  * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
1019  * so we only care that rules do not disappear.
1020  */
1021 static void
clear_counters(struct ip_fw * rule,int log_only)1022 clear_counters(struct ip_fw *rule, int log_only)
1023 {
1024 	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
1025 
1026 	if (log_only == 0)
1027 		IPFW_ZERO_RULE_COUNTER(rule);
1028 	if (l->o.opcode == O_LOG)
1029 		l->log_left = l->max_log;
1030 }
1031 
1032 /*
1033  * Flushes rules counters and/or log values on matching range.
1034  *
1035  * Returns number of items cleared.
1036  */
1037 static int
clear_range(struct ip_fw_chain * chain,ipfw_range_tlv * rt,int log_only)1038 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
1039 {
1040 	struct ip_fw *rule;
1041 	int num;
1042 	int i;
1043 
1044 	num = 0;
1045 	rt->flags |= IPFW_RCFLAG_DEFAULT;
1046 
1047 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
1048 	for (i = 0; i < chain->n_rules; i++) {
1049 		rule = chain->map[i];
1050 		if (ipfw_match_range(rule, rt) == 0)
1051 			continue;
1052 		clear_counters(rule, log_only);
1053 		num++;
1054 	}
1055 	IPFW_UH_WUNLOCK(chain);
1056 
1057 	return (num);
1058 }
1059 
1060 static int
check_range_tlv(ipfw_range_tlv * rt)1061 check_range_tlv(ipfw_range_tlv *rt)
1062 {
1063 
1064 	if (rt->head.length != sizeof(*rt))
1065 		return (1);
1066 	if (rt->start_rule > rt->end_rule)
1067 		return (1);
1068 	if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1069 		return (1);
1070 
1071 	if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1072 		return (1);
1073 
1074 	return (0);
1075 }
1076 
1077 /*
1078  * Delete rules matching specified parameters
1079  * Data layout (v0)(current):
1080  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1081  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1082  *
1083  * Saves number of deleted rules in ipfw_range_tlv->new_set.
1084  *
1085  * Returns 0 on success.
1086  */
1087 static int
del_rules(struct ip_fw_chain * chain,ip_fw3_opheader * op3,struct sockopt_data * sd)1088 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1089     struct sockopt_data *sd)
1090 {
1091 	ipfw_range_header *rh;
1092 	int error, ndel;
1093 
1094 	if (sd->valsize != sizeof(*rh))
1095 		return (EINVAL);
1096 
1097 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1098 
1099 	if (check_range_tlv(&rh->range) != 0)
1100 		return (EINVAL);
1101 
1102 	ndel = 0;
1103 	if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1104 		return (error);
1105 
1106 	/* Save number of rules deleted */
1107 	rh->range.new_set = ndel;
1108 	return (0);
1109 }
1110 
1111 /*
1112  * Move rules/sets matching specified parameters
1113  * Data layout (v0)(current):
1114  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1115  *
1116  * Returns 0 on success.
1117  */
1118 static int
move_rules(struct ip_fw_chain * chain,ip_fw3_opheader * op3,struct sockopt_data * sd)1119 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1120     struct sockopt_data *sd)
1121 {
1122 	ipfw_range_header *rh;
1123 
1124 	if (sd->valsize != sizeof(*rh))
1125 		return (EINVAL);
1126 
1127 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1128 
1129 	if (check_range_tlv(&rh->range) != 0)
1130 		return (EINVAL);
1131 
1132 	return (move_range(chain, &rh->range));
1133 }
1134 
1135 /*
1136  * Clear rule accounting data matching specified parameters
1137  * Data layout (v0)(current):
1138  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1139  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1140  *
1141  * Saves number of cleared rules in ipfw_range_tlv->new_set.
1142  *
1143  * Returns 0 on success.
1144  */
1145 static int
clear_rules(struct ip_fw_chain * chain,ip_fw3_opheader * op3,struct sockopt_data * sd)1146 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1147     struct sockopt_data *sd)
1148 {
1149 	ipfw_range_header *rh;
1150 	int log_only, num;
1151 	char *msg;
1152 
1153 	if (sd->valsize != sizeof(*rh))
1154 		return (EINVAL);
1155 
1156 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1157 
1158 	if (check_range_tlv(&rh->range) != 0)
1159 		return (EINVAL);
1160 
1161 	log_only = (op3->opcode == IP_FW_XRESETLOG);
1162 
1163 	num = clear_range(chain, &rh->range, log_only);
1164 
1165 	if (rh->range.flags & IPFW_RCFLAG_ALL)
1166 		msg = log_only ? "All logging counts reset" :
1167 		    "Accounting cleared";
1168 	else
1169 		msg = log_only ? "logging count reset" : "cleared";
1170 
1171 	if (V_fw_verbose) {
1172 		int lev = LOG_SECURITY | LOG_NOTICE;
1173 		log(lev, "ipfw: %s.\n", msg);
1174 	}
1175 
1176 	/* Save number of rules cleared */
1177 	rh->range.new_set = num;
1178 	return (0);
1179 }
1180 
1181 static void
enable_sets(struct ip_fw_chain * chain,ipfw_range_tlv * rt)1182 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1183 {
1184 	uint32_t v_set;
1185 
1186 	IPFW_UH_WLOCK_ASSERT(chain);
1187 
1188 	/* Change enabled/disabled sets mask */
1189 	v_set = (V_set_disable | rt->set) & ~rt->new_set;
1190 	v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1191 	IPFW_WLOCK(chain);
1192 	V_set_disable = v_set;
1193 	IPFW_WUNLOCK(chain);
1194 }
1195 
/*
 * Swaps rules (and their named objects) between sets @rt->set and
 * @rt->new_set, or moves everything from @rt->set to @rt->new_set
 * when @mv is non-zero.
 *
 * Returns 0 on success, EEXIST when a move would produce a named
 * object name clash in the destination set.
 */
static int
swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
{
	struct opcode_obj_rewrite *rw;
	struct ip_fw *rule;
	int i;

	IPFW_UH_WLOCK_ASSERT(chain);

	if (rt->set == rt->new_set) /* nothing to do */
		return (0);

	if (mv != 0) {
		/*
		 * Before moving the rules we need to check that
		 * there aren't any conflicting named objects.
		 */
		for (rw = ctl3_rewriters;
		    rw < ctl3_rewriters + ctl3_rsize; rw++) {
			if (rw->manage_sets == NULL)
				continue;
			i = rw->manage_sets(chain, (uint8_t)rt->set,
			    (uint8_t)rt->new_set, TEST_ALL);
			if (i != 0)
				return (EEXIST);
		}
	}
	/*
	 * Swap or move two sets.  The last map entry is skipped;
	 * presumably this is the default rule, which never changes
	 * set — TODO confirm against chain layout.
	 */
	for (i = 0; i < chain->n_rules - 1; i++) {
		rule = chain->map[i];
		if (rule->set == (uint8_t)rt->set)
			rule->set = (uint8_t)rt->new_set;
		else if (rule->set == (uint8_t)rt->new_set && mv == 0)
			rule->set = (uint8_t)rt->set;
	}
	/* Let every opcode rewriter relocate its own objects too. */
	for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
		if (rw->manage_sets == NULL)
			continue;
		rw->manage_sets(chain, (uint8_t)rt->set,
		    (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL: SWAP_ALL);
	}
	return (0);
}
1239 
1240 /*
1241  * Swaps or moves set
1242  * Data layout (v0)(current):
1243  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1244  *
1245  * Returns 0 on success.
1246  */
1247 static int
manage_sets(struct ip_fw_chain * chain,ip_fw3_opheader * op3,struct sockopt_data * sd)1248 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1249     struct sockopt_data *sd)
1250 {
1251 	ipfw_range_header *rh;
1252 	int ret;
1253 
1254 	if (sd->valsize != sizeof(*rh))
1255 		return (EINVAL);
1256 
1257 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1258 
1259 	if (rh->range.head.length != sizeof(ipfw_range_tlv))
1260 		return (1);
1261 	/* enable_sets() expects bitmasks. */
1262 	if (op3->opcode != IP_FW_SET_ENABLE &&
1263 	    (rh->range.set >= IPFW_MAX_SETS ||
1264 	    rh->range.new_set >= IPFW_MAX_SETS))
1265 		return (EINVAL);
1266 
1267 	ret = 0;
1268 	IPFW_UH_WLOCK(chain);
1269 	switch (op3->opcode) {
1270 	case IP_FW_SET_SWAP:
1271 	case IP_FW_SET_MOVE:
1272 		ret = swap_sets(chain, &rh->range,
1273 		    op3->opcode == IP_FW_SET_MOVE);
1274 		break;
1275 	case IP_FW_SET_ENABLE:
1276 		enable_sets(chain, &rh->range);
1277 		break;
1278 	}
1279 	IPFW_UH_WUNLOCK(chain);
1280 
1281 	return (ret);
1282 }
1283 
1284 /* Check rule format */
/*
 * Validates a rule passed from userland: buffer size, encoded rule
 * length, action offset and rule number, then every opcode in the
 * body.  Returns 0 when the rule is well-formed, EINVAL otherwise.
 */
int
ipfw_check_rule(struct ip_fw_rule *rule, size_t size,
    struct rule_check_info *ci)
{
	int l;

	/* The buffer must hold at least a fixed-size rule header. */
	if (size < sizeof(*rule)) {
		printf("ipfw: rule too short\n");
		return (EINVAL);
	}

	/* Check for valid cmd_len */
	l = roundup2(RULESIZE(rule), sizeof(uint64_t));
	if (l != size) {
		printf("ipfw: size mismatch (have %zu want %d)\n", size, l);
		return (EINVAL);
	}
	/* The action offset must point inside the opcode stream. */
	if (rule->act_ofs >= rule->cmd_len) {
		printf("ipfw: bogus action offset (%u > %u)\n",
		    rule->act_ofs, rule->cmd_len - 1);
		return (EINVAL);
	}

	/* Rule numbers at/above the default rule are reserved. */
	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
		return (EINVAL);

	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
}
1313 
/*
 * Validates the single opcode at *@pcmd.  For O_EXTERNAL_ACTION this
 * may also consume the following O_EXTERNAL_INSTANCE/O_EXTERNAL_DATA
 * opcode, advancing *@pcmd and decrementing *@plen.
 *
 * Increments @ci->object_opcodes for every opcode that references a
 * named kernel object (tables, dynamic states, interfaces, ...).
 *
 * Returns SUCCESS for a valid match opcode, CHECK_ACTION for a valid
 * action opcode, BAD_SIZE or FAILED on error.
 */
enum ipfw_opcheck_result
ipfw_check_opcode(ipfw_insn **pcmd, int *plen, struct rule_check_info *ci)
{
	ipfw_insn *cmd;
	size_t cmdlen;

	cmd = *pcmd;
	cmdlen = F_LEN(cmd);

	switch (cmd->opcode) {
	case O_PROBE_STATE:
	case O_KEEP_STATE:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_kidx))
			return (BAD_SIZE);
		ci->object_opcodes++;
		break;
	case O_PROTO:
	case O_IP_SRC_ME:
	case O_IP_DST_ME:
	case O_LAYER2:
	case O_IN:
	case O_FRAG:
	case O_DIVERTED:
	case O_IPOPT:
	case O_IPTOS:
	case O_IPPRECEDENCE:
	case O_IPVER:
	case O_SOCKARG:
	case O_TCPFLAGS:
	case O_TCPOPTS:
	case O_ESTAB:
	case O_VERREVPATH:
	case O_VERSRCREACH:
	case O_ANTISPOOF:
	case O_IPSEC:
#ifdef INET6
	case O_IP6_SRC_ME:
	case O_IP6_DST_ME:
	case O_EXT_HDR:
	case O_IP6:
#endif
	case O_IP4:
	case O_TAG:
	case O_SKIP_ACTION:
		/* Plain single-slot match opcodes. */
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		break;

	case O_EXTERNAL_ACTION:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_kidx))
			return (BAD_SIZE);

		/* Index zero is not a valid external action. */
		if (insntod(cmd, kidx)->kidx == 0)
			return (FAILED);
		ci->object_opcodes++;
		/*
		 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA
		 * opcode?
		 */
		if (*plen != cmdlen) {
			*plen -= cmdlen;
			cmd += cmdlen;
			*pcmd = cmd;
			cmdlen = F_LEN(cmd);
			if (cmd->opcode == O_EXTERNAL_DATA)
				return (CHECK_ACTION);
			if (cmd->opcode != O_EXTERNAL_INSTANCE) {
				printf("ipfw: invalid opcode "
				    "next to external action %u\n",
				    cmd->opcode);
				return (FAILED);
			}
			if (cmdlen != F_INSN_SIZE(ipfw_insn_kidx))
				return (BAD_SIZE);
			if (insntod(cmd, kidx)->kidx == 0)
				return (FAILED);
			ci->object_opcodes++;
		}
		return (CHECK_ACTION);

	case O_FIB:
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		if (cmd->arg1 >= rt_numfibs) {
			printf("ipfw: invalid fib number %d\n",
				cmd->arg1);
			return (FAILED);
		}
		break;

	case O_SETFIB:
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		/* IP_FW_TARG means "take fib from tablearg". */
		if ((cmd->arg1 != IP_FW_TARG) &&
		    ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
			printf("ipfw: invalid fib number %d\n",
				cmd->arg1 & 0x7FFF);
			return (FAILED);
		}
		return (CHECK_ACTION);

	case O_UID:
	case O_GID:
	case O_JAIL:
	case O_IP_SRC:
	case O_IP_DST:
	case O_TCPSEQ:
	case O_TCPACK:
	case O_PROB:
	case O_ICMPTYPE:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
			return (BAD_SIZE);
		break;

	case O_LIMIT:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
			return (BAD_SIZE);
		ci->object_opcodes++;
		break;

	case O_LOG:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
			return (BAD_SIZE);
		/* Start with a full log budget. */
		insntod(cmd, log)->log_left = insntod(cmd, log)->max_log;
		break;

	case O_IP_SRC_MASK:
	case O_IP_DST_MASK:
		/* only odd command lengths */
		if ((cmdlen & 1) == 0)
			return (BAD_SIZE);
		break;

	case O_IP_SRC_SET:
	case O_IP_DST_SET:
		if (cmd->arg1 == 0 || cmd->arg1 > 256) {
			printf("ipfw: invalid set size %d\n",
				cmd->arg1);
			return (FAILED);
		}
		/* Length must cover the address-set bitmap. */
		if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
		    (cmd->arg1+31)/32 )
			return (BAD_SIZE);
		break;

	case O_IP_SRC_LOOKUP:
	case O_IP_DST_LOOKUP:
	case O_IP_FLOW_LOOKUP:
	case O_MAC_SRC_LOOKUP:
	case O_MAC_DST_LOOKUP:
		/* Either a bare kidx or a kidx plus lookup value. */
		if (cmdlen != F_INSN_SIZE(ipfw_insn_kidx) &&
		    cmdlen != F_INSN_SIZE(ipfw_insn_table))
			return (BAD_SIZE);
		if (insntod(cmd, kidx)->kidx >= V_fw_tables_max) {
			printf("ipfw: invalid table index %u\n",
			    insntod(cmd, kidx)->kidx);
			return (FAILED);
		}
		ci->object_opcodes++;
		break;
	case O_MACADDR2:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
			return (BAD_SIZE);
		break;

	case O_NOP:
	case O_IPID:
	case O_IPTTL:
	case O_IPLEN:
	case O_TCPDATALEN:
	case O_TCPMSS:
	case O_TCPWIN:
	case O_TAGGED:
		/* Variable-length range/list opcodes. */
		if (cmdlen < 1 || cmdlen > 31)
			return (BAD_SIZE);
		break;

	case O_DSCP:
	case O_MARK:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
			return (BAD_SIZE);
		break;

	case O_MAC_TYPE:
	case O_IP_SRCPORT:
	case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
		if (cmdlen < 2 || cmdlen > 31)
			return (BAD_SIZE);
		break;

	case O_RECV:
	case O_XMIT:
	case O_VIA:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
			return (BAD_SIZE);
		ci->object_opcodes++;
		break;

	case O_ALTQ:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
			return (BAD_SIZE);
		break;

	case O_PIPE:
	case O_QUEUE:
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		return (CHECK_ACTION);

	case O_FORWARD_IP:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
			return (BAD_SIZE);
		return (CHECK_ACTION);
#ifdef INET6
	case O_FORWARD_IP6:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
			return (BAD_SIZE);
		return (CHECK_ACTION);
#endif /* INET6 */

	case O_DIVERT:
	case O_TEE:
		/* Requires the divert module to be loaded. */
		if (ip_divert_ptr == NULL)
			return (FAILED);
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		return (CHECK_ACTION);
	case O_NETGRAPH:
	case O_NGTEE:
		/* Requires ng_ipfw to be loaded. */
		if (ng_ipfw_input_p == NULL)
			return (FAILED);
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		return (CHECK_ACTION);
	case O_NAT:
		if (!IPFW_NAT_LOADED)
			return (FAILED);
		if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
			return (BAD_SIZE);
		return (CHECK_ACTION);

	case O_SKIPTO:
	case O_CALLRETURN:
	case O_SETMARK:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
			return (BAD_SIZE);
		return (CHECK_ACTION);

	case O_CHECK_STATE:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_kidx))
			return (BAD_SIZE);
		ci->object_opcodes++;
		return (CHECK_ACTION);

	case O_FORWARD_MAC: /* XXX not implemented yet */
	case O_COUNT:
	case O_ACCEPT:
	case O_DENY:
	case O_REJECT:
	case O_SETDSCP:
#ifdef INET6
	case O_UNREACH6:
#endif
	case O_REASS:
		if (cmdlen != F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		return (CHECK_ACTION);
#ifdef INET6
	case O_IP6_SRC:
	case O_IP6_DST:
		if (cmdlen != F_INSN_SIZE(struct in6_addr) +
		    F_INSN_SIZE(ipfw_insn))
			return (BAD_SIZE);
		break;

	case O_FLOW6ID:
		if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
		    ((ipfw_insn_u32 *)cmd)->o.arg1)
			return (BAD_SIZE);
		break;

	case O_IP6_SRC_MASK:
	case O_IP6_DST_MASK:
		/* only odd command lengths */
		if ( !(cmdlen & 1) || cmdlen > 127)
			return (BAD_SIZE);
		break;
	case O_ICMP6TYPE:
		if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) )
			return (BAD_SIZE);
		break;
#endif

	default:
		switch (cmd->opcode) {
#ifndef INET6
		case O_IP6_SRC_ME:
		case O_IP6_DST_ME:
		case O_EXT_HDR:
		case O_IP6:
		case O_UNREACH6:
		case O_IP6_SRC:
		case O_IP6_DST:
		case O_FLOW6ID:
		case O_IP6_SRC_MASK:
		case O_IP6_DST_MASK:
		case O_ICMP6TYPE:
			printf("ipfw: no IPv6 support in kernel\n");
			return (FAILED);
#endif
		default:
			printf("ipfw: opcode %d: unknown opcode\n",
				cmd->opcode);
			return (FAILED);
		}
	}
	return (SUCCESS);
}
1631 
/*
 * Walks the opcode stream of a rule and validates each opcode via
 * ipfw_check_opcode() (or the legacy check_opcode_f hook for
 * pre-IP_FW3_OPVER rules).  Enforces that exactly one action is
 * present and that it is the last opcode.
 *
 * Returns 0 when the body is valid, EINVAL otherwise.
 */
static __noinline int
check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
{
	int cmdlen, l;
	int have_action, ret;

	/*
	 * Now go for the individual checks. Very simple ones, basically only
	 * instruction sizes.
	 */
	have_action = 0;
	for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		/* An opcode may not claim more slots than remain. */
		if (cmdlen > l) {
			printf("ipfw: opcode %d: size truncated\n",
			    cmd->opcode);
			return (EINVAL);
		}
		if (ci->version != IP_FW3_OPVER)
			ret = (*check_opcode_f)(&cmd, &l, ci);
		else
			ret = ipfw_check_opcode(&cmd, &l, ci);

		if (ret == CHECK_ACTION) {
			if (have_action != 0) {
				printf("ipfw: opcode %d: multiple actions"
				    " not allowed\n", cmd->opcode);
				ret = FAILED;
			} else
				have_action = 1;

			/* The action must consume the rest of the rule. */
			if (l != F_LEN(cmd)) {
				printf("ipfw: opcode %d: action must be"
				    " last opcode\n", cmd->opcode);
				ret = FAILED;
			}
		}
		switch (ret) {
		case SUCCESS:
			continue;
		case BAD_SIZE:
			printf("ipfw: opcode %d: wrong size %d\n",
			    cmd->opcode, cmdlen);
			/* FALLTHROUGH */
		case FAILED:
			return (EINVAL);
		}
	}
	if (have_action == 0) {
		printf("ipfw: missing action\n");
		return (EINVAL);
	}
	return (0);
}
1686 
/* State shared between the stages of a ruleset dump. */
struct dump_args {
	uint32_t	b;	/* index of first rule to dump */
	uint32_t	e;	/* index past the last rule to dump */
	uint32_t	rcount;	/* number of rules in range */
	uint32_t	rsize;	/* total exported rules size, bytes */
	uint32_t	tcount;	/* number of referenced named objects */
	int		rcounters;	/* non-zero: export counters too */
	uint32_t	*bmask;	/* index bitmask of used named objects */
};
1696 
1697 void
ipfw_export_obj_ntlv(struct named_object * no,ipfw_obj_ntlv * ntlv)1698 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
1699 {
1700 
1701 	ntlv->head.type = no->etlv;
1702 	ntlv->head.length = sizeof(*ntlv);
1703 	ntlv->idx = no->kidx;
1704 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
1705 }
1706 
1707 /*
1708  * Export named object info in instance @ni, identified by @kidx
1709  * to ipfw_obj_ntlv. TLV is allocated from @sd space.
1710  *
1711  * Returns 0 on success.
1712  */
static int
export_objhash_ntlv(struct namedobj_instance *ni, uint32_t kidx,
    struct sockopt_data *sd)
{
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;

	/* Caller guarantees @kidx references a live object. */
	no = ipfw_objhash_lookup_kidx(ni, kidx);
	KASSERT(no != NULL, ("invalid object kernel index passed"));

	/* Reserve TLV space in the sockopt buffer. */
	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
	if (ntlv == NULL)
		return (ENOMEM);

	ipfw_export_obj_ntlv(no, ntlv);
	return (0);
}
1730 
1731 static int
export_named_objects(struct namedobj_instance * ni,struct dump_args * da,struct sockopt_data * sd)1732 export_named_objects(struct namedobj_instance *ni, struct dump_args *da,
1733     struct sockopt_data *sd)
1734 {
1735 	uint32_t i;
1736 	int error;
1737 
1738 	for (i = 0; i < IPFW_TABLES_MAX && da->tcount > 0; i++) {
1739 		if ((da->bmask[i / 32] & (1 << (i % 32))) == 0)
1740 			continue;
1741 		if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
1742 			return (error);
1743 		da->tcount--;
1744 	}
1745 	return (0);
1746 }
1747 
/*
 * Writes the IPFW_TLV_TBLNAME_LIST header followed by name TLVs for
 * every referenced named object (tables first, then the rest).
 * Returns 0 on success, ENOMEM when @sd runs out of space.
 */
static int
dump_named_objects(struct ip_fw_chain *ch, struct dump_args *da,
    struct sockopt_data *sd)
{
	ipfw_obj_ctlv *ctlv;
	int error;

	MPASS(da->tcount > 0);
	/* Header first */
	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
	if (ctlv == NULL)
		return (ENOMEM);
	ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
	ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
	    sizeof(*ctlv);
	ctlv->count = da->tcount;
	ctlv->objsize = sizeof(ipfw_obj_ntlv);

	/* Dump table names first (if any) */
	error = export_named_objects(ipfw_get_table_objhash(ch), da, sd);
	if (error != 0)
		return (error);
	/*
	 * Then dump the remaining named objects; they occupy the
	 * second half of the bitmask.
	 */
	da->bmask += IPFW_TABLES_MAX / 32;
	return (export_named_objects(CHAIN_TO_SRV(ch), da, sd));
}
1774 
1775 /*
1776  * Dumps static rules with table TLVs in buffer @sd.
1777  *
1778  * Returns 0 on success.
1779  */
static int
dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
    struct sockopt_data *sd)
{
	ipfw_obj_ctlv *ctlv;
	struct ip_fw *krule;
	caddr_t dst;
	int i, l;

	/* Dump rules */
	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
	if (ctlv == NULL)
		return (ENOMEM);
	ctlv->head.type = IPFW_TLV_RULE_LIST;
	ctlv->head.length = da->rsize + sizeof(*ctlv);
	ctlv->count = da->rcount;

	/* One TLV-wrapped rule (plus optional counters) per entry. */
	for (i = da->b; i < da->e; i++) {
		krule = chain->map[i];

		l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
		if (da->rcounters != 0)
			l += sizeof(struct ip_fw_bcounter);
		dst = (caddr_t)ipfw_get_sopt_space(sd, l);
		if (dst == NULL)
			return (ENOMEM);

		export_rule1(krule, dst, l, da->rcounters);
	}

	return (0);
}
1812 
1813 int
ipfw_mark_object_kidx(uint32_t * bmask,uint16_t etlv,uint32_t kidx)1814 ipfw_mark_object_kidx(uint32_t *bmask, uint16_t etlv, uint32_t kidx)
1815 {
1816 	uint32_t bidx;
1817 
1818 	/*
1819 	 * Maintain separate bitmasks for table and non-table objects.
1820 	 */
1821 	bidx = (etlv == IPFW_TLV_TBL_NAME) ? 0: IPFW_TABLES_MAX / 32;
1822 	bidx += kidx / 32;
1823 	if ((bmask[bidx] & (1 << (kidx % 32))) != 0)
1824 		return (0);
1825 
1826 	bmask[bidx] |= 1 << (kidx % 32);
1827 	return (1);
1828 }
1829 
1830 /*
1831  * Marks every object index used in @rule with bit in @bmask.
1832  * Used to generate bitmask of referenced tables/objects for given ruleset
1833  * or its part.
1834  */
static void
mark_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
    struct dump_args *da)
{
	struct opcode_obj_rewrite *rw;
	ipfw_insn *cmd;
	uint32_t kidx;
	int cmdlen, l;
	uint8_t subtype;

	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		/* Only object-referencing opcodes have a rewriter. */
		rw = find_op_rw(cmd, &kidx, &subtype);
		if (rw == NULL)
			continue;

		/* Count each object only once across the whole range. */
		if (ipfw_mark_object_kidx(da->bmask, rw->etlv, kidx))
			da->tcount++;
	}
}
1859 
1860 /*
1861  * Dumps requested objects data
1862  * Data layout (version 0)(current):
1863  * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
1864  *   size = ipfw_cfg_lheader.size
1865  * Reply: [ ipfw_cfg_lheader
1866  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
1867  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
1868  *     ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
1869  *   ] (optional)
1870  *   [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
1871  * ]
1872  * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
1873  * The rest (size, count) are set to zero and needs to be ignored.
1874  *
1875  * Returns 0 on success.
1876  */
static int
dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct dump_args da;
	ipfw_cfg_lheader *hdr;
	struct ip_fw *rule;
	size_t sz, rnum;
	uint32_t hdr_flags, *bmask;
	int error, i;

	hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
	if (hdr == NULL)
		return (EINVAL);

	error = 0;
	bmask = NULL;
	memset(&da, 0, sizeof(da));
	/*
	 * Allocate needed state.
	 * Note we allocate 2xspace mask, for table & srv
	 */
	if (hdr->flags & (IPFW_CFG_GET_STATIC | IPFW_CFG_GET_STATES))
		da.bmask = bmask = malloc(
		    sizeof(uint32_t) * IPFW_TABLES_MAX * 2 / 32, M_TEMP,
		    M_WAITOK | M_ZERO);
	IPFW_UH_RLOCK(chain);

	/*
	 * STAGE 1: Determine size/count for objects in range.
	 * Prepare used tables bitmask.
	 */
	sz = sizeof(ipfw_cfg_lheader);
	da.e = chain->n_rules;

	if (hdr->end_rule != 0) {
		/* Handle custom range */
		if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
			rnum = IPFW_DEFAULT_RULE;
		da.b = ipfw_find_rule(chain, rnum, 0);
		rnum = (hdr->end_rule < IPFW_DEFAULT_RULE) ?
		    hdr->end_rule + 1: IPFW_DEFAULT_RULE;
		da.e = ipfw_find_rule(chain, rnum, UINT32_MAX) + 1;
	}

	if (hdr->flags & IPFW_CFG_GET_STATIC) {
		for (i = da.b; i < da.e; i++) {
			rule = chain->map[i];
			da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
			da.rcount++;
			/* Update bitmask of used objects for given range */
			mark_rule_objects(chain, rule, &da);
		}
		/* Add counters if requested */
		if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
			da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
			da.rcounters = 1;
		}
		sz += da.rsize + sizeof(ipfw_obj_ctlv);
	}

	if (hdr->flags & IPFW_CFG_GET_STATES) {
		/* Dynamic states also mark objects in @bmask. */
		sz += sizeof(ipfw_obj_ctlv) +
		    ipfw_dyn_get_count(bmask, &i) * sizeof(ipfw_obj_dyntlv);
		da.tcount += i;
	}

	if (da.tcount > 0)
		sz += da.tcount * sizeof(ipfw_obj_ntlv) +
		    sizeof(ipfw_obj_ctlv);

	/*
	 * Fill header anyway.
	 * Note we have to save header fields to stable storage:
	 * the buffer inside @sd can be flushed after dumping rules.
	 */
	hdr->size = sz;
	hdr->set_mask = ~V_set_disable;
	hdr_flags = hdr->flags;
	hdr = NULL;

	if (sd->valsize < sz) {
		error = ENOMEM;
		goto cleanup;
	}

	/* STAGE2: Store actual data */
	if (da.tcount > 0) {
		error = dump_named_objects(chain, &da, sd);
		if (error != 0)
			goto cleanup;
	}

	if (hdr_flags & IPFW_CFG_GET_STATIC) {
		error = dump_static_rules(chain, &da, sd);
		if (error != 0)
			goto cleanup;
	}

	if (hdr_flags & IPFW_CFG_GET_STATES)
		error = ipfw_dump_states(chain, sd);

cleanup:
	IPFW_UH_RUNLOCK(chain);

	if (bmask != NULL)
		free(bmask, M_TEMP);

	return (error);
}
1987 
1988 int
ipfw_check_object_name_generic(const char * name)1989 ipfw_check_object_name_generic(const char *name)
1990 {
1991 	int nsize;
1992 
1993 	nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
1994 	if (strnlen(name, nsize) == nsize)
1995 		return (EINVAL);
1996 	if (name[0] == '\0')
1997 		return (EINVAL);
1998 	return (0);
1999 }
2000 
2001 /*
2002  * Creates non-existent objects referenced by rule.
2003  *
2004  * Return 0 on success.
2005  */
int
create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
    struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
{
	struct opcode_obj_rewrite *rw;
	struct obj_idx *p;
	uint32_t kidx;
	int error;

	/*
	 * Compatibility stuff: do actual creation for non-existing,
	 * but referenced objects.
	 */
	for (p = oib; p < pidx; p++) {
		/* kidx != 0 means the object was already resolved. */
		if (p->kidx != 0)
			continue;

		ti->uidx = p->uidx;
		ti->type = p->type;
		ti->atype = 0;

		rw = find_op_rw(cmd + p->off, NULL, NULL);
		KASSERT(rw != NULL, ("Unable to find handler for op %d",
		    (cmd + p->off)->opcode));

		if (rw->create_object == NULL)
			error = EOPNOTSUPP;
		else
			error = rw->create_object(ch, ti, &kidx);
		if (error == 0) {
			p->kidx = kidx;
			continue;
		}

		/*
		 * Error happened. We have to rollback everything.
		 * Drop all already acquired references.
		 */
		IPFW_UH_WLOCK(ch);
		unref_oib_objects(ch, cmd, oib, pidx);
		IPFW_UH_WUNLOCK(ch);

		return (error);
	}

	return (0);
}
2053 
2054 /*
2055  * Unreferences all already-referenced objects in given @cmd rule,
2056  * using information in @oib.
2057  *
2058  * Used to rollback partially converted rule on error.
2059  */
static void
unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
    struct obj_idx *end)
{
	struct opcode_obj_rewrite *rw;
	struct named_object *no;
	struct obj_idx *p;

	IPFW_UH_WLOCK_ASSERT(ch);

	for (p = oib; p < end; p++) {
		/* kidx == 0 means no reference was taken yet. */
		if (p->kidx == 0)
			continue;

		rw = find_op_rw(cmd + p->off, NULL, NULL);
		KASSERT(rw != NULL, ("Unable to find handler for op %d",
		    (cmd + p->off)->opcode));

		/* Find & unref by existing idx */
		no = rw->find_bykidx(ch, p->kidx);
		KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
		no->refcnt--;
	}
}
2084 
2085 /*
2086  * Remove references from every object used in @rule.
2087  * Used at rule removal code.
2088  */
static void
unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
{
	struct opcode_obj_rewrite *rw;
	struct named_object *no;
	ipfw_insn *cmd;
	uint32_t kidx;
	int cmdlen, l;
	uint8_t subtype;

	IPFW_UH_WLOCK_ASSERT(ch);

	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		/* Skip opcodes that do not reference named objects. */
		rw = find_op_rw(cmd, &kidx, &subtype);
		if (rw == NULL)
			continue;
		no = rw->find_bykidx(ch, kidx);

		KASSERT(no != NULL, ("object id %d not found", kidx));
		KASSERT(no->subtype == subtype,
		    ("wrong type %d (%d) for object id %d",
		    no->subtype, subtype, kidx));
		KASSERT(no->refcnt > 0, ("refcount for object %d is %d",
		    kidx, no->refcnt));

		/* Destroy the object when dropping its last reference. */
		if (no->refcnt == 1 && rw->destroy_object != NULL)
			rw->destroy_object(ch, no);
		else
			no->refcnt--;
	}
}
2125 
2126 /*
2127  * Find and reference object (if any) stored in instruction @cmd.
2128  *
2129  * Saves object info in @pidx, sets
2130  *  - @unresolved to 1 if object should exists but not found
2131  *
2132  * Returns non-zero value in case of error.
2133  */
static int
ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
    struct obj_idx *pidx, int *unresolved)
{
	struct named_object *no;
	struct opcode_obj_rewrite *rw;
	int error;

	/* Check if this opcode is candidate for rewrite */
	rw = find_op_rw(cmd, &ti->uidx, &ti->type);
	if (rw == NULL)
		return (0);

	/* Need to rewrite. Save necessary fields */
	pidx->uidx = ti->uidx;
	pidx->type = ti->type;

	/* Try to find referenced kernel object */
	error = rw->find_byname(ch, ti, &no);
	if (error != 0)
		return (error);
	if (no == NULL) {
		/*
		 * Report the unresolved object so the caller can
		 * auto-create it later.
		 */
		*unresolved = 1;
		return (0);
	}

	/*
	 * Object already exists.
	 * Its subtype should match the expected value.
	 */
	if (ti->type != no->subtype)
		return (EINVAL);

	/* Bump refcount and update kidx. */
	no->refcnt++;
	rw->update(cmd, no->kidx);
	return (0);
}
2176 
2177 /*
2178  * Finds and bumps refcount for objects referenced by given @rule.
2179  * Auto-creates non-existing tables.
2180  * Fills in @oib array with userland/kernel indexes.
2181  *
2182  * Returns 0 on success.
2183  */
static int
ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
    struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
{
	struct obj_idx *pidx;
	ipfw_insn *cmd;
	int cmdlen, error, l, unresolved;

	pidx = oib;
	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	error = 0;

	IPFW_UH_WLOCK(ch);

	/* Increase refcount on each existing referenced table. */
	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);
		unresolved = 0;

		error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved);
		if (error != 0)
			break;
		/*
		 * Compatibility stuff for old clients:
		 * prepare to automatically create non-existing objects.
		 */
		if (unresolved != 0) {
			/* Record opcode offset for the later rewrite pass. */
			pidx->off = rule->cmd_len - l;
			pidx++;
		}
	}

	if (error != 0) {
		/* Unref everything we have already done */
		unref_oib_objects(ch, rule->cmd, oib, pidx);
		IPFW_UH_WUNLOCK(ch);
		return (error);
	}
	IPFW_UH_WUNLOCK(ch);

	/* Perform auto-creation for non-existing objects */
	if (pidx != oib)
		error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);

	/* Calculate real number of dynamic objects */
	ci->object_opcodes = (uint16_t)(pidx - oib);

	return (error);
}
2235 
2236 /*
2237  * Checks is opcode is referencing table of appropriate type.
2238  * Adds reference count for found table if true.
2239  * Rewrites user-supplied opcode values with kernel ones.
2240  *
2241  * Returns 0 on success and appropriate error code otherwise.
2242  */
2243 static int
rewrite_rule_uidx(struct ip_fw_chain * chain,struct rule_check_info * ci)2244 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci)
2245 {
2246 	int error;
2247 	ipfw_insn *cmd;
2248 	struct obj_idx *p, *pidx_first, *pidx_last;
2249 	struct tid_info ti;
2250 
2251 	/*
2252 	 * Prepare an array for storing opcode indices.
2253 	 * Use stack allocation by default.
2254 	 */
2255 	if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
2256 		/* Stack */
2257 		pidx_first = ci->obuf;
2258 	} else
2259 		pidx_first = malloc(
2260 		    ci->object_opcodes * sizeof(struct obj_idx),
2261 		    M_IPFW, M_WAITOK | M_ZERO);
2262 
2263 	error = 0;
2264 	memset(&ti, 0, sizeof(ti));
2265 
2266 	/* Use set rule is assigned to. */
2267 	ti.set = ci->krule->set;
2268 	if (ci->ctlv != NULL) {
2269 		ti.tlvs = (void *)(ci->ctlv + 1);
2270 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
2271 	}
2272 
2273 	/* Reference all used tables and other objects */
2274 	error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
2275 	if (error != 0)
2276 		goto free;
2277 	/*
2278 	 * Note that ref_rule_objects() might have updated ci->object_opcodes
2279 	 * to reflect actual number of object opcodes.
2280 	 */
2281 
2282 	/* Perform rewrite of remaining opcodes */
2283 	p = pidx_first;
2284 	pidx_last = pidx_first + ci->object_opcodes;
2285 	for (p = pidx_first; p < pidx_last; p++) {
2286 		cmd = ci->krule->cmd + p->off;
2287 		update_opcode_kidx(cmd, p->kidx);
2288 	}
2289 
2290 free:
2291 	if (pidx_first != ci->obuf)
2292 		free(pidx_first, M_IPFW);
2293 
2294 	return (error);
2295 }
2296 
2297 /*
2298  * Parses one or more rules from userland.
2299  * Data layout (version 1)(current):
2300  * Request:
2301  * [
2302  *   ip_fw3_opheader
2303  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2304  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2305  * ]
2306  * Reply:
2307  * [
2308  *   ip_fw3_opheader
2309  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2310  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2311  * ]
2312  *
2313  * Rules in reply are modified to store their actual ruleset number.
2314  *
2315  * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending
2316  * according to their idx field and there has to be no duplicates.
2317  * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending.
2318  * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
2319  *
2320  * Returns 0 on success.
2321  */
static __noinline int
parse_rules_v1(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd, ipfw_obj_ctlv **prtlv,
    struct rule_check_info **pci)
{
	ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
	ipfw_obj_ntlv *ntlv;
	struct rule_check_info *ci, *cbuf;
	struct ip_fw_rule *r;
	size_t count, clen, read, rsize;
	uint32_t idx, rulenum;
	int error;

	/* Map the whole request and parse it sequentially. */
	op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
	ctlv = (ipfw_obj_ctlv *)(op3 + 1);
	read = sizeof(ip_fw3_opheader);
	if (read + sizeof(*ctlv) > sd->valsize)
		return (EINVAL);

	rtlv = NULL;
	tstate = NULL;
	cbuf = NULL;
	/* Table names or other named objects. */
	if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
		/* Check size and alignment. */
		clen = ctlv->head.length;
		if (read + clen > sd->valsize || clen < sizeof(*ctlv) ||
		    (clen % sizeof(uint64_t)) != 0)
			return (EINVAL);
		/* Check for validness. */
		count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
		if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
			return (EINVAL);
		/*
		 * Check each TLV.
		 * Ensure TLVs are sorted ascending and
		 * there are no duplicates.
		 */
		idx = 0;
		ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
		while (count > 0) {
			if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
				return (EINVAL);

			error = ipfw_check_object_name_generic(ntlv->name);
			if (error != 0)
				return (error);

			/* Strictly ascending idx also rules out duplicates. */
			if (ntlv->idx <= idx)
				return (EINVAL);

			idx = ntlv->idx;
			count--;
			ntlv++;
		}

		/* Remember the name table for object resolution later on. */
		tstate = ctlv;
		read += ctlv->head.length;
		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);

		if (read + sizeof(*ctlv) > sd->valsize)
			return (EINVAL);
	}

	/* List of rules. */
	if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
		clen = ctlv->head.length;
		if (read + clen > sd->valsize || clen < sizeof(*ctlv) ||
		    (clen % sizeof(uint64_t)) != 0)
			return (EINVAL);

		clen -= sizeof(*ctlv);
		if (ctlv->count == 0 ||
		    ctlv->count > clen / sizeof(struct ip_fw_rule))
			return (EINVAL);

		/* Allocate state for each rule */
		cbuf = malloc(ctlv->count * sizeof(struct rule_check_info),
		    M_TEMP, M_WAITOK | M_ZERO);

		/*
		 * Check each rule for validness.
		 * Ensure numbered rules are sorted ascending
		 * and properly aligned
		 */
		rulenum = 0;
		count = 0;
		error = 0;
		ci = cbuf;
		r = (struct ip_fw_rule *)(ctlv + 1);
		while (clen > 0) {
			rsize = RULEUSIZE1(r);
			if (rsize > clen || count > ctlv->count) {
				error = EINVAL;
				break;
			}
			ci->ctlv = tstate;
			ci->version = IP_FW3_OPVER;
			error = ipfw_check_rule(r, rsize, ci);
			if (error != 0)
				break;

			/* Check sorting */
			if (count != 0 && ((rulenum == 0) != (r->rulenum == 0) ||
			    r->rulenum < rulenum)) {
				printf("ipfw: wrong order: rulenum %u"
				    " vs %u\n", r->rulenum, rulenum);
				error = EINVAL;
				break;
			}
			rulenum = r->rulenum;
			ci->urule = (caddr_t)r;
			clen -= rsize;
			r = (struct ip_fw_rule *)((caddr_t)r + rsize);
			count++;
			ci++;
		}

		if (ctlv->count != count || error != 0) {
			free(cbuf, M_TEMP);
			return (EINVAL);
		}

		rtlv = ctlv;
		read += ctlv->head.length;
		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
	}

	/* Reject trailing garbage and requests without a rule list. */
	if (read != sd->valsize || rtlv == NULL) {
		free(cbuf, M_TEMP);
		return (EINVAL);
	}

	*prtlv = rtlv;
	*pci = cbuf;
	return (0);
}
2459 
2460 /*
2461  * Copy rule @urule from v1 userland format (current) to kernel @krule.
2462  */
2463 static void
import_rule_v1(struct ip_fw_chain * chain,struct rule_check_info * ci)2464 import_rule_v1(struct ip_fw_chain *chain, struct rule_check_info *ci)
2465 {
2466 	struct ip_fw_rule *urule;
2467 	struct ip_fw *krule;
2468 
2469 	urule = (struct ip_fw_rule *)ci->urule;
2470 	krule = ci->krule = ipfw_alloc_rule(chain, RULEKSIZE1(urule));
2471 
2472 	krule->act_ofs = urule->act_ofs;
2473 	krule->cmd_len = urule->cmd_len;
2474 	krule->rulenum = urule->rulenum;
2475 	krule->set = urule->set;
2476 	krule->flags = urule->flags;
2477 
2478 	/* Save rulenum offset */
2479 	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
2480 
2481 	/* Copy opcodes */
2482 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
2483 }
2484 
2485 /*
2486  * Adds one or more rules to ipfw @chain.
2487  */
2488 static int
add_rules(struct ip_fw_chain * chain,ip_fw3_opheader * op3,struct sockopt_data * sd)2489 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2490     struct sockopt_data *sd)
2491 {
2492 	ipfw_obj_ctlv *rtlv;
2493 	struct rule_check_info *ci, *nci;
2494 	int i, ret;
2495 
2496 	/*
2497 	 * Check rules buffer for validness.
2498 	 */
2499 	ret = parse_rules_v1(chain, op3, sd, &rtlv, &nci);
2500 	if (ret != 0)
2501 		return (ret);
2502 	/*
2503 	 * Allocate storage for the kernel representation of rules.
2504 	 */
2505 	for (i = 0, ci = nci; i < rtlv->count; i++, ci++)
2506 		import_rule_v1(chain, ci);
2507 	/*
2508 	 * Try to add new rules to the chain.
2509 	 */
2510 	if ((ret = ipfw_commit_rules(chain, nci, rtlv->count)) != 0) {
2511 		for (i = 0, ci = nci; i < rtlv->count; i++, ci++)
2512 			ipfw_free_rule(ci->krule);
2513 	}
2514 	/* Cleanup after parse_rules() */
2515 	free(nci, M_TEMP);
2516 	return (ret);
2517 }
2518 
2519 /*
2520  * Lists all sopts currently registered.
2521  * Data layout (v1)(current):
2522  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
2523  * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
2524  *
2525  * Returns 0 on success
2526  */
static int
dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct _ipfw_obj_lheader *olh;
	ipfw_sopt_info *i;
	struct ipfw_sopt_handler *sh;
	uint32_t count, n, size;

	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,
	    sizeof(*olh));
	if (olh == NULL)
		return (EINVAL);
	if (sd->valsize < olh->size)
		return (EINVAL);

	CTL3_LOCK();
	count = ctl3_hsize;
	size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);

	/* Fill in header regardless of buffer size */
	olh->count = count;
	olh->objsize = sizeof(ipfw_sopt_info);

	/* Tell the caller the required size when the buffer is short. */
	if (size > olh->size) {
		olh->size = size;
		CTL3_UNLOCK();
		return (ENOMEM);
	}
	olh->size = size;

	/* Export one ipfw_sopt_info record per registered handler. */
	for (n = 0; n < count; n++) {
		i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
		KASSERT(i != NULL, ("previously checked buffer is not enough"));
		sh = &ctl3_handlers[n];
		i->opcode = sh->opcode;
		i->version = sh->version;
		i->refcnt = sh->refcnt;
	}
	CTL3_UNLOCK();

	return (0);
}
2570 
2571 /*
2572  * Compares two opcodes.
2573  * Used both in qsort() and bsearch().
2574  *
2575  * Returns 0 if match is found.
2576  */
2577 static int
compare_opcodes(const void * _a,const void * _b)2578 compare_opcodes(const void *_a, const void *_b)
2579 {
2580 	const struct opcode_obj_rewrite *a, *b;
2581 
2582 	a = (const struct opcode_obj_rewrite *)_a;
2583 	b = (const struct opcode_obj_rewrite *)_b;
2584 
2585 	if (a->opcode < b->opcode)
2586 		return (-1);
2587 	else if (a->opcode > b->opcode)
2588 		return (1);
2589 
2590 	return (0);
2591 }
2592 
2593 /*
2594  * XXX: Rewrite bsearch()
2595  */
/*
 * Finds the contiguous run of ctl3_rewriters entries handling opcode @op.
 * On success returns 0 and stores the inclusive bounds in @plo/@phi;
 * returns 1 when no rewriter is registered for @op.
 */
static int
find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo,
    struct opcode_obj_rewrite **phi)
{
	struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw;

	memset(&h, 0, sizeof(h));
	h.opcode = op;

	/* bsearch() may land anywhere inside a run of equal opcodes. */
	rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
	    ctl3_rsize, sizeof(h), compare_opcodes);
	if (rw == NULL)
		return (1);

	/* Find the first element matching the same opcode */
	lo = rw;
	for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--)
		;

	/* Find the last element matching the same opcode */
	hi = rw;
	ctl3_max = ctl3_rewriters + ctl3_rsize;
	for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++)
		;

	*plo = lo;
	*phi = hi;

	return (0);
}
2626 
2627 /*
2628  * Finds opcode object rewriter based on @code.
2629  *
2630  * Returns pointer to handler or NULL.
2631  */
2632 static struct opcode_obj_rewrite *
find_op_rw(ipfw_insn * cmd,uint32_t * puidx,uint8_t * ptype)2633 find_op_rw(ipfw_insn *cmd, uint32_t *puidx, uint8_t *ptype)
2634 {
2635 	struct opcode_obj_rewrite *rw, *lo, *hi;
2636 	uint32_t uidx;
2637 	uint8_t subtype;
2638 
2639 	if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0)
2640 		return (NULL);
2641 
2642 	for (rw = lo; rw <= hi; rw++) {
2643 		if (rw->classifier(cmd, &uidx, &subtype) == 0) {
2644 			if (puidx != NULL)
2645 				*puidx = uidx;
2646 			if (ptype != NULL)
2647 				*ptype = subtype;
2648 			return (rw);
2649 		}
2650 	}
2651 
2652 	return (NULL);
2653 }
2654 int
classify_opcode_kidx(ipfw_insn * cmd,uint32_t * puidx)2655 classify_opcode_kidx(ipfw_insn *cmd, uint32_t *puidx)
2656 {
2657 
2658 	if (find_op_rw(cmd, puidx, NULL) == NULL)
2659 		return (1);
2660 	return (0);
2661 }
2662 
2663 void
update_opcode_kidx(ipfw_insn * cmd,uint32_t idx)2664 update_opcode_kidx(ipfw_insn *cmd, uint32_t idx)
2665 {
2666 	struct opcode_obj_rewrite *rw;
2667 
2668 	rw = find_op_rw(cmd, NULL, NULL);
2669 	KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
2670 	rw->update(cmd, idx);
2671 }
2672 
/* Initializes the opcode rewriter registry to an empty state. */
void
ipfw_init_obj_rewriter(void)
{
	ctl3_rewriters = NULL;
	ctl3_rsize = 0;
}
2679 
2680 void
ipfw_destroy_obj_rewriter(void)2681 ipfw_destroy_obj_rewriter(void)
2682 {
2683 	if (ctl3_rewriters != NULL)
2684 		free(ctl3_rewriters, M_IPFW);
2685 	ctl3_rewriters = NULL;
2686 	ctl3_rsize = 0;
2687 }
2688 
2689 /*
2690  * Adds one or more opcode object rewrite handlers to the global array.
2691  * Function may sleep.
2692  */
void
ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
{
	size_t sz;
	struct opcode_obj_rewrite *tmp;

	CTL3_LOCK();

	/*
	 * malloc(M_WAITOK) may sleep, so the lock is dropped around the
	 * allocation and the size re-checked afterwards: if another thread
	 * grew the array meanwhile, the buffer is too small — retry.
	 */
	for (;;) {
		sz = ctl3_rsize + count;
		CTL3_UNLOCK();
		tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
		CTL3_LOCK();
		if (ctl3_rsize + count <= sz)
			break;

		/* Retry */
		free(tmp, M_IPFW);
	}

	/* Merge old & new arrays */
	sz = ctl3_rsize + count;
	memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
	memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
	/* Keep the array sorted for bsearch() in find_op_rw_range(). */
	qsort(tmp, sz, sizeof(*rw), compare_opcodes);
	/* Switch new and free old */
	if (ctl3_rewriters != NULL)
		free(ctl3_rewriters, M_IPFW);
	ctl3_rewriters = tmp;
	ctl3_rsize = sz;

	CTL3_UNLOCK();
}
2726 
2727 /*
2728  * Removes one or more object rewrite handlers from the global array.
2729  */
int
ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
{
	size_t sz;
	struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi;
	int i;

	CTL3_LOCK();

	for (i = 0; i < count; i++) {
		/* Several rewriters may share an opcode; scan the range. */
		if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0)
			continue;

		for (ktmp = lo; ktmp <= hi; ktmp++) {
			/* Identify the entry by its classifier callback. */
			if (ktmp->classifier != rw[i].classifier)
				continue;

			/* Compact the array over the removed entry. */
			ctl3_max = ctl3_rewriters + ctl3_rsize;
			sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp);
			memmove(ktmp, ktmp + 1, sz);
			ctl3_rsize--;
			break;
		}
	}

	/* Drop the backing array once the last handler is gone. */
	if (ctl3_rsize == 0) {
		if (ctl3_rewriters != NULL)
			free(ctl3_rewriters, M_IPFW);
		ctl3_rewriters = NULL;
	}

	CTL3_UNLOCK();

	return (0);
}
2765 
2766 static int
export_objhash_ntlv_internal(struct namedobj_instance * ni,struct named_object * no,void * arg)2767 export_objhash_ntlv_internal(struct namedobj_instance *ni,
2768     struct named_object *no, void *arg)
2769 {
2770 	struct sockopt_data *sd;
2771 	ipfw_obj_ntlv *ntlv;
2772 
2773 	sd = (struct sockopt_data *)arg;
2774 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2775 	if (ntlv == NULL)
2776 		return (ENOMEM);
2777 	ipfw_export_obj_ntlv(no, ntlv);
2778 	return (0);
2779 }
2780 
2781 /*
2782  * Lists all service objects.
2783  * Data layout (v0)(current):
2784  * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
2785  * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
2786  * Returns 0 on success
2787  */
static int
dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *hdr;
	int count;

	hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
	if (hdr == NULL)
		return (EINVAL);

	IPFW_UH_RLOCK(chain);
	count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
	hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
	/* Report the required size when the user buffer is too small. */
	if (sd->valsize < hdr->size) {
		IPFW_UH_RUNLOCK(chain);
		return (ENOMEM);
	}
	hdr->count = count;
	hdr->objsize = sizeof(ipfw_obj_ntlv);
	/* Export one ntlv per service object via the iterator callback. */
	if (count > 0)
		ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
		    export_objhash_ntlv_internal, sd);
	IPFW_UH_RUNLOCK(chain);
	return (0);
}
2814 
/*
 * Builds the skipto cache for the current rule map and makes it live.
 * Caller must hold the UH write lock.
 */
void
ipfw_enable_skipto_cache(struct ip_fw_chain *chain)
{

	IPFW_UH_WLOCK_ASSERT(chain);
	/* Populate the shadow cache from the current map... */
	update_skipto_cache(chain, chain->map);

	/* ...then swap it in under the runtime lock. */
	IPFW_WLOCK(chain);
	swap_skipto_cache(chain);
	V_skipto_cache = 1;
	IPFW_WUNLOCK(chain);
}
2827 
2828 /*
2829  * Enables or disable skipto cache.
2830  * Request: [ ipfw_cmd_header ] size = ipfw_cmd_header.size
2831  * Reply: [ ipfw_cmd_header ]
2832  * Returns 0 on success
2833  */
static int
manage_skiptocache(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_cmd_header *hdr;

	if (sd->valsize != sizeof(*hdr))
		return (EINVAL);

	hdr = (ipfw_cmd_header *)ipfw_get_sopt_space(sd, sd->valsize);
	if (hdr->cmd != SKIPTO_CACHE_DISABLE &&
	    hdr->cmd != SKIPTO_CACHE_ENABLE)
		return (EOPNOTSUPP);

	IPFW_UH_WLOCK(chain);
	/* Act only when the requested state differs from the current one. */
	if (hdr->cmd != V_skipto_cache) {
		/* Enabling rebuilds the cache; disabling just clears flag. */
		if (hdr->cmd == SKIPTO_CACHE_ENABLE)
			ipfw_enable_skipto_cache(chain);
		V_skipto_cache = hdr->cmd;
	}
	IPFW_UH_WUNLOCK(chain);
	return (0);
}
2857 
2858 /*
2859  * Compares two sopt handlers (code, version and handler ptr).
2860  * Used both as qsort() and bsearch().
2861  * Does not compare handler for latter case.
2862  *
2863  * Returns 0 if match is found.
2864  */
2865 static int
compare_sh(const void * _a,const void * _b)2866 compare_sh(const void *_a, const void *_b)
2867 {
2868 	const struct ipfw_sopt_handler *a, *b;
2869 
2870 	a = (const struct ipfw_sopt_handler *)_a;
2871 	b = (const struct ipfw_sopt_handler *)_b;
2872 
2873 	if (a->opcode < b->opcode)
2874 		return (-1);
2875 	else if (a->opcode > b->opcode)
2876 		return (1);
2877 
2878 	if (a->version < b->version)
2879 		return (-1);
2880 	else if (a->version > b->version)
2881 		return (1);
2882 
2883 	/* bsearch helper */
2884 	if (a->handler == NULL)
2885 		return (0);
2886 
2887 	if ((uintptr_t)a->handler < (uintptr_t)b->handler)
2888 		return (-1);
2889 	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
2890 		return (1);
2891 
2892 	return (0);
2893 }
2894 
2895 /*
2896  * Finds sopt handler based on @code and @version.
2897  *
2898  * Returns pointer to handler or NULL.
2899  */
2900 static struct ipfw_sopt_handler *
find_sh(uint16_t code,uint8_t version,sopt_handler_f * handler)2901 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
2902 {
2903 	struct ipfw_sopt_handler *sh, h;
2904 
2905 	memset(&h, 0, sizeof(h));
2906 	h.opcode = code;
2907 	h.version = version;
2908 	h.handler = handler;
2909 
2910 	sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
2911 	    ctl3_hsize, sizeof(h), compare_sh);
2912 
2913 	return (sh);
2914 }
2915 
2916 static int
find_ref_sh(uint16_t opcode,uint8_t version,struct ipfw_sopt_handler * psh)2917 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
2918 {
2919 	struct ipfw_sopt_handler *sh;
2920 
2921 	CTL3_LOCK();
2922 	if ((sh = find_sh(opcode, version, NULL)) == NULL) {
2923 		CTL3_UNLOCK();
2924 		printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
2925 		    opcode, version);
2926 		return (EINVAL);
2927 	}
2928 	sh->refcnt++;
2929 	ctl3_refct++;
2930 	/* Copy handler data to requested buffer */
2931 	*psh = *sh;
2932 	CTL3_UNLOCK();
2933 
2934 	return (0);
2935 }
2936 
/* Drops the reference taken by find_ref_sh(). */
static void
find_unref_sh(struct ipfw_sopt_handler *psh)
{
	struct ipfw_sopt_handler *sh;

	CTL3_LOCK();
	sh = find_sh(psh->opcode, psh->version, NULL);
	KASSERT(sh != NULL, ("ctl3 handler disappeared"));
	sh->refcnt--;
	ctl3_refct--;
	CTL3_UNLOCK();
}
2949 
/* Initializes the CTL3 lock and registers the base IP_FW3 handlers. */
void
ipfw_init_sopt_handler(void)
{
	CTL3_LOCK_INIT();
	IPFW_ADD_SOPT_HANDLER(1, scodes);
}
2956 
/* Unregisters the base IP_FW3 handlers and tears down the CTL3 lock. */
void
ipfw_destroy_sopt_handler(void)
{
	IPFW_DEL_SOPT_HANDLER(1, scodes);
	CTL3_LOCK_DESTROY();
}
2963 
/* Installs the compat opcode-check hook @f. */
void
ipfw_register_compat(ipfw_check_opcode_t f)
{
	check_opcode_f = f;
}
2969 
/* Resets the compat opcode-check hook to the no-op implementation. */
void
ipfw_unregister_compat(void)
{
	check_opcode_f = check_opcode_compat_nop;
}
2975 
2976 /*
2977  * Adds one or more sockopt handlers to the global array.
2978  * Function may sleep.
2979  */
void
ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
{
	size_t sz;
	struct ipfw_sopt_handler *tmp;

	CTL3_LOCK();

	/*
	 * malloc(M_WAITOK) may sleep, so the lock is dropped around the
	 * allocation and the size re-checked afterwards: if another thread
	 * grew the array meanwhile, the buffer is too small — retry.
	 */
	for (;;) {
		sz = ctl3_hsize + count;
		CTL3_UNLOCK();
		tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
		CTL3_LOCK();
		if (ctl3_hsize + count <= sz)
			break;

		/* Retry */
		free(tmp, M_IPFW);
	}

	/* Merge old & new arrays */
	sz = ctl3_hsize + count;
	memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
	memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
	/* Keep the array sorted for bsearch() in find_sh(). */
	qsort(tmp, sz, sizeof(*sh), compare_sh);
	/* Switch new and free old */
	if (ctl3_handlers != NULL)
		free(ctl3_handlers, M_IPFW);
	ctl3_handlers = tmp;
	ctl3_hsize = sz;
	ctl3_gencnt++;

	CTL3_UNLOCK();
}
3014 
3015 /*
3016  * Removes one or more sockopt handlers from the global array.
3017  */
int
ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
{
	size_t sz;
	struct ipfw_sopt_handler *tmp, *h;
	int i;

	CTL3_LOCK();

	for (i = 0; i < count; i++) {
		tmp = &sh[i];
		/* Match on opcode, version AND handler pointer. */
		h = find_sh(tmp->opcode, tmp->version, tmp->handler);
		if (h == NULL)
			continue;

		/* Compact the array over the removed entry. */
		sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
		memmove(h, h + 1, sz);
		ctl3_hsize--;
	}

	/* Drop the backing array once the last handler is gone. */
	if (ctl3_hsize == 0) {
		if (ctl3_handlers != NULL)
			free(ctl3_handlers, M_IPFW);
		ctl3_handlers = NULL;
	}

	ctl3_gencnt++;

	CTL3_UNLOCK();

	return (0);
}
3050 
3051 /*
3052  * Writes data accumulated in @sd to sockopt buffer.
3053  * Zeroes internal @sd buffer.
3054  */
static int
ipfw_flush_sopt_data(struct sockopt_data *sd)
{
	struct sockopt *sopt;
	int error;
	size_t sz;

	sz = sd->koff;
	if (sz == 0)
		return (0);

	sopt = sd->sopt;

	/* Only GET requests copy accumulated data back to userland. */
	if (sopt->sopt_dir == SOPT_GET) {
		error = copyout(sd->kbuf, sopt->sopt_val, sz);
		if (error != 0)
			return (error);
	}

	/* Reset the kernel buffer for the next window. */
	memset(sd->kbuf, 0, sd->ksize);
	sd->ktotal += sz;
	sd->koff = 0;
	/* Shrink the window when nearing the end of the user buffer. */
	if (sd->ktotal + sd->ksize < sd->valsize)
		sd->kavail = sd->ksize;
	else
		sd->kavail = sd->valsize - sd->ktotal;

	/* Update sopt buffer data */
	sopt->sopt_valsize = sd->ktotal;
	sopt->sopt_val = sd->sopt_val + sd->ktotal;

	return (0);
}
3088 
3089 /*
3090  * Ensures that @sd buffer has contiguous @neeeded number of
3091  * bytes.
3092  *
3093  * Returns pointer to requested space or NULL.
3094  */
caddr_t
ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
{
	int error;
	caddr_t addr;

	if (sd->kavail < needed) {
		/*
		 * Flush data and try another time.
		 */
		error = ipfw_flush_sopt_data(sd);

		/* Fails if @needed exceeds the whole window size. */
		if (sd->kavail < needed || error != 0)
			return (NULL);
	}

	/* Reserve the space and advance the write position. */
	addr = sd->kbuf + sd->koff;
	sd->koff += needed;
	sd->kavail -= needed;
	return (addr);
}
3116 
3117 /*
3118  * Requests @needed contiguous bytes from @sd buffer.
3119  * Function is used to notify subsystem that we are
3120  * interesed in first @needed bytes (request header)
3121  * and the rest buffer can be safely zeroed.
3122  *
3123  * Returns pointer to requested space or NULL.
3124  */
3125 caddr_t
ipfw_get_sopt_header(struct sockopt_data * sd,size_t needed)3126 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3127 {
3128 	caddr_t addr;
3129 
3130 	if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3131 		return (NULL);
3132 
3133 	if (sd->kavail > 0)
3134 		memset(sd->kbuf + sd->koff, 0, sd->kavail);
3135 
3136 	return (addr);
3137 }
3138 
3139 /*
3140  * New sockopt handler.
3141  */
3142 int
ipfw_ctl3(struct sockopt * sopt)3143 ipfw_ctl3(struct sockopt *sopt)
3144 {
3145 	int error, locked;
3146 	size_t size, valsize;
3147 	struct ip_fw_chain *chain;
3148 	char xbuf[256];
3149 	struct sockopt_data sdata;
3150 	struct ipfw_sopt_handler h;
3151 	ip_fw3_opheader *op3 = NULL;
3152 
3153 	error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3154 	if (error != 0)
3155 		return (error);
3156 
3157 	if (sopt->sopt_name != IP_FW3)
3158 		return (EOPNOTSUPP);
3159 
3160 	chain = &V_layer3_chain;
3161 	error = 0;
3162 
3163 	/* Save original valsize before it is altered via sooptcopyin() */
3164 	valsize = sopt->sopt_valsize;
3165 	memset(&sdata, 0, sizeof(sdata));
3166 	/* Read op3 header first to determine actual operation */
3167 	op3 = (ip_fw3_opheader *)xbuf;
3168 	error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3169 	if (error != 0)
3170 		return (error);
3171 	sopt->sopt_valsize = valsize;
3172 
3173 	/*
3174 	 * Find and reference command.
3175 	 */
3176 	error = find_ref_sh(op3->opcode, op3->version, &h);
3177 	if (error != 0)
3178 		return (error);
3179 
3180 	/*
3181 	 * Disallow modifications in really-really secure mode, but still allow
3182 	 * the logging counters to be reset.
3183 	 */
3184 	if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3185 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3186 		if (error != 0) {
3187 			find_unref_sh(&h);
3188 			return (error);
3189 		}
3190 	}
3191 
3192 	/*
3193 	 * Fill in sockopt_data structure that may be useful for
3194 	 * IP_FW3 get requests.
3195 	 */
3196 	locked = 0;
3197 	if (valsize <= sizeof(xbuf)) {
3198 		/* use on-stack buffer */
3199 		sdata.kbuf = xbuf;
3200 		sdata.ksize = sizeof(xbuf);
3201 		sdata.kavail = valsize;
3202 	} else {
3203 		/*
3204 		 * Determine opcode type/buffer size:
3205 		 * allocate sliding-window buf for data export or
3206 		 * contiguous buffer for special ops.
3207 		 */
3208 		if ((h.dir & HDIR_SET) != 0) {
3209 			/* Set request. Allocate contigous buffer. */
3210 			if (valsize > CTL3_LARGEBUF) {
3211 				find_unref_sh(&h);
3212 				return (EFBIG);
3213 			}
3214 
3215 			size = valsize;
3216 		} else {
3217 			/* Get request. Allocate sliding window buffer */
3218 			size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF;
3219 
3220 			if (size < valsize) {
3221 				/* We have to wire user buffer */
3222 				error = vslock(sopt->sopt_val, valsize);
3223 				if (error != 0)
3224 					return (error);
3225 				locked = 1;
3226 			}
3227 		}
3228 
3229 		sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3230 		sdata.ksize = size;
3231 		sdata.kavail = size;
3232 	}
3233 
3234 	sdata.sopt = sopt;
3235 	sdata.sopt_val = sopt->sopt_val;
3236 	sdata.valsize = valsize;
3237 
3238 	/*
3239 	 * Copy either all request (if valsize < bsize_max)
3240 	 * or first bsize_max bytes to guarantee most consumers
3241 	 * that all necessary data has been copied).
3242 	 * Anyway, copy not less than sizeof(ip_fw3_opheader).
3243 	 */
3244 	if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3245 	    sizeof(ip_fw3_opheader))) != 0)
3246 		return (error);
3247 	op3 = (ip_fw3_opheader *)sdata.kbuf;
3248 
3249 	/* Finally, run handler */
3250 	error = h.handler(chain, op3, &sdata);
3251 	find_unref_sh(&h);
3252 
3253 	/* Flush state and free buffers */
3254 	if (error == 0)
3255 		error = ipfw_flush_sopt_data(&sdata);
3256 	else
3257 		ipfw_flush_sopt_data(&sdata);
3258 
3259 	if (locked != 0)
3260 		vsunlock(sdata.sopt_val, valsize);
3261 
3262 	/* Restore original pointer and set number of bytes written */
3263 	sopt->sopt_val = sdata.sopt_val;
3264 	sopt->sopt_valsize = sdata.ktotal;
3265 	if (sdata.kbuf != xbuf)
3266 		free(sdata.kbuf, M_TEMP);
3267 
3268 	return (error);
3269 }
3270 
3271 /*
3272  * Named object api
3273  *
3274  */
3275 
3276 void
ipfw_init_srv(struct ip_fw_chain * ch)3277 ipfw_init_srv(struct ip_fw_chain *ch)
3278 {
3279 	ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT,
3280 	    DEFAULT_OBJHASH_SIZE);
3281 	ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
3282 	    M_IPFW, M_WAITOK | M_ZERO);
3283 }
3284 
3285 void
ipfw_destroy_srv(struct ip_fw_chain * ch)3286 ipfw_destroy_srv(struct ip_fw_chain *ch)
3287 {
3288 	free(ch->srvstate, M_IPFW);
3289 	ipfw_objhash_destroy(ch->srvmap);
3290 }
3291 
3292 /*
3293  * Allocate new bitmask which can be used to enlarge/shrink
3294  * named instance index.
3295  */
3296 void
ipfw_objhash_bitmap_alloc(uint32_t items,void ** idx,int * pblocks)3297 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
3298 {
3299 	size_t size;
3300 	int max_blocks;
3301 	u_long *idx_mask;
3302 
3303 	KASSERT((items % BLOCK_ITEMS) == 0,
3304 	   ("bitmask size needs to power of 2 and greater or equal to %zu",
3305 	    BLOCK_ITEMS));
3306 
3307 	max_blocks = items / BLOCK_ITEMS;
3308 	size = items / 8;
3309 	idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
3310 	/* Mark all as free */
3311 	memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
3312 	*idx_mask &= ~(u_long)1; /* Skip index 0 */
3313 
3314 	*idx = idx_mask;
3315 	*pblocks = max_blocks;
3316 }
3317 
3318 /*
3319  * Copy current bitmask index to new one.
3320  */
3321 void
ipfw_objhash_bitmap_merge(struct namedobj_instance * ni,void ** idx,int * blocks)3322 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
3323 {
3324 	int old_blocks, new_blocks;
3325 	u_long *old_idx, *new_idx;
3326 	int i;
3327 
3328 	old_idx = ni->idx_mask;
3329 	old_blocks = ni->max_blocks;
3330 	new_idx = *idx;
3331 	new_blocks = *blocks;
3332 
3333 	for (i = 0; i < IPFW_MAX_SETS; i++) {
3334 		memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
3335 		    old_blocks * sizeof(u_long));
3336 	}
3337 }
3338 
3339 /*
3340  * Swaps current @ni index with new one.
3341  */
3342 void
ipfw_objhash_bitmap_swap(struct namedobj_instance * ni,void ** idx,int * blocks)3343 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
3344 {
3345 	int old_blocks;
3346 	u_long *old_idx;
3347 
3348 	old_idx = ni->idx_mask;
3349 	old_blocks = ni->max_blocks;
3350 
3351 	ni->idx_mask = *idx;
3352 	ni->max_blocks = *blocks;
3353 
3354 	/* Save old values */
3355 	*idx = old_idx;
3356 	*blocks = old_blocks;
3357 }
3358 
3359 void
ipfw_objhash_bitmap_free(void * idx,int blocks)3360 ipfw_objhash_bitmap_free(void *idx, int blocks)
3361 {
3362 	free(idx, M_IPFW);
3363 }
3364 
3365 /*
3366  * Creates named hash instance.
3367  * Must be called without holding any locks.
3368  * Return pointer to new instance.
3369  */
3370 struct namedobj_instance *
ipfw_objhash_create(uint32_t items,size_t hash_size)3371 ipfw_objhash_create(uint32_t items, size_t hash_size)
3372 {
3373 	struct namedobj_instance *ni;
3374 	int i;
3375 	size_t size;
3376 
3377 	size = sizeof(struct namedobj_instance) +
3378 	    sizeof(struct namedobjects_head) * hash_size +
3379 	    sizeof(struct namedobjects_head) * hash_size;
3380 
3381 	ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
3382 	ni->nn_size = hash_size;
3383 	ni->nv_size = hash_size;
3384 
3385 	ni->names = (struct namedobjects_head *)(ni +1);
3386 	ni->values = &ni->names[ni->nn_size];
3387 
3388 	for (i = 0; i < ni->nn_size; i++)
3389 		TAILQ_INIT(&ni->names[i]);
3390 
3391 	for (i = 0; i < ni->nv_size; i++)
3392 		TAILQ_INIT(&ni->values[i]);
3393 
3394 	/* Set default hashing/comparison functions */
3395 	ni->hash_f = objhash_hash_name;
3396 	ni->cmp_f = objhash_cmp_name;
3397 
3398 	/* Allocate bitmask separately due to possible resize */
3399 	ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
3400 
3401 	return (ni);
3402 }
3403 
3404 void
ipfw_objhash_destroy(struct namedobj_instance * ni)3405 ipfw_objhash_destroy(struct namedobj_instance *ni)
3406 {
3407 	free(ni->idx_mask, M_IPFW);
3408 	free(ni, M_IPFW);
3409 }
3410 
3411 void
ipfw_objhash_set_funcs(struct namedobj_instance * ni,objhash_hash_f * hash_f,objhash_cmp_f * cmp_f)3412 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
3413     objhash_cmp_f *cmp_f)
3414 {
3415 
3416 	ni->hash_f = hash_f;
3417 	ni->cmp_f = cmp_f;
3418 }
3419 
3420 static uint32_t
objhash_hash_name(struct namedobj_instance * ni,const void * name,uint32_t set)3421 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set)
3422 {
3423 
3424 	return (fnv_32_str((const char *)name, FNV1_32_INIT));
3425 }
3426 
3427 static int
objhash_cmp_name(struct named_object * no,const void * name,uint32_t set)3428 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set)
3429 {
3430 
3431 	if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set))
3432 		return (0);
3433 
3434 	return (1);
3435 }
3436 
3437 static uint32_t
objhash_hash_idx(struct namedobj_instance * ni,uint32_t val)3438 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
3439 {
3440 	uint32_t v;
3441 
3442 	v = val % (ni->nv_size - 1);
3443 
3444 	return (v);
3445 }
3446 
3447 struct named_object *
ipfw_objhash_lookup_name(struct namedobj_instance * ni,uint32_t set,const char * name)3448 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set,
3449     const char *name)
3450 {
3451 	struct named_object *no;
3452 	uint32_t hash;
3453 
3454 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
3455 
3456 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3457 		if (ni->cmp_f(no, name, set) == 0)
3458 			return (no);
3459 	}
3460 
3461 	return (NULL);
3462 }
3463 
3464 /*
3465  * Find named object by @uid.
3466  * Check @tlvs for valid data inside.
3467  *
3468  * Returns pointer to found TLV or NULL.
3469  */
3470 ipfw_obj_ntlv *
ipfw_find_name_tlv_type(void * tlvs,int len,uint32_t uidx,uint32_t etlv)3471 ipfw_find_name_tlv_type(void *tlvs, int len, uint32_t uidx, uint32_t etlv)
3472 {
3473 	ipfw_obj_ntlv *ntlv;
3474 	uintptr_t pa, pe;
3475 	int l;
3476 
3477 	pa = (uintptr_t)tlvs;
3478 	pe = pa + len;
3479 	l = 0;
3480 	for (; pa < pe; pa += l) {
3481 		ntlv = (ipfw_obj_ntlv *)pa;
3482 		l = ntlv->head.length;
3483 
3484 		if (l != sizeof(*ntlv))
3485 			return (NULL);
3486 
3487 		if (ntlv->idx != uidx)
3488 			continue;
3489 		/*
3490 		 * When userland has specified zero TLV type, do
3491 		 * not compare it with eltv. In some cases userland
3492 		 * doesn't know what type should it have. Use only
3493 		 * uidx and name for search named_object.
3494 		 */
3495 		if (ntlv->head.type != 0 &&
3496 		    ntlv->head.type != (uint16_t)etlv)
3497 			continue;
3498 
3499 		if (ipfw_check_object_name_generic(ntlv->name) != 0)
3500 			return (NULL);
3501 
3502 		return (ntlv);
3503 	}
3504 
3505 	return (NULL);
3506 }
3507 
3508 /*
3509  * Finds object config based on either legacy index
3510  * or name in ntlv.
3511  * Note @ti structure contains unchecked data from userland.
3512  *
3513  * Returns 0 in success and fills in @pno with found config
3514  */
3515 int
ipfw_objhash_find_type(struct namedobj_instance * ni,struct tid_info * ti,uint32_t etlv,struct named_object ** pno)3516 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
3517     uint32_t etlv, struct named_object **pno)
3518 {
3519 	char *name;
3520 	ipfw_obj_ntlv *ntlv;
3521 	uint32_t set;
3522 
3523 	if (ti->tlvs == NULL)
3524 		return (EINVAL);
3525 
3526 	ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
3527 	if (ntlv == NULL)
3528 		return (EINVAL);
3529 	name = ntlv->name;
3530 
3531 	/*
3532 	 * Use set provided by @ti instead of @ntlv one.
3533 	 * This is needed due to different sets behavior
3534 	 * controlled by V_fw_tables_sets.
3535 	 */
3536 	set = ti->set;
3537 	*pno = ipfw_objhash_lookup_name(ni, set, name);
3538 	if (*pno == NULL)
3539 		return (ESRCH);
3540 	return (0);
3541 }
3542 
3543 /*
3544  * Find named object by name, considering also its TLV type.
3545  */
3546 struct named_object *
ipfw_objhash_lookup_name_type(struct namedobj_instance * ni,uint32_t set,uint32_t type,const char * name)3547 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
3548     uint32_t type, const char *name)
3549 {
3550 	struct named_object *no;
3551 	uint32_t hash;
3552 
3553 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
3554 
3555 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3556 		if (ni->cmp_f(no, name, set) == 0 &&
3557 		    no->etlv == (uint16_t)type)
3558 			return (no);
3559 	}
3560 
3561 	return (NULL);
3562 }
3563 
3564 struct named_object *
ipfw_objhash_lookup_kidx(struct namedobj_instance * ni,uint32_t kidx)3565 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint32_t kidx)
3566 {
3567 	struct named_object *no;
3568 	uint32_t hash;
3569 
3570 	hash = objhash_hash_idx(ni, kidx);
3571 
3572 	TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
3573 		if (no->kidx == kidx)
3574 			return (no);
3575 	}
3576 
3577 	return (NULL);
3578 }
3579 
3580 int
ipfw_objhash_same_name(struct namedobj_instance * ni,struct named_object * a,struct named_object * b)3581 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
3582     struct named_object *b)
3583 {
3584 
3585 	if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
3586 		return (1);
3587 
3588 	return (0);
3589 }
3590 
3591 void
ipfw_objhash_add(struct namedobj_instance * ni,struct named_object * no)3592 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
3593 {
3594 	uint32_t hash;
3595 
3596 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
3597 	TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
3598 
3599 	hash = objhash_hash_idx(ni, no->kidx);
3600 	TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
3601 
3602 	ni->count++;
3603 }
3604 
3605 void
ipfw_objhash_del(struct namedobj_instance * ni,struct named_object * no)3606 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
3607 {
3608 	uint32_t hash;
3609 
3610 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
3611 	TAILQ_REMOVE(&ni->names[hash], no, nn_next);
3612 
3613 	hash = objhash_hash_idx(ni, no->kidx);
3614 	TAILQ_REMOVE(&ni->values[hash], no, nv_next);
3615 
3616 	ni->count--;
3617 }
3618 
3619 uint32_t
ipfw_objhash_count(struct namedobj_instance * ni)3620 ipfw_objhash_count(struct namedobj_instance *ni)
3621 {
3622 
3623 	return (ni->count);
3624 }
3625 
3626 uint32_t
ipfw_objhash_count_type(struct namedobj_instance * ni,uint16_t type)3627 ipfw_objhash_count_type(struct namedobj_instance *ni, uint16_t type)
3628 {
3629 	struct named_object *no;
3630 	uint32_t count;
3631 	int i;
3632 
3633 	count = 0;
3634 	for (i = 0; i < ni->nn_size; i++) {
3635 		TAILQ_FOREACH(no, &ni->names[i], nn_next) {
3636 			if (no->etlv == type)
3637 				count++;
3638 		}
3639 	}
3640 	return (count);
3641 }
3642 
3643 /*
3644  * Runs @func for each found named object.
3645  * It is safe to delete objects from callback
3646  */
3647 int
ipfw_objhash_foreach(struct namedobj_instance * ni,objhash_cb_t * f,void * arg)3648 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
3649 {
3650 	struct named_object *no, *no_tmp;
3651 	int i, ret;
3652 
3653 	for (i = 0; i < ni->nn_size; i++) {
3654 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
3655 			ret = f(ni, no, arg);
3656 			if (ret != 0)
3657 				return (ret);
3658 		}
3659 	}
3660 	return (0);
3661 }
3662 
3663 /*
3664  * Runs @f for each found named object with type @type.
3665  * It is safe to delete objects from callback
3666  */
3667 int
ipfw_objhash_foreach_type(struct namedobj_instance * ni,objhash_cb_t * f,void * arg,uint16_t type)3668 ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f,
3669     void *arg, uint16_t type)
3670 {
3671 	struct named_object *no, *no_tmp;
3672 	int i, ret;
3673 
3674 	for (i = 0; i < ni->nn_size; i++) {
3675 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
3676 			if (no->etlv != type)
3677 				continue;
3678 			ret = f(ni, no, arg);
3679 			if (ret != 0)
3680 				return (ret);
3681 		}
3682 	}
3683 	return (0);
3684 }
3685 
3686 /*
3687  * Removes index from given set.
3688  * Returns 0 on success.
3689  */
3690 int
ipfw_objhash_free_idx(struct namedobj_instance * ni,uint32_t idx)3691 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint32_t idx)
3692 {
3693 	u_long *mask;
3694 	int i, v;
3695 
3696 	i = idx / BLOCK_ITEMS;
3697 	v = idx % BLOCK_ITEMS;
3698 
3699 	if (i >= ni->max_blocks)
3700 		return (1);
3701 
3702 	mask = &ni->idx_mask[i];
3703 
3704 	if ((*mask & ((u_long)1 << v)) != 0)
3705 		return (1);
3706 
3707 	/* Mark as free */
3708 	*mask |= (u_long)1 << v;
3709 
3710 	/* Update free offset */
3711 	if (ni->free_off[0] > i)
3712 		ni->free_off[0] = i;
3713 
3714 	return (0);
3715 }
3716 
3717 /*
3718  * Allocate new index in given instance and stores in in @pidx.
3719  * Returns 0 on success.
3720  */
3721 int
ipfw_objhash_alloc_idx(void * n,uint32_t * pidx)3722 ipfw_objhash_alloc_idx(void *n, uint32_t *pidx)
3723 {
3724 	struct namedobj_instance *ni;
3725 	u_long *mask;
3726 	int i, off, v;
3727 
3728 	ni = (struct namedobj_instance *)n;
3729 
3730 	off = ni->free_off[0];
3731 	mask = &ni->idx_mask[off];
3732 
3733 	for (i = off; i < ni->max_blocks; i++, mask++) {
3734 		if ((v = ffsl(*mask)) == 0)
3735 			continue;
3736 
3737 		/* Mark as busy */
3738 		*mask &= ~ ((u_long)1 << (v - 1));
3739 
3740 		ni->free_off[0] = i;
3741 
3742 		v = BLOCK_ITEMS * i + v - 1;
3743 
3744 		*pidx = v;
3745 		return (0);
3746 	}
3747 
3748 	return (1);
3749 }
3750 
3751 /* end of file */
3752