xref: /freebsd/sys/netpfil/ipfw/ip_fw_dynamic.c (revision 63d1fd5970ec814904aa0f4580b10a0d302d08b2)
1 /*-
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #define        DEB(x)
30 #define        DDB(x) x
31 
32 /*
33  * Dynamic rule support for ipfw
34  */
35 
36 #include "opt_ipfw.h"
37 #include "opt_inet.h"
38 #ifndef INET
39 #error IPFIREWALL requires INET.
40 #endif /* INET */
41 #include "opt_inet6.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/lock.h>
50 #include <sys/rmlock.h>
51 #include <sys/socket.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <net/ethernet.h> /* for ETHERTYPE_IP */
55 #include <net/if.h>
56 #include <net/if_var.h>
57 #include <net/vnet.h>
58 
59 #include <netinet/in.h>
60 #include <netinet/ip.h>
61 #include <netinet/ip_var.h>	/* ip_defttl */
62 #include <netinet/ip_fw.h>
63 #include <netinet/tcp_var.h>
64 #include <netinet/udp.h>
65 
66 #include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
67 #ifdef INET6
68 #include <netinet6/in6_var.h>
69 #include <netinet6/ip6_var.h>
70 #endif
71 
72 #include <netpfil/ipfw/ip_fw_private.h>
73 
74 #include <machine/in_cksum.h>	/* XXX for in_cksum */
75 
76 #ifdef MAC
77 #include <security/mac/mac_framework.h>
78 #endif
79 
80 /*
81  * Description of dynamic rules.
82  *
83  * Dynamic rules are stored in lists accessed through a hash table
84  * (ipfw_dyn_v) whose size is curr_dyn_buckets. The sysctl variable
85  * dyn_buckets sets the maximum table size; the hash is grown by the
86  * timer routine when the state count exceeds twice the bucket count.
87  *
88  * XXX currently there is only one list, ipfw_dyn.
89  *
90  * When a packet is received, its address fields are first masked
91  * with the mask defined for the rule, then hashed, then matched
92  * against the entries in the corresponding list.
93  * Dynamic rules can be used for different purposes:
94  *  + stateful rules;
95  *  + enforcing limits on the number of sessions;
96  *  + in-kernel NAT (not implemented yet)
97  *
98  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
99  * measured in seconds and depending on the flags.
100  *
101  * The total number of dynamic rules is equal to the UMA zone item count.
102  * The max number of dynamic rules is dyn_max. When we reach
103  * the maximum number of rules we stop creating new ones. This is
104  * done to avoid consuming too much memory, but also too much
105  * time when searching on each packet (ideally, we should try instead
106  * to put a limit on the length of the list on each bucket...).
107  *
108  * Each dynamic rule holds a pointer to the parent ipfw rule so
109  * we know what action to perform. Dynamic rules are removed when
110  * the parent rule is deleted. This behaviour can be changed with the
111  * dyn_keep_states sysctl.
112  *
113  * There are some limitations with dynamic rules -- we do not
114  * obey the 'randomized match', and we do not do multiple
115  * passes through the firewall. XXX check the latter!!!
116  */
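
/*
 * Illustrative examples (not part of the original source): ipfw(8)
 * rules that create the dynamic states handled by this file, e.g.
 *
 *	ipfw add check-state
 *	ipfw add allow tcp from any to me setup keep-state
 *	ipfw add allow tcp from any to me setup limit src-addr 10
 */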
117 
118 struct ipfw_dyn_bucket {
119 	struct mtx	mtx;		/* Bucket protecting lock */
120 	ipfw_dyn_rule	*head;		/* Pointer to first rule */
121 };
122 
123 /*
124  * Static variables followed by global ones
125  */
126 static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
127 static VNET_DEFINE(u_int32_t, dyn_buckets_max);
128 static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
129 static VNET_DEFINE(struct callout, ipfw_timeout);
130 #define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
131 #define	V_dyn_buckets_max		VNET(dyn_buckets_max)
132 #define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
133 #define V_ipfw_timeout                  VNET(ipfw_timeout)
134 
135 static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
136 #define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)
137 
138 #define	IPFW_BUCK_LOCK_INIT(b)	\
139 	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
140 #define	IPFW_BUCK_LOCK_DESTROY(b)	\
141 	mtx_destroy(&(b)->mtx)
142 #define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
143 #define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
144 #define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)
145 
146 
147 static VNET_DEFINE(int, dyn_keep_states);
148 #define	V_dyn_keep_states		VNET(dyn_keep_states)
149 
150 /*
151  * Timeouts for various events in handling dynamic rules.
152  */
153 static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
154 static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
155 static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
156 static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
157 static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
158 static VNET_DEFINE(u_int32_t, dyn_short_lifetime);
159 
160 #define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
161 #define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
162 #define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
163 #define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
164 #define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
165 #define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)
166 
167 /*
168  * Keepalives are sent if dyn_keepalive is set. They are sent every
169  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
170  * seconds of lifetime of a rule.
171  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
172  * than dyn_keepalive_period.
173  */
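
/*
 * Illustrative example (not from the original source): keepalive
 * behaviour and the per-state lifetimes are tunable from userland,
 * e.g.
 *
 *	sysctl net.inet.ip.fw.dyn_keepalive=1
 *	sysctl net.inet.ip.fw.dyn_ack_lifetime=300
 */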
174 
175 static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
176 static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
177 static VNET_DEFINE(u_int32_t, dyn_keepalive);
178 static VNET_DEFINE(time_t, dyn_keepalive_last);
179 
180 #define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
181 #define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
182 #define	V_dyn_keepalive			VNET(dyn_keepalive)
183 #define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)
184 
185 static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */
186 
187 #define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
188 #define	V_dyn_max			VNET(dyn_max)
189 
190 /* for userspace, we emulate the uma_zone_get_cur() counter with ipfw_dyn_count */
191 static int ipfw_dyn_count;	/* number of objects */
192 
193 #ifdef USERSPACE /* emulation of UMA object counters for userspace */
194 #define uma_zone_get_cur(x)	ipfw_dyn_count
195 #endif /* USERSPACE */
196 
197 static int last_log;	/* Log ratelimiting */
198 
199 static void ipfw_dyn_tick(void *vnetx);
200 static void check_dyn_rules(struct ip_fw_chain *, ipfw_range_tlv *, int, int);
201 #ifdef SYSCTL_NODE
202 
203 static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
204 static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);
205 
206 SYSBEGIN(f2)
207 
208 SYSCTL_DECL(_net_inet_ip_fw);
209 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
210     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
211     "Max number of dyn. buckets");
212 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
213     CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
214     "Current Number of dyn. buckets");
215 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
216     CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
217     "Number of dyn. rules");
218 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
219     CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
220     "Max number of dyn. rules");
221 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
222     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
223     "Lifetime of dyn. rules for acks");
224 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
225     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
226     "Lifetime of dyn. rules for syn");
227 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
228     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
229     "Lifetime of dyn. rules for fin");
230 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
231     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
232     "Lifetime of dyn. rules for rst");
233 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
234     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
235     "Lifetime of dyn. rules for UDP");
236 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
237     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
238     "Lifetime of dyn. rules for other situations");
239 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
240     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
241     "Enable keepalives for dyn. rules");
242 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keep_states,
243     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0,
244     "Do not flush dynamic states on rule deletion");
245 
246 SYSEND
247 
248 #endif /* SYSCTL_NODE */
249 
250 
251 #ifdef INET6
252 static __inline int
253 hash_packet6(struct ipfw_flow_id *id)
254 {
255 	u_int32_t i;
256 	i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
257 	    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
258 	    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
259 	    (id->src_ip6.__u6_addr.__u6_addr32[3]);
260 	return ntohl(i);
261 }
262 #endif
263 
264 /*
265  * IMPORTANT: the hash function for dynamic rules must be commutative
266  * in source and destination (ip,port), because rules are bidirectional
267  * and we want to find both in the same bucket.
268  */
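/*
 * Worked example (added for clarity): XOR is commutative, so for an
 * IPv4 flow
 *	(dst_ip ^ src_ip) ^ (dst_port ^ src_port)
 * gives the same bucket when source and destination are swapped,
 * letting forward and reverse packets of a session hash identically.
 */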
269 static __inline int
270 hash_packet(struct ipfw_flow_id *id, int buckets)
271 {
272 	u_int32_t i;
273 
274 #ifdef INET6
275 	if (IS_IP6_FLOW_ID(id))
276 		i = hash_packet6(id);
277 	else
278 #endif /* INET6 */
279 	i = (id->dst_ip) ^ (id->src_ip);
280 	i ^= (id->dst_port) ^ (id->src_port);
281 	return (i & (buckets - 1));
282 }
283 
284 #if 0
285 #define	DYN_DEBUG(fmt, ...)	do {			\
286 	printf("%s: " fmt "\n", __func__, __VA_ARGS__);	\
287 } while (0)
288 #else
289 #define	DYN_DEBUG(fmt, ...)
290 #endif
291 
292 static char *default_state_name = "default";
293 struct dyn_state_obj {
294 	struct named_object	no;
295 	char			name[64];
296 };
297 
298 #define	DYN_STATE_OBJ(ch, cmd)	\
299     ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1))
300 /*
301  * Classifier callback.
302  * Return 0 if opcode contains object that should be referenced
303  * or rewritten.
304  */
305 static int
306 dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
307 {
308 
309 	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
310 	/* Don't rewrite "check-state any" */
311 	if (cmd->arg1 == 0 &&
312 	    cmd->opcode == O_CHECK_STATE)
313 		return (1);
314 
315 	*puidx = cmd->arg1;
316 	*ptype = 0;
317 	return (0);
318 }
319 
320 static void
321 dyn_update(ipfw_insn *cmd, uint16_t idx)
322 {
323 
324 	cmd->arg1 = idx;
325 	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
326 }
327 
328 static int
329 dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
330     struct named_object **pno)
331 {
332 	ipfw_obj_ntlv *ntlv;
333 	const char *name;
334 
335 	DYN_DEBUG("uidx %d", ti->uidx);
336 	if (ti->uidx != 0) {
337 		if (ti->tlvs == NULL)
338 			return (EINVAL);
339 		/* Search ntlv in the buffer provided by user */
340 		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
341 		    IPFW_TLV_STATE_NAME);
342 		if (ntlv == NULL)
343 			return (EINVAL);
344 		name = ntlv->name;
345 	} else
346 		name = default_state_name;
347 	/*
348 	 * Search for the named object with the corresponding name.
349 	 * Since state objects are global, ignore the set value
350 	 * and use zero instead.
351 	 */
352 	*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
353 	    IPFW_TLV_STATE_NAME, name);
354 	/*
355 	 * We always return success here.
356 	 * The caller will check *pno and mark the object as unresolved,
357 	 * then it will automatically create the "default" object.
358 	 */
359 	return (0);
360 }
361 
362 static struct named_object *
363 dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
364 {
365 
366 	DYN_DEBUG("kidx %d", idx);
367 	return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
368 }
369 
370 static int
371 dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
372     uint16_t *pkidx)
373 {
374 	struct namedobj_instance *ni;
375 	struct dyn_state_obj *obj;
376 	struct named_object *no;
377 	ipfw_obj_ntlv *ntlv;
378 	char *name;
379 
380 	DYN_DEBUG("uidx %d", ti->uidx);
381 	if (ti->uidx != 0) {
382 		if (ti->tlvs == NULL)
383 			return (EINVAL);
384 		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
385 		    IPFW_TLV_STATE_NAME);
386 		if (ntlv == NULL)
387 			return (EINVAL);
388 		name = ntlv->name;
389 	} else
390 		name = default_state_name;
391 
392 	ni = CHAIN_TO_SRV(ch);
393 	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
394 	obj->no.name = obj->name;
395 	obj->no.etlv = IPFW_TLV_STATE_NAME;
396 	strlcpy(obj->name, name, sizeof(obj->name));
397 
398 	IPFW_UH_WLOCK(ch);
399 	no = ipfw_objhash_lookup_name_type(ni, 0,
400 	    IPFW_TLV_STATE_NAME, name);
401 	if (no != NULL) {
402 		/*
403 		 * Object is already created.
404 		 * Just return its kidx and bump refcount.
405 		 */
406 		*pkidx = no->kidx;
407 		no->refcnt++;
408 		IPFW_UH_WUNLOCK(ch);
409 		free(obj, M_IPFW);
410 		DYN_DEBUG("\tfound kidx %d", *pkidx);
411 		return (0);
412 	}
413 	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
414 		DYN_DEBUG("\talloc_idx failed for %s", name);
415 		IPFW_UH_WUNLOCK(ch);
416 		free(obj, M_IPFW);
417 		return (ENOSPC);
418 	}
419 	ipfw_objhash_add(ni, &obj->no);
420 	IPFW_WLOCK(ch);
421 	SRV_OBJECT(ch, obj->no.kidx) = obj;
422 	IPFW_WUNLOCK(ch);
423 	obj->no.refcnt++;
424 	*pkidx = obj->no.kidx;
425 	IPFW_UH_WUNLOCK(ch);
426 	DYN_DEBUG("\tcreated kidx %d", *pkidx);
427 	return (0);
428 }
429 
430 static void
431 dyn_destroy(struct ip_fw_chain *ch, struct named_object *no)
432 {
433 	struct dyn_state_obj *obj;
434 
435 	IPFW_UH_WLOCK_ASSERT(ch);
436 
437 	KASSERT(no->refcnt == 1,
438 	    ("Destroying object '%s' (type %u, idx %u) with refcnt %u",
439 	    no->name, no->etlv, no->kidx, no->refcnt));
440 
441 	DYN_DEBUG("kidx %d", no->kidx);
442 	IPFW_WLOCK(ch);
443 	obj = SRV_OBJECT(ch, no->kidx);
444 	SRV_OBJECT(ch, no->kidx) = NULL;
445 	IPFW_WUNLOCK(ch);
446 	ipfw_objhash_del(CHAIN_TO_SRV(ch), no);
447 	ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx);
448 
449 	free(obj, M_IPFW);
450 }
451 
452 static struct opcode_obj_rewrite dyn_opcodes[] = {
453 	{
454 		O_KEEP_STATE, IPFW_TLV_STATE_NAME,
455 		dyn_classify, dyn_update,
456 		dyn_findbyname, dyn_findbykidx,
457 		dyn_create, dyn_destroy
458 	},
459 	{
460 		O_CHECK_STATE, IPFW_TLV_STATE_NAME,
461 		dyn_classify, dyn_update,
462 		dyn_findbyname, dyn_findbykidx,
463 		dyn_create, dyn_destroy
464 	},
465 	{
466 		O_PROBE_STATE, IPFW_TLV_STATE_NAME,
467 		dyn_classify, dyn_update,
468 		dyn_findbyname, dyn_findbykidx,
469 		dyn_create, dyn_destroy
470 	},
471 	{
472 		O_LIMIT, IPFW_TLV_STATE_NAME,
473 		dyn_classify, dyn_update,
474 		dyn_findbyname, dyn_findbykidx,
475 		dyn_create, dyn_destroy
476 	},
477 };
478 /**
479  * Print customizable flow id description via log(9) facility.
480  */
481 static void
482 print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags,
483     char *prefix, char *postfix)
484 {
485 	struct in_addr da;
486 #ifdef INET6
487 	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
488 #else
489 	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
490 #endif
491 
492 #ifdef INET6
493 	if (IS_IP6_FLOW_ID(id)) {
494 		ip6_sprintf(src, &id->src_ip6);
495 		ip6_sprintf(dst, &id->dst_ip6);
496 	} else
497 #endif
498 	{
499 		da.s_addr = htonl(id->src_ip);
500 		inet_ntop(AF_INET, &da, src, sizeof(src));
501 		da.s_addr = htonl(id->dst_ip);
502 		inet_ntop(AF_INET, &da, dst, sizeof(dst));
503 	}
504 	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
505 	    prefix, dyn_type, src, id->src_port, dst,
506 	    id->dst_port, DYN_COUNT, postfix);
507 }
508 
509 #define	print_dyn_rule(id, dtype, prefix, postfix)	\
510 	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)
511 
512 #define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
513 #define TIME_LE(a,b)       ((int)((a)-(b)) < 0)
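/*
 * Note (added for clarity): the signed-difference comparison above is
 * safe across time_uptime wraparound; e.g. TIME_LEQ(0xfffffffeU, 1U)
 * is true because (int)(0xfffffffe - 1) is negative.
 */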
514 
515 static void
516 dyn_update_proto_state(ipfw_dyn_rule *q, const struct ipfw_flow_id *id,
517     const struct tcphdr *tcp, int dir)
518 {
519 	uint32_t ack;
520 	u_char flags;
521 
522 	if (id->proto == IPPROTO_TCP) {
523 		flags = id->_flags & (TH_FIN | TH_SYN | TH_RST);
524 #define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
525 #define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
526 #define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
527 #define	ACK_FWD		0x10000			/* fwd ack seen */
528 #define	ACK_REV		0x20000			/* rev ack seen */
529 
530 		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
531 		switch (q->state & TCP_FLAGS) {
532 		case TH_SYN:			/* opening */
533 			q->expire = time_uptime + V_dyn_syn_lifetime;
534 			break;
535 
536 		case BOTH_SYN:			/* move to established */
537 		case BOTH_SYN | TH_FIN:		/* one side tries to close */
538 		case BOTH_SYN | (TH_FIN << 8):
539 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
540 			if (tcp == NULL)
541 				break;
542 
543 			ack = ntohl(tcp->th_ack);
544 			if (dir == MATCH_FORWARD) {
545 				if (q->ack_fwd == 0 ||
546 				    _SEQ_GE(ack, q->ack_fwd)) {
547 					q->ack_fwd = ack;
548 					q->state |= ACK_FWD;
549 				}
550 			} else {
551 				if (q->ack_rev == 0 ||
552 				    _SEQ_GE(ack, q->ack_rev)) {
553 					q->ack_rev = ack;
554 					q->state |= ACK_REV;
555 				}
556 			}
557 			if ((q->state & (ACK_FWD | ACK_REV)) ==
558 			    (ACK_FWD | ACK_REV)) {
559 				q->expire = time_uptime + V_dyn_ack_lifetime;
560 				q->state &= ~(ACK_FWD | ACK_REV);
561 			}
562 			break;
563 
564 		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
565 			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
566 				V_dyn_fin_lifetime =
567 				    V_dyn_keepalive_period - 1;
568 			q->expire = time_uptime + V_dyn_fin_lifetime;
569 			break;
570 
571 		default:
572 #if 0
573 			/*
574 			 * reset or some invalid combination, but can also
575 			 * occur if we use keep-state the wrong way.
576 			 */
577 			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
578 				printf("invalid state: 0x%x\n", q->state);
579 #endif
580 			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
581 				V_dyn_rst_lifetime =
582 				    V_dyn_keepalive_period - 1;
583 			q->expire = time_uptime + V_dyn_rst_lifetime;
584 			break;
585 		}
586 	} else if (id->proto == IPPROTO_UDP) {
587 		q->expire = time_uptime + V_dyn_udp_lifetime;
588 	} else {
589 		/* other protocols */
590 		q->expire = time_uptime + V_dyn_short_lifetime;
591 	}
592 }
593 
594 /*
595  * Lookup a dynamic rule, locked version.
596  */
597 static ipfw_dyn_rule *
598 lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction,
599     struct tcphdr *tcp, uint16_t kidx)
600 {
601 	/*
602 	 * Stateful ipfw extensions.
603 	 * Lookup into dynamic session queue.
604 	 */
605 	ipfw_dyn_rule *prev, *q = NULL;
606 	int dir;
607 
608 	IPFW_BUCK_ASSERT(i);
609 
610 	dir = MATCH_NONE;
611 	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
612 		if (q->dyn_type == O_LIMIT_PARENT)
613 			continue;
614 
615 		if (pkt->proto != q->id.proto)
616 			continue;
617 
618 		if (kidx != 0 && kidx != q->kidx)
619 			continue;
620 
621 		if (IS_IP6_FLOW_ID(pkt)) {
622 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
623 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
624 			    pkt->src_port == q->id.src_port &&
625 			    pkt->dst_port == q->id.dst_port) {
626 				dir = MATCH_FORWARD;
627 				break;
628 			}
629 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
630 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
631 			    pkt->src_port == q->id.dst_port &&
632 			    pkt->dst_port == q->id.src_port) {
633 				dir = MATCH_REVERSE;
634 				break;
635 			}
636 		} else {
637 			if (pkt->src_ip == q->id.src_ip &&
638 			    pkt->dst_ip == q->id.dst_ip &&
639 			    pkt->src_port == q->id.src_port &&
640 			    pkt->dst_port == q->id.dst_port) {
641 				dir = MATCH_FORWARD;
642 				break;
643 			}
644 			if (pkt->src_ip == q->id.dst_ip &&
645 			    pkt->dst_ip == q->id.src_ip &&
646 			    pkt->src_port == q->id.dst_port &&
647 			    pkt->dst_port == q->id.src_port) {
648 				dir = MATCH_REVERSE;
649 				break;
650 			}
651 		}
652 	}
653 	if (q == NULL)
654 		goto done;	/* q = NULL, not found */
655 
656 	if (prev != NULL) {	/* found and not in front */
657 		prev->next = q->next;
658 		q->next = V_ipfw_dyn_v[i].head;
659 		V_ipfw_dyn_v[i].head = q;
660 	}
661 
662 	/* update state according to flags */
663 	dyn_update_proto_state(q, pkt, tcp, dir);
664 done:
665 	if (match_direction != NULL)
666 		*match_direction = dir;
667 	return (q);
668 }
669 
670 ipfw_dyn_rule *
671 ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
672     struct tcphdr *tcp, uint16_t kidx)
673 {
674 	ipfw_dyn_rule *q;
675 	int i;
676 
677 	i = hash_packet(pkt, V_curr_dyn_buckets);
678 
679 	IPFW_BUCK_LOCK(i);
680 	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp, kidx);
681 	if (q == NULL)
682 		IPFW_BUCK_UNLOCK(i);
683 	/* NB: return with the bucket lock held when q is not NULL */
684 	return q;
685 }
686 
687 /*
688  * Unlock bucket mtx
689  * @q - pointer to dynamic rule
690  */
691 void
692 ipfw_dyn_unlock(ipfw_dyn_rule *q)
693 {
694 
695 	IPFW_BUCK_UNLOCK(q->bucket);
696 }
697 
698 static int
699 resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
700 {
701 	int i, k, nbuckets_old;
702 	ipfw_dyn_rule *q;
703 	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;
704 
705 	/* Check if the given number is a power of 2 and at most 64k */
706 	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
707 		return 1;
708 
709 	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
710 	    V_curr_dyn_buckets, nbuckets);
711 
712 	/* Allocate and initialize new hash */
713 	dyn_v = malloc(nbuckets * sizeof(*dyn_v), M_IPFW,
714 	    M_WAITOK | M_ZERO);
715 
716 	for (i = 0 ; i < nbuckets; i++)
717 		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);
718 
719 	/*
720 	 * Take the upper half lock, as get_map() does, to ease
721 	 * read-only access to the dynamic rules hash from sysctl
722 	 */
723 	IPFW_UH_WLOCK(chain);
724 
725 	/*
726 	 * Acquire chain write lock to permit hash access
727 	 * for main traffic path without additional locks
728 	 */
729 	IPFW_WLOCK(chain);
730 
731 	/* Save old values */
732 	nbuckets_old = V_curr_dyn_buckets;
733 	dyn_v_old = V_ipfw_dyn_v;
734 
735 	/* Skip relinking if array is not set up */
736 	if (V_ipfw_dyn_v == NULL)
737 		V_curr_dyn_buckets = 0;
738 
739 	/* Re-link all dynamic states */
740 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
741 		while (V_ipfw_dyn_v[i].head != NULL) {
742 			/* Remove from current chain */
743 			q = V_ipfw_dyn_v[i].head;
744 			V_ipfw_dyn_v[i].head = q->next;
745 
746 			/* Get new hash value */
747 			k = hash_packet(&q->id, nbuckets);
748 			q->bucket = k;
749 			/* Add to the new head */
750 			q->next = dyn_v[k].head;
751 			dyn_v[k].head = q;
752              }
753 	}
754 
755 	/* Update current pointers/buckets values */
756 	V_curr_dyn_buckets = nbuckets;
757 	V_ipfw_dyn_v = dyn_v;
758 
759 	IPFW_WUNLOCK(chain);
760 
761 	IPFW_UH_WUNLOCK(chain);
762 
763 	/* Start periodic callout on initial creation */
764 	if (dyn_v_old == NULL) {
765         	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
766 		return (0);
767 	}
768 
769 	/* Destroy all mutexes */
770 	for (i = 0 ; i < nbuckets_old ; i++)
771 		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);
772 
773 	/* Free old hash */
774 	free(dyn_v_old, M_IPFW);
775 
776 	return 0;
777 }
778 
779 /**
780  * Install state of type 'dyn_type' for a dynamic session.
781  * The hash table contains three types of rules:
782  * - regular rules (O_KEEP_STATE)
783  * - rules for sessions with a limited number of sessions per user
784  *   (O_LIMIT). When one is created, the parent's counter is
785  *   incremented by 1, and decremented on delete. In this case,
786  *   the 'rule' argument is the parent rule and not the chain.
787  * - "parent" rules for the above (O_LIMIT_PARENT).
788  */
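/*
 * Illustrative example (not from the original source): a rule such as
 *	ipfw add allow tcp from any to me setup limit src-addr 10
 * creates one O_LIMIT_PARENT state keyed on the masked flow id (here
 * the source address) plus one O_LIMIT child per tracked connection;
 * ipfw_install_state() compares the parent's count with the limit.
 */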
789 static ipfw_dyn_rule *
790 add_dyn_rule(struct ipfw_flow_id *id, int i, uint8_t dyn_type,
791     struct ip_fw *rule, uint16_t kidx)
792 {
793 	ipfw_dyn_rule *r;
794 
795 	IPFW_BUCK_ASSERT(i);
796 
797 	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
798 	if (r == NULL) {
799 		if (last_log != time_uptime) {
800 			last_log = time_uptime;
801 			log(LOG_DEBUG,
802 			    "ipfw: Cannot allocate dynamic state, "
803 			    "consider increasing net.inet.ip.fw.dyn_max\n");
804 		}
805 		return NULL;
806 	}
807 	ipfw_dyn_count++;
808 
809 	/*
810 	 * refcount on parent is already incremented, so
811 	 * it is safe to use parent unlocked.
812 	 */
813 	if (dyn_type == O_LIMIT) {
814 		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
815 		if ( parent->dyn_type != O_LIMIT_PARENT)
816 			panic("invalid parent");
817 		r->parent = parent;
818 		rule = parent->rule;
819 	}
820 
821 	r->id = *id;
822 	r->expire = time_uptime + V_dyn_syn_lifetime;
823 	r->rule = rule;
824 	r->dyn_type = dyn_type;
825 	IPFW_ZERO_DYN_COUNTER(r);
826 	r->count = 0;
827 	r->kidx = kidx;
828 	r->bucket = i;
829 	r->next = V_ipfw_dyn_v[i].head;
830 	V_ipfw_dyn_v[i].head = r;
831 	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
832 	return r;
833 }
834 
835 /**
836  * Lookup the dynamic parent rule using pkt and rule as search keys.
837  * If the lookup fails, then install one.
838  */
839 static ipfw_dyn_rule *
840 lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule,
841     uint16_t kidx)
842 {
843 	ipfw_dyn_rule *q;
844 	int i, is_v6;
845 
846 	is_v6 = IS_IP6_FLOW_ID(pkt);
847 	i = hash_packet( pkt, V_curr_dyn_buckets );
848 	*pindex = i;
849 	IPFW_BUCK_LOCK(i);
850 	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
851 		if (q->dyn_type == O_LIMIT_PARENT &&
852 		    kidx == q->kidx &&
853 		    rule == q->rule &&
854 		    pkt->proto == q->id.proto &&
855 		    pkt->src_port == q->id.src_port &&
856 		    pkt->dst_port == q->id.dst_port &&
857 		    (
858 			(is_v6 &&
859 			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
860 				&(q->id.src_ip6)) &&
861 			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
862 				&(q->id.dst_ip6))) ||
863 			(!is_v6 &&
864 			 pkt->src_ip == q->id.src_ip &&
865 			 pkt->dst_ip == q->id.dst_ip)
866 		    )
867 		) {
868 			q->expire = time_uptime + V_dyn_short_lifetime;
869 			DEB(print_dyn_rule(pkt, q->dyn_type,
870 			    "lookup_dyn_parent found", "");)
871 			return q;
872 		}
873 
874 	/* Add virtual limiting rule */
875 	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule, kidx);
876 }
877 
878 /**
879  * Install dynamic state for rule type cmd->o.opcode
880  *
881  * Returns 1 (failure) if state is not installed because of errors or because
882  * session limitations are enforced.
883  */
884 int
885 ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
886     ipfw_insn_limit *cmd, struct ip_fw_args *args, uint32_t tablearg)
887 {
888 	ipfw_dyn_rule *q;
889 	int i;
890 
891 	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state",
892 	    (cmd->o.arg1 == 0 ? "": DYN_STATE_OBJ(chain, &cmd->o)->name));)
893 
894 	i = hash_packet(&args->f_id, V_curr_dyn_buckets);
895 
896 	IPFW_BUCK_LOCK(i);
897 
898 	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL, cmd->o.arg1);
899 	if (q != NULL) {	/* should never occur */
900 		DEB(
901 		if (last_log != time_uptime) {
902 			last_log = time_uptime;
903 			printf("ipfw: %s: entry already present, done\n",
904 			    __func__);
905 		})
906 		IPFW_BUCK_UNLOCK(i);
907 		return (0);
908 	}
909 
910 	/*
911 	 * State limiting is done via uma(9) zone limiting.
912 	 * Save pointer to newly-installed rule and reject
913 	 * packet if add_dyn_rule() returned NULL.
914 	 * Note q is currently set to NULL.
915 	 */
916 
917 	switch (cmd->o.opcode) {
918 	case O_KEEP_STATE:	/* bidir rule */
919 		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule,
920 		    cmd->o.arg1);
921 		break;
922 
923 	case O_LIMIT: {		/* limit number of sessions */
924 		struct ipfw_flow_id id;
925 		ipfw_dyn_rule *parent;
926 		uint32_t conn_limit;
927 		uint16_t limit_mask = cmd->limit_mask;
928 		int pindex;
929 
930 		conn_limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);
931 
932 		DEB(
933 		if (cmd->conn_limit == IP_FW_TARG)
934 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
935 			    "(tablearg)\n", __func__, conn_limit);
936 		else
937 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
938 			    __func__, conn_limit);
939 		)
940 
941 		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
942 		id.proto = args->f_id.proto;
943 		id.addr_type = args->f_id.addr_type;
944 		id.fib = M_GETFIB(args->m);
945 
946 		if (IS_IP6_FLOW_ID (&(args->f_id))) {
947 			bzero(&id.src_ip6, sizeof(id.src_ip6));
948 			bzero(&id.dst_ip6, sizeof(id.dst_ip6));
949 
950 			if (limit_mask & DYN_SRC_ADDR)
951 				id.src_ip6 = args->f_id.src_ip6;
952 			if (limit_mask & DYN_DST_ADDR)
953 				id.dst_ip6 = args->f_id.dst_ip6;
954 		} else {
955 			if (limit_mask & DYN_SRC_ADDR)
956 				id.src_ip = args->f_id.src_ip;
957 			if (limit_mask & DYN_DST_ADDR)
958 				id.dst_ip = args->f_id.dst_ip;
959 		}
960 		if (limit_mask & DYN_SRC_PORT)
961 			id.src_port = args->f_id.src_port;
962 		if (limit_mask & DYN_DST_PORT)
963 			id.dst_port = args->f_id.dst_port;
964 
965 		/*
966 		 * We have to release the lock for the previous bucket to
967 		 * avoid a possible deadlock
968 		 */
969 		IPFW_BUCK_UNLOCK(i);
970 
971 		parent = lookup_dyn_parent(&id, &pindex, rule, cmd->o.arg1);
972 		if (parent == NULL) {
973 			printf("ipfw: %s: add parent failed\n", __func__);
974 			IPFW_BUCK_UNLOCK(pindex);
975 			return (1);
976 		}
977 
978 		if (parent->count >= conn_limit) {
979 			if (V_fw_verbose && last_log != time_uptime) {
980 				char sbuf[24];
981 
982 				last_log = time_uptime;
983 				snprintf(sbuf, sizeof(sbuf),
984 				    "%d drop session",
985 				    parent->rule->rulenum);
986 				print_dyn_rule_flags(&args->f_id,
987 				    cmd->o.opcode,
988 				    LOG_SECURITY | LOG_DEBUG,
989 				    sbuf, "too many entries");
990 			}
991 			IPFW_BUCK_UNLOCK(pindex);
992 			return (1);
993 		}
994 		/* Increment counter on parent */
995 		parent->count++;
996 		IPFW_BUCK_UNLOCK(pindex);
997 
998 		IPFW_BUCK_LOCK(i);
999 		q = add_dyn_rule(&args->f_id, i, O_LIMIT,
1000 		    (struct ip_fw *)parent, cmd->o.arg1);
1001 		if (q == NULL) {
1002 			/* Decrement parent counter and notify caller */
1003 			IPFW_BUCK_UNLOCK(i);
1004 			IPFW_BUCK_LOCK(pindex);
1005 			parent->count--;
1006 			IPFW_BUCK_UNLOCK(pindex);
1007 			return (1);
1008 		}
1009 		break;
1010 	}
1011 	default:
1012 		printf("ipfw: %s: unknown dynamic rule type %u\n",
1013 		    __func__, cmd->o.opcode);
1014 	}
1015 
1016 	if (q == NULL) {
1017 		IPFW_BUCK_UNLOCK(i);
1018 		return (1);	/* Notify caller about failure */
1019 	}
1020 
1021 	dyn_update_proto_state(q, &args->f_id, NULL, MATCH_FORWARD);
1022 	IPFW_BUCK_UNLOCK(i);
1023 	return (0);
1024 }
1025 
1026 /*
1027  * Generate a TCP packet, containing either a RST or a keepalive.
1028  * When flags & TH_RST, we are sending a RST packet because a
1029  * "reset" action matched the packet. Otherwise we are sending a
1030  * keepalive, and flags & TH_SYN selects the packet's direction.
1031  * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
1032  * so that MAC can label the reply appropriately.
1033  */
1034 struct mbuf *
1035 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
1036     u_int32_t ack, int flags)
1037 {
1038 	struct mbuf *m = NULL;		/* stupid compiler */
1039 	int len, dir;
1040 	struct ip *h = NULL;		/* stupid compiler */
1041 #ifdef INET6
1042 	struct ip6_hdr *h6 = NULL;
1043 #endif
1044 	struct tcphdr *th = NULL;
1045 
1046 	MGETHDR(m, M_NOWAIT, MT_DATA);
1047 	if (m == NULL)
1048 		return (NULL);
1049 
1050 	M_SETFIB(m, id->fib);
1051 #ifdef MAC
1052 	if (replyto != NULL)
1053 		mac_netinet_firewall_reply(replyto, m);
1054 	else
1055 		mac_netinet_firewall_send(m);
1056 #else
1057 	(void)replyto;		/* don't warn about unused arg */
1058 #endif
1059 
1060 	switch (id->addr_type) {
1061 	case 4:
1062 		len = sizeof(struct ip) + sizeof(struct tcphdr);
1063 		break;
1064 #ifdef INET6
1065 	case 6:
1066 		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1067 		break;
1068 #endif
1069 	default:
1070 		/* XXX: log me?!? */
1071 		FREE_PKT(m);
1072 		return (NULL);
1073 	}
1074 	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
1075 
1076 	m->m_data += max_linkhdr;
1077 	m->m_flags |= M_SKIP_FIREWALL;
1078 	m->m_pkthdr.len = m->m_len = len;
1079 	m->m_pkthdr.rcvif = NULL;
1080 	bzero(m->m_data, len);
1081 
1082 	switch (id->addr_type) {
1083 	case 4:
1084 		h = mtod(m, struct ip *);
1085 
1086 		/* prepare for checksum */
1087 		h->ip_p = IPPROTO_TCP;
1088 		h->ip_len = htons(sizeof(struct tcphdr));
1089 		if (dir) {
1090 			h->ip_src.s_addr = htonl(id->src_ip);
1091 			h->ip_dst.s_addr = htonl(id->dst_ip);
1092 		} else {
1093 			h->ip_src.s_addr = htonl(id->dst_ip);
1094 			h->ip_dst.s_addr = htonl(id->src_ip);
1095 		}
1096 
1097 		th = (struct tcphdr *)(h + 1);
1098 		break;
1099 #ifdef INET6
1100 	case 6:
1101 		h6 = mtod(m, struct ip6_hdr *);
1102 
1103 		/* prepare for checksum */
1104 		h6->ip6_nxt = IPPROTO_TCP;
1105 		h6->ip6_plen = htons(sizeof(struct tcphdr));
1106 		if (dir) {
1107 			h6->ip6_src = id->src_ip6;
1108 			h6->ip6_dst = id->dst_ip6;
1109 		} else {
1110 			h6->ip6_src = id->dst_ip6;
1111 			h6->ip6_dst = id->src_ip6;
1112 		}
1113 
1114 		th = (struct tcphdr *)(h6 + 1);
1115 		break;
1116 #endif
1117 	}
1118 
1119 	if (dir) {
1120 		th->th_sport = htons(id->src_port);
1121 		th->th_dport = htons(id->dst_port);
1122 	} else {
1123 		th->th_sport = htons(id->dst_port);
1124 		th->th_dport = htons(id->src_port);
1125 	}
1126 	th->th_off = sizeof(struct tcphdr) >> 2;
1127 
1128 	if (flags & TH_RST) {
1129 		if (flags & TH_ACK) {
1130 			th->th_seq = htonl(ack);
1131 			th->th_flags = TH_RST;
1132 		} else {
1133 			if (flags & TH_SYN)
1134 				seq++;
1135 			th->th_ack = htonl(seq);
1136 			th->th_flags = TH_RST | TH_ACK;
1137 		}
1138 	} else {
1139 		/*
1140 		 * Keepalive - use caller provided sequence numbers
1141 		 */
1142 		th->th_seq = htonl(seq);
1143 		th->th_ack = htonl(ack);
1144 		th->th_flags = TH_ACK;
1145 	}
1146 
1147 	switch (id->addr_type) {
1148 	case 4:
1149 		th->th_sum = in_cksum(m, len);
1150 
1151 		/* finish the ip header */
1152 		h->ip_v = 4;
1153 		h->ip_hl = sizeof(*h) >> 2;
1154 		h->ip_tos = IPTOS_LOWDELAY;
1155 		h->ip_off = htons(0);
1156 		h->ip_len = htons(len);
1157 		h->ip_ttl = V_ip_defttl;
1158 		h->ip_sum = 0;
1159 		break;
1160 #ifdef INET6
1161 	case 6:
1162 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
1163 		    sizeof(struct tcphdr));
1164 
1165 		/* finish the ip6 header */
1166 		h6->ip6_vfc |= IPV6_VERSION;
1167 		h6->ip6_hlim = IPV6_DEFHLIM;
1168 		break;
1169 #endif
1170 	}
1171 
1172 	return (m);
1173 }
1174 
1175 /*
1176  * Queue keepalive packets for given dynamic rule
1177  */
1178 static struct mbuf **
1179 ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
1180 {
1181 	struct mbuf *m_rev, *m_fwd;
1182 
1183 	m_rev = (q->state & ACK_REV) ? NULL :
1184 	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
1185 	m_fwd = (q->state & ACK_FWD) ? NULL :
1186 	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
1187 
1188 	if (m_rev != NULL) {
1189 		*mtailp = m_rev;
1190 		mtailp = &(*mtailp)->m_nextpkt;
1191 	}
1192 	if (m_fwd != NULL) {
1193 		*mtailp = m_fwd;
1194 		mtailp = &(*mtailp)->m_nextpkt;
1195 	}
1196 
1197 	return (mtailp);
1198 }
1199 
1200 /*
1201  * This procedure is used to perform various maintenance
1202  * on the dynamic hash list. Currently it is called every second.
1203  */
1204 static void
1205 ipfw_dyn_tick(void * vnetx)
1206 {
1207 	struct ip_fw_chain *chain;
1208 	int check_ka = 0;
1209 #ifdef VIMAGE
1210 	struct vnet *vp = vnetx;
1211 #endif
1212 
1213 	CURVNET_SET(vp);
1214 
1215 	chain = &V_layer3_chain;
1216 
1217 	/* Run keepalive checks every keepalive_period iff ka is enabled */
1218 	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
1219 	    (V_dyn_keepalive != 0)) {
1220 		V_dyn_keepalive_last = time_uptime;
1221 		check_ka = 1;
1222 	}
1223 
1224 	check_dyn_rules(chain, NULL, check_ka, 1);
1225 
1226 	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);
1227 
1228 	CURVNET_RESTORE();
1229 }
1230 
1231 
1232 /*
1233  * Walk through all dynamic states doing generic maintenance:
1234  * 1) free expired states
1235  * 2) free all states based on deleted rule / set
1236  * 3) send keepalives for states if needed
1237  *
1238  * @chain - pointer to current ipfw rules chain
1239  * @rt - delete all states originated by rules matching this range,
1240  *       if != NULL
1241  * @check_ka - perform checking/sending keepalives
1242  * @timer - indicate call from timer routine.
1243  *
1244  * Timer routine must call this function unlocked to permit
1245  * sending keepalives/resizing table.
1246  *
1247  * Other callers have to call this function with IPFW_UH_WLOCK held.
1248  * Additionally, the function assumes that the dynamic rule/set is
1249  * ALREADY deleted so no new states can be generated by
1250  * 'deleted' rules.
1251  *
1252  * Write lock is needed to ensure that unused parent rules
1253  * are not freed by another instance (see stages 2, 3)
1254  */
1255 static void
1256 check_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt,
1257     int check_ka, int timer)
1258 {
1259 	struct mbuf *m0, *m, *mnext, **mtailp;
1260 	struct ip *h;
1261 	int i, dyn_count, new_buckets = 0, max_buckets;
1262 	int expired = 0, expired_limits = 0, parents = 0, total = 0;
1263 	ipfw_dyn_rule *q, *q_prev, *q_next;
1264 	ipfw_dyn_rule *exp_head, **exptailp;
1265 	ipfw_dyn_rule *exp_lhead, **expltailp;
1266 
1267 	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
1268 	    __func__));
1269 
1270 	/* Avoid possible LOR */
1271 	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
1272 	    __func__));
1273 
1274 	/*
1275 	 * Do not perform any checks if we currently have no dynamic states
1276 	 */
1277 	if (DYN_COUNT == 0)
1278 		return;
1279 
1280 	/* Expired states */
1281 	exp_head = NULL;
1282 	exptailp = &exp_head;
1283 
1284 	/* Expired limit states */
1285 	exp_lhead = NULL;
1286 	expltailp = &exp_lhead;
1287 
1288 	/*
1289 	 * We build a chain of packets to send out here; transmitting them
1290 	 * before dropping the IPFW dynamic rule lock would result
1291 	 * in a lock order reversal with the normal packet input -> ipfw
1292 	 * call stack.
1293 	 */
1294 	m0 = NULL;
1295 	mtailp = &m0;
1296 
1297 	/* Protect from hash resizing */
1298 	if (timer != 0)
1299 		IPFW_UH_WLOCK(chain);
1300 	else
1301 		IPFW_UH_WLOCK_ASSERT(chain);
1302 
1303 #define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }
1304 
1305 	/* Stage 1: perform requested deletion */
1306 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1307 		IPFW_BUCK_LOCK(i);
1308 		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
1309 			/* account every rule */
1310 			total++;
1311 
1312 			/* Skip parent rules entirely */
1313 			if (q->dyn_type == O_LIMIT_PARENT) {
1314 				parents++;
1315 				NEXT_RULE();
1316 			}
1317 
1318 			/*
1319 			 * Remove rules which are:
1320 			 * 1) expired
1321 			 * 2) matching the deletion range
1322 			 */
1323 			if ((TIME_LEQ(q->expire, time_uptime)) ||
1324 			    (rt != NULL && ipfw_match_range(q->rule, rt))) {
1325 				if (TIME_LE(time_uptime, q->expire) &&
1326 				    q->dyn_type == O_KEEP_STATE &&
1327 				    V_dyn_keep_states != 0) {
1328 					/*
1329 					 * Do not delete state if
1330 					 * it is not expired and
1331 					 * dyn_keep_states is ON.
1332 					 * However we need to re-link it
1333 					 * to a stable rule (the default rule)
1334 					 */
1335 					q->rule = chain->default_rule;
1336 					NEXT_RULE();
1337 				}
1338 
1339 				/* Unlink q from current list */
1340 				q_next = q->next;
1341 				if (q == V_ipfw_dyn_v[i].head)
1342 					V_ipfw_dyn_v[i].head = q_next;
1343 				else
1344 					q_prev->next = q_next;
1345 
1346 				q->next = NULL;
1347 
1348 				/* queue q to expire list */
1349 				if (q->dyn_type != O_LIMIT) {
1350 					*exptailp = q;
1351 					exptailp = &(*exptailp)->next;
1352 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1353 					    "unlink entry", "left");
1354 					)
1355 				} else {
1356 					/* Separate list for limit rules */
1357 					*expltailp = q;
1358 					expltailp = &(*expltailp)->next;
1359 					expired_limits++;
1360 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1361 					    "unlink limit entry", "left");
1362 					)
1363 				}
1364 
1365 				q = q_next;
1366 				expired++;
1367 				continue;
1368 			}
1369 
1370 			/*
1371 			 * Check if we need to send keepalive:
1372 			 * we need to ensure it is time to do KA,
1373 			 * this is an established TCP session, and the
1374 			 * expire time is within the keepalive interval
1375 			 */
1376 			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
1377 			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
1378 			    (TIME_LEQ(q->expire, time_uptime +
1379 			      V_dyn_keepalive_interval)))
1380 				mtailp = ipfw_dyn_send_ka(mtailp, q);
1381 
1382 			NEXT_RULE();
1383 		}
1384 		IPFW_BUCK_UNLOCK(i);
1385 	}
1386 
1387 	/* Stage 2: decrement counters from O_LIMIT parents */
1388 	if (expired_limits != 0) {
1389 		/*
1390 		 * XXX: Note that deleting set with more than one
1391 		 * heavily-used LIMIT rules can result in overwhelming
1392 		 * locking due to lack of per-hash value sorting
1393 		 *
1394 		 * We should probably think about:
1395 		 * 1) pre-allocating hash of size, say,
1396 		 * MAX(16, V_curr_dyn_buckets / 1024)
1397 		 * 2) checking if expired_limits is large enough
1398 		 * 3) If yes, init hash (or its part), re-link
1399 		 * current list and start decrementing procedure in
1400 		 * each bucket separately
1401 		 */
1402 
1403 		/*
1404 		 * Small optimization: do not unlock bucket until
1405 		 * we see that the next item resides in a different bucket
1406 		 */
1407 		if (exp_lhead != NULL) {
1408 			i = exp_lhead->parent->bucket;
1409 			IPFW_BUCK_LOCK(i);
1410 		}
1411 		for (q = exp_lhead; q != NULL; q = q->next) {
1412 			if (i != q->parent->bucket) {
1413 				IPFW_BUCK_UNLOCK(i);
1414 				i = q->parent->bucket;
1415 				IPFW_BUCK_LOCK(i);
1416 			}
1417 
1418 			/* Decrement the parent's session count */
1419 			q->parent->count--;
1420 		}
1421 		if (exp_lhead != NULL)
1422 			IPFW_BUCK_UNLOCK(i);
1423 	}
1424 
1425 	/*
1426 	 * We protect ourselves from unused parent deletion
1427 	 * (from the timer function) by holding the UH write lock.
1428 	 */
1429 
1430 	/* Stage 3: remove unused parent rules */
1431 	if ((parents != 0) && (expired != 0)) {
1432 		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1433 			IPFW_BUCK_LOCK(i);
1434 			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
1435 				if (q->dyn_type != O_LIMIT_PARENT)
1436 					NEXT_RULE();
1437 
1438 				if (q->count != 0)
1439 					NEXT_RULE();
1440 
1441 				/* Parent rule without consumers */
1442 
1443 				/* Unlink q from current list */
1444 				q_next = q->next;
1445 				if (q == V_ipfw_dyn_v[i].head)
1446 					V_ipfw_dyn_v[i].head = q_next;
1447 				else
1448 					q_prev->next = q_next;
1449 
1450 				q->next = NULL;
1451 
1452 				/* Add to expired list */
1453 				*exptailp = q;
1454 				exptailp = &(*exptailp)->next;
1455 
1456 				DEB(print_dyn_rule(&q->id, q->dyn_type,
1457 				    "unlink parent entry", "left");
1458 				)
1459 
1460 				expired++;
1461 
1462 				q = q_next;
1463 			}
1464 			IPFW_BUCK_UNLOCK(i);
1465 		}
1466 	}
1467 
1468 #undef NEXT_RULE
1469 
1470 	if (timer != 0) {
1471 		/*
1472 		 * Check if we need to resize hash:
1473 		 * if the current number of states exceeds twice the number of
1474 		 * buckets in the hash, grow the hash to the minimum power of 2
1475 		 * bigger than the current state count. Limit hash size to 64k.
1476 		 */
1477 		max_buckets = (V_dyn_buckets_max > 65536) ?
1478 		    65536 : V_dyn_buckets_max;
1479 
1480 		dyn_count = DYN_COUNT;
1481 
1482 		if ((dyn_count > V_curr_dyn_buckets * 2) &&
1483 		    (dyn_count < max_buckets)) {
1484 			new_buckets = V_curr_dyn_buckets;
1485 			while (new_buckets < dyn_count) {
1486 				new_buckets *= 2;
1487 
1488 				if (new_buckets >= max_buckets)
1489 					break;
1490 			}
1491 		}
1492 
1493 		IPFW_UH_WUNLOCK(chain);
1494 	}
1495 
1496 	/* Finally delete old states and limits, if any */
1497 	for (q = exp_head; q != NULL; q = q_next) {
1498 		q_next = q->next;
1499 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1500 		ipfw_dyn_count--;
1501 	}
1502 
1503 	for (q = exp_lhead; q != NULL; q = q_next) {
1504 		q_next = q->next;
1505 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1506 		ipfw_dyn_count--;
1507 	}
1508 
1509 	/*
1510 	 * The rest of the code MUST be called from the timer routine only,
1511 	 * without holding any locks
1512 	 */
1513 	if (timer == 0)
1514 		return;
1515 
1516 	/* Send keepalive packets if any */
1517 	for (m = m0; m != NULL; m = mnext) {
1518 		mnext = m->m_nextpkt;
1519 		m->m_nextpkt = NULL;
1520 		h = mtod(m, struct ip *);
1521 		if (h->ip_v == 4)
1522 			ip_output(m, NULL, NULL, 0, NULL, NULL);
1523 #ifdef INET6
1524 		else
1525 			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1526 #endif
1527 	}
1528 
1529 	/* Run table resize without holding any locks */
1530 	if (new_buckets != 0)
1531 		resize_dynamic_table(chain, new_buckets);
1532 }
1533 
1534 /*
1535  * Deletes all dynamic rules originated by rules in the matched range.
1536  *
1537  * @chain - pointer to current ipfw rules chain
1538  * @rt - delete all states originated by rules in matched range.
1539  *
1540  * Function has to be called with IPFW_UH_WLOCK held.
1541  * Additionally, the function assumes that the dynamic rule/set is
1542  * ALREADY deleted so no new states can be generated by
1543  * 'deleted' rules.
1544  */
1545 void
1546 ipfw_expire_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1547 {
1548 
1549 	check_dyn_rules(chain, rt, 0, 0);
1550 }
1551 
1552 /*
1553  * Check if rule contains at least one dynamic opcode.
1554  *
1555  * Returns 1 if such opcode is found, 0 otherwise.
1556  */
1557 int
1558 ipfw_is_dyn_rule(struct ip_fw *rule)
1559 {
1560 	int cmdlen, l;
1561 	ipfw_insn *cmd;
1562 
1563 	l = rule->cmd_len;
1564 	cmd = rule->cmd;
1565 	cmdlen = 0;
1566 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1567 		cmdlen = F_LEN(cmd);
1568 
1569 		switch (cmd->opcode) {
1570 		case O_LIMIT:
1571 		case O_KEEP_STATE:
1572 		case O_PROBE_STATE:
1573 		case O_CHECK_STATE:
1574 			return (1);
1575 		}
1576 	}
1577 
1578 	return (0);
1579 }
1580 
1581 void
1582 ipfw_dyn_init(struct ip_fw_chain *chain)
1583 {
1584 
1585         V_ipfw_dyn_v = NULL;
1586         V_dyn_buckets_max = 256; /* must be power of 2 */
1587         V_curr_dyn_buckets = 256; /* must be power of 2 */
1588 
1589         V_dyn_ack_lifetime = 300;
1590         V_dyn_syn_lifetime = 20;
1591         V_dyn_fin_lifetime = 1;
1592         V_dyn_rst_lifetime = 1;
1593         V_dyn_udp_lifetime = 10;
1594         V_dyn_short_lifetime = 5;
1595 
1596         V_dyn_keepalive_interval = 20;
1597         V_dyn_keepalive_period = 5;
1598         V_dyn_keepalive = 1;    /* do send keepalives */
1599 	V_dyn_keepalive_last = time_uptime;
1600 
1601         V_dyn_max = 16384; /* max # of dynamic rules */
1602 
1603 	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
1604 	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
1605 	    UMA_ALIGN_PTR, 0);
1606 
1607 	/* Enforce limit on dynamic rules */
1608 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1609 
1610         callout_init(&V_ipfw_timeout, 1);
1611 
1612 	/*
1613 	 * This can potentially be done on first dynamic rule
1614 	 * being added to chain.
1615 	 */
1616 	resize_dynamic_table(chain, V_curr_dyn_buckets);
1617 	IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
1618 }
1619 
1620 void
1621 ipfw_dyn_uninit(int pass)
1622 {
1623 	int i;
1624 
1625 	if (pass == 0) {
1626 		callout_drain(&V_ipfw_timeout);
1627 		return;
1628 	}
1629 	IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
1630 
1631 	if (V_ipfw_dyn_v != NULL) {
1632 		/*
1633 		 * Skip deleting all dynamic states -
1634 		 * uma_zdestroy() does this more efficiently.
1635 		 */
1636 
1637 		/* Destroy all mutexes */
1638 		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
1639 			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
1640 		free(V_ipfw_dyn_v, M_IPFW);
1641 		V_ipfw_dyn_v = NULL;
1642 	}
1643 
1644         uma_zdestroy(V_ipfw_dyn_rule_zone);
1645 }
1646 
1647 #ifdef SYSCTL_NODE
1648 /*
1649  * Get/set maximum number of dynamic states in given VNET instance.
1650  */
1651 static int
1652 sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
1653 {
1654 	int error;
1655 	unsigned int nstates;
1656 
1657 	nstates = V_dyn_max;
1658 
1659 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1660 	/* Read operation or some error */
1661 	if ((error != 0) || (req->newptr == NULL))
1662 		return (error);
1663 
1664 	V_dyn_max = nstates;
1665 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1666 
1667 	return (0);
1668 }
1669 
1670 /*
1671  * Get current number of dynamic states in given VNET instance.
1672  */
1673 static int
1674 sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
1675 {
1676 	int error;
1677 	unsigned int nstates;
1678 
1679 	nstates = DYN_COUNT;
1680 
1681 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1682 
1683 	return (error);
1684 }
1685 #endif
1686 
1687 /*
1688  * Returns the size of the dynamic states export buffer (legacy format)
1689  */
1690 int
1691 ipfw_dyn_len(void)
1692 {
1693 
1694 	return (V_ipfw_dyn_v == NULL) ? 0 :
1695 		(DYN_COUNT * sizeof(ipfw_dyn_rule));
1696 }
1697 
1698 /*
1699  * Returns number of dynamic states.
1700  * Used by dump format v1 (current).
1701  */
1702 int
1703 ipfw_dyn_get_count(void)
1704 {
1705 
1706 	return (V_ipfw_dyn_v == NULL) ? 0 : DYN_COUNT;
1707 }
1708 
1709 static void
1710 export_dyn_rule(ipfw_dyn_rule *src, ipfw_dyn_rule *dst)
1711 {
1712 
1713 	memcpy(dst, src, sizeof(*src));
1714 	memcpy(&(dst->rule), &(src->rule->rulenum), sizeof(src->rule->rulenum));
1715 	/*
1716 	 * store set number into high word of
1717 	 * dst->rule pointer.
1718 	 */
1719 	memcpy((char *)&dst->rule + sizeof(src->rule->rulenum),
1720 	    &(src->rule->set), sizeof(src->rule->set));
1721 	/*
1722 	 * store a non-null value in "next".
1723 	 * The userland code will interpret a
1724 	 * NULL here as a marker
1725 	 * for the last dynamic rule.
1726 	 */
1727 	memcpy(&dst->next, &dst, sizeof(dst));
1728 	dst->expire =
1729 	    TIME_LEQ(dst->expire, time_uptime) ?  0 : dst->expire - time_uptime;
1730 }
1731 
1732 /*
1733  * Fills in the buffer given by @sd with dynamic states.
1734  * Used by dump format v1 (current).
1735  *
1736  * Returns 0 on success.
1737  */
1738 int
1739 ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd)
1740 {
1741 	ipfw_dyn_rule *p;
1742 	ipfw_obj_dyntlv *dst, *last;
1743 	ipfw_obj_ctlv *ctlv;
1744 	int i;
1745 	size_t sz;
1746 
1747 	if (V_ipfw_dyn_v == NULL)
1748 		return (0);
1749 
1750 	IPFW_UH_RLOCK_ASSERT(chain);
1751 
1752 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1753 	if (ctlv == NULL)
1754 		return (ENOMEM);
1755 	sz = sizeof(ipfw_obj_dyntlv);
1756 	ctlv->head.type = IPFW_TLV_DYNSTATE_LIST;
1757 	ctlv->objsize = sz;
1758 	last = NULL;
1759 
1760 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1761 		IPFW_BUCK_LOCK(i);
1762 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1763 			dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, sz);
1764 			if (dst == NULL) {
1765 				IPFW_BUCK_UNLOCK(i);
1766 				return (ENOMEM);
1767 			}
1768 
1769 			export_dyn_rule(p, &dst->state);
1770 			dst->head.length = sz;
1771 			dst->head.type = IPFW_TLV_DYN_ENT;
1772 			last = dst;
1773 		}
1774 		IPFW_BUCK_UNLOCK(i);
1775 	}
1776 
1777 	if (last != NULL) /* mark last dynamic rule */
1778 		last->head.flags = IPFW_DF_LAST;
1779 
1780 	return (0);
1781 }
1782 
1783 /*
1784  * Fill given buffer with dynamic states (legacy format).
1785  * IPFW_UH_RLOCK has to be held while calling.
1786  */
1787 void
1788 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
1789 {
1790 	ipfw_dyn_rule *p, *last = NULL;
1791 	char *bp;
1792 	int i;
1793 
1794 	if (V_ipfw_dyn_v == NULL)
1795 		return;
1796 	bp = *pbp;
1797 
1798 	IPFW_UH_RLOCK_ASSERT(chain);
1799 
1800 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1801 		IPFW_BUCK_LOCK(i);
1802 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1803 			if (bp + sizeof *p <= ep) {
1804 				ipfw_dyn_rule *dst =
1805 					(ipfw_dyn_rule *)bp;
1806 
1807 				export_dyn_rule(p, dst);
1808 				last = dst;
1809 				bp += sizeof(ipfw_dyn_rule);
1810 			}
1811 		}
1812 		IPFW_BUCK_UNLOCK(i);
1813 	}
1814 
1815 	if (last != NULL) /* mark last dynamic rule */
1816 		bzero(&last->next, sizeof(last));
1817 	*pbp = bp;
1818 }
1819 /* end of file */
1820