xref: /freebsd/sys/netpfil/ipfw/ip_fw_dynamic.c (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
1 /*-
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #define        DEB(x)		/* debugging aid: DEB() blocks are compiled out */
30 #define        DDB(x) x	/* debugging aid: DDB() blocks are kept */
31 
32 /*
33  * Dynamic rule support for ipfw
34  */
35 
36 #include "opt_ipfw.h"
37 #include "opt_inet.h"
38 #ifndef INET
39 #error IPFIREWALL requires INET.
40 #endif /* INET */
41 #include "opt_inet6.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/lock.h>
50 #include <sys/rmlock.h>
51 #include <sys/socket.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <net/ethernet.h> /* for ETHERTYPE_IP */
55 #include <net/if.h>
56 #include <net/if_var.h>
57 #include <net/pfil.h>
58 #include <net/vnet.h>
59 
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip_var.h>	/* ip_defttl */
63 #include <netinet/ip_fw.h>
64 #include <netinet/tcp_var.h>
65 #include <netinet/udp.h>
66 
67 #include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
68 #ifdef INET6
69 #include <netinet6/in6_var.h>
70 #include <netinet6/ip6_var.h>
71 #endif
72 
73 #include <netpfil/ipfw/ip_fw_private.h>
74 
75 #include <machine/in_cksum.h>	/* XXX for in_cksum */
76 
77 #ifdef MAC
78 #include <security/mac/mac_framework.h>
79 #endif
80 
81 /*
82  * Description of dynamic rules.
83  *
84  * Dynamic rules are stored in lists accessed through a hash table
85  * (ipfw_dyn_v) whose size is curr_dyn_buckets. The table is grown
86  * automatically by the timer routine, up to the limit set through
87  * the sysctl variable dyn_buckets.
88  *
89  * XXX currently there is only one list, ipfw_dyn.
90  *
91  * When a packet is received, its address fields are first masked
92  * with the mask defined for the rule, then hashed, then matched
93  * against the entries in the corresponding list.
94  * Dynamic rules can be used for different purposes:
95  *  + stateful rules;
96  *  + enforcing limits on the number of sessions;
97  *  + in-kernel NAT (not implemented yet)
98  *
99  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
100  * measured in seconds and depending on the flags.
101  *
102  * The total number of dynamic rules is tracked by the UMA zone item count.
103  * The max number of dynamic rules is dyn_max. When we reach
104  * the maximum number of rules we do not create any more. This is
105  * done to avoid consuming too much memory, but also too much
106  * time when searching on each packet (ideally, we should try instead
107  * to put a limit on the length of the list on each bucket...).
108  *
109  * Each dynamic rule holds a pointer to the parent ipfw rule so
110  * we know what action to perform. Dynamic rules are removed when
111  * the parent rule is deleted. This can be changed by the
112  * dyn_keep_states sysctl.
113  *
114  * There are some limitations with dynamic rules -- we do not
115  * obey the 'randomized match', and we do not do multiple
116  * passes through the firewall. XXX check the latter!!!
117  */
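
/*
 * Illustrative example (not part of the kernel code): a userland ruleset
 * that exercises this module could look like
 *
 *	ipfw add 100 check-state
 *	ipfw add 200 allow tcp from me to any setup keep-state
 *	ipfw add 300 allow tcp from any to me 22 setup limit src-addr 4
 *
 * keep-state and limit install a dynamic state on the first packet of a
 * flow (O_KEEP_STATE / O_LIMIT), while check-state (O_CHECK_STATE) and the
 * implicit check performed by keep-state/limit rules match the following
 * packets of the same flow against the stored states.
 */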
118 
119 struct ipfw_dyn_bucket {
120 	struct mtx	mtx;		/* Bucket protecting lock */
121 	ipfw_dyn_rule	*head;		/* Pointer to first rule */
122 };
123 
124 /*
125  * Static variables followed by global ones
126  */
127 static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
128 static VNET_DEFINE(u_int32_t, dyn_buckets_max);
129 static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
130 static VNET_DEFINE(struct callout, ipfw_timeout);
131 #define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
132 #define	V_dyn_buckets_max		VNET(dyn_buckets_max)
133 #define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
134 #define V_ipfw_timeout                  VNET(ipfw_timeout)
135 
136 static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
137 #define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)
138 
139 #define	IPFW_BUCK_LOCK_INIT(b)	\
140 	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
141 #define	IPFW_BUCK_LOCK_DESTROY(b)	\
142 	mtx_destroy(&(b)->mtx)
143 #define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
144 #define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
145 #define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)
146 
147 
148 static VNET_DEFINE(int, dyn_keep_states);
149 #define	V_dyn_keep_states		VNET(dyn_keep_states)
150 
151 /*
152  * Timeouts for various events in handling dynamic rules.
153  */
154 static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
155 static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
156 static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
157 static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
158 static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
159 static VNET_DEFINE(u_int32_t, dyn_short_lifetime);
160 
161 #define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
162 #define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
163 #define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
164 #define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
165 #define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
166 #define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)
167 
168 /*
169  * Keepalives are sent if dyn_keepalive is set. They are sent every
170  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
171  * seconds of lifetime of a rule.
172  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
173  * than dyn_keepalive_period.
174  */
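
/*
 * Worked example with the defaults set in ipfw_dyn_init() below
 * (dyn_ack_lifetime = 300, dyn_keepalive_interval = 20,
 * dyn_keepalive_period = 5): an idle established TCP state expires
 * 300 seconds after its last packet; during the last 20 seconds of
 * that lifetime the per-second timer queues keepalives, but at most
 * once every 5 seconds, so up to about four keepalive exchanges can
 * refresh the state before it is dropped.
 */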
175 
176 static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
177 static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
178 static VNET_DEFINE(u_int32_t, dyn_keepalive);
179 static VNET_DEFINE(time_t, dyn_keepalive_last);
180 
181 #define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
182 #define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
183 #define	V_dyn_keepalive			VNET(dyn_keepalive)
184 #define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)
185 
186 static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */
187 
188 #define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
189 #define	V_dyn_max			VNET(dyn_max)
190 
191 /* for userspace, emulate the uma_zone_get_cur() counter with ipfw_dyn_count */
192 static int ipfw_dyn_count;	/* number of objects */
193 
194 #ifdef USERSPACE /* emulation of UMA object counters for userspace */
195 #define uma_zone_get_cur(x)	ipfw_dyn_count
196 #endif /* USERSPACE */
197 
198 static int last_log;	/* Log ratelimiting */
199 
200 static void ipfw_dyn_tick(void *vnetx);
201 static void check_dyn_rules(struct ip_fw_chain *, ipfw_range_tlv *, int, int);
202 #ifdef SYSCTL_NODE
203 
204 static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
205 static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);
206 
207 SYSBEGIN(f2)
208 
209 SYSCTL_DECL(_net_inet_ip_fw);
210 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
211     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
212     "Max number of dyn. buckets");
213 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
214     CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
215     "Current number of dyn. buckets");
216 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
217     CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
218     "Number of dyn. rules");
219 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
220     CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
221     "Max number of dyn. rules");
222 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
223     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
224     "Lifetime of dyn. rules for acks");
225 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
226     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
227     "Lifetime of dyn. rules for syn");
228 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
229     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
230     "Lifetime of dyn. rules for fin");
231 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
232     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
233     "Lifetime of dyn. rules for rst");
234 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
235     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
236     "Lifetime of dyn. rules for UDP");
237 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
238     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
239     "Lifetime of dyn. rules for other situations");
240 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
241     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
242     "Enable keepalives for dyn. rules");
243 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keep_states,
244     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0,
245     "Do not flush dynamic states on rule deletion");
246 
247 SYSEND
248 
249 #endif /* SYSCTL_NODE */
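
/*
 * Example (illustrative only): the knobs above can be tuned from userland,
 * e.g. to allow more states and let the hash grow larger:
 *
 *	sysctl net.inet.ip.fw.dyn_max=65536
 *	sysctl net.inet.ip.fw.dyn_buckets=2048
 *
 * dyn_buckets only caps the automatic hash growth done by the timer
 * routine; curr_dyn_buckets reports the size currently in use.
 */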
250 
251 
252 #ifdef INET6
253 static __inline int
254 hash_packet6(struct ipfw_flow_id *id)
255 {
256 	u_int32_t i;
257 	i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
258 	    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
259 	    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
260 	    (id->src_ip6.__u6_addr.__u6_addr32[3]);
261 	return ntohl(i);
262 }
263 #endif
264 
265 /*
266  * IMPORTANT: the hash function for dynamic rules must be commutative
267  * in source and destination (ip,port), because rules are bidirectional
268  * and we want to find both in the same bucket.
269  */
270 static __inline int
271 hash_packet(struct ipfw_flow_id *id, int buckets)
272 {
273 	u_int32_t i;
274 
275 #ifdef INET6
276 	if (IS_IP6_FLOW_ID(id))
277 		i = hash_packet6(id);
278 	else
279 #endif /* INET6 */
280 	i = (id->dst_ip) ^ (id->src_ip);
281 	i ^= (id->dst_port) ^ (id->src_port);
282 	return (i & (buckets - 1));
283 }
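
/*
 * Example of the commutativity requirement above: a forward flow with
 * (src_ip = A, dst_ip = B, src_port = p, dst_port = q) hashes to
 * (A ^ B) ^ (p ^ q), and the reverse flow (B, A, q, p) yields the same
 * value, so both directions of a session land in the same bucket.
 */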
284 
285 #if 0
286 #define	DYN_DEBUG(fmt, ...)	do {			\
287 	printf("%s: " fmt "\n", __func__, __VA_ARGS__);	\
288 } while (0)
289 #else
290 #define	DYN_DEBUG(fmt, ...)
291 #endif
292 
293 static char *default_state_name = "default";
294 struct dyn_state_obj {
295 	struct named_object	no;
296 	char			name[64];
297 };
298 
299 #define	DYN_STATE_OBJ(ch, cmd)	\
300     ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1))
301 /*
302  * Classifier callback.
303  * Return 0 if the opcode contains an object that should be referenced
304  * or rewritten.
305  */
306 static int
307 dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
308 {
309 
310 	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
311 	/* Don't rewrite "check-state any" */
312 	if (cmd->arg1 == 0 &&
313 	    cmd->opcode == O_CHECK_STATE)
314 		return (1);
315 
316 	*puidx = cmd->arg1;
317 	*ptype = 0;
318 	return (0);
319 }
320 
321 static void
322 dyn_update(ipfw_insn *cmd, uint16_t idx)
323 {
324 
325 	cmd->arg1 = idx;
326 	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
327 }
328 
329 static int
330 dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
331     struct named_object **pno)
332 {
333 	ipfw_obj_ntlv *ntlv;
334 	const char *name;
335 
336 	DYN_DEBUG("uidx %d", ti->uidx);
337 	if (ti->uidx != 0) {
338 		if (ti->tlvs == NULL)
339 			return (EINVAL);
340 		/* Search ntlv in the buffer provided by user */
341 		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
342 		    IPFW_TLV_STATE_NAME);
343 		if (ntlv == NULL)
344 			return (EINVAL);
345 		name = ntlv->name;
346 	} else
347 		name = default_state_name;
348 	/*
349 	 * Search for a named object with the corresponding name.
350 	 * Since state objects are global, ignore the set value
351 	 * and use zero instead.
352 	 */
353 	*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
354 	    IPFW_TLV_STATE_NAME, name);
355 	/*
356 	 * We always return success here.
357 	 * The caller will check *pno and mark the object as unresolved,
358 	 * then it will automatically create the "default" object.
359 	 */
360 	return (0);
361 }
362 
363 static struct named_object *
364 dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
365 {
366 
367 	DYN_DEBUG("kidx %d", idx);
368 	return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
369 }
370 
371 static int
372 dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
373     uint16_t *pkidx)
374 {
375 	struct namedobj_instance *ni;
376 	struct dyn_state_obj *obj;
377 	struct named_object *no;
378 	ipfw_obj_ntlv *ntlv;
379 	char *name;
380 
381 	DYN_DEBUG("uidx %d", ti->uidx);
382 	if (ti->uidx != 0) {
383 		if (ti->tlvs == NULL)
384 			return (EINVAL);
385 		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
386 		    IPFW_TLV_STATE_NAME);
387 		if (ntlv == NULL)
388 			return (EINVAL);
389 		name = ntlv->name;
390 	} else
391 		name = default_state_name;
392 
393 	ni = CHAIN_TO_SRV(ch);
394 	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
395 	obj->no.name = obj->name;
396 	obj->no.etlv = IPFW_TLV_STATE_NAME;
397 	strlcpy(obj->name, name, sizeof(obj->name));
398 
399 	IPFW_UH_WLOCK(ch);
400 	no = ipfw_objhash_lookup_name_type(ni, 0,
401 	    IPFW_TLV_STATE_NAME, name);
402 	if (no != NULL) {
403 		/*
404 		 * Object is already created.
405 		 * Just return its kidx and bump refcount.
406 		 */
407 		*pkidx = no->kidx;
408 		no->refcnt++;
409 		IPFW_UH_WUNLOCK(ch);
410 		free(obj, M_IPFW);
411 		DYN_DEBUG("\tfound kidx %d", *pkidx);
412 		return (0);
413 	}
414 	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
415 		DYN_DEBUG("\talloc_idx failed for %s", name);
416 		IPFW_UH_WUNLOCK(ch);
417 		free(obj, M_IPFW);
418 		return (ENOSPC);
419 	}
420 	ipfw_objhash_add(ni, &obj->no);
421 	SRV_OBJECT(ch, obj->no.kidx) = obj;
422 	obj->no.refcnt++;
423 	*pkidx = obj->no.kidx;
424 	IPFW_UH_WUNLOCK(ch);
425 	DYN_DEBUG("\tcreated kidx %d", *pkidx);
426 	return (0);
427 }
428 
429 static void
430 dyn_destroy(struct ip_fw_chain *ch, struct named_object *no)
431 {
432 	struct dyn_state_obj *obj;
433 
434 	IPFW_UH_WLOCK_ASSERT(ch);
435 
436 	KASSERT(no->refcnt == 1,
437 	    ("Destroying object '%s' (type %u, idx %u) with refcnt %u",
438 	    no->name, no->etlv, no->kidx, no->refcnt));
439 
440 	DYN_DEBUG("kidx %d", no->kidx);
441 	obj = SRV_OBJECT(ch, no->kidx);
442 	SRV_OBJECT(ch, no->kidx) = NULL;
443 	ipfw_objhash_del(CHAIN_TO_SRV(ch), no);
444 	ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx);
445 
446 	free(obj, M_IPFW);
447 }
448 
449 static struct opcode_obj_rewrite dyn_opcodes[] = {
450 	{
451 		O_KEEP_STATE, IPFW_TLV_STATE_NAME,
452 		dyn_classify, dyn_update,
453 		dyn_findbyname, dyn_findbykidx,
454 		dyn_create, dyn_destroy
455 	},
456 	{
457 		O_CHECK_STATE, IPFW_TLV_STATE_NAME,
458 		dyn_classify, dyn_update,
459 		dyn_findbyname, dyn_findbykidx,
460 		dyn_create, dyn_destroy
461 	},
462 	{
463 		O_PROBE_STATE, IPFW_TLV_STATE_NAME,
464 		dyn_classify, dyn_update,
465 		dyn_findbyname, dyn_findbykidx,
466 		dyn_create, dyn_destroy
467 	},
468 	{
469 		O_LIMIT, IPFW_TLV_STATE_NAME,
470 		dyn_classify, dyn_update,
471 		dyn_findbyname, dyn_findbykidx,
472 		dyn_create, dyn_destroy
473 	},
474 };
475 /**
476  * Print a customizable flow id description via the log(9) facility.
477  */
478 static void
479 print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags,
480     char *prefix, char *postfix)
481 {
482 	struct in_addr da;
483 #ifdef INET6
484 	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
485 #else
486 	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
487 #endif
488 
489 #ifdef INET6
490 	if (IS_IP6_FLOW_ID(id)) {
491 		ip6_sprintf(src, &id->src_ip6);
492 		ip6_sprintf(dst, &id->dst_ip6);
493 	} else
494 #endif
495 	{
496 		da.s_addr = htonl(id->src_ip);
497 		inet_ntop(AF_INET, &da, src, sizeof(src));
498 		da.s_addr = htonl(id->dst_ip);
499 		inet_ntop(AF_INET, &da, dst, sizeof(dst));
500 	}
501 	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
502 	    prefix, dyn_type, src, id->src_port, dst,
503 	    id->dst_port, DYN_COUNT, postfix);
504 }
505 
506 #define	print_dyn_rule(id, dtype, prefix, postfix)	\
507 	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)
508 
509 #define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
510 #define TIME_LE(a,b)       ((int)((a)-(b)) < 0)
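
/*
 * These macros compare times with a signed subtraction so the result stays
 * correct across wraparound of the unsigned counters, as long as the two
 * values are less than 2^31 apart.  For example, with a = 0xfffffffe and
 * b = 0x00000002, (int)(a - b) == -4, so TIME_LE(a, b) holds even though
 * a > b numerically.
 */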
511 
512 static void
513 dyn_update_proto_state(ipfw_dyn_rule *q, const struct ipfw_flow_id *id,
514     const struct tcphdr *tcp, int dir)
515 {
516 	uint32_t ack;
517 	u_char flags;
518 
519 	if (id->proto == IPPROTO_TCP) {
520 		flags = id->_flags & (TH_FIN | TH_SYN | TH_RST);
521 #define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
522 #define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
523 #define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
524 #define	ACK_FWD		0x10000			/* fwd ack seen */
525 #define	ACK_REV		0x20000			/* rev ack seen */
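
		/*
		 * q->state accumulates the TCP flags seen in the forward
		 * direction in its low byte and the flags seen in the
		 * reverse direction shifted left by 8, plus the ACK_FWD/
		 * ACK_REV bits above.  Once a SYN has been seen in both
		 * directions, (q->state & TCP_FLAGS) == BOTH_SYN and the
		 * session is handled as established below.
		 */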
526 
527 		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
528 		switch (q->state & TCP_FLAGS) {
529 		case TH_SYN:			/* opening */
530 			q->expire = time_uptime + V_dyn_syn_lifetime;
531 			break;
532 
533 		case BOTH_SYN:			/* move to established */
534 		case BOTH_SYN | TH_FIN:		/* one side tries to close */
535 		case BOTH_SYN | (TH_FIN << 8):
536 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
537 			if (tcp == NULL)
538 				break;
539 
540 			ack = ntohl(tcp->th_ack);
541 			if (dir == MATCH_FORWARD) {
542 				if (q->ack_fwd == 0 ||
543 				    _SEQ_GE(ack, q->ack_fwd)) {
544 					q->ack_fwd = ack;
545 					q->state |= ACK_FWD;
546 				}
547 			} else {
548 				if (q->ack_rev == 0 ||
549 				    _SEQ_GE(ack, q->ack_rev)) {
550 					q->ack_rev = ack;
551 					q->state |= ACK_REV;
552 				}
553 			}
554 			if ((q->state & (ACK_FWD | ACK_REV)) ==
555 			    (ACK_FWD | ACK_REV)) {
556 				q->expire = time_uptime + V_dyn_ack_lifetime;
557 				q->state &= ~(ACK_FWD | ACK_REV);
558 			}
559 			break;
560 
561 		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
562 			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
563 				V_dyn_fin_lifetime =
564 				    V_dyn_keepalive_period - 1;
565 			q->expire = time_uptime + V_dyn_fin_lifetime;
566 			break;
567 
568 		default:
569 #if 0
570 			/*
571 			 * reset or some invalid combination, but can also
572 			 * occur if we use keep-state the wrong way.
573 			 */
574 			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
575 				printf("invalid state: 0x%x\n", q->state);
576 #endif
577 			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
578 				V_dyn_rst_lifetime =
579 				    V_dyn_keepalive_period - 1;
580 			q->expire = time_uptime + V_dyn_rst_lifetime;
581 			break;
582 		}
583 	} else if (id->proto == IPPROTO_UDP) {
584 		q->expire = time_uptime + V_dyn_udp_lifetime;
585 	} else {
586 		/* other protocols */
587 		q->expire = time_uptime + V_dyn_short_lifetime;
588 	}
589 }
590 
591 /*
592  * Lookup a dynamic rule, locked version.
593  */
594 static ipfw_dyn_rule *
595 lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction,
596     struct tcphdr *tcp, uint16_t kidx)
597 {
598 	/*
599 	 * Stateful ipfw extensions.
600 	 * Lookup into dynamic session queue.
601 	 */
602 	ipfw_dyn_rule *prev, *q = NULL;
603 	int dir;
604 
605 	IPFW_BUCK_ASSERT(i);
606 
607 	dir = MATCH_NONE;
608 	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
609 		if (q->dyn_type == O_LIMIT_PARENT)
610 			continue;
611 
612 		if (pkt->proto != q->id.proto)
613 			continue;
614 
615 		if (kidx != 0 && kidx != q->kidx)
616 			continue;
617 
618 		if (IS_IP6_FLOW_ID(pkt)) {
619 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
620 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
621 			    pkt->src_port == q->id.src_port &&
622 			    pkt->dst_port == q->id.dst_port) {
623 				dir = MATCH_FORWARD;
624 				break;
625 			}
626 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
627 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
628 			    pkt->src_port == q->id.dst_port &&
629 			    pkt->dst_port == q->id.src_port) {
630 				dir = MATCH_REVERSE;
631 				break;
632 			}
633 		} else {
634 			if (pkt->src_ip == q->id.src_ip &&
635 			    pkt->dst_ip == q->id.dst_ip &&
636 			    pkt->src_port == q->id.src_port &&
637 			    pkt->dst_port == q->id.dst_port) {
638 				dir = MATCH_FORWARD;
639 				break;
640 			}
641 			if (pkt->src_ip == q->id.dst_ip &&
642 			    pkt->dst_ip == q->id.src_ip &&
643 			    pkt->src_port == q->id.dst_port &&
644 			    pkt->dst_port == q->id.src_port) {
645 				dir = MATCH_REVERSE;
646 				break;
647 			}
648 		}
649 	}
650 	if (q == NULL)
651 		goto done;	/* q = NULL, not found */
652 
653 	if (prev != NULL) {	/* found and not in front */
654 		prev->next = q->next;
655 		q->next = V_ipfw_dyn_v[i].head;
656 		V_ipfw_dyn_v[i].head = q;
657 	}
658 
659 	/* update state according to flags */
660 	dyn_update_proto_state(q, pkt, tcp, dir);
661 done:
662 	if (match_direction != NULL)
663 		*match_direction = dir;
664 	return (q);
665 }
666 
667 ipfw_dyn_rule *
668 ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
669     struct tcphdr *tcp, uint16_t kidx)
670 {
671 	ipfw_dyn_rule *q;
672 	int i;
673 
674 	i = hash_packet(pkt, V_curr_dyn_buckets);
675 
676 	IPFW_BUCK_LOCK(i);
677 	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp, kidx);
678 	if (q == NULL)
679 		IPFW_BUCK_UNLOCK(i);
680 	/* NB: return table locked when q is not NULL */
681 	return q;
682 }
683 
684 /*
685  * Unlock bucket mtx.
686  * @q - pointer to dynamic rule
687  */
688 void
689 ipfw_dyn_unlock(ipfw_dyn_rule *q)
690 {
691 
692 	IPFW_BUCK_UNLOCK(q->bucket);
693 }
694 
695 static int
696 resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
697 {
698 	int i, k, nbuckets_old;
699 	ipfw_dyn_rule *q;
700 	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;
701 
702 	/* Check if given number is a power of 2 and not greater than 64k */
703 	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
704 		return 1;
705 
706 	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
707 	    V_curr_dyn_buckets, nbuckets);
708 
709 	/* Allocate and initialize new hash */
710 	dyn_v = malloc(nbuckets * sizeof(*dyn_v), M_IPFW,
711 	    M_WAITOK | M_ZERO);
712 
713 	for (i = 0 ; i < nbuckets; i++)
714 		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);
715 
716 	/*
717 	 * Take the upper half lock, as get_map() does, to ease
718 	 * read-only access to the dynamic rules hash from sysctl
719 	 */
720 	IPFW_UH_WLOCK(chain);
721 
722 	/*
723 	 * Acquire chain write lock to permit hash access
724 	 * for main traffic path without additional locks
725 	 */
726 	IPFW_WLOCK(chain);
727 
728 	/* Save old values */
729 	nbuckets_old = V_curr_dyn_buckets;
730 	dyn_v_old = V_ipfw_dyn_v;
731 
732 	/* Skip relinking if array is not set up */
733 	if (V_ipfw_dyn_v == NULL)
734 		V_curr_dyn_buckets = 0;
735 
736 	/* Re-link all dynamic states */
737 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
738 		while (V_ipfw_dyn_v[i].head != NULL) {
739 			/* Remove from current chain */
740 			q = V_ipfw_dyn_v[i].head;
741 			V_ipfw_dyn_v[i].head = q->next;
742 
743 			/* Get new hash value */
744 			k = hash_packet(&q->id, nbuckets);
745 			q->bucket = k;
746 			/* Add to the new head */
747 			q->next = dyn_v[k].head;
748 			dyn_v[k].head = q;
749 		}
750 	}
751 
752 	/* Update current pointers/buckets values */
753 	V_curr_dyn_buckets = nbuckets;
754 	V_ipfw_dyn_v = dyn_v;
755 
756 	IPFW_WUNLOCK(chain);
757 
758 	IPFW_UH_WUNLOCK(chain);
759 
760 	/* Start periodic callout on initial creation */
761 	if (dyn_v_old == NULL) {
762         	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
763 		return (0);
764 	}
765 
766 	/* Destroy all mutexes */
767 	for (i = 0 ; i < nbuckets_old ; i++)
768 		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);
769 
770 	/* Free old hash */
771 	free(dyn_v_old, M_IPFW);
772 
773 	return 0;
774 }
775 
776 /**
777  * Install state of type 'type' for a dynamic session.
778  * The hash table contains three types of rules:
779  * - regular rules (O_KEEP_STATE)
780  * - rules for sessions with a limited number of sessions per user
781  *   (O_LIMIT). When they are created, the parent's counter is
782  *   increased by 1 and decreased on delete. In this case,
783  *   the third parameter is the parent rule and not the chain.
784  * - "parent" rules for the above (O_LIMIT_PARENT).
785  */
786 static ipfw_dyn_rule *
787 add_dyn_rule(struct ipfw_flow_id *id, int i, uint8_t dyn_type,
788     struct ip_fw *rule, uint16_t kidx)
789 {
790 	ipfw_dyn_rule *r;
791 
792 	IPFW_BUCK_ASSERT(i);
793 
794 	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
795 	if (r == NULL) {
796 		if (last_log != time_uptime) {
797 			last_log = time_uptime;
798 			log(LOG_DEBUG,
799 			    "ipfw: Cannot allocate dynamic state, "
800 			    "consider increasing net.inet.ip.fw.dyn_max\n");
801 		}
802 		return NULL;
803 	}
804 	ipfw_dyn_count++;
805 
806 	/*
807 	 * The counter on the parent is already incremented, so
808 	 * it is safe to use the parent unlocked.
809 	 */
810 	if (dyn_type == O_LIMIT) {
811 		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
812 		if ( parent->dyn_type != O_LIMIT_PARENT)
813 			panic("invalid parent");
814 		r->parent = parent;
815 		rule = parent->rule;
816 	}
817 
818 	r->id = *id;
819 	r->expire = time_uptime + V_dyn_syn_lifetime;
820 	r->rule = rule;
821 	r->dyn_type = dyn_type;
822 	IPFW_ZERO_DYN_COUNTER(r);
823 	r->count = 0;
824 	r->kidx = kidx;
825 	r->bucket = i;
826 	r->next = V_ipfw_dyn_v[i].head;
827 	V_ipfw_dyn_v[i].head = r;
828 	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
829 	return r;
830 }
831 
832 /**
833  * lookup dynamic parent rule using pkt and rule as search keys.
834  * If the lookup fails, then install one.
835  */
836 static ipfw_dyn_rule *
837 lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule,
838     uint16_t kidx)
839 {
840 	ipfw_dyn_rule *q;
841 	int i, is_v6;
842 
843 	is_v6 = IS_IP6_FLOW_ID(pkt);
844 	i = hash_packet( pkt, V_curr_dyn_buckets );
845 	*pindex = i;
846 	IPFW_BUCK_LOCK(i);
847 	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
848 		if (q->dyn_type == O_LIMIT_PARENT &&
849 		    kidx == q->kidx &&
850 		    rule == q->rule &&
851 		    pkt->proto == q->id.proto &&
852 		    pkt->src_port == q->id.src_port &&
853 		    pkt->dst_port == q->id.dst_port &&
854 		    (
855 			(is_v6 &&
856 			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
857 				&(q->id.src_ip6)) &&
858 			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
859 				&(q->id.dst_ip6))) ||
860 			(!is_v6 &&
861 			 pkt->src_ip == q->id.src_ip &&
862 			 pkt->dst_ip == q->id.dst_ip)
863 		    )
864 		) {
865 			q->expire = time_uptime + V_dyn_short_lifetime;
866 			DEB(print_dyn_rule(pkt, q->dyn_type,
867 			    "lookup_dyn_parent found", "");)
868 			return q;
869 		}
870 
871 	/* Add virtual limiting rule */
872 	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule, kidx);
873 }
874 
875 /**
876  * Install dynamic state for rule type cmd->o.opcode
877  *
878  * Returns 1 (failure) if state is not installed because of errors or because
879  * session limitations are enforced.
880  */
881 int
882 ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
883     ipfw_insn_limit *cmd, struct ip_fw_args *args, uint32_t tablearg)
884 {
885 	ipfw_dyn_rule *q;
886 	int i;
887 
888 	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state",
889 	    (cmd->o.arg1 == 0 ? "": DYN_STATE_OBJ(chain, &cmd->o)->name));)
890 
891 	i = hash_packet(&args->f_id, V_curr_dyn_buckets);
892 
893 	IPFW_BUCK_LOCK(i);
894 
895 	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL, cmd->o.arg1);
896 	if (q != NULL) {	/* should never occur */
897 		DEB(
898 		if (last_log != time_uptime) {
899 			last_log = time_uptime;
900 			printf("ipfw: %s: entry already present, done\n",
901 			    __func__);
902 		})
903 		IPFW_BUCK_UNLOCK(i);
904 		return (0);
905 	}
906 
907 	/*
908 	 * State limiting is done via uma(9) zone limiting.
909 	 * Save pointer to newly-installed rule and reject
910 	 * packet if add_dyn_rule() returned NULL.
911 	 * Note q is currently set to NULL.
912 	 */
913 
914 	switch (cmd->o.opcode) {
915 	case O_KEEP_STATE:	/* bidir rule */
916 		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule,
917 		    cmd->o.arg1);
918 		break;
919 
920 	case O_LIMIT: {		/* limit number of sessions */
921 		struct ipfw_flow_id id;
922 		ipfw_dyn_rule *parent;
923 		uint32_t conn_limit;
924 		uint16_t limit_mask = cmd->limit_mask;
925 		int pindex;
926 
927 		conn_limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);
928 
929 		DEB(
930 		if (cmd->conn_limit == IP_FW_TARG)
931 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
932 			    "(tablearg)\n", __func__, conn_limit);
933 		else
934 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
935 			    __func__, conn_limit);
936 		)
937 
938 		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
939 		id.proto = args->f_id.proto;
940 		id.addr_type = args->f_id.addr_type;
941 		id.fib = M_GETFIB(args->m);
942 
943 		if (IS_IP6_FLOW_ID (&(args->f_id))) {
944 			bzero(&id.src_ip6, sizeof(id.src_ip6));
945 			bzero(&id.dst_ip6, sizeof(id.dst_ip6));
946 
947 			if (limit_mask & DYN_SRC_ADDR)
948 				id.src_ip6 = args->f_id.src_ip6;
949 			if (limit_mask & DYN_DST_ADDR)
950 				id.dst_ip6 = args->f_id.dst_ip6;
951 		} else {
952 			if (limit_mask & DYN_SRC_ADDR)
953 				id.src_ip = args->f_id.src_ip;
954 			if (limit_mask & DYN_DST_ADDR)
955 				id.dst_ip = args->f_id.dst_ip;
956 		}
957 		if (limit_mask & DYN_SRC_PORT)
958 			id.src_port = args->f_id.src_port;
959 		if (limit_mask & DYN_DST_PORT)
960 			id.dst_port = args->f_id.dst_port;
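
		/*
		 * Example: with "limit src-addr 4" only DYN_SRC_ADDR is set
		 * in limit_mask, so the masked id built above keeps just the
		 * source address; all sessions from one source then share a
		 * single O_LIMIT_PARENT whose counter enforces the limit.
		 */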
961 
962 		/*
963 		 * We have to release the lock on the previous bucket to
964 		 * avoid a possible deadlock
965 		 */
966 		IPFW_BUCK_UNLOCK(i);
967 
968 		parent = lookup_dyn_parent(&id, &pindex, rule, cmd->o.arg1);
969 		if (parent == NULL) {
970 			printf("ipfw: %s: add parent failed\n", __func__);
971 			IPFW_BUCK_UNLOCK(pindex);
972 			return (1);
973 		}
974 
975 		if (parent->count >= conn_limit) {
976 			if (V_fw_verbose && last_log != time_uptime) {
977 				char sbuf[24];
978 				last_log = time_uptime;
979 				snprintf(sbuf, sizeof(sbuf),
980 				    "%d drop session",
981 				    parent->rule->rulenum);
982 				print_dyn_rule_flags(&args->f_id,
983 				    cmd->o.opcode,
984 				    LOG_SECURITY | LOG_DEBUG,
985 				    sbuf, "too many entries");
986 			}
987 			IPFW_BUCK_UNLOCK(pindex);
988 			return (1);
989 		}
990 		/* Increment counter on parent */
991 		parent->count++;
992 		IPFW_BUCK_UNLOCK(pindex);
993 
994 		IPFW_BUCK_LOCK(i);
995 		q = add_dyn_rule(&args->f_id, i, O_LIMIT,
996 		    (struct ip_fw *)parent, cmd->o.arg1);
997 		if (q == NULL) {
998 			/* Decrement parent's counter and notify caller */
999 			IPFW_BUCK_UNLOCK(i);
1000 			IPFW_BUCK_LOCK(pindex);
1001 			parent->count--;
1002 			IPFW_BUCK_UNLOCK(pindex);
1003 			return (1);
1004 		}
1005 		break;
1006 	}
1007 	default:
1008 		printf("ipfw: %s: unknown dynamic rule type %u\n",
1009 		    __func__, cmd->o.opcode);
1010 	}
1011 
1012 	if (q == NULL) {
1013 		IPFW_BUCK_UNLOCK(i);
1014 		return (1);	/* Notify caller about failure */
1015 	}
1016 
1017 	dyn_update_proto_state(q, &args->f_id, NULL, MATCH_FORWARD);
1018 	IPFW_BUCK_UNLOCK(i);
1019 	return (0);
1020 }
1021 
1022 /*
1023  * Generate a TCP packet, containing either a RST or a keepalive.
1024  * When flags & TH_RST, we are sending a RST packet because a
1025  * "reset" action matched the packet. Otherwise we are sending a
1026  * keepalive, with only TH_ACK set and the caller-provided sequence numbers.
1027  * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
1028  * so that MAC can label the reply appropriately.
1029  */
1030 struct mbuf *
1031 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
1032     u_int32_t ack, int flags)
1033 {
1034 	struct mbuf *m = NULL;		/* stupid compiler */
1035 	int len, dir;
1036 	struct ip *h = NULL;		/* stupid compiler */
1037 #ifdef INET6
1038 	struct ip6_hdr *h6 = NULL;
1039 #endif
1040 	struct tcphdr *th = NULL;
1041 
1042 	MGETHDR(m, M_NOWAIT, MT_DATA);
1043 	if (m == NULL)
1044 		return (NULL);
1045 
1046 	M_SETFIB(m, id->fib);
1047 #ifdef MAC
1048 	if (replyto != NULL)
1049 		mac_netinet_firewall_reply(replyto, m);
1050 	else
1051 		mac_netinet_firewall_send(m);
1052 #else
1053 	(void)replyto;		/* don't warn about unused arg */
1054 #endif
1055 
1056 	switch (id->addr_type) {
1057 	case 4:
1058 		len = sizeof(struct ip) + sizeof(struct tcphdr);
1059 		break;
1060 #ifdef INET6
1061 	case 6:
1062 		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1063 		break;
1064 #endif
1065 	default:
1066 		/* XXX: log me?!? */
1067 		FREE_PKT(m);
1068 		return (NULL);
1069 	}
1070 	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
1071 
1072 	m->m_data += max_linkhdr;
1073 	m->m_flags |= M_SKIP_FIREWALL;
1074 	m->m_pkthdr.len = m->m_len = len;
1075 	m->m_pkthdr.rcvif = NULL;
1076 	bzero(m->m_data, len);
1077 
1078 	switch (id->addr_type) {
1079 	case 4:
1080 		h = mtod(m, struct ip *);
1081 
1082 		/* prepare for checksum */
1083 		h->ip_p = IPPROTO_TCP;
1084 		h->ip_len = htons(sizeof(struct tcphdr));
1085 		if (dir) {
1086 			h->ip_src.s_addr = htonl(id->src_ip);
1087 			h->ip_dst.s_addr = htonl(id->dst_ip);
1088 		} else {
1089 			h->ip_src.s_addr = htonl(id->dst_ip);
1090 			h->ip_dst.s_addr = htonl(id->src_ip);
1091 		}
1092 
1093 		th = (struct tcphdr *)(h + 1);
1094 		break;
1095 #ifdef INET6
1096 	case 6:
1097 		h6 = mtod(m, struct ip6_hdr *);
1098 
1099 		/* prepare for checksum */
1100 		h6->ip6_nxt = IPPROTO_TCP;
1101 		h6->ip6_plen = htons(sizeof(struct tcphdr));
1102 		if (dir) {
1103 			h6->ip6_src = id->src_ip6;
1104 			h6->ip6_dst = id->dst_ip6;
1105 		} else {
1106 			h6->ip6_src = id->dst_ip6;
1107 			h6->ip6_dst = id->src_ip6;
1108 		}
1109 
1110 		th = (struct tcphdr *)(h6 + 1);
1111 		break;
1112 #endif
1113 	}
1114 
1115 	if (dir) {
1116 		th->th_sport = htons(id->src_port);
1117 		th->th_dport = htons(id->dst_port);
1118 	} else {
1119 		th->th_sport = htons(id->dst_port);
1120 		th->th_dport = htons(id->src_port);
1121 	}
1122 	th->th_off = sizeof(struct tcphdr) >> 2;
1123 
1124 	if (flags & TH_RST) {
1125 		if (flags & TH_ACK) {
1126 			th->th_seq = htonl(ack);
1127 			th->th_flags = TH_RST;
1128 		} else {
1129 			if (flags & TH_SYN)
1130 				seq++;
1131 			th->th_ack = htonl(seq);
1132 			th->th_flags = TH_RST | TH_ACK;
1133 		}
1134 	} else {
1135 		/*
1136 		 * Keepalive - use caller provided sequence numbers
1137 		 */
1138 		th->th_seq = htonl(seq);
1139 		th->th_ack = htonl(ack);
1140 		th->th_flags = TH_ACK;
1141 	}
1142 
1143 	switch (id->addr_type) {
1144 	case 4:
1145 		th->th_sum = in_cksum(m, len);
1146 
1147 		/* finish the ip header */
1148 		h->ip_v = 4;
1149 		h->ip_hl = sizeof(*h) >> 2;
1150 		h->ip_tos = IPTOS_LOWDELAY;
1151 		h->ip_off = htons(0);
1152 		h->ip_len = htons(len);
1153 		h->ip_ttl = V_ip_defttl;
1154 		h->ip_sum = 0;
1155 		break;
1156 #ifdef INET6
1157 	case 6:
1158 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
1159 		    sizeof(struct tcphdr));
1160 
1161 		/* finish the ip6 header */
1162 		h6->ip6_vfc |= IPV6_VERSION;
1163 		h6->ip6_hlim = IPV6_DEFHLIM;
1164 		break;
1165 #endif
1166 	}
1167 
1168 	return (m);
1169 }
1170 
1171 /*
1172  * Queue keepalive packets for given dynamic rule
1173  */
1174 static struct mbuf **
1175 ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
1176 {
1177 	struct mbuf *m_rev, *m_fwd;
1178 
1179 	m_rev = (q->state & ACK_REV) ? NULL :
1180 	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
1181 	m_fwd = (q->state & ACK_FWD) ? NULL :
1182 	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
1183 
1184 	if (m_rev != NULL) {
1185 		*mtailp = m_rev;
1186 		mtailp = &(*mtailp)->m_nextpkt;
1187 	}
1188 	if (m_fwd != NULL) {
1189 		*mtailp = m_fwd;
1190 		mtailp = &(*mtailp)->m_nextpkt;
1191 	}
1192 
1193 	return (mtailp);
1194 }
1195 
1196 /*
1197  * This procedure is used to perform various maintenance
1198  * on the dynamic hash list. Currently it is called every second.
1199  */
1200 static void
1201 ipfw_dyn_tick(void * vnetx)
1202 {
1203 	struct ip_fw_chain *chain;
1204 	int check_ka = 0;
1205 #ifdef VIMAGE
1206 	struct vnet *vp = vnetx;
1207 #endif
1208 
1209 	CURVNET_SET(vp);
1210 
1211 	chain = &V_layer3_chain;
1212 
1213 	/* Run keepalive checks every keepalive_period iff ka is enabled */
1214 	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
1215 	    (V_dyn_keepalive != 0)) {
1216 		V_dyn_keepalive_last = time_uptime;
1217 		check_ka = 1;
1218 	}
1219 
1220 	check_dyn_rules(chain, NULL, check_ka, 1);
1221 
1222 	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);
1223 
1224 	CURVNET_RESTORE();
1225 }
1226 
1227 
1228 /*
1229  * Walk through all dynamic states doing generic maintenance:
1230  * 1) free expired states
1231  * 2) free all states based on deleted rule / set
1232  * 3) send keepalives for states if needed
1233  *
1234  * @chain - pointer to current ipfw rules chain
1235  * @rt - delete all states originated by rules in the given range
1236  *       (if @rt != NULL)
1237  * @check_ka - perform checking/sending keepalives
1238  * @timer - indicates a call from the timer routine.
1239  *
1240  * The timer routine must call this function unlocked to permit
1241  * sending keepalives/resizing the table.
1242  *
1243  * Other callers have to call this function with IPFW_UH_WLOCK held.
1244  * Additionally, the function assumes that the dynamic rule/set is
1245  * ALREADY deleted so no new states can be generated by
1246  * 'deleted' rules.
1247  *
1248  * The write lock is needed to ensure that unused parent rules
1249  * are not freed by another instance (see stages 2 and 3).
1250  */
1251 static void
1252 check_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt,
1253     int check_ka, int timer)
1254 {
1255 	struct mbuf *m0, *m, *mnext, **mtailp;
1256 	struct ip *h;
1257 	int i, dyn_count, new_buckets = 0, max_buckets;
1258 	int expired = 0, expired_limits = 0, parents = 0, total = 0;
1259 	ipfw_dyn_rule *q, *q_prev, *q_next;
1260 	ipfw_dyn_rule *exp_head, **exptailp;
1261 	ipfw_dyn_rule *exp_lhead, **expltailp;
1262 
1263 	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
1264 	    __func__));
1265 
1266 	/* Avoid possible LOR */
1267 	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
1268 	    __func__));
1269 
1270 	/*
1271 	 * Do not perform any checks if we currently have no dynamic states
1272 	 */
1273 	if (DYN_COUNT == 0)
1274 		return;
1275 
1276 	/* Expired states */
1277 	exp_head = NULL;
1278 	exptailp = &exp_head;
1279 
1280 	/* Expired limit states */
1281 	exp_lhead = NULL;
1282 	expltailp = &exp_lhead;
1283 
1284 	/*
1285 	 * We build a chain of packets to send out here -- not deferring
1286 	 * transmission until after we drop the IPFW dynamic rule lock would
1287 	 * result in a lock order reversal with the normal packet input ->
1288 	 * ipfw call stack.
1289 	 */
1290 	m0 = NULL;
1291 	mtailp = &m0;
1292 
1293 	/* Protect from hash resizing */
1294 	if (timer != 0)
1295 		IPFW_UH_WLOCK(chain);
1296 	else
1297 		IPFW_UH_WLOCK_ASSERT(chain);
1298 
1299 #define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }
1300 
1301 	/* Stage 1: perform requested deletion */
1302 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1303 		IPFW_BUCK_LOCK(i);
1304 		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
1305 			/* account every rule */
1306 			total++;
1307 
1308 			/* Skip parent rules entirely */
1309 			if (q->dyn_type == O_LIMIT_PARENT) {
1310 				parents++;
1311 				NEXT_RULE();
1312 			}
1313 
1314 			/*
1315 			 * Remove rules which are:
1316 			 * 1) expired
1317 			 * 2) matching the deletion range
1318 			 */
1319 			if ((TIME_LEQ(q->expire, time_uptime)) ||
1320 			    (rt != NULL && ipfw_match_range(q->rule, rt))) {
1321 				if (TIME_LE(time_uptime, q->expire) &&
1322 				    q->dyn_type == O_KEEP_STATE &&
1323 				    V_dyn_keep_states != 0) {
1324 					/*
1325 					 * Do not delete state if
1326 					 * it is not expired and
1327 					 * dyn_keep_states is ON.
1328 					 * However, we need to re-link it
1329 					 * to a stable rule (the default rule)
1330 					 */
1331 					q->rule = chain->default_rule;
1332 					NEXT_RULE();
1333 				}
1334 
1335 				/* Unlink q from current list */
1336 				q_next = q->next;
1337 				if (q == V_ipfw_dyn_v[i].head)
1338 					V_ipfw_dyn_v[i].head = q_next;
1339 				else
1340 					q_prev->next = q_next;
1341 
1342 				q->next = NULL;
1343 
1344 				/* queue q to expire list */
1345 				if (q->dyn_type != O_LIMIT) {
1346 					*exptailp = q;
1347 					exptailp = &(*exptailp)->next;
1348 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1349 					    "unlink entry", "left");
1350 					)
1351 				} else {
1352 					/* Separate list for limit rules */
1353 					*expltailp = q;
1354 					expltailp = &(*expltailp)->next;
1355 					expired_limits++;
1356 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1357 					    "unlink limit entry", "left");
1358 					)
1359 				}
1360 
1361 				q = q_next;
1362 				expired++;
1363 				continue;
1364 			}
1365 
1366 			/*
1367 			 * Check if we need to send keepalive:
1368 			 * we need to ensure it is time to do KA,
1369 			 * this is an established TCP session, and
1370 			 * the expire time is within the keepalive interval
1371 			 */
1372 			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
1373 			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
1374 			    (TIME_LEQ(q->expire, time_uptime +
1375 			      V_dyn_keepalive_interval)))
1376 				mtailp = ipfw_dyn_send_ka(mtailp, q);
1377 
1378 			NEXT_RULE();
1379 		}
1380 		IPFW_BUCK_UNLOCK(i);
1381 	}
1382 
1383 	/* Stage 2: decrement counters from O_LIMIT parents */
1384 	if (expired_limits != 0) {
1385 		/*
1386 		 * XXX: Note that deleting a set with more than one
1387 		 * heavily-used LIMIT rule can result in overwhelming
1388 		 * locking due to the lack of per-hash value sorting
1389 		 *
1390 		 * We should probably think about:
1391 		 * 1) pre-allocating hash of size, say,
1392 		 * MAX(16, V_curr_dyn_buckets / 1024)
1393 		 * 2) checking if expired_limits is large enough
1394 		 * 3) If yes, init hash (or its part), re-link
1395 		 * current list and start decrementing procedure in
1396 		 * each bucket separately
1397 		 */
1398 
1399 		/*
1400 		 * Small optimization: do not unlock bucket until
1401 		 * we see that the next item resides in a different bucket
1402 		 */
1403 		if (exp_lhead != NULL) {
1404 			i = exp_lhead->parent->bucket;
1405 			IPFW_BUCK_LOCK(i);
1406 		}
1407 		for (q = exp_lhead; q != NULL; q = q->next) {
1408 			if (i != q->parent->bucket) {
1409 				IPFW_BUCK_UNLOCK(i);
1410 				i = q->parent->bucket;
1411 				IPFW_BUCK_LOCK(i);
1412 			}
1413 
1414 			/* Decrease parent's session count */
1415 			q->parent->count--;
1416 		}
1417 		if (exp_lhead != NULL)
1418 			IPFW_BUCK_UNLOCK(i);
1419 	}
1420 
1421 	/*
1422 	 * We protect ourselves from unused parent deletion
1423 	 * (from the timer function) by holding the UH write lock.
1424 	 */
1425 
1426 	/* Stage 3: remove unused parent rules */
1427 	if ((parents != 0) && (expired != 0)) {
1428 		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1429 			IPFW_BUCK_LOCK(i);
1430 			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
1431 				if (q->dyn_type != O_LIMIT_PARENT)
1432 					NEXT_RULE();
1433 
1434 				if (q->count != 0)
1435 					NEXT_RULE();
1436 
1437 				/* Parent rule without consumers */
1438 
1439 				/* Unlink q from current list */
1440 				q_next = q->next;
1441 				if (q == V_ipfw_dyn_v[i].head)
1442 					V_ipfw_dyn_v[i].head = q_next;
1443 				else
1444 					q_prev->next = q_next;
1445 
1446 				q->next = NULL;
1447 
1448 				/* Add to expired list */
1449 				*exptailp = q;
1450 				exptailp = &(*exptailp)->next;
1451 
1452 				DEB(print_dyn_rule(&q->id, q->dyn_type,
1453 				    "unlink parent entry", "left");
1454 				)
1455 
1456 				expired++;
1457 
1458 				q = q_next;
1459 			}
1460 			IPFW_BUCK_UNLOCK(i);
1461 		}
1462 	}
1463 
1464 #undef NEXT_RULE
1465 
1466 	if (timer != 0) {
1467 		/*
1468 		 * Check if we need to resize hash:
1469 		 * if the current number of states exceeds the number of hash buckets,
1470 		 * grow the hash size to the minimum power of 2 which is bigger than
1471 		 * the current states count. Limit the hash size to 64k.
1472 		 */
1473 		max_buckets = (V_dyn_buckets_max > 65536) ?
1474 		    65536 : V_dyn_buckets_max;
1475 
1476 		dyn_count = DYN_COUNT;
1477 
1478 		if ((dyn_count > V_curr_dyn_buckets * 2) &&
1479 		    (dyn_count < max_buckets)) {
1480 			new_buckets = V_curr_dyn_buckets;
1481 			while (new_buckets < dyn_count) {
1482 				new_buckets *= 2;
1483 
1484 				if (new_buckets >= max_buckets)
1485 					break;
1486 			}
1487 		}
1488 
1489 		IPFW_UH_WUNLOCK(chain);
1490 	}
1491 
1492 	/* Finally delete old states and limits, if any */
1493 	for (q = exp_head; q != NULL; q = q_next) {
1494 		q_next = q->next;
1495 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1496 		ipfw_dyn_count--;
1497 	}
1498 
1499 	for (q = exp_lhead; q != NULL; q = q_next) {
1500 		q_next = q->next;
1501 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1502 		ipfw_dyn_count--;
1503 	}
1504 
1505 	/*
1506 	 * The rest of the code MUST be called from the timer routine only,
1507 	 * without holding any locks
1508 	 */
1509 	if (timer == 0)
1510 		return;
1511 
1512 	/* Send keepalive packets if any */
1513 	for (m = m0; m != NULL; m = mnext) {
1514 		mnext = m->m_nextpkt;
1515 		m->m_nextpkt = NULL;
1516 		h = mtod(m, struct ip *);
1517 		if (h->ip_v == 4)
1518 			ip_output(m, NULL, NULL, 0, NULL, NULL);
1519 #ifdef INET6
1520 		else
1521 			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1522 #endif
1523 	}
1524 
1525 	/* Run table resize without holding any locks */
1526 	if (new_buckets != 0)
1527 		resize_dynamic_table(chain, new_buckets);
1528 }
1529 
1530 /*
1531  * Deletes all dynamic rules originated by a given rule or by all
1532  * rules in a matched range.
1533  * @chain - pointer to current ipfw rules chain
1534  * @rt - delete all states originated by rules in the matched range.
1535  *
1536  * The function has to be called with IPFW_UH_WLOCK held.
1537  * Additionally, the function assumes that the dynamic rule/set is
1538  * ALREADY deleted so no new states can be generated by
1539  * 'deleted' rules.
1540  */
1541 void
1542 ipfw_expire_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1543 {
1544 
1545 	check_dyn_rules(chain, rt, 0, 0);
1546 }
1547 
1548 /*
1549  * Check if rule contains at least one dynamic opcode.
1550  *
1551  * Returns 1 if such opcode is found, 0 otherwise.
1552  */
1553 int
1554 ipfw_is_dyn_rule(struct ip_fw *rule)
1555 {
1556 	int cmdlen, l;
1557 	ipfw_insn *cmd;
1558 
1559 	l = rule->cmd_len;
1560 	cmd = rule->cmd;
1561 	cmdlen = 0;
1562 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
1563 		cmdlen = F_LEN(cmd);
1564 
1565 		switch (cmd->opcode) {
1566 		case O_LIMIT:
1567 		case O_KEEP_STATE:
1568 		case O_PROBE_STATE:
1569 		case O_CHECK_STATE:
1570 			return (1);
1571 		}
1572 	}
1573 
1574 	return (0);
1575 }
1576 
1577 void
1578 ipfw_dyn_init(struct ip_fw_chain *chain)
1579 {
1580 
1581         V_ipfw_dyn_v = NULL;
1582         V_dyn_buckets_max = 256; /* must be power of 2 */
1583         V_curr_dyn_buckets = 256; /* must be power of 2 */
1584 
1585         V_dyn_ack_lifetime = 300;
1586         V_dyn_syn_lifetime = 20;
1587         V_dyn_fin_lifetime = 1;
1588         V_dyn_rst_lifetime = 1;
1589         V_dyn_udp_lifetime = 10;
1590         V_dyn_short_lifetime = 5;
1591 
1592         V_dyn_keepalive_interval = 20;
1593         V_dyn_keepalive_period = 5;
1594         V_dyn_keepalive = 1;    /* do send keepalives */
1595 	V_dyn_keepalive_last = time_uptime;
1596 
1597         V_dyn_max = 16384; /* max # of dynamic rules */
1598 
1599 	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
1600 	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
1601 	    UMA_ALIGN_PTR, 0);
1602 
1603 	/* Enforce limit on dynamic rules */
1604 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1605 
1606         callout_init(&V_ipfw_timeout, 1);
1607 
1608 	/*
1609 	 * This can potentially be done on first dynamic rule
1610 	 * being added to chain.
1611 	 */
1612 	resize_dynamic_table(chain, V_curr_dyn_buckets);
1613 	IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
1614 }
1615 
1616 void
1617 ipfw_dyn_uninit(int pass)
1618 {
1619 	int i;
1620 
1621 	if (pass == 0) {
1622 		callout_drain(&V_ipfw_timeout);
1623 		return;
1624 	}
1625 	IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
1626 
1627 	if (V_ipfw_dyn_v != NULL) {
1628 		/*
1629 		 * Skip deleting all dynamic states -
1630 		 * uma_zdestroy() does this more efficiently.
1631 		 */
1632 
1633 		/* Destroy all mutexes */
1634 		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
1635 			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
1636 		free(V_ipfw_dyn_v, M_IPFW);
1637 		V_ipfw_dyn_v = NULL;
1638 	}
1639 
1640         uma_zdestroy(V_ipfw_dyn_rule_zone);
1641 }
1642 
1643 #ifdef SYSCTL_NODE
1644 /*
1645  * Get/set maximum number of dynamic states in given VNET instance.
1646  */
1647 static int
1648 sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
1649 {
1650 	int error;
1651 	unsigned int nstates;
1652 
1653 	nstates = V_dyn_max;
1654 
1655 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1656 	/* Read operation or some error */
1657 	if ((error != 0) || (req->newptr == NULL))
1658 		return (error);
1659 
1660 	V_dyn_max = nstates;
1661 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1662 
1663 	return (0);
1664 }
1665 
1666 /*
1667  * Get current number of dynamic states in given VNET instance.
1668  */
1669 static int
1670 sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
1671 {
1672 	int error;
1673 	unsigned int nstates;
1674 
1675 	nstates = DYN_COUNT;
1676 
1677 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1678 
1679 	return (error);
1680 }
1681 #endif
1682 
1683 /*
1684  * Returns the size of the dynamic states dump in legacy format.
1685  */
1686 int
1687 ipfw_dyn_len(void)
1688 {
1689 
1690 	return (V_ipfw_dyn_v == NULL) ? 0 :
1691 		(DYN_COUNT * sizeof(ipfw_dyn_rule));
1692 }
1693 
1694 /*
1695  * Returns number of dynamic states.
1696  * Used by dump format v1 (current).
1697  */
1698 int
1699 ipfw_dyn_get_count(void)
1700 {
1701 
1702 	return (V_ipfw_dyn_v == NULL) ? 0 : DYN_COUNT;
1703 }
1704 
1705 static void
1706 export_dyn_rule(ipfw_dyn_rule *src, ipfw_dyn_rule *dst)
1707 {
1708 	uint16_t rulenum;
1709 
1710 	rulenum = (uint16_t)src->rule->rulenum;
1711 	memcpy(dst, src, sizeof(*src));
1712 	memcpy(&dst->rule, &rulenum, sizeof(rulenum));
1713 	/*
1714 	 * store set number into high word of
1715 	 * dst->rule pointer.
1716 	 */
1717 	memcpy((char *)&dst->rule + sizeof(rulenum), &src->rule->set,
1718 	    sizeof(src->rule->set));
1719 	/*
1720 	 * store a non-null value in "next".
1721 	 * The userland code will interpret a
1722 	 * NULL here as a marker
1723 	 * for the last dynamic rule.
1724 	 */
1725 	memcpy(&dst->next, &dst, sizeof(dst));
1726 	dst->expire = TIME_LEQ(dst->expire, time_uptime) ?  0:
1727 	    dst->expire - time_uptime;
1728 }
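
/*
 * Layout sketch for the legacy export above: the kernel 'rule' pointer in
 * the exported copy is overloaded so that its first 16 bits carry the rule
 * number and the following 8 bits the set number, while a non-NULL 'next'
 * tells legacy userland that more entries follow (NULL marks the last one).
 */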
1729 
1730 /*
1731  * Fills in the buffer given by @sd with dynamic states.
1732  * Used by dump format v1 (current).
1733  *
1734  * Returns 0 on success.
1735  */
1736 int
1737 ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd)
1738 {
1739 	ipfw_dyn_rule *p;
1740 	ipfw_obj_dyntlv *dst, *last;
1741 	ipfw_obj_ctlv *ctlv;
1742 	int i;
1743 	size_t sz;
1744 
1745 	if (V_ipfw_dyn_v == NULL)
1746 		return (0);
1747 
1748 	IPFW_UH_RLOCK_ASSERT(chain);
1749 
1750 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1751 	if (ctlv == NULL)
1752 		return (ENOMEM);
1753 	sz = sizeof(ipfw_obj_dyntlv);
1754 	ctlv->head.type = IPFW_TLV_DYNSTATE_LIST;
1755 	ctlv->objsize = sz;
1756 	last = NULL;
1757 
1758 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1759 		IPFW_BUCK_LOCK(i);
1760 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1761 			dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, sz);
1762 			if (dst == NULL) {
1763 				IPFW_BUCK_UNLOCK(i);
1764 				return (ENOMEM);
1765 			}
1766 
1767 			export_dyn_rule(p, &dst->state);
1768 			dst->head.length = sz;
1769 			dst->head.type = IPFW_TLV_DYN_ENT;
1770 			last = dst;
1771 		}
1772 		IPFW_BUCK_UNLOCK(i);
1773 	}
1774 
1775 	if (last != NULL) /* mark last dynamic rule */
1776 		last->head.flags = IPFW_DF_LAST;
1777 
1778 	return (0);
1779 }
1780 
1781 /*
1782  * Fill given buffer with dynamic states (legacy format).
1783  * IPFW_UH_RLOCK has to be held while calling.
1784  */
1785 void
1786 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
1787 {
1788 	ipfw_dyn_rule *p, *last = NULL;
1789 	char *bp;
1790 	int i;
1791 
1792 	if (V_ipfw_dyn_v == NULL)
1793 		return;
1794 	bp = *pbp;
1795 
1796 	IPFW_UH_RLOCK_ASSERT(chain);
1797 
1798 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1799 		IPFW_BUCK_LOCK(i);
1800 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1801 			if (bp + sizeof *p <= ep) {
1802 				ipfw_dyn_rule *dst =
1803 					(ipfw_dyn_rule *)bp;
1804 
1805 				export_dyn_rule(p, dst);
1806 				last = dst;
1807 				bp += sizeof(ipfw_dyn_rule);
1808 			}
1809 		}
1810 		IPFW_BUCK_UNLOCK(i);
1811 	}
1812 
1813 	if (last != NULL) /* mark last dynamic rule */
1814 		bzero(&last->next, sizeof(last));
1815 	*pbp = bp;
1816 }
1817 /* end of file */