xref: /freebsd/sys/netpfil/ipfw/ip_fw_dynamic.c (revision 4ec234c813eed05c166859bba82c882e40826eb9)
1 /*-
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #define        DEB(x)
30 #define        DDB(x) x
31 
32 /*
33  * Dynamic rule support for ipfw
34  */
35 
36 #include "opt_ipfw.h"
37 #include "opt_inet.h"
38 #ifndef INET
39 #error IPFIREWALL requires INET.
40 #endif /* INET */
41 #include "opt_inet6.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/lock.h>
50 #include <sys/socket.h>
51 #include <sys/sysctl.h>
52 #include <sys/syslog.h>
53 #include <net/ethernet.h> /* for ETHERTYPE_IP */
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/vnet.h>
57 
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip_var.h>	/* ip_defttl */
61 #include <netinet/ip_fw.h>
62 #include <netinet/tcp_var.h>
63 #include <netinet/udp.h>
64 
65 #include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
66 #ifdef INET6
67 #include <netinet6/in6_var.h>
68 #include <netinet6/ip6_var.h>
69 #endif
70 
71 #include <netpfil/ipfw/ip_fw_private.h>
72 
73 #include <machine/in_cksum.h>	/* XXX for in_cksum */
74 
75 #ifdef MAC
76 #include <security/mac/mac_framework.h>
77 #endif
78 
79 /*
80  * Description of dynamic rules.
81  *
82  * Dynamic rules are stored in lists accessed through a hash table
83  * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
84  * be modified through the sysctl variable dyn_buckets, which acts
85  * as an upper bound when the table is resized.
86  *
87  * XXX currently there is only one list, ipfw_dyn.
88  *
89  * When a packet is received, its address fields are first masked
90  * with the mask defined for the rule, then hashed, then matched
91  * against the entries in the corresponding list.
92  * Dynamic rules can be used for different purposes:
93  *  + stateful rules;
94  *  + enforcing limits on the number of sessions;
95  *  + in-kernel NAT (not implemented yet)
96  *
97  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
98  * measured in seconds and depending on the flags.
99  *
100  * The total number of dynamic rules is tracked by the UMA zone's item count.
101  * The max number of dynamic rules is dyn_max. When we reach
102  * the maximum number of rules we do not create any more. This is
103  * done to avoid consuming too much memory, but also too much
104  * time when searching on each packet (ideally, we should try instead
105  * to put a limit on the length of the list on each bucket...).
106  *
107  * Each dynamic rule holds a pointer to the parent ipfw rule so
108  * we know what action to perform. Dynamic rules are removed when
109  * the parent rule is deleted. This can be changed with the
110  * dyn_keep_states sysctl.
111  *
112  * There are some limitations with dynamic rules -- we do not
113  * obey the 'randomized match', and we do not do multiple
114  * passes through the firewall. XXX check the latter!!!
115  */
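/*
 * Editorial example (not part of the original file): typical ipfw(8)
 * rules that create the dynamic states described above.  Addresses and
 * limits are illustrative only.
 *
 *	ipfw add check-state
 *	ipfw add allow tcp from me to any setup keep-state
 *	ipfw add allow tcp from any to me 22 setup limit src-addr 4
 *
 * The keep-state rule creates one O_KEEP_STATE entry per flow; the
 * limit rule additionally creates an O_LIMIT_PARENT entry keyed on the
 * source address and refuses new sessions once four O_LIMIT children
 * exist for that parent.
 */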
116 
117 struct ipfw_dyn_bucket {
118 	struct mtx	mtx;		/* Bucket protecting lock */
119 	ipfw_dyn_rule	*head;		/* Pointer to first rule */
120 };
121 
122 /*
123  * Static variables followed by global ones
124  */
125 static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
126 static VNET_DEFINE(u_int32_t, dyn_buckets_max);
127 static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
128 static VNET_DEFINE(struct callout, ipfw_timeout);
129 #define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
130 #define	V_dyn_buckets_max		VNET(dyn_buckets_max)
131 #define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
132 #define V_ipfw_timeout                  VNET(ipfw_timeout)
133 
134 static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
135 #define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)
136 
137 #define	IPFW_BUCK_LOCK_INIT(b)	\
138 	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
139 #define	IPFW_BUCK_LOCK_DESTROY(b)	\
140 	mtx_destroy(&(b)->mtx)
141 #define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
142 #define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
143 #define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)
144 
145 
146 static VNET_DEFINE(int, dyn_keep_states);
147 #define	V_dyn_keep_states		VNET(dyn_keep_states)
148 
149 /*
150  * Timeouts for various events in handling dynamic rules.
151  */
152 static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
153 static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
154 static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
155 static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
156 static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
157 static VNET_DEFINE(u_int32_t, dyn_short_lifetime);
158 
159 #define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
160 #define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
161 #define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
162 #define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
163 #define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
164 #define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)
165 
166 /*
167  * Keepalives are sent if dyn_keepalive is set. They are sent every
168  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
169  * seconds of lifetime of a rule.
170  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
171  * than dyn_keepalive_period.
172  */
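/*
 * Editorial example (using the defaults set in ipfw_dyn_init() below):
 * with dyn_keepalive_period = 5 and dyn_keepalive_interval = 20, an
 * established TCP state whose remaining lifetime drops below 20 seconds
 * is probed roughly every 5 seconds.  Typical tuning from userland:
 *
 *	sysctl net.inet.ip.fw.dyn_keepalive=1
 *	sysctl net.inet.ip.fw.dyn_ack_lifetime=300
 *
 * dyn_fin_lifetime and dyn_rst_lifetime default to 1, satisfying the
 * "strictly lower than dyn_keepalive_period" requirement above.
 */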
173 
174 static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
175 static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
176 static VNET_DEFINE(u_int32_t, dyn_keepalive);
177 static VNET_DEFINE(time_t, dyn_keepalive_last);
178 
179 #define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
180 #define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
181 #define	V_dyn_keepalive			VNET(dyn_keepalive)
182 #define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)
183 
184 static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */
185 
186 #define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
187 #define	V_dyn_max			VNET(dyn_max)
188 
189 /* for userspace, we emulate uma_zone_get_cur() with the ipfw_dyn_count variable */
190 static int ipfw_dyn_count;	/* number of objects */
191 
192 #ifdef USERSPACE /* emulation of UMA object counters for userspace */
193 #define uma_zone_get_cur(x)	ipfw_dyn_count
194 #endif /* USERSPACE */
195 
196 static int last_log;	/* Log ratelimiting */
197 
198 static void ipfw_dyn_tick(void *vnetx);
199 static void check_dyn_rules(struct ip_fw_chain *, struct ip_fw *,
200     int, int, int);
201 #ifdef SYSCTL_NODE
202 
203 static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
204 static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);
205 
206 SYSBEGIN(f2)
207 
208 SYSCTL_DECL(_net_inet_ip_fw);
209 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
210     CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
211     "Max number of dyn. buckets");
212 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
213     CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
214     "Current number of dyn. buckets");
215 SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
216     CTLTYPE_UINT|CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
217     "Number of dyn. rules");
218 SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
219     CTLTYPE_UINT|CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
220     "Max number of dyn. rules");
221 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
222     CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
223     "Lifetime of dyn. rules for acks");
224 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
225     CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
226     "Lifetime of dyn. rules for syn");
227 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
228     CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
229     "Lifetime of dyn. rules for fin");
230 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
231     CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
232     "Lifetime of dyn. rules for rst");
233 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
234     CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
235     "Lifetime of dyn. rules for UDP");
236 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
237     CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
238     "Lifetime of dyn. rules for other situations");
239 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
240     CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
241     "Enable keepalives for dyn. rules");
242 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keep_states,
243     CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0,
244     "Do not flush dynamic states on rule deletion");
245 
246 SYSEND
247 
248 #endif /* SYSCTL_NODE */
249 
250 
251 static __inline int
252 hash_packet6(struct ipfw_flow_id *id)
253 {
254 	u_int32_t i;
255 	i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
256 	    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
257 	    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
258 	    (id->src_ip6.__u6_addr.__u6_addr32[3]) ^
259 	    (id->dst_port) ^ (id->src_port);
260 	return i;
261 }
262 
263 /*
264  * IMPORTANT: the hash function for dynamic rules must be commutative
265  * in source and destination (ip,port), because rules are bidirectional
266  * and we want to find both in the same bucket.
267  */
268 static __inline int
269 hash_packet(struct ipfw_flow_id *id, int buckets)
270 {
271 	u_int32_t i;
272 
273 #ifdef INET6
274 	if (IS_IP6_FLOW_ID(id))
275 		i = hash_packet6(id);
276 	else
277 #endif /* INET6 */
278 	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
279 	i &= (buckets - 1);
280 	return i;
281 }
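/*
 * Editorial sketch (not part of the original file, compiled out): the
 * commutativity property stated above can be illustrated as follows;
 * a flow id and its reverse must hash to the same bucket.
 */
#if 0
static void
hash_packet_example(void)
{
	struct ipfw_flow_id fwd, rev;

	bzero(&fwd, sizeof(fwd));
	fwd.addr_type = 4;			/* IPv4 flow */
	fwd.proto = IPPROTO_TCP;
	fwd.src_ip = 0x0a000001;		/* 10.0.0.1, host order */
	fwd.dst_ip = 0x0a000002;		/* 10.0.0.2, host order */
	fwd.src_port = 12345;
	fwd.dst_port = 80;

	rev = fwd;				/* reverse direction */
	rev.src_ip = fwd.dst_ip;
	rev.dst_ip = fwd.src_ip;
	rev.src_port = fwd.dst_port;
	rev.dst_port = fwd.src_port;

	KASSERT(hash_packet(&fwd, 256) == hash_packet(&rev, 256),
	    ("hash_packet() must be commutative in src/dst"));
}
#endif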
282 
283 /**
284  * Print customizable flow id description via log(9) facility.
285  */
286 static void
287 print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags,
288     char *prefix, char *postfix)
289 {
290 	struct in_addr da;
291 #ifdef INET6
292 	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
293 #else
294 	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
295 #endif
296 
297 #ifdef INET6
298 	if (IS_IP6_FLOW_ID(id)) {
299 		ip6_sprintf(src, &id->src_ip6);
300 		ip6_sprintf(dst, &id->dst_ip6);
301 	} else
302 #endif
303 	{
304 		da.s_addr = htonl(id->src_ip);
305 		inet_ntop(AF_INET, &da, src, sizeof(src));
306 		da.s_addr = htonl(id->dst_ip);
307 		inet_ntop(AF_INET, &da, dst, sizeof(dst));
308 	}
309 	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
310 	    prefix, dyn_type, src, id->src_port, dst,
311 	    id->dst_port, DYN_COUNT, postfix);
312 }
313 
314 #define	print_dyn_rule(id, dtype, prefix, postfix)	\
315 	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)
316 
317 #define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
318 #define TIME_LE(a,b)       ((int)((a)-(b)) < 0)
319 
320 /*
321  * Lookup a dynamic rule, locked version.
322  */
323 static ipfw_dyn_rule *
324 lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction,
325     struct tcphdr *tcp)
326 {
327 	/*
328 	 * Stateful ipfw extensions.
329 	 * Lookup into dynamic session queue.
330 	 */
331 #define MATCH_REVERSE	0
332 #define MATCH_FORWARD	1
333 #define MATCH_NONE	2
334 #define MATCH_UNKNOWN	3
335 	int dir = MATCH_NONE;
336 	ipfw_dyn_rule *prev, *q = NULL;
337 
338 	IPFW_BUCK_ASSERT(i);
339 
340 	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
341 		if (q->dyn_type == O_LIMIT_PARENT && q->count)
342 			continue;
343 
344 		if (pkt->proto != q->id.proto || q->dyn_type == O_LIMIT_PARENT)
345 			continue;
346 
347 		if (IS_IP6_FLOW_ID(pkt)) {
348 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
349 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
350 			    pkt->src_port == q->id.src_port &&
351 			    pkt->dst_port == q->id.dst_port) {
352 				dir = MATCH_FORWARD;
353 				break;
354 			}
355 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
356 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
357 			    pkt->src_port == q->id.dst_port &&
358 			    pkt->dst_port == q->id.src_port) {
359 				dir = MATCH_REVERSE;
360 				break;
361 			}
362 		} else {
363 			if (pkt->src_ip == q->id.src_ip &&
364 			    pkt->dst_ip == q->id.dst_ip &&
365 			    pkt->src_port == q->id.src_port &&
366 			    pkt->dst_port == q->id.dst_port) {
367 				dir = MATCH_FORWARD;
368 				break;
369 			}
370 			if (pkt->src_ip == q->id.dst_ip &&
371 			    pkt->dst_ip == q->id.src_ip &&
372 			    pkt->src_port == q->id.dst_port &&
373 			    pkt->dst_port == q->id.src_port) {
374 				dir = MATCH_REVERSE;
375 				break;
376 			}
377 		}
378 	}
379 	if (q == NULL)
380 		goto done;	/* q = NULL, not found */
381 
382 	if (prev != NULL) {	/* found and not in front */
383 		prev->next = q->next;
384 		q->next = V_ipfw_dyn_v[i].head;
385 		V_ipfw_dyn_v[i].head = q;
386 	}
387 	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
388 		uint32_t ack;
389 		u_char flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);
390 
391 #define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
392 #define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
393 #define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
394 #define	ACK_FWD		0x10000			/* fwd ack seen */
395 #define	ACK_REV		0x20000			/* rev ack seen */
396 
397 		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
398 		switch (q->state & TCP_FLAGS) {
399 		case TH_SYN:			/* opening */
400 			q->expire = time_uptime + V_dyn_syn_lifetime;
401 			break;
402 
403 		case BOTH_SYN:			/* move to established */
404 		case BOTH_SYN | TH_FIN:		/* one side tries to close */
405 		case BOTH_SYN | (TH_FIN << 8):
406 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
407 			if (tcp == NULL)
408 				break;
409 
410 			ack = ntohl(tcp->th_ack);
411 			if (dir == MATCH_FORWARD) {
412 				if (q->ack_fwd == 0 ||
413 				    _SEQ_GE(ack, q->ack_fwd)) {
414 					q->ack_fwd = ack;
415 					q->state |= ACK_FWD;
416 				}
417 			} else {
418 				if (q->ack_rev == 0 ||
419 				    _SEQ_GE(ack, q->ack_rev)) {
420 					q->ack_rev = ack;
421 					q->state |= ACK_REV;
422 				}
423 			}
424 			if ((q->state & (ACK_FWD | ACK_REV)) ==
425 			    (ACK_FWD | ACK_REV)) {
426 				q->expire = time_uptime + V_dyn_ack_lifetime;
427 				q->state &= ~(ACK_FWD | ACK_REV);
428 			}
429 			break;
430 
431 		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
432 			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
433 				V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
434 			q->expire = time_uptime + V_dyn_fin_lifetime;
435 			break;
436 
437 		default:
438 #if 0
439 			/*
440 			 * reset or some invalid combination, but can also
441 			 * occur if we use keep-state the wrong way.
442 			 */
443 			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
444 				printf("invalid state: 0x%x\n", q->state);
445 #endif
446 			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
447 				V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
448 			q->expire = time_uptime + V_dyn_rst_lifetime;
449 			break;
450 		}
451 	} else if (pkt->proto == IPPROTO_UDP) {
452 		q->expire = time_uptime + V_dyn_udp_lifetime;
453 	} else {
454 		/* other protocols */
455 		q->expire = time_uptime + V_dyn_short_lifetime;
456 	}
457 done:
458 	if (match_direction != NULL)
459 		*match_direction = dir;
460 	return (q);
461 }
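/*
 * Editorial note (worked example for the switch above): for a normal
 * TCP handshake the entry starts with TH_SYN only (dyn_syn_lifetime),
 * reaches BOTH_SYN once the reverse SYN|ACK is seen, and is promoted to
 * dyn_ack_lifetime only after ACKs have been observed in both
 * directions (ACK_FWD | ACK_REV).  A FIN from each side selects the
 * BOTH_SYN | BOTH_FIN case and the short dyn_fin_lifetime.
 */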
462 
463 ipfw_dyn_rule *
464 ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
465     struct tcphdr *tcp)
466 {
467 	ipfw_dyn_rule *q;
468 	int i;
469 
470 	i = hash_packet(pkt, V_curr_dyn_buckets);
471 
472 	IPFW_BUCK_LOCK(i);
473 	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp);
474 	if (q == NULL)
475 		IPFW_BUCK_UNLOCK(i);
476 	/* NB: the bucket is returned locked when q is not NULL */
477 	return q;
478 }
479 
480 /*
481  * Unlock bucket mtx.
482  * @q - pointer to dynamic rule
483  */
484 void
485 ipfw_dyn_unlock(ipfw_dyn_rule *q)
486 {
487 
488 	IPFW_BUCK_UNLOCK(q->bucket);
489 }
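/*
 * Editorial sketch (not part of the original file, compiled out): the
 * expected calling sequence for the two functions above.  "pkt" and
 * "tcp" stand for whatever flow id and TCP header the caller has
 * parsed.  The bucket lock is returned held whenever a state is found,
 * so the caller must release it with ipfw_dyn_unlock().
 */
#if 0
	{
		ipfw_dyn_rule *q;
		int dir;

		q = ipfw_lookup_dyn_rule(pkt, &dir, tcp);
		if (q != NULL) {
			/* ... act on q->rule, q->state and dir ... */
			ipfw_dyn_unlock(q);
		}
	}
#endif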
490 
491 static int
492 resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
493 {
494 	int i, k, nbuckets_old;
495 	ipfw_dyn_rule *q;
496 	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;
497 
498 	/* Check if given number is a power of 2 and does not exceed 64k */
499 	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
500 		return 1;
501 
502 	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
503 	    V_curr_dyn_buckets, nbuckets);
504 
505 	/* Allocate and initialize new hash */
506 	dyn_v = malloc(nbuckets * sizeof(struct ipfw_dyn_bucket), M_IPFW,
507 	    M_WAITOK | M_ZERO);
508 
509 	for (i = 0 ; i < nbuckets; i++)
510 		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);
511 
512 	/*
513 	 * Take the upper half lock, as get_map() does, to ease
514 	 * read-only access to the dynamic rules hash from sysctl.
515 	 */
516 	IPFW_UH_WLOCK(chain);
517 
518 	/*
519 	 * Acquire chain write lock to permit hash access
520 	 * for main traffic path without additional locks
521 	 */
522 	IPFW_WLOCK(chain);
523 
524 	/* Save old values */
525 	nbuckets_old = V_curr_dyn_buckets;
526 	dyn_v_old = V_ipfw_dyn_v;
527 
528 	/* Skip relinking if array is not set up */
529 	if (V_ipfw_dyn_v == NULL)
530 		V_curr_dyn_buckets = 0;
531 
532 	/* Re-link all dynamic states */
533 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
534 		while (V_ipfw_dyn_v[i].head != NULL) {
535 			/* Remove from current chain */
536 			q = V_ipfw_dyn_v[i].head;
537 			V_ipfw_dyn_v[i].head = q->next;
538 
539 			/* Get new hash value */
540 			k = hash_packet(&q->id, nbuckets);
541 			q->bucket = k;
542 			/* Add to the new head */
543 			q->next = dyn_v[k].head;
544 			dyn_v[k].head = q;
545 		}
546 	}
547 
548 	/* Update current pointers/buckets values */
549 	V_curr_dyn_buckets = nbuckets;
550 	V_ipfw_dyn_v = dyn_v;
551 
552 	IPFW_WUNLOCK(chain);
553 
554 	IPFW_UH_WUNLOCK(chain);
555 
556 	/* Start periodic callout on initial creation */
557 	if (dyn_v_old == NULL) {
558 		callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
559 		return (0);
560 	}
561 
562 	/* Destroy all mutexes */
563 	for (i = 0 ; i < nbuckets_old ; i++)
564 		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);
565 
566 	/* Free old hash */
567 	free(dyn_v_old, M_IPFW);
568 
569 	return 0;
570 }
571 
572 /**
573  * Install state of type 'type' for a dynamic session.
574  * The hash table contains three types of rules:
575  * - regular rules (O_KEEP_STATE);
576  * - rules for sessions with a limited number of sessions per user
577  *   (O_LIMIT). When one is created, the parent's counter is
578  *   incremented by 1, and decremented on delete. In this case,
579  *   the 'rule' argument is the parent rule and not the chain;
580  * - "parent" rules for the above (O_LIMIT_PARENT).
581  */
582 static ipfw_dyn_rule *
583 add_dyn_rule(struct ipfw_flow_id *id, int i, u_int8_t dyn_type, struct ip_fw *rule)
584 {
585 	ipfw_dyn_rule *r;
586 
587 	IPFW_BUCK_ASSERT(i);
588 
589 	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
590 	if (r == NULL) {
591 		if (last_log != time_uptime) {
592 			last_log = time_uptime;
593 			log(LOG_DEBUG, "ipfw: %s: Cannot allocate rule\n",
594 			    __func__);
595 		}
596 		return NULL;
597 	}
598 	ipfw_dyn_count++;
599 
600 	/*
601 	 * refcount on parent is already incremented, so
602 	 * it is safe to use parent unlocked.
603 	 */
604 	if (dyn_type == O_LIMIT) {
605 		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
606 		if ( parent->dyn_type != O_LIMIT_PARENT)
607 			panic("invalid parent");
608 		r->parent = parent;
609 		rule = parent->rule;
610 	}
611 
612 	r->id = *id;
613 	r->expire = time_uptime + V_dyn_syn_lifetime;
614 	r->rule = rule;
615 	r->dyn_type = dyn_type;
616 	IPFW_ZERO_DYN_COUNTER(r);
617 	r->count = 0;
618 
619 	r->bucket = i;
620 	r->next = V_ipfw_dyn_v[i].head;
621 	V_ipfw_dyn_v[i].head = r;
622 	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
623 	return r;
624 }
625 
626 /**
627  * Look up a dynamic parent rule using pkt and rule as search keys.
628  * If the lookup fails, then install one.
629  */
630 static ipfw_dyn_rule *
631 lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule)
632 {
633 	ipfw_dyn_rule *q;
634 	int i, is_v6;
635 
636 	is_v6 = IS_IP6_FLOW_ID(pkt);
637 	i = hash_packet( pkt, V_curr_dyn_buckets );
638 	*pindex = i;
639 	IPFW_BUCK_LOCK(i);
640 	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
641 		if (q->dyn_type == O_LIMIT_PARENT &&
642 		    rule == q->rule &&
643 		    pkt->proto == q->id.proto &&
644 		    pkt->src_port == q->id.src_port &&
645 		    pkt->dst_port == q->id.dst_port &&
646 		    (
647 			(is_v6 &&
648 			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
649 				&(q->id.src_ip6)) &&
650 			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
651 				&(q->id.dst_ip6))) ||
652 			(!is_v6 &&
653 			 pkt->src_ip == q->id.src_ip &&
654 			 pkt->dst_ip == q->id.dst_ip)
655 		    )
656 		) {
657 			q->expire = time_uptime + V_dyn_short_lifetime;
658 			DEB(print_dyn_rule(pkt, q->dyn_type,
659 			    "lookup_dyn_parent found", "");)
660 			return q;
661 		}
662 
663 	/* Add virtual limiting rule */
664 	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule);
665 }
666 
667 /**
668  * Install dynamic state for rule type cmd->o.opcode
669  *
670  * Returns 1 (failure) if state is not installed because of errors or because
671  * session limitations are enforced.
672  */
673 int
674 ipfw_install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
675     struct ip_fw_args *args, uint32_t tablearg)
676 {
677 	ipfw_dyn_rule *q;
678 	int i;
679 
680 	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", "");)
681 
682 	i = hash_packet(&args->f_id, V_curr_dyn_buckets);
683 
684 	IPFW_BUCK_LOCK(i);
685 
686 	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
687 
688 	if (q != NULL) {	/* should never occur */
689 		DEB(
690 		if (last_log != time_uptime) {
691 			last_log = time_uptime;
692 			printf("ipfw: %s: entry already present, done\n",
693 			    __func__);
694 		})
695 		IPFW_BUCK_UNLOCK(i);
696 		return (0);
697 	}
698 
699 	/*
700 	 * State limiting is done via uma(9) zone limiting.
701 	 * Save pointer to newly-installed rule and reject
702 	 * packet if add_dyn_rule() returned NULL.
703 	 * Note q is currently set to NULL.
704 	 */
705 
706 	switch (cmd->o.opcode) {
707 	case O_KEEP_STATE:	/* bidir rule */
708 		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule);
709 		break;
710 
711 	case O_LIMIT: {		/* limit number of sessions */
712 		struct ipfw_flow_id id;
713 		ipfw_dyn_rule *parent;
714 		uint32_t conn_limit;
715 		uint16_t limit_mask = cmd->limit_mask;
716 		int pindex;
717 
718 		conn_limit = IP_FW_ARG_TABLEARG(cmd->conn_limit);
719 
720 		DEB(
721 		if (cmd->conn_limit == IP_FW_TABLEARG)
722 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
723 			    "(tablearg)\n", __func__, conn_limit);
724 		else
725 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
726 			    __func__, conn_limit);
727 		)
728 
729 		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
730 		id.proto = args->f_id.proto;
731 		id.addr_type = args->f_id.addr_type;
732 		id.fib = M_GETFIB(args->m);
733 
734 		if (IS_IP6_FLOW_ID (&(args->f_id))) {
735 			if (limit_mask & DYN_SRC_ADDR)
736 				id.src_ip6 = args->f_id.src_ip6;
737 			if (limit_mask & DYN_DST_ADDR)
738 				id.dst_ip6 = args->f_id.dst_ip6;
739 		} else {
740 			if (limit_mask & DYN_SRC_ADDR)
741 				id.src_ip = args->f_id.src_ip;
742 			if (limit_mask & DYN_DST_ADDR)
743 				id.dst_ip = args->f_id.dst_ip;
744 		}
745 		if (limit_mask & DYN_SRC_PORT)
746 			id.src_port = args->f_id.src_port;
747 		if (limit_mask & DYN_DST_PORT)
748 			id.dst_port = args->f_id.dst_port;
749 
750 		/*
751 		 * We have to release lock for previous bucket to
752 		 * avoid possible deadlock
753 		 */
754 		IPFW_BUCK_UNLOCK(i);
755 
756 		if ((parent = lookup_dyn_parent(&id, &pindex, rule)) == NULL) {
757 			printf("ipfw: %s: add parent failed\n", __func__);
758 			IPFW_BUCK_UNLOCK(pindex);
759 			return (1);
760 		}
761 
762 		if (parent->count >= conn_limit) {
763 			if (V_fw_verbose && last_log != time_uptime) {
764 				char sbuf[24];
765 
766 				last_log = time_uptime;
767 				snprintf(sbuf, sizeof(sbuf),
768 				    "%d drop session",
769 				    parent->rule->rulenum);
770 				print_dyn_rule_flags(&args->f_id,
771 				    cmd->o.opcode,
772 				    LOG_SECURITY | LOG_DEBUG,
773 				    sbuf, "too many entries");
774 			}
775 			IPFW_BUCK_UNLOCK(pindex);
776 			return (1);
777 		}
778 		/* Increment counter on parent */
779 		parent->count++;
780 		IPFW_BUCK_UNLOCK(pindex);
781 
782 		IPFW_BUCK_LOCK(i);
783 		q = add_dyn_rule(&args->f_id, i, O_LIMIT, (struct ip_fw *)parent);
784 		if (q == NULL) {
785 			/* Decrement parent's counter and notify caller */
786 			IPFW_BUCK_UNLOCK(i);
787 			IPFW_BUCK_LOCK(pindex);
788 			parent->count--;
789 			IPFW_BUCK_UNLOCK(pindex);
790 			return (1);
791 		}
792 		break;
793 	}
794 	default:
795 		printf("ipfw: %s: unknown dynamic rule type %u\n",
796 		    __func__, cmd->o.opcode);
797 	}
798 
799 	if (q == NULL) {
800 		IPFW_BUCK_UNLOCK(i);
801 		return (1);	/* Notify caller about failure */
802 	}
803 
804 	/* XXX just set lifetime */
805 	lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
806 
807 	IPFW_BUCK_UNLOCK(i);
808 	return (0);
809 }
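/*
 * Editorial sketch (not part of the original file, compiled out): for a
 * rule such as "allow tcp from any to me 22 setup limit src-addr 4" the
 * O_LIMIT branch above builds the parent key for a packet
 * 10.0.0.1:1234 -> 192.0.2.7:22 roughly as follows; only the fields
 * selected by limit_mask survive the masking.
 */
#if 0
	id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
	id.proto = IPPROTO_TCP;
	id.addr_type = 4;
	id.src_ip = args->f_id.src_ip;	/* 10.0.0.1: DYN_SRC_ADDR is set */
	/* dst_ip, src_port and dst_port stay 0: not part of the mask */
#endif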
810 
811 /*
812  * Generate a TCP packet, containing either a RST or a keepalive.
813  * When flags & TH_RST, we are sending a RST packet because a
814  * "reset" action matched the packet.  Otherwise we are sending a
815  * keepalive, i.e. an ACK-only segment using the caller-supplied seq/ack.
816  * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
817  * so that MAC can label the reply appropriately.
818  */
819 struct mbuf *
820 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
821     u_int32_t ack, int flags)
822 {
823 	struct mbuf *m = NULL;		/* stupid compiler */
824 	int len, dir;
825 	struct ip *h = NULL;		/* stupid compiler */
826 #ifdef INET6
827 	struct ip6_hdr *h6 = NULL;
828 #endif
829 	struct tcphdr *th = NULL;
830 
831 	MGETHDR(m, M_NOWAIT, MT_DATA);
832 	if (m == NULL)
833 		return (NULL);
834 
835 	M_SETFIB(m, id->fib);
836 #ifdef MAC
837 	if (replyto != NULL)
838 		mac_netinet_firewall_reply(replyto, m);
839 	else
840 		mac_netinet_firewall_send(m);
841 #else
842 	(void)replyto;		/* don't warn about unused arg */
843 #endif
844 
845 	switch (id->addr_type) {
846 	case 4:
847 		len = sizeof(struct ip) + sizeof(struct tcphdr);
848 		break;
849 #ifdef INET6
850 	case 6:
851 		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
852 		break;
853 #endif
854 	default:
855 		/* XXX: log me?!? */
856 		FREE_PKT(m);
857 		return (NULL);
858 	}
859 	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
860 
861 	m->m_data += max_linkhdr;
862 	m->m_flags |= M_SKIP_FIREWALL;
863 	m->m_pkthdr.len = m->m_len = len;
864 	m->m_pkthdr.rcvif = NULL;
865 	bzero(m->m_data, len);
866 
867 	switch (id->addr_type) {
868 	case 4:
869 		h = mtod(m, struct ip *);
870 
871 		/* prepare for checksum */
872 		h->ip_p = IPPROTO_TCP;
873 		h->ip_len = htons(sizeof(struct tcphdr));
874 		if (dir) {
875 			h->ip_src.s_addr = htonl(id->src_ip);
876 			h->ip_dst.s_addr = htonl(id->dst_ip);
877 		} else {
878 			h->ip_src.s_addr = htonl(id->dst_ip);
879 			h->ip_dst.s_addr = htonl(id->src_ip);
880 		}
881 
882 		th = (struct tcphdr *)(h + 1);
883 		break;
884 #ifdef INET6
885 	case 6:
886 		h6 = mtod(m, struct ip6_hdr *);
887 
888 		/* prepare for checksum */
889 		h6->ip6_nxt = IPPROTO_TCP;
890 		h6->ip6_plen = htons(sizeof(struct tcphdr));
891 		if (dir) {
892 			h6->ip6_src = id->src_ip6;
893 			h6->ip6_dst = id->dst_ip6;
894 		} else {
895 			h6->ip6_src = id->dst_ip6;
896 			h6->ip6_dst = id->src_ip6;
897 		}
898 
899 		th = (struct tcphdr *)(h6 + 1);
900 		break;
901 #endif
902 	}
903 
904 	if (dir) {
905 		th->th_sport = htons(id->src_port);
906 		th->th_dport = htons(id->dst_port);
907 	} else {
908 		th->th_sport = htons(id->dst_port);
909 		th->th_dport = htons(id->src_port);
910 	}
911 	th->th_off = sizeof(struct tcphdr) >> 2;
912 
913 	if (flags & TH_RST) {
914 		if (flags & TH_ACK) {
915 			th->th_seq = htonl(ack);
916 			th->th_flags = TH_RST;
917 		} else {
918 			if (flags & TH_SYN)
919 				seq++;
920 			th->th_ack = htonl(seq);
921 			th->th_flags = TH_RST | TH_ACK;
922 		}
923 	} else {
924 		/*
925 		 * Keepalive - use caller provided sequence numbers
926 		 */
927 		th->th_seq = htonl(seq);
928 		th->th_ack = htonl(ack);
929 		th->th_flags = TH_ACK;
930 	}
931 
932 	switch (id->addr_type) {
933 	case 4:
934 		th->th_sum = in_cksum(m, len);
935 
936 		/* finish the ip header */
937 		h->ip_v = 4;
938 		h->ip_hl = sizeof(*h) >> 2;
939 		h->ip_tos = IPTOS_LOWDELAY;
940 		h->ip_off = htons(0);
941 		h->ip_len = htons(len);
942 		h->ip_ttl = V_ip_defttl;
943 		h->ip_sum = 0;
944 		break;
945 #ifdef INET6
946 	case 6:
947 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
948 		    sizeof(struct tcphdr));
949 
950 		/* finish the ip6 header */
951 		h6->ip6_vfc |= IPV6_VERSION;
952 		h6->ip6_hlim = IPV6_DEFHLIM;
953 		break;
954 #endif
955 	}
956 
957 	return (m);
958 }
959 
960 /*
961  * Queue keepalive packets for given dynamic rule
962  */
963 static struct mbuf **
964 ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
965 {
966 	struct mbuf *m_rev, *m_fwd;
967 
968 	m_rev = (q->state & ACK_REV) ? NULL :
969 	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
970 	m_fwd = (q->state & ACK_FWD) ? NULL :
971 	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
972 
973 	if (m_rev != NULL) {
974 		*mtailp = m_rev;
975 		mtailp = &(*mtailp)->m_nextpkt;
976 	}
977 	if (m_fwd != NULL) {
978 		*mtailp = m_fwd;
979 		mtailp = &(*mtailp)->m_nextpkt;
980 	}
981 
982 	return (mtailp);
983 }
984 
985 /*
986  * This procedure is used to perform various maintenance
987  * on the dynamic hash list. Currently it is called every second.
988  */
989 static void
990 ipfw_dyn_tick(void * vnetx)
991 {
992 	struct ip_fw_chain *chain;
993 	int check_ka = 0;
994 #ifdef VIMAGE
995 	struct vnet *vp = vnetx;
996 #endif
997 
998 	CURVNET_SET(vp);
999 
1000 	chain = &V_layer3_chain;
1001 
1002 	/* Run keepalive checks every keepalive_period iff ka is enabled */
1003 	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
1004 	    (V_dyn_keepalive != 0)) {
1005 		V_dyn_keepalive_last = time_uptime;
1006 		check_ka = 1;
1007 	}
1008 
1009 	check_dyn_rules(chain, NULL, RESVD_SET, check_ka, 1);
1010 
1011 	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);
1012 
1013 	CURVNET_RESTORE();
1014 }
1015 
1016 
1017 /*
1018  * Walk through all dynamic states doing generic maintenance:
1019  * 1) free expired states
1020  * 2) free all states based on deleted rule / set
1021  * 3) send keepalives for states if needed
1022  *
1023  * @chain - pointer to current ipfw rules chain
1024  * @rule - delete all states originated by given rule if != NULL
1025  * @set - delete all states originated by any rule in set @set if != RESVD_SET
1026  * @check_ka - perform checking/sending keepalives
1027  * @timer - indicate call from timer routine.
1028  *
1029  * Timer routine must call this function unlocked to permit
1030  * sending keepalives/resizing table.
1031  *
1032  * Other callers have to call this function with IPFW_UH_WLOCK held.
1033  * Additionally, the function assumes that the dynamic rule/set is
1034  * ALREADY deleted so no new states can be generated by
1035  * 'deleted' rules.
1036  *
1037  * Write lock is needed to ensure that unused parent rules
1038  * are not freed by another instance (see stages 2 and 3).
1039  */
1040 static void
1041 check_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule,
1042     int set, int check_ka, int timer)
1043 {
1044 	struct mbuf *m0, *m, *mnext, **mtailp;
1045 	struct ip *h;
1046 	int i, dyn_count, new_buckets = 0, max_buckets;
1047 	int expired = 0, expired_limits = 0, parents = 0, total = 0;
1048 	ipfw_dyn_rule *q, *q_prev, *q_next;
1049 	ipfw_dyn_rule *exp_head, **exptailp;
1050 	ipfw_dyn_rule *exp_lhead, **expltailp;
1051 
1052 	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
1053 	    __func__));
1054 
1055 	/* Avoid possible LOR */
1056 	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
1057 	    __func__));
1058 
1059 	/*
1060 	 * Do not perform any checks if we currently have no dynamic states
1061 	 */
1062 	if (DYN_COUNT == 0)
1063 		return;
1064 
1065 	/* Expired states */
1066 	exp_head = NULL;
1067 	exptailp = &exp_head;
1068 
1069 	/* Expired limit states */
1070 	exp_lhead = NULL;
1071 	expltailp = &exp_lhead;
1072 
1073 	/*
1074 	 * We make a chain of packets to go out here -- not deferring
1075 	 * until after we drop the IPFW dynamic rule lock would result
1076 	 * in a lock order reversal with the normal packet input -> ipfw
1077 	 * call stack.
1078 	 */
1079 	m0 = NULL;
1080 	mtailp = &m0;
1081 
1082 	/* Protect from hash resizing */
1083 	if (timer != 0)
1084 		IPFW_UH_WLOCK(chain);
1085 	else
1086 		IPFW_UH_WLOCK_ASSERT(chain);
1087 
1088 #define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }
1089 
1090 	/* Stage 1: perform requested deletion */
1091 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1092 		IPFW_BUCK_LOCK(i);
1093 		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
1094 			/* account every rule */
1095 			total++;
1096 
1097 			/* Skip parent rules entirely */
1098 			if (q->dyn_type == O_LIMIT_PARENT) {
1099 				parents++;
1100 				NEXT_RULE();
1101 			}
1102 
1103 			/*
1104 			 * Remove rules which are:
1105 			 * 1) expired
1106 			 * 2) created by given rule
1107 			 * 3) created by any rule in given set
1108 			 */
1109 			if ((TIME_LEQ(q->expire, time_uptime)) ||
1110 			    ((rule != NULL) && (q->rule == rule)) ||
1111 			    ((set != RESVD_SET) && (q->rule->set == set))) {
1112 				if (TIME_LE(time_uptime, q->expire) &&
1113 				    q->dyn_type == O_KEEP_STATE &&
1114 				    V_dyn_keep_states != 0) {
1115 					/*
1116 					 * Do not delete state if
1117 					 * it is not expired and
1118 					 * dyn_keep_states is ON.
1119 					 * However we need to re-link it
1120 					 * to a stable rule (the default rule).
1121 					 */
1122 					q->rule = chain->default_rule;
1123 					NEXT_RULE();
1124 				}
1125 
1126 				/* Unlink q from current list */
1127 				q_next = q->next;
1128 				if (q == V_ipfw_dyn_v[i].head)
1129 					V_ipfw_dyn_v[i].head = q_next;
1130 				else
1131 					q_prev->next = q_next;
1132 
1133 				q->next = NULL;
1134 
1135 				/* queue q to expire list */
1136 				if (q->dyn_type != O_LIMIT) {
1137 					*exptailp = q;
1138 					exptailp = &(*exptailp)->next;
1139 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1140 					    "unlink entry", "left");
1141 					)
1142 				} else {
1143 					/* Separate list for limit rules */
1144 					*expltailp = q;
1145 					expltailp = &(*expltailp)->next;
1146 					expired_limits++;
1147 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1148 					    "unlink limit entry", "left");
1149 					)
1150 				}
1151 
1152 				q = q_next;
1153 				expired++;
1154 				continue;
1155 			}
1156 
1157 			/*
1158 			 * Check if we need to send keepalive:
1159 			 * we need to ensure it is time to do KA,
1160 			 * this is an established TCP session, and
1161 			 * the expire time is within the keepalive interval
1162 			 */
1163 			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
1164 			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
1165 			    (TIME_LEQ(q->expire, time_uptime +
1166 			      V_dyn_keepalive_interval)))
1167 				mtailp = ipfw_dyn_send_ka(mtailp, q);
1168 
1169 			NEXT_RULE();
1170 		}
1171 		IPFW_BUCK_UNLOCK(i);
1172 	}
1173 
1174 	/* Stage 2: decrement counters from O_LIMIT parents */
1175 	if (expired_limits != 0) {
1176 		/*
1177 		 * XXX: Note that deleting a set with more than one
1178 		 * heavily-used LIMIT rule can result in overwhelming
1179 		 * locking due to the lack of per-hash value sorting.
1180 		 *
1181 		 * We should probably think about:
1182 		 * 1) pre-allocating hash of size, say,
1183 		 * MAX(16, V_curr_dyn_buckets / 1024)
1184 		 * 2) checking if expired_limits is large enough
1185 		 * 3) If yes, init hash (or its part), re-link
1186 		 * current list and start decrementing procedure in
1187 		 * each bucket separately
1188 		 */
1189 
1190 		/*
1191 		 * Small optimization: do not unlock bucket until
1192 		 * we see that the next item resides in a different bucket.
1193 		 */
1194 		if (exp_lhead != NULL) {
1195 			i = exp_lhead->parent->bucket;
1196 			IPFW_BUCK_LOCK(i);
1197 		}
1198 		for (q = exp_lhead; q != NULL; q = q->next) {
1199 			if (i != q->parent->bucket) {
1200 				IPFW_BUCK_UNLOCK(i);
1201 				i = q->parent->bucket;
1202 				IPFW_BUCK_LOCK(i);
1203 			}
1204 
1205 			/* Decrease parent refcount */
1206 			q->parent->count--;
1207 		}
1208 		if (exp_lhead != NULL)
1209 			IPFW_BUCK_UNLOCK(i);
1210 	}
1211 
1212 	/*
1213 	 * We protect ourselves from unused parent deletion
1214 	 * (from the timer function) by holding UH write lock.
1215 	 */
1216 
1217 	/* Stage 3: remove unused parent rules */
1218 	if ((parents != 0) && (expired != 0)) {
1219 		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1220 			IPFW_BUCK_LOCK(i);
1221 			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
1222 				if (q->dyn_type != O_LIMIT_PARENT)
1223 					NEXT_RULE();
1224 
1225 				if (q->count != 0)
1226 					NEXT_RULE();
1227 
1228 				/* Parent rule without consumers */
1229 
1230 				/* Unlink q from current list */
1231 				q_next = q->next;
1232 				if (q == V_ipfw_dyn_v[i].head)
1233 					V_ipfw_dyn_v[i].head = q_next;
1234 				else
1235 					q_prev->next = q_next;
1236 
1237 				q->next = NULL;
1238 
1239 				/* Add to expired list */
1240 				*exptailp = q;
1241 				exptailp = &(*exptailp)->next;
1242 
1243 				DEB(print_dyn_rule(&q->id, q->dyn_type,
1244 				    "unlink parent entry", "left");
1245 				)
1246 
1247 				expired++;
1248 
1249 				q = q_next;
1250 			}
1251 			IPFW_BUCK_UNLOCK(i);
1252 		}
1253 	}
1254 
1255 #undef NEXT_RULE
1256 
1257 	if (timer != 0) {
1258 		/*
1259 		 * Check if we need to resize hash:
1260 		 * if the current number of states exceeds twice the number of
1261 		 * buckets, grow the hash to the minimum power of 2 which is
1262 		 * bigger than the current states count. Limit hash size to 64k.
1263 		 */
1264 		max_buckets = (V_dyn_buckets_max > 65536) ?
1265 		    65536 : V_dyn_buckets_max;
1266 
1267 		dyn_count = DYN_COUNT;
1268 
1269 		if ((dyn_count > V_curr_dyn_buckets * 2) &&
1270 		    (dyn_count < max_buckets)) {
1271 			new_buckets = V_curr_dyn_buckets;
1272 			while (new_buckets < dyn_count) {
1273 				new_buckets *= 2;
1274 
1275 				if (new_buckets >= max_buckets)
1276 					break;
1277 			}
1278 		}
1279 
1280 		IPFW_UH_WUNLOCK(chain);
1281 	}
1282 
1283 	/* Finally delete old states and limits, if any */
1284 	for (q = exp_head; q != NULL; q = q_next) {
1285 		q_next = q->next;
1286 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1287 		ipfw_dyn_count--;
1288 	}
1289 
1290 	for (q = exp_lhead; q != NULL; q = q_next) {
1291 		q_next = q->next;
1292 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1293 		ipfw_dyn_count--;
1294 	}
1295 
1296 	/*
1297 	 * The rest of the code MUST be called from the timer routine only,
1298 	 * without holding any locks.
1299 	 */
1300 	if (timer == 0)
1301 		return;
1302 
1303 	/* Send keepalive packets if any */
1304 	for (m = m0; m != NULL; m = mnext) {
1305 		mnext = m->m_nextpkt;
1306 		m->m_nextpkt = NULL;
1307 		h = mtod(m, struct ip *);
1308 		if (h->ip_v == 4)
1309 			ip_output(m, NULL, NULL, 0, NULL, NULL);
1310 #ifdef INET6
1311 		else
1312 			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1313 #endif
1314 	}
1315 
1316 	/* Run table resize without holding any locks */
1317 	if (new_buckets != 0)
1318 		resize_dynamic_table(chain, new_buckets);
1319 }
1320 
1321 /*
1322  * Deletes all dynamic rules originated by given rule or all rules in
1323  * given set. Specify RESVD_SET to indicate set should not be used.
1324  * @chain - pointer to current ipfw rules chain
1325  * @rule - delete all states originated by given rule if != NULL
1326  * @set - delete all states originated by any rule in set @set if != RESVD_SET
1327  *
1328  * Function has to be called with IPFW_UH_WLOCK held.
1329  * Additionally, the function assumes that the dynamic rule/set is
1330  * ALREADY deleted so no new states can be generated by
1331  * 'deleted' rules.
1332  */
1333 void
1334 ipfw_expire_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule, int set)
1335 {
1336 
1337 	check_dyn_rules(chain, rule, set, 0, 0);
1338 }
1339 
1340 void
1341 ipfw_dyn_init(struct ip_fw_chain *chain)
1342 {
1343 
1344 	V_ipfw_dyn_v = NULL;
1345 	V_dyn_buckets_max = 256;	/* must be power of 2 */
1346 	V_curr_dyn_buckets = 256;	/* must be power of 2 */
1347 
1348 	V_dyn_ack_lifetime = 300;
1349 	V_dyn_syn_lifetime = 20;
1350 	V_dyn_fin_lifetime = 1;
1351 	V_dyn_rst_lifetime = 1;
1352 	V_dyn_udp_lifetime = 10;
1353 	V_dyn_short_lifetime = 5;
1354 
1355 	V_dyn_keepalive_interval = 20;
1356 	V_dyn_keepalive_period = 5;
1357 	V_dyn_keepalive = 1;		/* do send keepalives */
1358 	V_dyn_keepalive_last = time_uptime;
1359 
1360 	V_dyn_max = 4096;		/* max # of dynamic rules */
1361 
1362 	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
1363 	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
1364 	    UMA_ALIGN_PTR, 0);
1365 
1366 	/* Enforce limit on dynamic rules */
1367 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1368 
1369 	callout_init(&V_ipfw_timeout, CALLOUT_MPSAFE);
1370 
1371 	/*
1372 	 * This can potentially be done on first dynamic rule
1373 	 * being added to chain.
1374 	 */
1375 	resize_dynamic_table(chain, V_curr_dyn_buckets);
1376 }
1377 
1378 void
1379 ipfw_dyn_uninit(int pass)
1380 {
1381 	int i;
1382 
1383 	if (pass == 0) {
1384 		callout_drain(&V_ipfw_timeout);
1385 		return;
1386 	}
1387 
1388 	if (V_ipfw_dyn_v != NULL) {
1389 		/*
1390 		 * Skip deleting all dynamic states -
1391 		 * uma_zdestroy() does this more efficiently;
1392 		 */
1393 
1394 		/* Destroy all mutexes */
1395 		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
1396 			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
1397 		free(V_ipfw_dyn_v, M_IPFW);
1398 		V_ipfw_dyn_v = NULL;
1399 	}
1400 
1401 	uma_zdestroy(V_ipfw_dyn_rule_zone);
1402 }
1403 
1404 #ifdef SYSCTL_NODE
1405 /*
1406  * Get/set maximum number of dynamic states in given VNET instance.
1407  */
1408 static int
1409 sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
1410 {
1411 	int error;
1412 	unsigned int nstates;
1413 
1414 	nstates = V_dyn_max;
1415 
1416 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1417 	/* Read operation or some error */
1418 	if ((error != 0) || (req->newptr == NULL))
1419 		return (error);
1420 
1421 	V_dyn_max = nstates;
1422 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1423 
1424 	return (0);
1425 }
1426 
1427 /*
1428  * Get current number of dynamic states in given VNET instance.
1429  */
1430 static int
1431 sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
1432 {
1433 	int error;
1434 	unsigned int nstates;
1435 
1436 	nstates = DYN_COUNT;
1437 
1438 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1439 
1440 	return (error);
1441 }
1442 #endif
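/*
 * Editorial example (not part of the original file): the handlers above
 * back the following sysctl knobs, e.g.:
 *
 *	sysctl net.inet.ip.fw.dyn_count
 *	sysctl net.inet.ip.fw.dyn_max=16384
 *
 * Raising dyn_max also raises the UMA zone limit, so additional states
 * can be allocated immediately.
 */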
1443 
1444 /*
1445  * Returns the number of bytes needed to dump all dynamic rules.
1446  */
1447 int
1448 ipfw_dyn_len(void)
1449 {
1450 
1451 	return (V_ipfw_dyn_v == NULL) ? 0 :
1452 		(DYN_COUNT * sizeof(ipfw_dyn_rule));
1453 }
1454 
1455 /*
1456  * Fill given buffer with dynamic states.
1457  * IPFW_UH_RLOCK has to be held while calling.
1458  */
1459 void
1460 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
1461 {
1462 	ipfw_dyn_rule *p, *last = NULL;
1463 	char *bp;
1464 	int i;
1465 
1466 	if (V_ipfw_dyn_v == NULL)
1467 		return;
1468 	bp = *pbp;
1469 
1470 	IPFW_UH_RLOCK_ASSERT(chain);
1471 
1472 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1473 		IPFW_BUCK_LOCK(i);
1474 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1475 			if (bp + sizeof *p <= ep) {
1476 				ipfw_dyn_rule *dst =
1477 					(ipfw_dyn_rule *)bp;
1478 				bcopy(p, dst, sizeof *p);
1479 				bcopy(&(p->rule->rulenum), &(dst->rule),
1480 				    sizeof(p->rule->rulenum));
1481 				/*
1482 				 * store set number into high word of
1483 				 * dst->rule pointer.
1484 				 */
1485 				bcopy(&(p->rule->set),
1486 				    (char *)&dst->rule +
1487 				    sizeof(p->rule->rulenum),
1488 				    sizeof(p->rule->set));
1489 				/*
1490 				 * store a non-null value in "next".
1491 				 * The userland code will interpret a
1492 				 * NULL here as a marker
1493 				 * for the last dynamic rule.
1494 				 */
1495 				bcopy(&dst, &dst->next, sizeof(dst));
1496 				last = dst;
1497 				dst->expire =
1498 				    TIME_LEQ(dst->expire, time_uptime) ?
1499 					0 : dst->expire - time_uptime ;
1500 				bp += sizeof(ipfw_dyn_rule);
1501 			}
1502 		}
1503 		IPFW_BUCK_UNLOCK(i);
1504 	}
1505 
1506 	if (last != NULL) /* mark last dynamic rule */
1507 		bzero(&last->next, sizeof(last));
1508 	*pbp = bp;
1509 }
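/*
 * Editorial sketch (not part of the original file, compiled out): a
 * userland consumer can walk the buffer filled above roughly as
 * follows.  The rule number is packed into the first bytes of the
 * "rule" field, "expire" has been converted to seconds remaining, and a
 * NULL "next" marks the last entry.  "buf" is assumed to point at a
 * non-empty buffer returned by the kernel.
 */
#if 0
	for (p = (ipfw_dyn_rule *)buf; ; p++) {
		uint16_t rulenum;

		bcopy(&p->rule, &rulenum, sizeof(rulenum));
		printf("rule %u: %u seconds left\n", rulenum, p->expire);
		if (p->next == NULL)		/* last dynamic rule */
			break;
	}
#endif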
1510 /* end of file */
1511