xref: /freebsd/sys/netpfil/ipfw/ip_fw_dynamic.c (revision a18eacbefdfa1085ca3db829e86ece78cd416493)
1 /*-
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #define        DEB(x)
30 #define        DDB(x) x
31 
32 /*
33  * Dynamic rule support for ipfw
34  */
35 
36 #include "opt_ipfw.h"
37 #include "opt_inet.h"
38 #ifndef INET
39 #error IPFIREWALL requires INET.
40 #endif /* INET */
41 #include "opt_inet6.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/lock.h>
50 #include <sys/socket.h>
51 #include <sys/sysctl.h>
52 #include <sys/syslog.h>
53 #include <net/ethernet.h> /* for ETHERTYPE_IP */
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/vnet.h>
57 
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip_var.h>	/* ip_defttl */
61 #include <netinet/ip_fw.h>
62 #include <netinet/tcp_var.h>
63 #include <netinet/udp.h>
64 
65 #include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
66 #ifdef INET6
67 #include <netinet6/in6_var.h>
68 #include <netinet6/ip6_var.h>
69 #endif
70 
71 #include <netpfil/ipfw/ip_fw_private.h>
72 
73 #include <machine/in_cksum.h>	/* XXX for in_cksum */
74 
75 #ifdef MAC
76 #include <security/mac/mac_framework.h>
77 #endif
78 
79 /*
80  * Description of dynamic rules.
81  *
82  * Dynamic rules are stored in lists accessed through a hash table
83  * (ipfw_dyn_v) whose size is curr_dyn_buckets. The maximum number of
84  * buckets can be set through the sysctl variable dyn_buckets; the hash
85  * is grown by the timer routine when the number of states requires it.
86  *
87  * XXX currently there is only one list, ipfw_dyn.
88  *
89  * When a packet is received, its address fields are first masked
90  * with the mask defined for the rule, then hashed, then matched
91  * against the entries in the corresponding list.
92  * Dynamic rules can be used for different purposes:
93  *  + stateful rules;
94  *  + enforcing limits on the number of sessions;
95  *  + in-kernel NAT (not implemented yet)
96  *
97  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
98  * measured in seconds and depending on the flags.
99  *
100  * The total number of dynamic rules is equal to the UMA zone item count.
101  * The max number of dynamic rules is dyn_max. When we reach
102  * the maximum number of rules we do not create any more. This is
103  * done to avoid consuming too much memory, but also too much
104  * time when searching on each packet (ideally, we should instead
105  * put a limit on the length of the list in each bucket...).
106  *
107  * Each dynamic rule holds a pointer to the parent ipfw rule so
108  * we know what action to perform. Dynamic rules are removed when
109  * the parent rule is deleted. XXX we should make them survive.
110  *
111  * There are some limitations with dynamic rules -- we do not
112  * obey the 'randomized match', and we do not do multiple
113  * passes through the firewall. XXX check the latter!!!
114  */
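/*
 * For illustration only: dynamic states of the kinds described above
 * are typically created by userland rules along these lines (ipfw(8)
 * syntax, rule numbers arbitrary):
 *
 *	ipfw add 100 check-state
 *	ipfw add 200 allow tcp from any to any setup keep-state
 *	ipfw add 300 allow tcp from any to me setup limit src-addr 10
 *
 * keep-state installs an O_KEEP_STATE entry per flow; limit installs
 * an O_LIMIT entry backed by an O_LIMIT_PARENT entry per masked id.
 */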
115 
116 struct ipfw_dyn_bucket {
117 	struct mtx	mtx;		/* Bucket protecting lock */
118 	ipfw_dyn_rule	*head;		/* Pointer to first rule */
119 };
120 
121 /*
122  * Static variables followed by global ones
123  */
124 static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
125 static VNET_DEFINE(u_int32_t, dyn_buckets_max);
126 static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
127 static VNET_DEFINE(struct callout, ipfw_timeout);
128 #define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
129 #define	V_dyn_buckets_max		VNET(dyn_buckets_max)
130 #define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
131 #define V_ipfw_timeout                  VNET(ipfw_timeout)
132 
133 static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
134 #define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)
135 
136 #define	IPFW_BUCK_LOCK_INIT(b)	\
137 	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
138 #define	IPFW_BUCK_LOCK_DESTROY(b)	\
139 	mtx_destroy(&(b)->mtx)
140 #define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
141 #define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
142 #define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)
143 
144 /*
145  * Timeouts for various events in handling dynamic rules.
146  */
147 static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
148 static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
149 static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
150 static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
151 static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
152 static VNET_DEFINE(u_int32_t, dyn_short_lifetime);
153 
154 #define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
155 #define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
156 #define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
157 #define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
158 #define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
159 #define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)
160 
161 /*
162  * Keepalives are sent if dyn_keepalive is set. They are sent every
163  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
164  * seconds of lifetime of a rule.
165  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
166  * than dyn_keepalive_period.
167  */
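/*
 * Worked example, using the defaults set in ipfw_dyn_init() below
 * (dyn_keepalive_period = 5, dyn_keepalive_interval = 20,
 * dyn_ack_lifetime = 300): an established TCP state expires roughly
 * 300 seconds after the last matching packet.  During its final 20
 * seconds the timer routine (which runs every second but performs
 * the keepalive pass at most once per dyn_keepalive_period) queues
 * keepalive probes, so a live but idle connection answers and its
 * state is refreshed before it would otherwise be dropped.
 */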
168 
169 static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
170 static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
171 static VNET_DEFINE(u_int32_t, dyn_keepalive);
172 static VNET_DEFINE(time_t, dyn_keepalive_last);
173 
174 #define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
175 #define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
176 #define	V_dyn_keepalive			VNET(dyn_keepalive)
177 #define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)
178 
179 static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */
180 
181 #define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
182 #define	V_dyn_max			VNET(dyn_max)
183 
184 static int last_log;	/* Log ratelimiting */
185 
186 static void ipfw_dyn_tick(void *vnetx);
187 static void check_dyn_rules(struct ip_fw_chain *, struct ip_fw *,
188     int, int, int);
189 #ifdef SYSCTL_NODE
190 
191 static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
192 static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);
193 
194 SYSBEGIN(f2)
195 
196 SYSCTL_DECL(_net_inet_ip_fw);
197 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
198     CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
199     "Max number of dyn. buckets");
200 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
201     CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
202     "Current Number of dyn. buckets");
203 SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
204     CTLTYPE_UINT|CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
205     "Number of dyn. rules");
206 SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
207     CTLTYPE_UINT|CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
208     "Max number of dyn. rules");
209 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
210     CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
211     "Lifetime of dyn. rules for acks");
212 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
213     CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
214     "Lifetime of dyn. rules for syn");
215 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
216     CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
217     "Lifetime of dyn. rules for fin");
218 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
219     CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
220     "Lifetime of dyn. rules for rst");
221 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
222     CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
223     "Lifetime of dyn. rules for UDP");
224 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
225     CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
226     "Lifetime of dyn. rules for other situations");
227 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
228     CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
229     "Enable keepalives for dyn. rules");
230 
231 SYSEND
232 
233 #endif /* SYSCTL_NODE */
234 
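/*
 * Example only: the knobs above live under net.inet.ip.fw and can be
 * inspected or tuned at run time with sysctl(8), e.g.:
 *
 *	sysctl net.inet.ip.fw.dyn_count
 *	sysctl net.inet.ip.fw.dyn_max=16384
 *	sysctl net.inet.ip.fw.dyn_buckets=1024
 *	sysctl net.inet.ip.fw.dyn_keepalive=1
 *
 * The values shown are arbitrary; dyn_buckets is expected to be a
 * power of 2 (see resize_dynamic_table()).
 */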
235 
236 static __inline int
237 hash_packet6(struct ipfw_flow_id *id)
238 {
239 	u_int32_t i;
240 	i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
241 	    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
242 	    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
243 	    (id->src_ip6.__u6_addr.__u6_addr32[3]) ^
244 	    (id->dst_port) ^ (id->src_port);
245 	return i;
246 }
247 
248 /*
249  * IMPORTANT: the hash function for dynamic rules must be commutative
250  * in source and destination (ip,port), because rules are bidirectional
251  * and we want to find both in the same bucket.
252  */
253 static __inline int
254 hash_packet(struct ipfw_flow_id *id, int buckets)
255 {
256 	u_int32_t i;
257 
258 #ifdef INET6
259 	if (IS_IP6_FLOW_ID(id))
260 		i = hash_packet6(id);
261 	else
262 #endif /* INET6 */
263 	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
264 	i &= (buckets - 1);
265 	return i;
266 }
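/*
 * The commutativity required above follows from XOR being commutative:
 * for a (made-up) IPv4 flow 10.0.0.1:1025 -> 10.0.0.2:80 and its
 * reverse, src_ip ^ dst_ip ^ src_port ^ dst_port yields the same
 * value, so both directions land in the same bucket.  The final
 * "& (buckets - 1)" relies on the bucket count being a power of 2.
 */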
267 
268 /**
269  * Print customizable flow id description via log(9) facility.
270  */
271 static void
272 print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags,
273     char *prefix, char *postfix)
274 {
275 	struct in_addr da;
276 #ifdef INET6
277 	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
278 #else
279 	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
280 #endif
281 
282 #ifdef INET6
283 	if (IS_IP6_FLOW_ID(id)) {
284 		ip6_sprintf(src, &id->src_ip6);
285 		ip6_sprintf(dst, &id->dst_ip6);
286 	} else
287 #endif
288 	{
289 		da.s_addr = htonl(id->src_ip);
290 		inet_ntop(AF_INET, &da, src, sizeof(src));
291 		da.s_addr = htonl(id->dst_ip);
292 		inet_ntop(AF_INET, &da, dst, sizeof(dst));
293 	}
294 	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
295 	    prefix, dyn_type, src, id->src_port, dst,
296 	    id->dst_port, DYN_COUNT, postfix);
297 }
298 
299 #define	print_dyn_rule(id, dtype, prefix, postfix)	\
300 	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)
301 
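/*
 * Wraparound-safe "a <= b" comparison for 32-bit time values: the
 * signed difference is meaningful as long as the two values are less
 * than 2^31 apart.
 */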
302 #define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
303 
304 /*
305  * Lookup a dynamic rule, locked version.
306  */
307 static ipfw_dyn_rule *
308 lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction,
309     struct tcphdr *tcp)
310 {
311 	/*
312 	 * Stateful ipfw extensions.
313 	 * Lookup into dynamic session queue.
314 	 */
315 #define MATCH_REVERSE	0
316 #define MATCH_FORWARD	1
317 #define MATCH_NONE	2
318 #define MATCH_UNKNOWN	3
319 	int dir = MATCH_NONE;
320 	ipfw_dyn_rule *prev, *q = NULL;
321 
322 	IPFW_BUCK_ASSERT(i);
323 
324 	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
325 		if (q->dyn_type == O_LIMIT_PARENT && q->count)
326 			continue;
327 
328 		if (pkt->proto != q->id.proto || q->dyn_type == O_LIMIT_PARENT)
329 			continue;
330 
331 		if (IS_IP6_FLOW_ID(pkt)) {
332 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
333 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
334 			    pkt->src_port == q->id.src_port &&
335 			    pkt->dst_port == q->id.dst_port) {
336 				dir = MATCH_FORWARD;
337 				break;
338 			}
339 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
340 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
341 			    pkt->src_port == q->id.dst_port &&
342 			    pkt->dst_port == q->id.src_port) {
343 				dir = MATCH_REVERSE;
344 				break;
345 			}
346 		} else {
347 			if (pkt->src_ip == q->id.src_ip &&
348 			    pkt->dst_ip == q->id.dst_ip &&
349 			    pkt->src_port == q->id.src_port &&
350 			    pkt->dst_port == q->id.dst_port) {
351 				dir = MATCH_FORWARD;
352 				break;
353 			}
354 			if (pkt->src_ip == q->id.dst_ip &&
355 			    pkt->dst_ip == q->id.src_ip &&
356 			    pkt->src_port == q->id.dst_port &&
357 			    pkt->dst_port == q->id.src_port) {
358 				dir = MATCH_REVERSE;
359 				break;
360 			}
361 		}
362 	}
363 	if (q == NULL)
364 		goto done;	/* q = NULL, not found */
365 
366 	if (prev != NULL) {	/* found and not in front */
367 		prev->next = q->next;
368 		q->next = V_ipfw_dyn_v[i].head;
369 		V_ipfw_dyn_v[i].head = q;
370 	}
371 	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
372 		uint32_t ack;
373 		u_char flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);
374 
375 #define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
376 #define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
377 #define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
378 #define	ACK_FWD		0x10000			/* fwd ack seen */
379 #define	ACK_REV		0x20000			/* rev ack seen */
380 
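		/*
		 * q->state keeps the forward-direction TCP flags in its low
		 * byte and the reverse-direction flags shifted left by 8.
		 * ACK_FWD/ACK_REV note that an ACK was seen in the
		 * corresponding direction; once both are set, the expire
		 * time is refreshed and both bits are cleared.
		 */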
381 		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
382 		switch (q->state & TCP_FLAGS) {
383 		case TH_SYN:			/* opening */
384 			q->expire = time_uptime + V_dyn_syn_lifetime;
385 			break;
386 
387 		case BOTH_SYN:			/* move to established */
388 		case BOTH_SYN | TH_FIN:		/* one side tries to close */
389 		case BOTH_SYN | (TH_FIN << 8):
390 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
391 			if (tcp == NULL)
392 				break;
393 
394 			ack = ntohl(tcp->th_ack);
395 			if (dir == MATCH_FORWARD) {
396 				if (q->ack_fwd == 0 ||
397 				    _SEQ_GE(ack, q->ack_fwd)) {
398 					q->ack_fwd = ack;
399 					q->state |= ACK_FWD;
400 				}
401 			} else {
402 				if (q->ack_rev == 0 ||
403 				    _SEQ_GE(ack, q->ack_rev)) {
404 					q->ack_rev = ack;
405 					q->state |= ACK_REV;
406 				}
407 			}
408 			if ((q->state & (ACK_FWD | ACK_REV)) ==
409 			    (ACK_FWD | ACK_REV)) {
410 				q->expire = time_uptime + V_dyn_ack_lifetime;
411 				q->state &= ~(ACK_FWD | ACK_REV);
412 			}
413 			break;
414 
415 		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
416 			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
417 				V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
418 			q->expire = time_uptime + V_dyn_fin_lifetime;
419 			break;
420 
421 		default:
422 #if 0
423 			/*
424 			 * reset or some invalid combination, but can also
425 			 * occur if we use keep-state the wrong way.
426 			 */
427 			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
428 				printf("invalid state: 0x%x\n", q->state);
429 #endif
430 			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
431 				V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
432 			q->expire = time_uptime + V_dyn_rst_lifetime;
433 			break;
434 		}
435 	} else if (pkt->proto == IPPROTO_UDP) {
436 		q->expire = time_uptime + V_dyn_udp_lifetime;
437 	} else {
438 		/* other protocols */
439 		q->expire = time_uptime + V_dyn_short_lifetime;
440 	}
441 done:
442 	if (match_direction != NULL)
443 		*match_direction = dir;
444 	return (q);
445 }
446 
447 ipfw_dyn_rule *
448 ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
449     struct tcphdr *tcp)
450 {
451 	ipfw_dyn_rule *q;
452 	int i;
453 
454 	i = hash_packet(pkt, V_curr_dyn_buckets);
455 
456 	IPFW_BUCK_LOCK(i);
457 	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp);
458 	if (q == NULL)
459 		IPFW_BUCK_UNLOCK(i);
460 	/* NB: return with the bucket lock held when q is not NULL */
461 	return q;
462 }
463 
464 /*
465  * Unlock the bucket mtx
466  * @q - pointer to the dynamic rule
467  */
468 void
469 ipfw_dyn_unlock(ipfw_dyn_rule *q)
470 {
471 
472 	IPFW_BUCK_UNLOCK(q->bucket);
473 }
474 
475 static int
476 resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
477 {
478 	int i, k, nbuckets_old;
479 	ipfw_dyn_rule *q;
480 	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;
481 
482 	/* Check if the given number is a power of 2 and at most 64k */
483 	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
484 		return 1;
485 
486 	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
487 	    V_curr_dyn_buckets, nbuckets);
488 
489 	/* Allocate and initialize new hash */
490 	dyn_v = malloc(nbuckets * sizeof(struct ipfw_dyn_bucket), M_IPFW,
491 	    M_WAITOK | M_ZERO);
492 
493 	for (i = 0 ; i < nbuckets; i++)
494 		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);
495 
496 	/*
497 	 * Take the upper-half lock, as get_map() does, to ease
498 	 * read-only access to the dynamic rules hash from sysctl
499 	 */
500 	IPFW_UH_WLOCK(chain);
501 
502 	/*
503 	 * Acquire the chain write lock so that the main traffic
504 	 * path can access the hash without additional locks
505 	 */
506 	IPFW_WLOCK(chain);
507 
508 	/* Save old values */
509 	nbuckets_old = V_curr_dyn_buckets;
510 	dyn_v_old = V_ipfw_dyn_v;
511 
512 	/* Skip relinking if array is not set up */
513 	if (V_ipfw_dyn_v == NULL)
514 		V_curr_dyn_buckets = 0;
515 
516 	/* Re-link all dynamic states */
517 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
518 		while (V_ipfw_dyn_v[i].head != NULL) {
519 			/* Remove from current chain */
520 			q = V_ipfw_dyn_v[i].head;
521 			V_ipfw_dyn_v[i].head = q->next;
522 
523 			/* Get new hash value */
524 			k = hash_packet(&q->id, nbuckets);
525 			q->bucket = k;
526 			/* Add to the new head */
527 			q->next = dyn_v[k].head;
528 			dyn_v[k].head = q;
529 		}
530 	}
531 
532 	/* Update current pointers/buckets values */
533 	V_curr_dyn_buckets = nbuckets;
534 	V_ipfw_dyn_v = dyn_v;
535 
536 	IPFW_WUNLOCK(chain);
537 
538 	IPFW_UH_WUNLOCK(chain);
539 
540 	/* Start periodic callout on initial creation */
541 	if (dyn_v_old == NULL) {
542         	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
543 		return (0);
544 	}
545 
546 	/* Destroy all mutexes */
547 	for (i = 0 ; i < nbuckets_old ; i++)
548 		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);
549 
550 	/* Free old hash */
551 	free(dyn_v_old, M_IPFW);
552 
553 	return 0;
554 }
555 
556 /**
557  * Install state of type 'type' for a dynamic session.
558  * The hash table contains the following types of rules:
559  * - regular rules (O_KEEP_STATE)
560  * - rules for sessions with a limited number of sessions per user
561  *   (O_LIMIT). When one is created, the parent's counter is
562  *   increased by 1, and decreased on delete. In this case,
563  *   the last parameter is the parent state and not the ip_fw rule.
564  * - "parent" rules for the above (O_LIMIT_PARENT).
565  */
566 static ipfw_dyn_rule *
567 add_dyn_rule(struct ipfw_flow_id *id, int i, u_int8_t dyn_type, struct ip_fw *rule)
568 {
569 	ipfw_dyn_rule *r;
570 
571 	IPFW_BUCK_ASSERT(i);
572 
573 	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
574 	if (r == NULL) {
575 		if (last_log != time_uptime) {
576 			last_log = time_uptime;
577 			log(LOG_DEBUG, "ipfw: %s: Cannot allocate rule\n",
578 			    __func__);
579 		}
580 		return NULL;
581 	}
582 
583 	/*
584 	 * refcount on parent is already incremented, so
585 	 * it is safe to use parent unlocked.
586 	 */
587 	if (dyn_type == O_LIMIT) {
588 		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
589 		if ( parent->dyn_type != O_LIMIT_PARENT)
590 			panic("invalid parent");
591 		r->parent = parent;
592 		rule = parent->rule;
593 	}
594 
595 	r->id = *id;
596 	r->expire = time_uptime + V_dyn_syn_lifetime;
597 	r->rule = rule;
598 	r->dyn_type = dyn_type;
599 	IPFW_ZERO_DYN_COUNTER(r);
600 	r->count = 0;
601 
602 	r->bucket = i;
603 	r->next = V_ipfw_dyn_v[i].head;
604 	V_ipfw_dyn_v[i].head = r;
605 	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
606 	return r;
607 }
608 
609 /**
610  * lookup dynamic parent rule using pkt and rule as search keys.
611  * If the lookup fails, then install one.
612  */
613 static ipfw_dyn_rule *
614 lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule)
615 {
616 	ipfw_dyn_rule *q;
617 	int i, is_v6;
618 
619 	is_v6 = IS_IP6_FLOW_ID(pkt);
620 	i = hash_packet( pkt, V_curr_dyn_buckets );
621 	*pindex = i;
622 	IPFW_BUCK_LOCK(i);
623 	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
624 		if (q->dyn_type == O_LIMIT_PARENT &&
625 		    rule== q->rule &&
626 		    pkt->proto == q->id.proto &&
627 		    pkt->src_port == q->id.src_port &&
628 		    pkt->dst_port == q->id.dst_port &&
629 		    (
630 			(is_v6 &&
631 			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
632 				&(q->id.src_ip6)) &&
633 			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
634 				&(q->id.dst_ip6))) ||
635 			(!is_v6 &&
636 			 pkt->src_ip == q->id.src_ip &&
637 			 pkt->dst_ip == q->id.dst_ip)
638 		    )
639 		) {
640 			q->expire = time_uptime + V_dyn_short_lifetime;
641 			DEB(print_dyn_rule(pkt, q->dyn_type,
642 			    "lookup_dyn_parent found", "");)
643 			return q;
644 		}
645 
646 	/* Add virtual limiting rule */
647 	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule);
648 }
649 
650 /**
651  * Install dynamic state for rule type cmd->o.opcode
652  *
653  * Returns 1 (failure) if state is not installed because of errors or because
654  * session limitations are enforced.
655  */
656 int
657 ipfw_install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
658     struct ip_fw_args *args, uint32_t tablearg)
659 {
660 	ipfw_dyn_rule *q;
661 	int i;
662 
663 	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", "");)
664 
665 	i = hash_packet(&args->f_id, V_curr_dyn_buckets);
666 
667 	IPFW_BUCK_LOCK(i);
668 
669 	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
670 
671 	if (q != NULL) {	/* should never occur */
672 		DEB(
673 		if (last_log != time_uptime) {
674 			last_log = time_uptime;
675 			printf("ipfw: %s: entry already present, done\n",
676 			    __func__);
677 		})
678 		IPFW_BUCK_UNLOCK(i);
679 		return (0);
680 	}
681 
682 	/*
683 	 * State limiting is done via uma(9) zone limiting.
684 	 * Save pointer to newly-installed rule and reject
685 	 * packet if add_dyn_rule() returned NULL.
686 	 * Note q is currently set to NULL.
687 	 */
688 
689 	switch (cmd->o.opcode) {
690 	case O_KEEP_STATE:	/* bidir rule */
691 		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule);
692 		break;
693 
694 	case O_LIMIT: {		/* limit number of sessions */
695 		struct ipfw_flow_id id;
696 		ipfw_dyn_rule *parent;
697 		uint32_t conn_limit;
698 		uint16_t limit_mask = cmd->limit_mask;
699 		int pindex;
700 
701 		conn_limit = IP_FW_ARG_TABLEARG(cmd->conn_limit);
702 
703 		DEB(
704 		if (cmd->conn_limit == IP_FW_TABLEARG)
705 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
706 			    "(tablearg)\n", __func__, conn_limit);
707 		else
708 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
709 			    __func__, conn_limit);
710 		)
711 
712 		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
713 		id.proto = args->f_id.proto;
714 		id.addr_type = args->f_id.addr_type;
715 		id.fib = M_GETFIB(args->m);
716 
717 		if (IS_IP6_FLOW_ID (&(args->f_id))) {
718 			if (limit_mask & DYN_SRC_ADDR)
719 				id.src_ip6 = args->f_id.src_ip6;
720 			if (limit_mask & DYN_DST_ADDR)
721 				id.dst_ip6 = args->f_id.dst_ip6;
722 		} else {
723 			if (limit_mask & DYN_SRC_ADDR)
724 				id.src_ip = args->f_id.src_ip;
725 			if (limit_mask & DYN_DST_ADDR)
726 				id.dst_ip = args->f_id.dst_ip;
727 		}
728 		if (limit_mask & DYN_SRC_PORT)
729 			id.src_port = args->f_id.src_port;
730 		if (limit_mask & DYN_DST_PORT)
731 			id.dst_port = args->f_id.dst_port;
732 
733 		/*
734 		 * We have to release the lock on the previous bucket to
735 		 * avoid a possible deadlock
736 		 */
737 		IPFW_BUCK_UNLOCK(i);
738 
739 		if ((parent = lookup_dyn_parent(&id, &pindex, rule)) == NULL) {
740 			printf("ipfw: %s: add parent failed\n", __func__);
741 			IPFW_BUCK_UNLOCK(pindex);
742 			return (1);
743 		}
744 
745 		if (parent->count >= conn_limit) {
746 			if (V_fw_verbose && last_log != time_uptime) {
747 				char sbuf[24];
748 
749 				last_log = time_uptime;
750 				snprintf(sbuf, sizeof(sbuf),
751 				    "%d drop session",
752 				    parent->rule->rulenum);
753 				print_dyn_rule_flags(&args->f_id,
754 				    cmd->o.opcode,
755 				    LOG_SECURITY | LOG_DEBUG,
756 				    sbuf, "too many entries");
757 			}
758 			IPFW_BUCK_UNLOCK(pindex);
759 			return (1);
760 		}
761 		/* Increment counter on parent */
762 		parent->count++;
763 		IPFW_BUCK_UNLOCK(pindex);
764 
765 		IPFW_BUCK_LOCK(i);
766 		q = add_dyn_rule(&args->f_id, i, O_LIMIT, (struct ip_fw *)parent);
767 		if (q == NULL) {
768 			/* Decrement parent's counter and notify caller */
769 			IPFW_BUCK_UNLOCK(i);
770 			IPFW_BUCK_LOCK(pindex);
771 			parent->count--;
772 			IPFW_BUCK_UNLOCK(pindex);
773 			return (1);
774 		}
775 		break;
776 	}
777 	default:
778 		printf("ipfw: %s: unknown dynamic rule type %u\n",
779 		    __func__, cmd->o.opcode);
780 	}
781 
782 	if (q == NULL) {
783 		IPFW_BUCK_UNLOCK(i);
784 		return (1);	/* Notify caller about failure */
785 	}
786 
787 	/* XXX just set lifetime */
788 	lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
789 
790 	IPFW_BUCK_UNLOCK(i);
791 	return (0);
792 }
793 
794 /*
795  * Generate a TCP packet, containing either a RST or a keepalive.
796  * When flags & TH_RST, we are sending a RST packet because a
797  * "reset" action matched the packet.  Otherwise we are sending a
798  * keepalive with only TH_ACK set (TH_SYN in 'flags' picks the direction).
799  * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
800  * so that MAC can label the reply appropriately.
801  */
802 struct mbuf *
803 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
804     u_int32_t ack, int flags)
805 {
806 	struct mbuf *m = NULL;		/* stupid compiler */
807 	int len, dir;
808 	struct ip *h = NULL;		/* stupid compiler */
809 #ifdef INET6
810 	struct ip6_hdr *h6 = NULL;
811 #endif
812 	struct tcphdr *th = NULL;
813 
814 	MGETHDR(m, M_NOWAIT, MT_DATA);
815 	if (m == NULL)
816 		return (NULL);
817 
818 	M_SETFIB(m, id->fib);
819 #ifdef MAC
820 	if (replyto != NULL)
821 		mac_netinet_firewall_reply(replyto, m);
822 	else
823 		mac_netinet_firewall_send(m);
824 #else
825 	(void)replyto;		/* don't warn about unused arg */
826 #endif
827 
828 	switch (id->addr_type) {
829 	case 4:
830 		len = sizeof(struct ip) + sizeof(struct tcphdr);
831 		break;
832 #ifdef INET6
833 	case 6:
834 		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
835 		break;
836 #endif
837 	default:
838 		/* XXX: log me?!? */
839 		FREE_PKT(m);
840 		return (NULL);
841 	}
842 	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
843 
844 	m->m_data += max_linkhdr;
845 	m->m_flags |= M_SKIP_FIREWALL;
846 	m->m_pkthdr.len = m->m_len = len;
847 	m->m_pkthdr.rcvif = NULL;
848 	bzero(m->m_data, len);
849 
850 	switch (id->addr_type) {
851 	case 4:
852 		h = mtod(m, struct ip *);
853 
854 		/* prepare for checksum */
855 		h->ip_p = IPPROTO_TCP;
856 		h->ip_len = htons(sizeof(struct tcphdr));
857 		if (dir) {
858 			h->ip_src.s_addr = htonl(id->src_ip);
859 			h->ip_dst.s_addr = htonl(id->dst_ip);
860 		} else {
861 			h->ip_src.s_addr = htonl(id->dst_ip);
862 			h->ip_dst.s_addr = htonl(id->src_ip);
863 		}
864 
865 		th = (struct tcphdr *)(h + 1);
866 		break;
867 #ifdef INET6
868 	case 6:
869 		h6 = mtod(m, struct ip6_hdr *);
870 
871 		/* prepare for checksum */
872 		h6->ip6_nxt = IPPROTO_TCP;
873 		h6->ip6_plen = htons(sizeof(struct tcphdr));
874 		if (dir) {
875 			h6->ip6_src = id->src_ip6;
876 			h6->ip6_dst = id->dst_ip6;
877 		} else {
878 			h6->ip6_src = id->dst_ip6;
879 			h6->ip6_dst = id->src_ip6;
880 		}
881 
882 		th = (struct tcphdr *)(h6 + 1);
883 		break;
884 #endif
885 	}
886 
887 	if (dir) {
888 		th->th_sport = htons(id->src_port);
889 		th->th_dport = htons(id->dst_port);
890 	} else {
891 		th->th_sport = htons(id->dst_port);
892 		th->th_dport = htons(id->src_port);
893 	}
894 	th->th_off = sizeof(struct tcphdr) >> 2;
895 
896 	if (flags & TH_RST) {
897 		if (flags & TH_ACK) {
898 			th->th_seq = htonl(ack);
899 			th->th_flags = TH_RST;
900 		} else {
901 			if (flags & TH_SYN)
902 				seq++;
903 			th->th_ack = htonl(seq);
904 			th->th_flags = TH_RST | TH_ACK;
905 		}
906 	} else {
907 		/*
908 		 * Keepalive - use caller provided sequence numbers
909 		 */
910 		th->th_seq = htonl(seq);
911 		th->th_ack = htonl(ack);
912 		th->th_flags = TH_ACK;
913 	}
914 
915 	switch (id->addr_type) {
916 	case 4:
917 		th->th_sum = in_cksum(m, len);
918 
919 		/* finish the ip header */
920 		h->ip_v = 4;
921 		h->ip_hl = sizeof(*h) >> 2;
922 		h->ip_tos = IPTOS_LOWDELAY;
923 		h->ip_off = htons(0);
924 		h->ip_len = htons(len);
925 		h->ip_ttl = V_ip_defttl;
926 		h->ip_sum = 0;
927 		break;
928 #ifdef INET6
929 	case 6:
930 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
931 		    sizeof(struct tcphdr));
932 
933 		/* finish the ip6 header */
934 		h6->ip6_vfc |= IPV6_VERSION;
935 		h6->ip6_hlim = IPV6_DEFHLIM;
936 		break;
937 #endif
938 	}
939 
940 	return (m);
941 }
942 
943 /*
944  * Queue keepalive packets for given dynamic rule
945  */
946 static struct mbuf **
947 ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
948 {
949 	struct mbuf *m_rev, *m_fwd;
950 
951 	m_rev = (q->state & ACK_REV) ? NULL :
952 	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
953 	m_fwd = (q->state & ACK_FWD) ? NULL :
954 	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
955 
956 	if (m_rev != NULL) {
957 		*mtailp = m_rev;
958 		mtailp = &(*mtailp)->m_nextpkt;
959 	}
960 	if (m_fwd != NULL) {
961 		*mtailp = m_fwd;
962 		mtailp = &(*mtailp)->m_nextpkt;
963 	}
964 
965 	return (mtailp);
966 }
967 
968 /*
969  * This procedure is used to perform various maintenance
970  * on the dynamic hash list. Currently it is called every second.
971  */
972 static void
973 ipfw_dyn_tick(void * vnetx)
974 {
975 	struct ip_fw_chain *chain;
976 	int check_ka = 0;
977 #ifdef VIMAGE
978 	struct vnet *vp = vnetx;
979 #endif
980 
981 	CURVNET_SET(vp);
982 
983 	chain = &V_layer3_chain;
984 
985 	/* Run keepalive checks every keepalive_period iff ka is enabled */
986 	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
987 	    (V_dyn_keepalive != 0)) {
988 		V_dyn_keepalive_last = time_uptime;
989 		check_ka = 1;
990 	}
991 
992 	check_dyn_rules(chain, NULL, RESVD_SET, check_ka, 1);
993 
994 	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);
995 
996 	CURVNET_RESTORE();
997 }
998 
999 
1000 /*
1001  * Walk through all dynamic states doing generic maintenance:
1002  * 1) free expired states
1003  * 2) free all states based on deleted rule / set
1004  * 3) send keepalives for states if needed
1005  *
1006  * @chain - pointer to current ipfw rules chain
1007  * @rule - delete all states originated by given rule if != NULL
1008  * @set - delete all states originated by any rule in set @set if != RESVD_SET
1009  * @check_ka - perform checking/sending keepalives
1010  * @timer - indicate call from timer routine.
1011  *
1012  * Timer routine must call this function unlocked to permit
1013  * sending keepalives/resizing table.
1014  *
1015  * Other callers have to call this function with IPFW_UH_WLOCK held.
1016  * Additionally, the function assumes that the dynamic rule/set is
1017  * ALREADY deleted, so no new states can be generated by
1018  * 'deleted' rules.
1019  *
1020  * The write lock is needed to ensure that unused parent rules
1021  * are not freed by another instance (see stages 2 and 3)
1022  */
1023 static void
1024 check_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule,
1025     int set, int check_ka, int timer)
1026 {
1027 	struct mbuf *m0, *m, *mnext, **mtailp;
1028 	struct ip *h;
1029 	int i, dyn_count, new_buckets = 0, max_buckets;
1030 	int expired = 0, expired_limits = 0, parents = 0, total = 0;
1031 	ipfw_dyn_rule *q, *q_prev, *q_next;
1032 	ipfw_dyn_rule *exp_head, **exptailp;
1033 	ipfw_dyn_rule *exp_lhead, **expltailp;
1034 
1035 	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
1036 	    __func__));
1037 
1038 	/* Avoid possible LOR */
1039 	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
1040 	    __func__));
1041 
1042 	/*
1043 	 * Do not perform any checks if we currently have no dynamic states
1044 	 */
1045 	if (DYN_COUNT == 0)
1046 		return;
1047 
1048 	/* Expired states */
1049 	exp_head = NULL;
1050 	exptailp = &exp_head;
1051 
1052 	/* Expired limit states */
1053 	exp_lhead = NULL;
1054 	expltailp = &exp_lhead;
1055 
1056 	/*
1057 	 * We build a chain of packets to send out here and defer the
1058 	 * actual transmission until after we drop the IPFW dynamic rule
1059 	 * lock; sending with the lock held would cause a lock order
1060 	 * reversal with the normal packet input -> ipfw call stack.
1061 	 */
1062 	m0 = NULL;
1063 	mtailp = &m0;
1064 
1065 	/* Protect from hash resizing */
1066 	if (timer != 0)
1067 		IPFW_UH_WLOCK(chain);
1068 	else
1069 		IPFW_UH_WLOCK_ASSERT(chain);
1070 
1071 #define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }
1072 
1073 	/* Stage 1: perform requested deletion */
1074 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1075 		IPFW_BUCK_LOCK(i);
1076 		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
1077 			/* account every rule */
1078 			total++;
1079 
1080 			/* Skip parent rules at this stage */
1081 			if (q->dyn_type == O_LIMIT_PARENT) {
1082 				parents++;
1083 				NEXT_RULE();
1084 			}
1085 
1086 			/*
1087 			 * Remove rules which are:
1088 			 * 1) expired
1089 			 * 2) created by given rule
1090 			 * 3) created by any rule in given set
1091 			 */
1092 			if ((TIME_LEQ(q->expire, time_uptime)) ||
1093 			    ((rule != NULL) && (q->rule == rule)) ||
1094 			    ((set != RESVD_SET) && (q->rule->set == set))) {
1095 				/* Unlink q from current list */
1096 				q_next = q->next;
1097 				if (q == V_ipfw_dyn_v[i].head)
1098 					V_ipfw_dyn_v[i].head = q_next;
1099 				else
1100 					q_prev->next = q_next;
1101 
1102 				q->next = NULL;
1103 
1104 				/* queue q to expire list */
1105 				if (q->dyn_type != O_LIMIT) {
1106 					*exptailp = q;
1107 					exptailp = &(*exptailp)->next;
1108 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1109 					    "unlink entry", "left");
1110 					)
1111 				} else {
1112 					/* Separate list for limit rules */
1113 					*expltailp = q;
1114 					expltailp = &(*expltailp)->next;
1115 					expired_limits++;
1116 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1117 					    "unlink limit entry", "left");
1118 					)
1119 				}
1120 
1121 				q = q_next;
1122 				expired++;
1123 				continue;
1124 			}
1125 
1126 			/*
1127 			 * Check if we need to send keepalive:
1128 			 * we need to ensure it is time to do KA,
1129 			 * this is an established TCP session, and
1130 			 * the expire time is within the keepalive interval
1131 			 */
1132 			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
1133 			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
1134 			    (TIME_LEQ(q->expire, time_uptime +
1135 			      V_dyn_keepalive_interval)))
1136 				mtailp = ipfw_dyn_send_ka(mtailp, q);
1137 
1138 			NEXT_RULE();
1139 		}
1140 		IPFW_BUCK_UNLOCK(i);
1141 	}
1142 
1143 	/* Stage 2: decrement counters from O_LIMIT parents */
1144 	if (expired_limits != 0) {
1145 		/*
1146 		 * XXX: Note that deleting a set with more than one
1147 		 * heavily-used LIMIT rule can result in excessive
1148 		 * locking due to the lack of per-hash-value sorting
1149 		 *
1150 		 * We should probably think about:
1151 		 * 1) pre-allocating hash of size, say,
1152 		 * MAX(16, V_curr_dyn_buckets / 1024)
1153 		 * 2) checking if expired_limits is large enough
1154 		 * 3) If yes, init hash (or its part), re-link
1155 		 * current list and start decrementing procedure in
1156 		 * each bucket separately
1157 		 */
1158 
1159 		/*
1160 		 * Small optimization: do not unlock the bucket until
1161 		 * we see that the next item resides in a different bucket
1162 		 */
1163 		if (exp_lhead != NULL) {
1164 			i = exp_lhead->parent->bucket;
1165 			IPFW_BUCK_LOCK(i);
1166 		}
1167 		for (q = exp_lhead; q != NULL; q = q->next) {
1168 			if (i != q->parent->bucket) {
1169 				IPFW_BUCK_UNLOCK(i);
1170 				i = q->parent->bucket;
1171 				IPFW_BUCK_LOCK(i);
1172 			}
1173 
1174 			/* Decrease parent refcount */
1175 			q->parent->count--;
1176 		}
1177 		if (exp_lhead != NULL)
1178 			IPFW_BUCK_UNLOCK(i);
1179 	}
1180 
1181 	/*
1182 	 * We protect ourselves from concurrent deletion of unused
1183 	 * parents (by the timer routine) by holding the UH write lock.
1184 	 */
1185 
1186 	/* Stage 3: remove unused parent rules */
1187 	if ((parents != 0) && (expired != 0)) {
1188 		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1189 			IPFW_BUCK_LOCK(i);
1190 			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
1191 				if (q->dyn_type != O_LIMIT_PARENT)
1192 					NEXT_RULE();
1193 
1194 				if (q->count != 0)
1195 					NEXT_RULE();
1196 
1197 				/* Parent rule without consumers */
1198 
1199 				/* Unlink q from current list */
1200 				q_next = q->next;
1201 				if (q == V_ipfw_dyn_v[i].head)
1202 					V_ipfw_dyn_v[i].head = q_next;
1203 				else
1204 					q_prev->next = q_next;
1205 
1206 				q->next = NULL;
1207 
1208 				/* Add to expired list */
1209 				*exptailp = q;
1210 				exptailp = &(*exptailp)->next;
1211 
1212 				DEB(print_dyn_rule(&q->id, q->dyn_type,
1213 				    "unlink parent entry", "left");
1214 				)
1215 
1216 				expired++;
1217 
1218 				q = q_next;
1219 			}
1220 			IPFW_BUCK_UNLOCK(i);
1221 		}
1222 	}
1223 
1224 #undef NEXT_RULE
1225 
1226 	if (timer != 0) {
1227 		/*
1228 		 * Check if we need to resize the hash:
1229 		 * if the current number of states exceeds twice the number of
1230 		 * buckets, grow the hash to the minimum power of 2 that is
1231 		 * bigger than the current state count. Limit the hash size to 64k.
1232 		 */
1233 		max_buckets = (V_dyn_buckets_max > 65536) ?
1234 		    65536 : V_dyn_buckets_max;
1235 
1236 		dyn_count = DYN_COUNT;
1237 
1238 		if ((dyn_count > V_curr_dyn_buckets * 2) &&
1239 		    (dyn_count < max_buckets)) {
1240 			new_buckets = V_curr_dyn_buckets;
1241 			while (new_buckets < dyn_count) {
1242 				new_buckets *= 2;
1243 
1244 				if (new_buckets >= max_buckets)
1245 					break;
1246 			}
1247 		}
1248 
1249 		IPFW_UH_WUNLOCK(chain);
1250 	}
1251 
1252 	/* Finally delete old states and limits, if any */
1253 	for (q = exp_head; q != NULL; q = q_next) {
1254 		q_next = q->next;
1255 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1256 	}
1257 
1258 	for (q = exp_lhead; q != NULL; q = q_next) {
1259 		q_next = q->next;
1260 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1261 	}
1262 
1263 	/*
1264 	 * The rest of this code MUST be executed from the timer
1265 	 * routine only, without holding any locks
1266 	 */
1267 	if (timer == 0)
1268 		return;
1269 
1270 	/* Send keepalive packets if any */
1271 	for (m = m0; m != NULL; m = mnext) {
1272 		mnext = m->m_nextpkt;
1273 		m->m_nextpkt = NULL;
1274 		h = mtod(m, struct ip *);
1275 		if (h->ip_v == 4)
1276 			ip_output(m, NULL, NULL, 0, NULL, NULL);
1277 #ifdef INET6
1278 		else
1279 			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1280 #endif
1281 	}
1282 
1283 	/* Run table resize without holding any locks */
1284 	if (new_buckets != 0)
1285 		resize_dynamic_table(chain, new_buckets);
1286 }
1287 
1288 /*
1289  * Deletes all dynamic rules originated by given rule or all rules in
1290  * given set. Specify RESVD_SET to indicate set should not be used.
1291  * @chain - pointer to current ipfw rules chain
1292  * @rule - delete all states originated by given rule if != NULL
1293  * @set - delete all states originated by any rule in set @set if != RESVD_SET
1294  *
1295  * The function has to be called with IPFW_UH_WLOCK held.
1296  * Additionally, it assumes that the dynamic rule/set is
1297  * ALREADY deleted, so no new states can be generated by
1298  * 'deleted' rules.
1299  */
1300 void
1301 ipfw_expire_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule, int set)
1302 {
1303 
1304 	check_dyn_rules(chain, rule, set, 0, 0);
1305 }
1306 
1307 void
1308 ipfw_dyn_init(struct ip_fw_chain *chain)
1309 {
1310 
1311         V_ipfw_dyn_v = NULL;
1312         V_dyn_buckets_max = 256; /* must be power of 2 */
1313         V_curr_dyn_buckets = 256; /* must be power of 2 */
1314 
1315         V_dyn_ack_lifetime = 300;
1316         V_dyn_syn_lifetime = 20;
1317         V_dyn_fin_lifetime = 1;
1318         V_dyn_rst_lifetime = 1;
1319         V_dyn_udp_lifetime = 10;
1320         V_dyn_short_lifetime = 5;
1321 
1322         V_dyn_keepalive_interval = 20;
1323         V_dyn_keepalive_period = 5;
1324         V_dyn_keepalive = 1;    /* do send keepalives */
1325 	V_dyn_keepalive_last = time_uptime;
1326 
1327         V_dyn_max = 4096;       /* max # of dynamic rules */
1328 
1329 	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
1330 	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
1331 	    UMA_ALIGN_PTR, 0);
1332 
1333 	/* Enforce limit on dynamic rules */
1334 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1335 
1336         callout_init(&V_ipfw_timeout, CALLOUT_MPSAFE);
1337 
1338 	/*
1339 	 * This can potentially be done on first dynamic rule
1340 	 * being added to chain.
1341 	 */
1342 	resize_dynamic_table(chain, V_curr_dyn_buckets);
1343 }
1344 
1345 void
1346 ipfw_dyn_uninit(int pass)
1347 {
1348 	int i;
1349 
1350 	if (pass == 0) {
1351 		callout_drain(&V_ipfw_timeout);
1352 		return;
1353 	}
1354 
1355 	if (V_ipfw_dyn_v != NULL) {
1356 		/*
1357 		 * Skip deleting all dynamic states -
1358 		 * uma_zdestroy() does this more efficiently.
1359 		 */
1360 
1361 		/* Destroy all mutexes */
1362 		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
1363 			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
1364 		free(V_ipfw_dyn_v, M_IPFW);
1365 		V_ipfw_dyn_v = NULL;
1366 	}
1367 
1368         uma_zdestroy(V_ipfw_dyn_rule_zone);
1369 }
1370 
1371 #ifdef SYSCTL_NODE
1372 /*
1373  * Get/set maximum number of dynamic states in given VNET instance.
1374  */
1375 static int
1376 sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
1377 {
1378 	int error;
1379 	unsigned int nstates;
1380 
1381 	nstates = V_dyn_max;
1382 
1383 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1384 	/* Read operation or some error */
1385 	if ((error != 0) || (req->newptr == NULL))
1386 		return (error);
1387 
1388 	V_dyn_max = nstates;
1389 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1390 
1391 	return (0);
1392 }
1393 
1394 /*
1395  * Get current number of dynamic states in given VNET instance.
1396  */
1397 static int
1398 sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
1399 {
1400 	int error;
1401 	unsigned int nstates;
1402 
1403 	nstates = DYN_COUNT;
1404 
1405 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1406 
1407 	return (error);
1408 }
1409 #endif
1410 
1411 /*
1412  * Returns the number of bytes needed to export all dynamic rules.
1413  */
1414 int
1415 ipfw_dyn_len(void)
1416 {
1417 
1418 	return (V_ipfw_dyn_v == NULL) ? 0 :
1419 		(DYN_COUNT * sizeof(ipfw_dyn_rule));
1420 }
1421 
1422 /*
1423  * Fill given buffer with dynamic states.
1424  * IPFW_UH_RLOCK has to be held while calling.
1425  */
1426 void
1427 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
1428 {
1429 	ipfw_dyn_rule *p, *last = NULL;
1430 	char *bp;
1431 	int i;
1432 
1433 	if (V_ipfw_dyn_v == NULL)
1434 		return;
1435 	bp = *pbp;
1436 
1437 	IPFW_UH_RLOCK_ASSERT(chain);
1438 
1439 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1440 		IPFW_BUCK_LOCK(i);
1441 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1442 			if (bp + sizeof *p <= ep) {
1443 				ipfw_dyn_rule *dst =
1444 					(ipfw_dyn_rule *)bp;
1445 				bcopy(p, dst, sizeof *p);
1446 				bcopy(&(p->rule->rulenum), &(dst->rule),
1447 				    sizeof(p->rule->rulenum));
1448 				/*
1449 				 * store set number into high word of
1450 				 * dst->rule pointer.
1451 				 */
1452 				bcopy(&(p->rule->set),
1453 				    (char *)&dst->rule +
1454 				    sizeof(p->rule->rulenum),
1455 				    sizeof(p->rule->set));
1456 				/*
1457 				 * store a non-null value in "next".
1458 				 * The userland code will interpret a
1459 				 * NULL here as a marker
1460 				 * for the last dynamic rule.
1461 				 */
1462 				bcopy(&dst, &dst->next, sizeof(dst));
1463 				last = dst;
1464 				dst->expire =
1465 				    TIME_LEQ(dst->expire, time_uptime) ?
1466 					0 : dst->expire - time_uptime ;
1467 				bp += sizeof(ipfw_dyn_rule);
1468 			}
1469 		}
1470 		IPFW_BUCK_UNLOCK(i);
1471 	}
1472 
1473 	if (last != NULL) /* mark last dynamic rule */
1474 		bzero(&last->next, sizeof(last));
1475 	*pbp = bp;
1476 }
1477 /* end of file */
1478