xref: /freebsd/sys/netpfil/ipfw/ip_fw_dynamic.c (revision c6ec7d31830ab1c80edae95ad5e4b9dba10c47ac)
1 /*-
2  * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #define        DEB(x)
30 #define        DDB(x) x
31 
32 /*
33  * Dynamic rule support for ipfw
34  */
35 
36 #include "opt_ipfw.h"
37 #include "opt_inet.h"
38 #ifndef INET
39 #error IPFIREWALL requires INET.
40 #endif /* INET */
41 #include "opt_inet6.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/kernel.h>
48 #include <sys/lock.h>
49 #include <sys/socket.h>
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #include <net/ethernet.h> /* for ETHERTYPE_IP */
53 #include <net/if.h>
54 #include <net/vnet.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/ip_var.h>	/* ip_defttl */
59 #include <netinet/ip_fw.h>
60 #include <netinet/tcp_var.h>
61 #include <netinet/udp.h>
62 
63 #include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
64 #ifdef INET6
65 #include <netinet6/in6_var.h>
66 #include <netinet6/ip6_var.h>
67 #endif
68 
69 #include <netpfil/ipfw/ip_fw_private.h>
70 
71 #include <machine/in_cksum.h>	/* XXX for in_cksum */
72 
73 #ifdef MAC
74 #include <security/mac/mac_framework.h>
75 #endif
76 
77 /*
78  * Description of dynamic rules.
79  *
80  * Dynamic rules are stored in lists accessed through a hash table
81  * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
82  * be modified through the sysctl variable dyn_buckets which is
83  * updated when the table becomes empty.
84  *
85  * XXX currently there is only one list, ipfw_dyn.
86  *
87  * When a packet is received, its address fields are first masked
88  * with the mask defined for the rule, then hashed, then matched
89  * against the entries in the corresponding list.
90  * Dynamic rules can be used for different purposes:
91  *  + stateful rules;
92  *  + enforcing limits on the number of sessions;
93  *  + in-kernel NAT (not implemented yet)
94  *
95  * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
96  * measured in seconds and depending on the flags.
97  *
98  * The total number of dynamic rules is equal to the UMA zone item count.
99  * The maximum number of dynamic rules is dyn_max. Once that limit is
100  * reached, no new states are created. This is done to avoid consuming
101  * too much memory, but also too much time when searching on each
102  * packet (ideally, we should try instead to put a limit on the length
103  * of the list in each bucket...).
104  *
105  * Each dynamic rule holds a pointer to the parent ipfw rule so
106  * we know what action to perform. Dynamic rules are removed when
107  * the parent rule is deleted. XXX we should make them survive.
108  *
109  * There are some limitations with dynamic rules -- we do not
110  * obey the 'randomized match', and we do not do multiple
111  * passes through the firewall. XXX check the latter!!!
112  */
113 
114 struct ipfw_dyn_bucket {
115 	struct mtx	mtx;		/* Bucket protecting lock */
116 	ipfw_dyn_rule	*head;		/* Pointer to first rule */
117 };
118 
119 /*
120  * Static variables followed by global ones
121  */
122 static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
123 static VNET_DEFINE(u_int32_t, dyn_buckets_max);
124 static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
125 static VNET_DEFINE(struct callout, ipfw_timeout);
126 #define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
127 #define	V_dyn_buckets_max		VNET(dyn_buckets_max)
128 #define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
129 #define V_ipfw_timeout                  VNET(ipfw_timeout)
130 
131 static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
132 #define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)
133 
134 #define	IPFW_BUCK_LOCK_INIT(b)	\
135 	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
136 #define	IPFW_BUCK_LOCK_DESTROY(b)	\
137 	mtx_destroy(&(b)->mtx)
138 #define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
139 #define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
140 #define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)
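
/*
 * A minimal sketch (not compiled in) of the per-bucket access pattern the
 * lookup routines below follow; "pkt" here stands for any flow id taken
 * from a packet.  The id is hashed into a bucket index, the bucket mutex
 * is taken, and the singly-linked list hanging off that bucket is walked.
 */
#if 0
	int i;
	ipfw_dyn_rule *q;

	i = hash_packet(pkt, V_curr_dyn_buckets);
	IPFW_BUCK_LOCK(i);
	for (q = V_ipfw_dyn_v[i].head; q != NULL; q = q->next) {
		/* ... compare q->id against the (masked) packet id ... */
	}
	IPFW_BUCK_UNLOCK(i);
#endif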
141 
142 /*
143  * Timeouts for various events in handling dynamic rules.
144  */
145 static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
146 static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
147 static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
148 static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
149 static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
150 static VNET_DEFINE(u_int32_t, dyn_short_lifetime);
151 
152 #define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
153 #define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
154 #define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
155 #define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
156 #define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
157 #define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)
158 
159 /*
160  * Keepalives are sent if dyn_keepalive is set. They are sent every
161  * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
162  * seconds of lifetime of a rule.
163  * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
164  * than dyn_keepalive_period.
165  */
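
/*
 * Worked example, using the defaults set in ipfw_dyn_init() below
 * (dyn_ack_lifetime 300, dyn_keepalive_interval 20, dyn_keepalive_period 5):
 * an established TCP state that would expire at time T becomes a keepalive
 * candidate once time_uptime >= T - 20, and the timer routine then probes it
 * on every pass that runs a keepalive sweep, i.e. at most once every 5
 * seconds.  The sketch below mirrors the per-state test in check_dyn_rules()
 * (BOTH_SYN and TIME_LEQ are defined later in this file); "send_ka" is a
 * hypothetical flag used only for illustration.
 */
#if 0
	send_ka = (q->id.proto == IPPROTO_TCP) &&
	    ((q->state & BOTH_SYN) == BOTH_SYN) &&
	    TIME_LEQ(q->expire, time_uptime + V_dyn_keepalive_interval);
#endif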
166 
167 static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
168 static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
169 static VNET_DEFINE(u_int32_t, dyn_keepalive);
170 static VNET_DEFINE(time_t, dyn_keepalive_last);
171 
172 #define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
173 #define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
174 #define	V_dyn_keepalive			VNET(dyn_keepalive)
175 #define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)
176 
177 static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */
178 
179 #define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
180 #define	V_dyn_max			VNET(dyn_max)
181 
182 static int last_log;	/* Log ratelimiting */
183 
184 static void ipfw_dyn_tick(void *vnetx);
185 static void check_dyn_rules(struct ip_fw_chain *, struct ip_fw *,
186     int, int, int);
187 #ifdef SYSCTL_NODE
188 
189 static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
190 static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);
191 
192 SYSBEGIN(f2)
193 
194 SYSCTL_DECL(_net_inet_ip_fw);
195 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
196     CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
197     "Max number of dyn. buckets");
198 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
199     CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
200     "Current Number of dyn. buckets");
201 SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
202     CTLTYPE_UINT|CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
203     "Number of dyn. rules");
204 SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
205     CTLTYPE_UINT|CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
206     "Max number of dyn. rules");
207 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
208     CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
209     "Lifetime of dyn. rules for acks");
210 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
211     CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
212     "Lifetime of dyn. rules for syn");
213 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
214     CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
215     "Lifetime of dyn. rules for fin");
216 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
217     CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
218     "Lifetime of dyn. rules for rst");
219 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
220     CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
221     "Lifetime of dyn. rules for UDP");
222 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
223     CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
224     "Lifetime of dyn. rules for other situations");
225 SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
226     CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
227     "Enable keepalives for dyn. rules");
228 
229 SYSEND
230 
231 #endif /* SYSCTL_NODE */
232 
233 
234 static __inline int
235 hash_packet6(struct ipfw_flow_id *id)
236 {
237 	u_int32_t i;
238 	i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
239 	    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
240 	    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
241 	    (id->src_ip6.__u6_addr.__u6_addr32[3]) ^
242 	    (id->dst_port) ^ (id->src_port);
243 	return i;
244 }
245 
246 /*
247  * IMPORTANT: the hash function for dynamic rules must be commutative
248  * in source and destination (ip,port), because rules are bidirectional
249  * and we want to find both in the same bucket.
250  */
251 static __inline int
252 hash_packet(struct ipfw_flow_id *id, int buckets)
253 {
254 	u_int32_t i;
255 
256 #ifdef INET6
257 	if (IS_IP6_FLOW_ID(id))
258 		i = hash_packet6(id);
259 	else
260 #endif /* INET6 */
261 	i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
262 	i &= (buckets - 1);
263 	return i;
264 }
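
/*
 * Illustration (not compiled in) of the property required above: because
 * the hash is a plain XOR of address and port fields, swapping source and
 * destination does not change the bucket, so a reply packet hashes to the
 * same bucket as the packet that created the state.  "pkt" stands for any
 * IPv4 flow id.
 */
#if 0
	struct ipfw_flow_id fwd = *pkt, rev = *pkt;

	rev.src_ip = fwd.dst_ip;
	rev.dst_ip = fwd.src_ip;
	rev.src_port = fwd.dst_port;
	rev.dst_port = fwd.src_port;
	KASSERT(hash_packet(&fwd, V_curr_dyn_buckets) ==
	    hash_packet(&rev, V_curr_dyn_buckets),
	    ("dynamic state hash must be direction independent"));
#endif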
265 
266 /**
267  * Print customizable flow id description via log(9) facility.
268  */
269 static void
270 print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags,
271     char *prefix, char *postfix)
272 {
273 	struct in_addr da;
274 #ifdef INET6
275 	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
276 #else
277 	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
278 #endif
279 
280 #ifdef INET6
281 	if (IS_IP6_FLOW_ID(id)) {
282 		ip6_sprintf(src, &id->src_ip6);
283 		ip6_sprintf(dst, &id->dst_ip6);
284 	} else
285 #endif
286 	{
287 		da.s_addr = htonl(id->src_ip);
288 		inet_ntop(AF_INET, &da, src, sizeof(src));
289 		da.s_addr = htonl(id->dst_ip);
290 		inet_ntop(AF_INET, &da, dst, sizeof(dst));
291 	}
292 	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
293 	    prefix, dyn_type, src, id->src_port, dst,
294 	    id->dst_port, DYN_COUNT, postfix);
295 }
296 
297 #define	print_dyn_rule(id, dtype, prefix, postfix)	\
298 	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)
299 
300 #define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
301 
302 /*
303  * Lookup a dynamic rule, locked version.
304  */
305 static ipfw_dyn_rule *
306 lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction,
307     struct tcphdr *tcp)
308 {
309 	/*
310 	 * Stateful ipfw extensions.
311 	 * Lookup into dynamic session queue.
312 	 */
313 #define MATCH_REVERSE	0
314 #define MATCH_FORWARD	1
315 #define MATCH_NONE	2
316 #define MATCH_UNKNOWN	3
317 	int dir = MATCH_NONE;
318 	ipfw_dyn_rule *prev, *q = NULL;
319 
320 	IPFW_BUCK_ASSERT(i);
321 
322 	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
323 		if (q->dyn_type == O_LIMIT_PARENT && q->count)
324 			continue;
325 
326 		if (pkt->proto != q->id.proto || q->dyn_type == O_LIMIT_PARENT)
327 			continue;
328 
329 		if (IS_IP6_FLOW_ID(pkt)) {
330 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
331 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
332 			    pkt->src_port == q->id.src_port &&
333 			    pkt->dst_port == q->id.dst_port) {
334 				dir = MATCH_FORWARD;
335 				break;
336 			}
337 			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
338 			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
339 			    pkt->src_port == q->id.dst_port &&
340 			    pkt->dst_port == q->id.src_port) {
341 				dir = MATCH_REVERSE;
342 				break;
343 			}
344 		} else {
345 			if (pkt->src_ip == q->id.src_ip &&
346 			    pkt->dst_ip == q->id.dst_ip &&
347 			    pkt->src_port == q->id.src_port &&
348 			    pkt->dst_port == q->id.dst_port) {
349 				dir = MATCH_FORWARD;
350 				break;
351 			}
352 			if (pkt->src_ip == q->id.dst_ip &&
353 			    pkt->dst_ip == q->id.src_ip &&
354 			    pkt->src_port == q->id.dst_port &&
355 			    pkt->dst_port == q->id.src_port) {
356 				dir = MATCH_REVERSE;
357 				break;
358 			}
359 		}
360 	}
361 	if (q == NULL)
362 		goto done;	/* q = NULL, not found */
363 
364 	if (prev != NULL) {	/* found and not in front */
365 		prev->next = q->next;
366 		q->next = V_ipfw_dyn_v[i].head;
367 		V_ipfw_dyn_v[i].head = q;
368 	}
369 	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
370 		uint32_t ack;
371 		u_char flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);
372 
373 #define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
374 #define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
375 #define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
376 #define	ACK_FWD		0x10000			/* fwd ack seen */
377 #define	ACK_REV		0x20000			/* rev ack seen */
378 
379 		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
380 		switch (q->state & TCP_FLAGS) {
381 		case TH_SYN:			/* opening */
382 			q->expire = time_uptime + V_dyn_syn_lifetime;
383 			break;
384 
385 		case BOTH_SYN:			/* move to established */
386 		case BOTH_SYN | TH_FIN:		/* one side tries to close */
387 		case BOTH_SYN | (TH_FIN << 8):
388 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
389 			if (tcp == NULL)
390 				break;
391 
392 			ack = ntohl(tcp->th_ack);
393 			if (dir == MATCH_FORWARD) {
394 				if (q->ack_fwd == 0 ||
395 				    _SEQ_GE(ack, q->ack_fwd)) {
396 					q->ack_fwd = ack;
397 					q->state |= ACK_FWD;
398 				}
399 			} else {
400 				if (q->ack_rev == 0 ||
401 				    _SEQ_GE(ack, q->ack_rev)) {
402 					q->ack_rev = ack;
403 					q->state |= ACK_REV;
404 				}
405 			}
406 			if ((q->state & (ACK_FWD | ACK_REV)) ==
407 			    (ACK_FWD | ACK_REV)) {
408 				q->expire = time_uptime + V_dyn_ack_lifetime;
409 				q->state &= ~(ACK_FWD | ACK_REV);
410 			}
411 			break;
412 
413 		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
414 			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
415 				V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
416 			q->expire = time_uptime + V_dyn_fin_lifetime;
417 			break;
418 
419 		default:
420 #if 0
421 			/*
422 			 * reset or some invalid combination, but can also
423 			 * occur if we use keep-state the wrong way.
424 			 */
425 			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
426 				printf("invalid state: 0x%x\n", q->state);
427 #endif
428 			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
429 				V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
430 			q->expire = time_uptime + V_dyn_rst_lifetime;
431 			break;
432 		}
433 	} else if (pkt->proto == IPPROTO_UDP) {
434 		q->expire = time_uptime + V_dyn_udp_lifetime;
435 	} else {
436 		/* other protocols */
437 		q->expire = time_uptime + V_dyn_short_lifetime;
438 	}
439 done:
440 	if (match_direction != NULL)
441 		*match_direction = dir;
442 	return (q);
443 }
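
/*
 * Sketch (not compiled in) of how the low 16 bits of q->state are used
 * above: TCP flags seen in the forward direction are OR-ed into bits 0-7
 * and flags seen in the reverse direction into bits 8-15, so BOTH_SYN is
 * only reached after a SYN has been observed in each direction.
 */
#if 0
	uint32_t state = 0;

	state |= TH_SYN;		/* forward SYN: state == TH_SYN   */
	state |= (TH_SYN << 8);		/* reverse SYN: state == BOTH_SYN */
	/*
	 * From here on (state & TCP_FLAGS) == BOTH_SYN, so the entry is
	 * treated as established and, once acceptable ACKs have been seen
	 * in both directions, its lifetime becomes dyn_ack_lifetime.
	 */
#endif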
444 
445 ipfw_dyn_rule *
446 ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
447     struct tcphdr *tcp)
448 {
449 	ipfw_dyn_rule *q;
450 	int i;
451 
452 	i = hash_packet(pkt, V_curr_dyn_buckets);
453 
454 	IPFW_BUCK_LOCK(i);
455 	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp);
456 	if (q == NULL)
457 		IPFW_BUCK_UNLOCK(i);
458 	/* NB: return table locked when q is not NULL */
459 	return q;
460 }
461 
462 /*
463  * Unlock bucket mtx
464  * @q - pointer to dynamic rule
465  */
466 void
467 ipfw_dyn_unlock(ipfw_dyn_rule *q)
468 {
469 
470 	IPFW_BUCK_UNLOCK(q->bucket);
471 }
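
/*
 * Usage sketch (not compiled in, and only an assumption about the caller):
 * this is roughly how the check-state path in the packet filter is expected
 * to use the two functions above.  The bucket is returned locked when a
 * state is found, so ipfw_dyn_unlock() must be called once the entry has
 * been consumed.
 */
#if 0
	int dir;
	ipfw_dyn_rule *q;

	q = ipfw_lookup_dyn_rule(&args->f_id, &dir, NULL);
	if (q != NULL) {
		/* ... act on q->rule, q->state and dir ... */
		ipfw_dyn_unlock(q);
	}
#endif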
472 
473 static int
474 resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
475 {
476 	int i, k, nbuckets_old;
477 	ipfw_dyn_rule *q;
478 	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;
479 
480 	/* Check if given number is a power of 2 and not greater than 64k */
481 	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
482 		return 1;
483 
484 	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
485 	    V_curr_dyn_buckets, nbuckets);
486 
487 	/* Allocate and initialize new hash */
488 	dyn_v = malloc(nbuckets * sizeof(struct ipfw_dyn_bucket), M_IPFW,
489 	    M_WAITOK | M_ZERO);
490 
491 	for (i = 0 ; i < nbuckets; i++)
492 		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);
493 
494 	/*
495 	 * Take the upper-half lock, as get_map() does, to ease
496 	 * read-only access to the dynamic rules hash from sysctl
497 	 */
498 	IPFW_UH_WLOCK(chain);
499 
500 	/*
501 	 * Acquire chain write lock to permit hash access
502 	 * for main traffic path without additional locks
503 	 */
504 	IPFW_WLOCK(chain);
505 
506 	/* Save old values */
507 	nbuckets_old = V_curr_dyn_buckets;
508 	dyn_v_old = V_ipfw_dyn_v;
509 
510 	/* Skip relinking if array is not set up */
511 	if (V_ipfw_dyn_v == NULL)
512 		V_curr_dyn_buckets = 0;
513 
514 	/* Re-link all dynamic states */
515 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
516 		while (V_ipfw_dyn_v[i].head != NULL) {
517 			/* Remove from current chain */
518 			q = V_ipfw_dyn_v[i].head;
519 			V_ipfw_dyn_v[i].head = q->next;
520 
521 			/* Get new hash value */
522 			k = hash_packet(&q->id, nbuckets);
523 			q->bucket = k;
524 			/* Add to the new head */
525 			q->next = dyn_v[k].head;
526 			dyn_v[k].head = q;
527 		}
528 	}
529 
530 	/* Update current pointers/buckets values */
531 	V_curr_dyn_buckets = nbuckets;
532 	V_ipfw_dyn_v = dyn_v;
533 
534 	IPFW_WUNLOCK(chain);
535 
536 	IPFW_UH_WUNLOCK(chain);
537 
538 	/* Start periodic callout on initial creation */
539 	if (dyn_v_old == NULL) {
540 		callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
541 		return (0);
542 	}
543 
544 	/* Destroy all mutexes */
545 	for (i = 0 ; i < nbuckets_old ; i++)
546 		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);
547 
548 	/* Free old hash */
549 	free(dyn_v_old, M_IPFW);
550 
551 	return 0;
552 }
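
/*
 * Example (not compiled in) of the argument constraint enforced above: the
 * requested size must be a power of 2 not larger than 64k, otherwise the
 * resize is rejected and the old hash is kept.
 */
#if 0
	(void)resize_dynamic_table(chain, 1024);	/* accepted */
	(void)resize_dynamic_table(chain, 1000);	/* rejected, returns 1 */
#endif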
553 
554 /**
555  * Install state of type 'type' for a dynamic session.
556  * The hash table contains three types of rules:
557  * - regular rules (O_KEEP_STATE)
558  * - rules for sessions with a limited number of sessions per user
559  *   (O_LIMIT). When one is created, the parent's count is
560  *   incremented, and it is decremented on delete. In this case,
561  *   the third parameter is the parent rule and not the chain.
562  * - "parent" rules for the above (O_LIMIT_PARENT).
563  */
564 static ipfw_dyn_rule *
565 add_dyn_rule(struct ipfw_flow_id *id, int i, u_int8_t dyn_type, struct ip_fw *rule)
566 {
567 	ipfw_dyn_rule *r;
568 
569 	IPFW_BUCK_ASSERT(i);
570 
571 	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
572 	if (r == NULL) {
573 		if (last_log != time_uptime) {
574 			last_log = time_uptime;
575 			log(LOG_DEBUG, "ipfw: %s: Cannot allocate rule\n",
576 			    __func__);
577 		}
578 		return NULL;
579 	}
580 
581 	/*
582 	 * refcount on parent is already incremented, so
583 	 * it is safe to use parent unlocked.
584 	 */
585 	if (dyn_type == O_LIMIT) {
586 		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
587 		if ( parent->dyn_type != O_LIMIT_PARENT)
588 			panic("invalid parent");
589 		r->parent = parent;
590 		rule = parent->rule;
591 	}
592 
593 	r->id = *id;
594 	r->expire = time_uptime + V_dyn_syn_lifetime;
595 	r->rule = rule;
596 	r->dyn_type = dyn_type;
597 	IPFW_ZERO_DYN_COUNTER(r);
598 	r->count = 0;
599 
600 	r->bucket = i;
601 	r->next = V_ipfw_dyn_v[i].head;
602 	V_ipfw_dyn_v[i].head = r;
603 	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
604 	return r;
605 }
606 
607 /**
608  * lookup dynamic parent rule using pkt and rule as search keys.
609  * If the lookup fails, then install one.
610  */
611 static ipfw_dyn_rule *
612 lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule)
613 {
614 	ipfw_dyn_rule *q;
615 	int i, is_v6;
616 
617 	is_v6 = IS_IP6_FLOW_ID(pkt);
618 	i = hash_packet( pkt, V_curr_dyn_buckets );
619 	*pindex = i;
620 	IPFW_BUCK_LOCK(i);
621 	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
622 		if (q->dyn_type == O_LIMIT_PARENT &&
623 		    rule== q->rule &&
624 		    pkt->proto == q->id.proto &&
625 		    pkt->src_port == q->id.src_port &&
626 		    pkt->dst_port == q->id.dst_port &&
627 		    (
628 			(is_v6 &&
629 			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
630 				&(q->id.src_ip6)) &&
631 			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
632 				&(q->id.dst_ip6))) ||
633 			(!is_v6 &&
634 			 pkt->src_ip == q->id.src_ip &&
635 			 pkt->dst_ip == q->id.dst_ip)
636 		    )
637 		) {
638 			q->expire = time_uptime + V_dyn_short_lifetime;
639 			DEB(print_dyn_rule(pkt, q->dyn_type,
640 			    "lookup_dyn_parent found", "");)
641 			return q;
642 		}
643 
644 	/* Add virtual limiting rule */
645 	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule);
646 }
647 
648 /**
649  * Install dynamic state for rule type cmd->o.opcode
650  *
651  * Returns 1 (failure) if state is not installed because of errors or because
652  * session limitations are enforced.
653  */
654 int
655 ipfw_install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
656     struct ip_fw_args *args, uint32_t tablearg)
657 {
658 	ipfw_dyn_rule *q;
659 	int i;
660 
661 	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", "");)
662 
663 	i = hash_packet(&args->f_id, V_curr_dyn_buckets);
664 
665 	IPFW_BUCK_LOCK(i);
666 
667 	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
668 
669 	if (q != NULL) {	/* should never occur */
670 		DEB(
671 		if (last_log != time_uptime) {
672 			last_log = time_uptime;
673 			printf("ipfw: %s: entry already present, done\n",
674 			    __func__);
675 		})
676 		IPFW_BUCK_UNLOCK(i);
677 		return (0);
678 	}
679 
680 	/*
681 	 * State limiting is done via uma(9) zone limiting.
682 	 * Save pointer to newly-installed rule and reject
683 	 * packet if add_dyn_rule() returned NULL.
684 	 * Note q is currently set to NULL.
685 	 */
686 
687 	switch (cmd->o.opcode) {
688 	case O_KEEP_STATE:	/* bidir rule */
689 		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule);
690 		break;
691 
692 	case O_LIMIT: {		/* limit number of sessions */
693 		struct ipfw_flow_id id;
694 		ipfw_dyn_rule *parent;
695 		uint32_t conn_limit;
696 		uint16_t limit_mask = cmd->limit_mask;
697 		int pindex;
698 
699 		conn_limit = (cmd->conn_limit == IP_FW_TABLEARG) ?
700 		    tablearg : cmd->conn_limit;
701 
702 		DEB(
703 		if (cmd->conn_limit == IP_FW_TABLEARG)
704 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
705 			    "(tablearg)\n", __func__, conn_limit);
706 		else
707 			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
708 			    __func__, conn_limit);
709 		)
710 
711 		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
712 		id.proto = args->f_id.proto;
713 		id.addr_type = args->f_id.addr_type;
714 		id.fib = M_GETFIB(args->m);
715 
716 		if (IS_IP6_FLOW_ID (&(args->f_id))) {
717 			if (limit_mask & DYN_SRC_ADDR)
718 				id.src_ip6 = args->f_id.src_ip6;
719 			if (limit_mask & DYN_DST_ADDR)
720 				id.dst_ip6 = args->f_id.dst_ip6;
721 		} else {
722 			if (limit_mask & DYN_SRC_ADDR)
723 				id.src_ip = args->f_id.src_ip;
724 			if (limit_mask & DYN_DST_ADDR)
725 				id.dst_ip = args->f_id.dst_ip;
726 		}
727 		if (limit_mask & DYN_SRC_PORT)
728 			id.src_port = args->f_id.src_port;
729 		if (limit_mask & DYN_DST_PORT)
730 			id.dst_port = args->f_id.dst_port;
731 
732 		/*
733 		 * We have to release lock for previous bucket to
734 		 * avoid possible deadlock
735 		 */
736 		IPFW_BUCK_UNLOCK(i);
737 
738 		if ((parent = lookup_dyn_parent(&id, &pindex, rule)) == NULL) {
739 			printf("ipfw: %s: add parent failed\n", __func__);
740 			IPFW_BUCK_UNLOCK(pindex);
741 			return (1);
742 		}
743 
744 		if (parent->count >= conn_limit) {
745 			if (V_fw_verbose && last_log != time_uptime) {
746 				char sbuf[24];
747 				last_log = time_uptime;
749 				snprintf(sbuf, sizeof(sbuf),
750 				    "%d drop session",
751 				    parent->rule->rulenum);
752 				print_dyn_rule_flags(&args->f_id,
753 				    cmd->o.opcode,
754 				    LOG_SECURITY | LOG_DEBUG,
755 				    sbuf, "too many entries");
756 			}
757 			IPFW_BUCK_UNLOCK(pindex);
758 			return (1);
759 		}
760 		/* Increment counter on parent */
761 		parent->count++;
762 		IPFW_BUCK_UNLOCK(pindex);
763 
764 		IPFW_BUCK_LOCK(i);
765 		q = add_dyn_rule(&args->f_id, i, O_LIMIT, (struct ip_fw *)parent);
766 		if (q == NULL) {
767 			/* Decrement the parent's counter and notify caller */
768 			IPFW_BUCK_UNLOCK(i);
769 			IPFW_BUCK_LOCK(pindex);
770 			parent->count--;
771 			IPFW_BUCK_UNLOCK(pindex);
772 			return (1);
773 		}
774 		break;
775 	}
776 	default:
777 		printf("ipfw: %s: unknown dynamic rule type %u\n",
778 		    __func__, cmd->o.opcode);
779 	}
780 
781 	if (q == NULL) {
782 		IPFW_BUCK_UNLOCK(i);
783 		return (1);	/* Notify caller about failure */
784 	}
785 
786 	/* XXX just set lifetime */
787 	lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);
788 
789 	IPFW_BUCK_UNLOCK(i);
790 	return (0);
791 }
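
/*
 * Sketch (not compiled in) of the parent key built above for an O_LIMIT
 * rule with limit_mask == DYN_SRC_ADDR (e.g. "limit src-addr N"): only the
 * source address is copied into the template, every other field stays
 * zero, so all sessions from one address map to the same O_LIMIT_PARENT
 * entry and share its count against conn_limit.
 */
#if 0
	struct ipfw_flow_id id;
	ipfw_dyn_rule *parent;
	int pindex;

	bzero(&id, sizeof(id));
	id.proto = args->f_id.proto;
	id.addr_type = args->f_id.addr_type;
	id.fib = M_GETFIB(args->m);
	id.src_ip = args->f_id.src_ip;		/* DYN_SRC_ADDR */
	parent = lookup_dyn_parent(&id, &pindex, rule);
#endif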
792 
793 /*
794  * Generate a TCP packet, containing either a RST or a keepalive.
795  * When flags & TH_RST is set, we are sending a RST packet because a
796  * "reset" action matched the packet.
797  * Otherwise we are sending a keepalive, and flags & TH_ACK is set.
798  * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
799  * so that MAC can label the reply appropriately.
800  */
801 struct mbuf *
802 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
803     u_int32_t ack, int flags)
804 {
805 	struct mbuf *m = NULL;		/* stupid compiler */
806 	int len, dir;
807 	struct ip *h = NULL;		/* stupid compiler */
808 #ifdef INET6
809 	struct ip6_hdr *h6 = NULL;
810 #endif
811 	struct tcphdr *th = NULL;
812 
813 	MGETHDR(m, M_NOWAIT, MT_DATA);
814 	if (m == NULL)
815 		return (NULL);
816 
817 	M_SETFIB(m, id->fib);
818 #ifdef MAC
819 	if (replyto != NULL)
820 		mac_netinet_firewall_reply(replyto, m);
821 	else
822 		mac_netinet_firewall_send(m);
823 #else
824 	(void)replyto;		/* don't warn about unused arg */
825 #endif
826 
827 	switch (id->addr_type) {
828 	case 4:
829 		len = sizeof(struct ip) + sizeof(struct tcphdr);
830 		break;
831 #ifdef INET6
832 	case 6:
833 		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
834 		break;
835 #endif
836 	default:
837 		/* XXX: log me?!? */
838 		FREE_PKT(m);
839 		return (NULL);
840 	}
841 	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
842 
843 	m->m_data += max_linkhdr;
844 	m->m_flags |= M_SKIP_FIREWALL;
845 	m->m_pkthdr.len = m->m_len = len;
846 	m->m_pkthdr.rcvif = NULL;
847 	bzero(m->m_data, len);
848 
849 	switch (id->addr_type) {
850 	case 4:
851 		h = mtod(m, struct ip *);
852 
853 		/* prepare for checksum */
854 		h->ip_p = IPPROTO_TCP;
855 		h->ip_len = htons(sizeof(struct tcphdr));
856 		if (dir) {
857 			h->ip_src.s_addr = htonl(id->src_ip);
858 			h->ip_dst.s_addr = htonl(id->dst_ip);
859 		} else {
860 			h->ip_src.s_addr = htonl(id->dst_ip);
861 			h->ip_dst.s_addr = htonl(id->src_ip);
862 		}
863 
864 		th = (struct tcphdr *)(h + 1);
865 		break;
866 #ifdef INET6
867 	case 6:
868 		h6 = mtod(m, struct ip6_hdr *);
869 
870 		/* prepare for checksum */
871 		h6->ip6_nxt = IPPROTO_TCP;
872 		h6->ip6_plen = htons(sizeof(struct tcphdr));
873 		if (dir) {
874 			h6->ip6_src = id->src_ip6;
875 			h6->ip6_dst = id->dst_ip6;
876 		} else {
877 			h6->ip6_src = id->dst_ip6;
878 			h6->ip6_dst = id->src_ip6;
879 		}
880 
881 		th = (struct tcphdr *)(h6 + 1);
882 		break;
883 #endif
884 	}
885 
886 	if (dir) {
887 		th->th_sport = htons(id->src_port);
888 		th->th_dport = htons(id->dst_port);
889 	} else {
890 		th->th_sport = htons(id->dst_port);
891 		th->th_dport = htons(id->src_port);
892 	}
893 	th->th_off = sizeof(struct tcphdr) >> 2;
894 
895 	if (flags & TH_RST) {
896 		if (flags & TH_ACK) {
897 			th->th_seq = htonl(ack);
898 			th->th_flags = TH_RST;
899 		} else {
900 			if (flags & TH_SYN)
901 				seq++;
902 			th->th_ack = htonl(seq);
903 			th->th_flags = TH_RST | TH_ACK;
904 		}
905 	} else {
906 		/*
907 		 * Keepalive - use caller provided sequence numbers
908 		 */
909 		th->th_seq = htonl(seq);
910 		th->th_ack = htonl(ack);
911 		th->th_flags = TH_ACK;
912 	}
913 
914 	switch (id->addr_type) {
915 	case 4:
916 		th->th_sum = in_cksum(m, len);
917 
918 		/* finish the ip header */
919 		h->ip_v = 4;
920 		h->ip_hl = sizeof(*h) >> 2;
921 		h->ip_tos = IPTOS_LOWDELAY;
922 		h->ip_off = htons(0);
923 		h->ip_len = htons(len);
924 		h->ip_ttl = V_ip_defttl;
925 		h->ip_sum = 0;
926 		break;
927 #ifdef INET6
928 	case 6:
929 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
930 		    sizeof(struct tcphdr));
931 
932 		/* finish the ip6 header */
933 		h6->ip6_vfc |= IPV6_VERSION;
934 		h6->ip6_hlim = IPV6_DEFHLIM;
935 		break;
936 #endif
937 	}
938 
939 	return (m);
940 }
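
/*
 * Usage sketch (not compiled in; the exact caller lives in ip_fw2.c and the
 * details here are an assumption): answering a matched TCP packet with a
 * RST typically passes the offending mbuf as 'replyto' so MAC can label
 * the reply, along with the packet's own sequence/ack numbers and its TCP
 * flags OR-ed with TH_RST.
 */
#if 0
	struct mbuf *m;

	m = ipfw_send_pkt(args->m, &args->f_id, ntohl(tcp->th_seq),
	    ntohl(tcp->th_ack), tcp->th_flags | TH_RST);
	if (m != NULL)
		ip_output(m, NULL, NULL, 0, NULL, NULL);
#endif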
941 
942 /*
943  * Queue keepalive packets for given dynamic rule
944  */
945 static struct mbuf **
946 ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
947 {
948 	struct mbuf *m_rev, *m_fwd;
949 
950 	m_rev = (q->state & ACK_REV) ? NULL :
951 	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
952 	m_fwd = (q->state & ACK_FWD) ? NULL :
953 	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
954 
955 	if (m_rev != NULL) {
956 		*mtailp = m_rev;
957 		mtailp = &(*mtailp)->m_nextpkt;
958 	}
959 	if (m_fwd != NULL) {
960 		*mtailp = m_fwd;
961 		mtailp = &(*mtailp)->m_nextpkt;
962 	}
963 
964 	return (mtailp);
965 }
966 
967 /*
968  * This procedure is used to perform various maintenance
969  * on the dynamic hash list. Currently it is called every second.
970  */
971 static void
972 ipfw_dyn_tick(void * vnetx)
973 {
974 	struct ip_fw_chain *chain;
975 	int check_ka = 0;
976 #ifdef VIMAGE
977 	struct vnet *vp = vnetx;
978 #endif
979 
980 	CURVNET_SET(vp);
981 
982 	chain = &V_layer3_chain;
983 
984 	/* Run keepalive checks every keepalive_period iff ka is enabled */
985 	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
986 	    (V_dyn_keepalive != 0)) {
987 		V_dyn_keepalive_last = time_uptime;
988 		check_ka = 1;
989 	}
990 
991 	check_dyn_rules(chain, NULL, RESVD_SET, check_ka, 1);
992 
993 	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);
994 
995 	CURVNET_RESTORE();
996 }
997 
998 
999 /*
1000  * Walk through all dynamic states doing generic maintenance:
1001  * 1) free expired states
1002  * 2) free all states based on deleted rule / set
1003  * 3) send keepalives for states if needed
1004  *
1005  * @chain - pointer to current ipfw rules chain
1006  * @rule - delete all states originated by given rule if != NULL
1007  * @set - delete all states originated by any rule in set @set if != RESVD_SET
1008  * @check_ka - perform checking/sending keepalives
1009  * @timer - indicate call from timer routine.
1010  *
1011  * Timer routine must call this function unlocked to permit
1012  * sending keepalives/resizing table.
1013  *
1014  * Other callers have to call this function with IPFW_UH_WLOCK held.
1015  * Additionally, the function assumes that the dynamic rule/set is
1016  * ALREADY deleted so no new states can be generated by
1017  * 'deleted' rules.
1018  *
1019  * Write lock is needed to ensure that unused parent rules
1020  * are not freed by another instance (see stages 2 and 3)
1021  */
1022 static void
1023 check_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule,
1024     int set, int check_ka, int timer)
1025 {
1026 	struct mbuf *m0, *m, *mnext, **mtailp;
1027 	struct ip *h;
1028 	int i, dyn_count, new_buckets = 0, max_buckets;
1029 	int expired = 0, expired_limits = 0, parents = 0, total = 0;
1030 	ipfw_dyn_rule *q, *q_prev, *q_next;
1031 	ipfw_dyn_rule *exp_head, **exptailp;
1032 	ipfw_dyn_rule *exp_lhead, **expltailp;
1033 
1034 	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
1035 	    __func__));
1036 
1037 	/* Avoid possible LOR */
1038 	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
1039 	    __func__));
1040 
1041 	/*
1042 	 * Do not perform any checks if we currently have no dynamic states
1043 	 */
1044 	if (DYN_COUNT == 0)
1045 		return;
1046 
1047 	/* Expired states */
1048 	exp_head = NULL;
1049 	exptailp = &exp_head;
1050 
1051 	/* Expired limit states */
1052 	exp_lhead = NULL;
1053 	expltailp = &exp_lhead;
1054 
1055 	/*
1056 	 * We make a chain of packets to go out here -- not deferring
1057 	 * until after we drop the IPFW dynamic rule lock would result
1058 	 * in a lock order reversal with the normal packet input -> ipfw
1059 	 * call stack.
1060 	 */
1061 	m0 = NULL;
1062 	mtailp = &m0;
1063 
1064 	/* Protect from hash resizing */
1065 	if (timer != 0)
1066 		IPFW_UH_WLOCK(chain);
1067 	else
1068 		IPFW_UH_WLOCK_ASSERT(chain);
1069 
1070 #define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }
1071 
1072 	/* Stage 1: perform requested deletion */
1073 	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1074 		IPFW_BUCK_LOCK(i);
1075 		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
1076 			/* account every rule */
1077 			total++;
1078 
1079 			/* Skip parent rules entirely */
1080 			if (q->dyn_type == O_LIMIT_PARENT) {
1081 				parents++;
1082 				NEXT_RULE();
1083 			}
1084 
1085 			/*
1086 			 * Remove rules which are:
1087 			 * 1) expired
1088 			 * 2) created by given rule
1089 			 * 3) created by any rule in given set
1090 			 */
1091 			if ((TIME_LEQ(q->expire, time_uptime)) ||
1092 			    ((rule != NULL) && (q->rule == rule)) ||
1093 			    ((set != RESVD_SET) && (q->rule->set == set))) {
1094 				/* Unlink q from current list */
1095 				q_next = q->next;
1096 				if (q == V_ipfw_dyn_v[i].head)
1097 					V_ipfw_dyn_v[i].head = q_next;
1098 				else
1099 					q_prev->next = q_next;
1100 
1101 				q->next = NULL;
1102 
1103 				/* queue q to expire list */
1104 				if (q->dyn_type != O_LIMIT) {
1105 					*exptailp = q;
1106 					exptailp = &(*exptailp)->next;
1107 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1108 					    "unlink entry", "left");
1109 					)
1110 				} else {
1111 					/* Separate list for limit rules */
1112 					*expltailp = q;
1113 					expltailp = &(*expltailp)->next;
1114 					expired_limits++;
1115 					DEB(print_dyn_rule(&q->id, q->dyn_type,
1116 					    "unlink limit entry", "left");
1117 					)
1118 				}
1119 
1120 				q = q_next;
1121 				expired++;
1122 				continue;
1123 			}
1124 
1125 			/*
1126 			 * Check if we need to send keepalive:
1127 			 * we need to ensure it is time to do a KA,
1128 			 * that this is an established TCP session, and that
1129 			 * the expire time is within the keepalive interval
1130 			 */
1131 			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
1132 			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
1133 			    (TIME_LEQ(q->expire, time_uptime +
1134 			      V_dyn_keepalive_interval)))
1135 				mtailp = ipfw_dyn_send_ka(mtailp, q);
1136 
1137 			NEXT_RULE();
1138 		}
1139 		IPFW_BUCK_UNLOCK(i);
1140 	}
1141 
1142 	/* Stage 2: decrement counters from O_LIMIT parents */
1143 	if (expired_limits != 0) {
1144 		/*
1145 		 * XXX: Note that deleting set with more than one
1146 		 * heavily-used LIMIT rules can result in overwhelming
1147 		 * locking due to lack of per-hash value sorting
1148 		 *
1149 		 * We should probably think about:
1150 		 * 1) pre-allocating hash of size, say,
1151 		 * MAX(16, V_curr_dyn_buckets / 1024)
1152 		 * 2) checking if expired_limits is large enough
1153 		 * 3) If yes, init hash (or its part), re-link
1154 		 * current list and start decrementing procedure in
1155 		 * each bucket separately
1156 		 */
1157 
1158 		/*
1159 		 * Small optimization: do not unlock bucket until
1160 		 * we see that the next item resides in a different bucket
1161 		 */
1162 		if (exp_lhead != NULL) {
1163 			i = exp_lhead->parent->bucket;
1164 			IPFW_BUCK_LOCK(i);
1165 		}
1166 		for (q = exp_lhead; q != NULL; q = q->next) {
1167 			if (i != q->parent->bucket) {
1168 				IPFW_BUCK_UNLOCK(i);
1169 				i = q->parent->bucket;
1170 				IPFW_BUCK_LOCK(i);
1171 			}
1172 
1173 			/* Decrease parent refcount */
1174 			q->parent->count--;
1175 		}
1176 		if (exp_lhead != NULL)
1177 			IPFW_BUCK_UNLOCK(i);
1178 	}
1179 
1180 	/*
1181 	 * We protect ourselves from unused parent deletion
1182 	 * (from the timer function) by holding the UH write lock.
1183 	 */
1184 
1185 	/* Stage 3: remove unused parent rules */
1186 	if ((parents != 0) && (expired != 0)) {
1187 		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
1188 			IPFW_BUCK_LOCK(i);
1189 			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
1190 				if (q->dyn_type != O_LIMIT_PARENT)
1191 					NEXT_RULE();
1192 
1193 				if (q->count != 0)
1194 					NEXT_RULE();
1195 
1196 				/* Parent rule without consumers */
1197 
1198 				/* Unlink q from current list */
1199 				q_next = q->next;
1200 				if (q == V_ipfw_dyn_v[i].head)
1201 					V_ipfw_dyn_v[i].head = q_next;
1202 				else
1203 					q_prev->next = q_next;
1204 
1205 				q->next = NULL;
1206 
1207 				/* Add to expired list */
1208 				*exptailp = q;
1209 				exptailp = &(*exptailp)->next;
1210 
1211 				DEB(print_dyn_rule(&q->id, q->dyn_type,
1212 				    "unlink parent entry", "left");
1213 				)
1214 
1215 				expired++;
1216 
1217 				q = q_next;
1218 			}
1219 			IPFW_BUCK_UNLOCK(i);
1220 		}
1221 	}
1222 
1223 #undef NEXT_RULE
1224 
1225 	if (timer != 0) {
1226 		/*
1227 		 * Check if we need to resize hash:
1228 		 * if the current number of states exceeds the number of buckets
1229 		 * in the hash, grow the hash to the minimum power of 2 which is
1230 		 * bigger than the current state count. Limit the hash size to 64k.
1231 		 */
1232 		max_buckets = (V_dyn_buckets_max > 65536) ?
1233 		    65536 : V_dyn_buckets_max;
1234 
1235 		dyn_count = DYN_COUNT;
1236 
1237 		if ((dyn_count > V_curr_dyn_buckets * 2) &&
1238 		    (dyn_count < max_buckets)) {
1239 			new_buckets = V_curr_dyn_buckets;
1240 			while (new_buckets < dyn_count) {
1241 				new_buckets *= 2;
1242 
1243 				if (new_buckets >= max_buckets)
1244 					break;
1245 			}
1246 		}
1247 
1248 		IPFW_UH_WUNLOCK(chain);
1249 	}
1250 
1251 	/* Finally delete old states and limits, if any */
1252 	for (q = exp_head; q != NULL; q = q_next) {
1253 		q_next = q->next;
1254 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1255 	}
1256 
1257 	for (q = exp_lhead; q != NULL; q = q_next) {
1258 		q_next = q->next;
1259 		uma_zfree(V_ipfw_dyn_rule_zone, q);
1260 	}
1261 
1262 	/*
1263 	 * The rest of the code MUST be run from the timer routine only,
1264 	 * without holding any locks
1265 	 */
1266 	if (timer == 0)
1267 		return;
1268 
1269 	/* Send keepalive packets if any */
1270 	for (m = m0; m != NULL; m = mnext) {
1271 		mnext = m->m_nextpkt;
1272 		m->m_nextpkt = NULL;
1273 		h = mtod(m, struct ip *);
1274 		if (h->ip_v == 4)
1275 			ip_output(m, NULL, NULL, 0, NULL, NULL);
1276 #ifdef INET6
1277 		else
1278 			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1279 #endif
1280 	}
1281 
1282 	/* Run table resize without holding any locks */
1283 	if (new_buckets != 0)
1284 		resize_dynamic_table(chain, new_buckets);
1285 }
1286 
1287 /*
1288  * Deletes all dynamic rules originated by given rule or all rules in
1289  * given set. Specify RESVD_SET to indicate set should not be used.
1290  * @chain - pointer to current ipfw rules chain
1291  * @rule - delete all states originated by given rule if != NULL
1292  * @set - delete all states originated by any rule in set @set if != RESVD_SET
1293  *
1294  * The function has to be called with IPFW_UH_WLOCK held.
1295  * Additionally, the function assumes that the dynamic rule/set is
1296  * ALREADY deleted so no new states can be generated by
1297  * 'deleted' rules.
1298  */
1299 void
1300 ipfw_expire_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule, int set)
1301 {
1302 
1303 	check_dyn_rules(chain, rule, set, 0, 0);
1304 }
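
/*
 * Usage sketch (not compiled in): the expected calling pattern when a rule
 * is deleted is to unlink the rule first and then flush its states, all
 * under the UH write lock, so that no new states can appear for the dead
 * rule.
 */
#if 0
	IPFW_UH_WLOCK(chain);
	/* ... unlink 'rule' from the chain ... */
	ipfw_expire_dyn_rules(chain, rule, RESVD_SET);
	IPFW_UH_WUNLOCK(chain);
#endif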
1305 
1306 void
1307 ipfw_dyn_init(struct ip_fw_chain *chain)
1308 {
1309 
1310         V_ipfw_dyn_v = NULL;
1311         V_dyn_buckets_max = 256; /* must be power of 2 */
1312         V_curr_dyn_buckets = 256; /* must be power of 2 */
1313 
1314         V_dyn_ack_lifetime = 300;
1315         V_dyn_syn_lifetime = 20;
1316         V_dyn_fin_lifetime = 1;
1317         V_dyn_rst_lifetime = 1;
1318         V_dyn_udp_lifetime = 10;
1319         V_dyn_short_lifetime = 5;
1320 
1321         V_dyn_keepalive_interval = 20;
1322         V_dyn_keepalive_period = 5;
1323         V_dyn_keepalive = 1;    /* do send keepalives */
1324 	V_dyn_keepalive_last = time_uptime;
1325 
1326         V_dyn_max = 4096;       /* max # of dynamic rules */
1327 
1328 	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
1329 	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
1330 	    UMA_ALIGN_PTR, 0);
1331 
1332 	/* Enforce limit on dynamic rules */
1333 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1334 
1335         callout_init(&V_ipfw_timeout, CALLOUT_MPSAFE);
1336 
1337 	/*
1338 	 * This can potentially be done on first dynamic rule
1339 	 * being added to chain.
1340 	 */
1341 	resize_dynamic_table(chain, V_curr_dyn_buckets);
1342 }
1343 
1344 void
1345 ipfw_dyn_uninit(int pass)
1346 {
1347 	int i;
1348 
1349 	if (pass == 0) {
1350 		callout_drain(&V_ipfw_timeout);
1351 		return;
1352 	}
1353 
1354 	if (V_ipfw_dyn_v != NULL) {
1355 		/*
1356 		 * Skip deleting all dynamic states -
1357 		 * uma_zdestroy() does this more efficiently;
1358 		 */
1359 
1360 		/* Destroy all mutexes */
1361 		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
1362 			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
1363 		free(V_ipfw_dyn_v, M_IPFW);
1364 		V_ipfw_dyn_v = NULL;
1365 	}
1366 
1367         uma_zdestroy(V_ipfw_dyn_rule_zone);
1368 }
1369 
1370 #ifdef SYSCTL_NODE
1371 /*
1372  * Get/set maximum number of dynamic states in given VNET instance.
1373  */
1374 static int
1375 sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
1376 {
1377 	int error;
1378 	unsigned int nstates;
1379 
1380 	nstates = V_dyn_max;
1381 
1382 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1383 	/* Read operation or some error */
1384 	if ((error != 0) || (req->newptr == NULL))
1385 		return (error);
1386 
1387 	V_dyn_max = nstates;
1388 	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);
1389 
1390 	return (0);
1391 }
1392 
1393 /*
1394  * Get current number of dynamic states in given VNET instance.
1395  */
1396 static int
1397 sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
1398 {
1399 	int error;
1400 	unsigned int nstates;
1401 
1402 	nstates = DYN_COUNT;
1403 
1404 	error = sysctl_handle_int(oidp, &nstates, 0, req);
1405 
1406 	return (error);
1407 }
1408 #endif
1409 
1410 /*
1411  * Returns the number of bytes needed to dump all dynamic rules.
1412  */
1413 int
1414 ipfw_dyn_len(void)
1415 {
1416 
1417 	return (V_ipfw_dyn_v == NULL) ? 0 :
1418 		(DYN_COUNT * sizeof(ipfw_dyn_rule));
1419 }
1420 
1421 /*
1422  * Fill given buffer with dynamic states.
1423  * IPFW_UH_RLOCK has to be held while calling.
1424  */
1425 void
1426 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
1427 {
1428 	ipfw_dyn_rule *p, *last = NULL;
1429 	char *bp;
1430 	int i;
1431 
1432 	if (V_ipfw_dyn_v == NULL)
1433 		return;
1434 	bp = *pbp;
1435 
1436 	IPFW_UH_RLOCK_ASSERT(chain);
1437 
1438 	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
1439 		IPFW_BUCK_LOCK(i);
1440 		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
1441 			if (bp + sizeof *p <= ep) {
1442 				ipfw_dyn_rule *dst =
1443 					(ipfw_dyn_rule *)bp;
1444 				bcopy(p, dst, sizeof *p);
1445 				bcopy(&(p->rule->rulenum), &(dst->rule),
1446 				    sizeof(p->rule->rulenum));
1447 				/*
1448 				 * store set number into high word of
1449 				 * dst->rule pointer.
1450 				 */
1451 				bcopy(&(p->rule->set),
1452 				    (char *)&dst->rule +
1453 				    sizeof(p->rule->rulenum),
1454 				    sizeof(p->rule->set));
1455 				/*
1456 				 * store a non-null value in "next".
1457 				 * The userland code will interpret a
1458 				 * NULL here as a marker
1459 				 * for the last dynamic rule.
1460 				 */
1461 				bcopy(&dst, &dst->next, sizeof(dst));
1462 				last = dst;
1463 				dst->expire =
1464 				    TIME_LEQ(dst->expire, time_uptime) ?
1465 					0 : dst->expire - time_uptime ;
1466 				bp += sizeof(ipfw_dyn_rule);
1467 			}
1468 		}
1469 		IPFW_BUCK_UNLOCK(i);
1470 	}
1471 
1472 	if (last != NULL) /* mark last dynamic rule */
1473 		bzero(&last->next, sizeof(last));
1474 	*pbp = bp;
1475 }
1476 /* end of file */
1477