xref: /freebsd/sys/netpfil/ipfilter/netinet/ip_sync.c (revision a34c50fbd2a52bb63acde82e5aec4cb57880e39b)
1 
2 /*
3  * Copyright (C) 2012 by Darren Reed.
4  *
5  * See the IPFILTER.LICENCE file for details on licencing.
6  */
7 #if defined(KERNEL) || defined(_KERNEL)
8 # undef KERNEL
9 # undef _KERNEL
10 # define        KERNEL	1
11 # define        _KERNEL	1
12 #endif
13 #include <sys/errno.h>
14 #include <sys/types.h>
15 #include <sys/param.h>
16 #include <sys/file.h>
17 #if !defined(_KERNEL) && !defined(__KERNEL__)
18 # include <stdio.h>
19 # include <stdlib.h>
20 # include <string.h>
21 # define _KERNEL
22 # define KERNEL
23 # include <sys/uio.h>
24 # undef _KERNEL
25 # undef KERNEL
26 #else
27 # include <sys/systm.h>
28 # if !defined(__SVR4)
29 #  include <sys/mbuf.h>
30 # endif
31 # include <sys/select.h>
32 # ifdef __FreeBSD__
33 #  include <sys/selinfo.h>
34 # endif
35 #endif
36 #if defined(__NetBSD__) && (__NetBSD_Version__ >= 104000000)
37 # include <sys/proc.h>
38 #endif
39 #if defined(_KERNEL) && defined(__FreeBSD__)
40 # include <sys/filio.h>
41 # include <sys/fcntl.h>
42 #else
43 # include <sys/ioctl.h>
44 #endif
45 #include <sys/time.h>
46 # include <sys/protosw.h>
47 #include <sys/socket.h>
48 #if defined(__SVR4)
49 # include <sys/filio.h>
50 # include <sys/byteorder.h>
51 # ifdef _KERNEL
52 #  include <sys/dditypes.h>
53 # endif
54 # include <sys/stream.h>
55 # include <sys/kmem.h>
56 #endif
57 
58 #include <net/if.h>
59 #ifdef sun
60 # include <net/af.h>
61 #endif
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 # include <netinet/ip_var.h>
67 # include <netinet/tcp_fsm.h>
68 #include <netinet/udp.h>
69 #include <netinet/ip_icmp.h>
70 #include "netinet/ip_compat.h"
71 #include <netinet/tcpip.h>
72 #include "netinet/ip_fil.h"
73 #include "netinet/ip_nat.h"
74 #include "netinet/ip_frag.h"
75 #include "netinet/ip_state.h"
76 #include "netinet/ip_proxy.h"
77 #include "netinet/ip_sync.h"
78 #ifdef  USE_INET6
79 #include <netinet/icmp6.h>
80 #endif
81 #if defined(__FreeBSD__)
82 # include <sys/malloc.h>
83 # if defined(_KERNEL) && !defined(IPFILTER_LKM)
84 #  include <sys/libkern.h>
85 #  include <sys/systm.h>
86 # endif
87 #endif
88 /* END OF INCLUDES */
89 
90 
#define	SYNC_STATETABSZ	256	/* default state sync hash table size */
#define	SYNC_NATTABSZ	256	/* default NAT sync hash table size */

/*
 * Per-instance ("soft") context for the state/NAT synchronisation code.
 * Allocated by ipf_sync_soft_create(); tables, rings and locks are set up
 * by ipf_sync_soft_init() and torn down by ipf_sync_soft_fini().
 */
typedef struct ipf_sync_softc_s {
	ipfmutex_t	ipf_syncadd;	/* serialises additions to the sync tables */
	ipfmutex_t	ipsl_mutex;	/* protects the log/update read rings */
	ipfrwlock_t	ipf_syncstate;	/* guards syncstatetab */
	ipfrwlock_t	ipf_syncnat;	/* guards syncnattab */
#if SOLARIS && defined(_KERNEL)
	kcondvar_t	ipslwait;	/* readers block here until data arrives */
#endif
	synclist_t	**syncstatetab;	/* hash table of state sync entries */
	synclist_t	**syncnattab;	/* hash table of NAT sync entries */
	synclogent_t	*synclog;	/* ring of create/delete log entries */
	syncupdent_t	*syncupd;	/* ring of pending update entries */
	u_int		ipf_sync_num;	/* next sync sequence number to assign */
	u_int		ipf_sync_wrap;	/* incremented when ipf_sync_num wraps */
	u_int		sl_idx;		/* next available sync log entry */
	u_int		su_idx;		/* next available sync update entry */
	u_int		sl_tail;	/* next sync log entry to read */
	u_int		su_tail;	/* next sync update entry to read */
	int		ipf_sync_log_sz;	/* entries in synclog/syncupd rings */
	int		ipf_sync_nat_tab_sz;	/* buckets in syncnattab */
	int		ipf_sync_state_tab_sz;	/* buckets in syncstatetab */
	int		ipf_sync_debug;		/* debug verbosity level */
	int		ipf_sync_events;	/* events since last wakeup */
	u_32_t		ipf_sync_lastwakeup;	/* ticks at last reader wakeup */
	int		ipf_sync_wake_interval;	/* min ticks between wakeups */
	int		ipf_sync_event_high_wm;	/* event count wakeup threshold */
	int		ipf_sync_queue_high_wm;	/* queue depth wakeup threshold */
	int		ipf_sync_inited;	/* 1 once locks have been created */
} ipf_sync_softc_t;
123 
/* Internal helpers; definitions appear later in this file. */
static int ipf_sync_flush_table(ipf_sync_softc_t *, int, synclist_t **);
static void ipf_sync_wakeup(ipf_main_softc_t *);
static void ipf_sync_del(ipf_sync_softc_t *, synclist_t *);
static void ipf_sync_poll_wakeup(ipf_main_softc_t *);
static int ipf_sync_nat(ipf_main_softc_t *, synchdr_t *, void *);
static int ipf_sync_state(ipf_main_softc_t *, synchdr_t *, void *);

/*
 * Byte-order helpers; on big-endian platforms these are #define'd to
 * nothing further below.  NOTE(review): this guard also tests __hppa but
 * the definitions below test only !defined(sparc) — confirm intentional.
 */
# if !defined(sparc) && !defined(__hppa)
void ipf_sync_tcporder(int, struct tcpdata *);
void ipf_sync_natorder(int, struct nat *);
void ipf_sync_storder(int, struct ipstate *);
# endif
136 
137 
138 void *
ipf_sync_soft_create(ipf_main_softc_t * softc)139 ipf_sync_soft_create(ipf_main_softc_t *softc)
140 {
141 	ipf_sync_softc_t *softs;
142 
143 	KMALLOC(softs, ipf_sync_softc_t *);
144 	if (softs == NULL) {
145 		IPFERROR(110024);
146 		return (NULL);
147 	}
148 
149 	bzero((char *)softs, sizeof(*softs));
150 
151 	softs->ipf_sync_log_sz = SYNCLOG_SZ;
152 	softs->ipf_sync_nat_tab_sz = SYNC_STATETABSZ;
153 	softs->ipf_sync_state_tab_sz = SYNC_STATETABSZ;
154 	softs->ipf_sync_event_high_wm = SYNCLOG_SZ * 100 / 90;	/* 90% */
155 	softs->ipf_sync_queue_high_wm = SYNCLOG_SZ * 100 / 90;	/* 90% */
156 
157 	return (softs);
158 }
159 
160 
161 /* ------------------------------------------------------------------------ */
162 /* Function:    ipf_sync_init                                               */
163 /* Returns:     int - 0 == success, -1 == failure                           */
164 /* Parameters:  Nil                                                         */
165 /*                                                                          */
166 /* Initialise all of the locks required for the sync code and initialise    */
167 /* any data structures, as required.                                        */
168 /* ------------------------------------------------------------------------ */
169 int
ipf_sync_soft_init(ipf_main_softc_t * softc,void * arg)170 ipf_sync_soft_init(ipf_main_softc_t *softc, void *arg)
171 {
172 	ipf_sync_softc_t *softs = arg;
173 
174 	KMALLOCS(softs->synclog, synclogent_t *,
175 		 softs->ipf_sync_log_sz * sizeof(*softs->synclog));
176 	if (softs->synclog == NULL)
177 		return (-1);
178 	bzero((char *)softs->synclog,
179 	      softs->ipf_sync_log_sz * sizeof(*softs->synclog));
180 
181 	KMALLOCS(softs->syncupd, syncupdent_t *,
182 		 softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
183 	if (softs->syncupd == NULL)
184 		return (-2);
185 	bzero((char *)softs->syncupd,
186 	      softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
187 
188 	KMALLOCS(softs->syncstatetab, synclist_t **,
189 		 softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab));
190 	if (softs->syncstatetab == NULL)
191 		return (-3);
192 	bzero((char *)softs->syncstatetab,
193 	      softs->ipf_sync_state_tab_sz * sizeof(*softs->syncstatetab));
194 
195 	KMALLOCS(softs->syncnattab, synclist_t **,
196 		 softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
197 	if (softs->syncnattab == NULL)
198 		return (-3);
199 	bzero((char *)softs->syncnattab,
200 	      softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
201 
202 	softs->ipf_sync_num = 1;
203 	softs->ipf_sync_wrap = 0;
204 	softs->sl_idx = 0;
205 	softs->su_idx = 0;
206 	softs->sl_tail = 0;
207 	softs->su_tail = 0;
208 	softs->ipf_sync_events = 0;
209 	softs->ipf_sync_lastwakeup = 0;
210 
211 
212 # if SOLARIS && defined(_KERNEL)
213 	cv_init(&softs->ipslwait, "ipsl condvar", CV_DRIVER, NULL);
214 # endif
215 	RWLOCK_INIT(&softs->ipf_syncstate, "add things to state sync table");
216 	RWLOCK_INIT(&softs->ipf_syncnat, "add things to nat sync table");
217 	MUTEX_INIT(&softs->ipf_syncadd, "add things to sync table");
218 	MUTEX_INIT(&softs->ipsl_mutex, "read ring lock");
219 
220 	softs->ipf_sync_inited = 1;
221 
222 	return (0);
223 }
224 
225 
/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_soft_fini                                          */
/* Returns:     int - always 0                                              */
/* Parameters:  softc(I) - pointer to main ipf soft context                 */
/*              arg(I)   - pointer to sync context structure                */
/*                                                                          */
/* Destroy the locks created when initialising and free any memory in use   */
/* with the synchronisation tables.  Safe after a partial soft_init: every  */
/* pointer is NULL-checked and the locks are destroyed only if              */
/* ipf_sync_inited was set.                                                 */
/* ------------------------------------------------------------------------ */
int
ipf_sync_soft_fini(ipf_main_softc_t *softc, void *arg)
{
	ipf_sync_softc_t *softs = arg;

	/* Flush and free the NAT sync hash table. */
	if (softs->syncnattab != NULL) {
		ipf_sync_flush_table(softs, softs->ipf_sync_nat_tab_sz,
				     softs->syncnattab);
		KFREES(softs->syncnattab,
		       softs->ipf_sync_nat_tab_sz * sizeof(*softs->syncnattab));
		softs->syncnattab = NULL;
	}

	/* Flush and free the state sync hash table. */
	if (softs->syncstatetab != NULL) {
		ipf_sync_flush_table(softs, softs->ipf_sync_state_tab_sz,
				     softs->syncstatetab);
		KFREES(softs->syncstatetab,
		       softs->ipf_sync_state_tab_sz *
		       sizeof(*softs->syncstatetab));
		softs->syncstatetab = NULL;
	}

	/* Free the update ring. */
	if (softs->syncupd != NULL) {
		KFREES(softs->syncupd,
		       softs->ipf_sync_log_sz * sizeof(*softs->syncupd));
		softs->syncupd = NULL;
	}

	/* Free the log ring. */
	if (softs->synclog != NULL) {
		KFREES(softs->synclog,
		       softs->ipf_sync_log_sz * sizeof(*softs->synclog));
		softs->synclog = NULL;
	}

	/* Locks exist only if soft_init ran to completion. */
	if (softs->ipf_sync_inited == 1) {
		MUTEX_DESTROY(&softs->ipsl_mutex);
		MUTEX_DESTROY(&softs->ipf_syncadd);
		RW_DESTROY(&softs->ipf_syncnat);
		RW_DESTROY(&softs->ipf_syncstate);
		softs->ipf_sync_inited = 0;
	}

	return (0);
}
278 
279 void
ipf_sync_soft_destroy(ipf_main_softc_t * softc,void * arg)280 ipf_sync_soft_destroy(ipf_main_softc_t *softc, void *arg)
281 {
282 	ipf_sync_softc_t *softs = arg;
283 
284 	KFREE(softs);
285 }
286 
287 
288 # if !defined(sparc)
289 /* ------------------------------------------------------------------------ */
290 /* Function:    ipf_sync_tcporder                                           */
291 /* Returns:     Nil                                                         */
292 /* Parameters:  way(I) - direction of byte order conversion.                */
293 /*              td(IO) - pointer to data to be converted.                   */
294 /*                                                                          */
295 /* Do byte swapping on values in the TCP state information structure that   */
296 /* need to be used at both ends by the host in their native byte order.     */
297 /* ------------------------------------------------------------------------ */
298 void
ipf_sync_tcporder(int way,tcpdata_t * td)299 ipf_sync_tcporder(int way, tcpdata_t *td)
300 {
301 	if (way) {
302 		td->td_maxwin = htons(td->td_maxwin);
303 		td->td_end = htonl(td->td_end);
304 		td->td_maxend = htonl(td->td_maxend);
305 	} else {
306 		td->td_maxwin = ntohs(td->td_maxwin);
307 		td->td_end = ntohl(td->td_end);
308 		td->td_maxend = ntohl(td->td_maxend);
309 	}
310 }
311 
312 
313 /* ------------------------------------------------------------------------ */
314 /* Function:    ipf_sync_natorder                                           */
315 /* Returns:     Nil                                                         */
316 /* Parameters:  way(I)  - direction of byte order conversion.               */
317 /*              nat(IO) - pointer to data to be converted.                  */
318 /*                                                                          */
319 /* Do byte swapping on values in the NAT data structure that need to be     */
320 /* used at both ends by the host in their native byte order.                */
321 /* ------------------------------------------------------------------------ */
322 void
ipf_sync_natorder(int way,nat_t * n)323 ipf_sync_natorder(int way, nat_t *n)
324 {
325 	if (way) {
326 		n->nat_age = htonl(n->nat_age);
327 		n->nat_flags = htonl(n->nat_flags);
328 		n->nat_ipsumd = htonl(n->nat_ipsumd);
329 		n->nat_use = htonl(n->nat_use);
330 		n->nat_dir = htonl(n->nat_dir);
331 	} else {
332 		n->nat_age = ntohl(n->nat_age);
333 		n->nat_flags = ntohl(n->nat_flags);
334 		n->nat_ipsumd = ntohl(n->nat_ipsumd);
335 		n->nat_use = ntohl(n->nat_use);
336 		n->nat_dir = ntohl(n->nat_dir);
337 	}
338 }
339 
340 
341 /* ------------------------------------------------------------------------ */
342 /* Function:    ipf_sync_storder                                            */
343 /* Returns:     Nil                                                         */
344 /* Parameters:  way(I)  - direction of byte order conversion.               */
345 /*              ips(IO) - pointer to data to be converted.                  */
346 /*                                                                          */
347 /* Do byte swapping on values in the IP state data structure that need to   */
348 /* be used at both ends by the host in their native byte order.             */
349 /* ------------------------------------------------------------------------ */
350 void
ipf_sync_storder(int way,ipstate_t * ips)351 ipf_sync_storder(int way, ipstate_t *ips)
352 {
353 	ipf_sync_tcporder(way, &ips->is_tcp.ts_data[0]);
354 	ipf_sync_tcporder(way, &ips->is_tcp.ts_data[1]);
355 
356 	if (way) {
357 		ips->is_hv = htonl(ips->is_hv);
358 		ips->is_die = htonl(ips->is_die);
359 		ips->is_pass = htonl(ips->is_pass);
360 		ips->is_flags = htonl(ips->is_flags);
361 		ips->is_opt[0] = htonl(ips->is_opt[0]);
362 		ips->is_opt[1] = htonl(ips->is_opt[1]);
363 		ips->is_optmsk[0] = htonl(ips->is_optmsk[0]);
364 		ips->is_optmsk[1] = htonl(ips->is_optmsk[1]);
365 		ips->is_sec = htons(ips->is_sec);
366 		ips->is_secmsk = htons(ips->is_secmsk);
367 		ips->is_auth = htons(ips->is_auth);
368 		ips->is_authmsk = htons(ips->is_authmsk);
369 		ips->is_s0[0] = htonl(ips->is_s0[0]);
370 		ips->is_s0[1] = htonl(ips->is_s0[1]);
371 		ips->is_smsk[0] = htons(ips->is_smsk[0]);
372 		ips->is_smsk[1] = htons(ips->is_smsk[1]);
373 	} else {
374 		ips->is_hv = ntohl(ips->is_hv);
375 		ips->is_die = ntohl(ips->is_die);
376 		ips->is_pass = ntohl(ips->is_pass);
377 		ips->is_flags = ntohl(ips->is_flags);
378 		ips->is_opt[0] = ntohl(ips->is_opt[0]);
379 		ips->is_opt[1] = ntohl(ips->is_opt[1]);
380 		ips->is_optmsk[0] = ntohl(ips->is_optmsk[0]);
381 		ips->is_optmsk[1] = ntohl(ips->is_optmsk[1]);
382 		ips->is_sec = ntohs(ips->is_sec);
383 		ips->is_secmsk = ntohs(ips->is_secmsk);
384 		ips->is_auth = ntohs(ips->is_auth);
385 		ips->is_authmsk = ntohs(ips->is_authmsk);
386 		ips->is_s0[0] = ntohl(ips->is_s0[0]);
387 		ips->is_s0[1] = ntohl(ips->is_s0[1]);
388 		ips->is_smsk[0] = ntohl(ips->is_smsk[0]);
389 		ips->is_smsk[1] = ntohl(ips->is_smsk[1]);
390 	}
391 }
392 # else /* !defined(sparc) */
393 #  define	ipf_sync_tcporder(x,y)
394 #  define	ipf_sync_natorder(x,y)
395 #  define	ipf_sync_storder(x,y)
396 # endif /* !defined(sparc) */
397 
398 
/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_write                                              */
/* Returns:     int    - 0 == success, else error value.                    */
/* Parameters:  softc(I) - pointer to main ipf soft context                 */
/*              uio(I)   - pointer to information about data to write       */
/*                                                                          */
/* Moves data from user space into the kernel and uses it for updating data */
/* structures in the state/NAT tables.  The input is a stream of records,   */
/* each a synchdr_t followed by sm_len bytes of payload; the loop consumes  */
/* as many complete records as the uio supplies.                            */
/* ------------------------------------------------------------------------ */
int
ipf_sync_write(ipf_main_softc_t *softc, struct uio *uio)
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
	synchdr_t sh;
	/* Payload buffer sized for the largest record of either table:
	 * a full create image or an update record. */
	union ipf_sync_data {
		union ipf_sync_state_data {
			ipstate_t create;
			synctcp_update_t update;
		} state;
		union ipf_sync_nat_data {
			nat_t create;
			syncupdent_t update;
		} nat;
	} data;
	int err = 0;

#  if defined(__NetBSD__) || defined(__FreeBSD__)
	uio->uio_rw = UIO_WRITE;
#  endif

	/* Try to get bytes */
	while (uio->uio_resid > 0) {

		if (uio->uio_resid >= sizeof(sh)) {

			err = UIOMOVE(&sh, sizeof(sh), UIO_WRITE, uio);

			if (err) {
				if (softs->ipf_sync_debug > 2)
					printf("uiomove(header) failed: %d\n",
						err);
				return (err);
			}

			/* convert to host order */
			sh.sm_magic = ntohl(sh.sm_magic);
			sh.sm_len = ntohl(sh.sm_len);
			sh.sm_num = ntohl(sh.sm_num);

			if (softs->ipf_sync_debug > 8)
				printf("[%d] Read v:%d p:%d cmd:%d table:%d rev:%d len:%d magic:%x\n",
					sh.sm_num, sh.sm_v, sh.sm_p, sh.sm_cmd,
					sh.sm_table, sh.sm_rev, sh.sm_len,
					sh.sm_magic);

			/* Sanity-check every header field before trusting
			 * sm_len further below. */
			if (sh.sm_magic != SYNHDRMAGIC) {
				if (softs->ipf_sync_debug > 2)
					printf("uiomove(header) invalid %s\n",
						"magic");
				IPFERROR(110001);
				return (EINVAL);
			}

			if (sh.sm_v != 4 && sh.sm_v != 6) {
				if (softs->ipf_sync_debug > 2)
					printf("uiomove(header) invalid %s\n",
						"protocol");
				IPFERROR(110002);
				return (EINVAL);
			}

			if (sh.sm_cmd > SMC_MAXCMD) {
				if (softs->ipf_sync_debug > 2)
					printf("uiomove(header) invalid %s\n",
						"command");
				IPFERROR(110003);
				return (EINVAL);
			}


			if (sh.sm_table > SMC_MAXTBL) {
				if (softs->ipf_sync_debug > 2)
					printf("uiomove(header) invalid %s\n",
						"table");
				IPFERROR(110004);
				return (EINVAL);
			}

		} else {
			/* insufficient data, wait until next call */
			if (softs->ipf_sync_debug > 2)
				printf("uiomove(header) insufficient data");
			IPFERROR(110005);
			return (EAGAIN);
	 	}


		/*
		 * We have a header, so try to read the amount of data
		 * needed for the request
		 */

		/* too short or too long */
		if (sh.sm_len == 0 || sh.sm_len > sizeof(data)) {
			if (softs->ipf_sync_debug > 2)
				printf("uiomove(data) invalid length %d\n",
					sh.sm_len);
			IPFERROR(110006);
			return (EINVAL);
		}

		if (uio->uio_resid >= sh.sm_len) {

			err = UIOMOVE(&data, sh.sm_len, UIO_WRITE, uio);

			if (err) {
				if (softs->ipf_sync_debug > 2)
					printf("uiomove(data) failed: %d\n",
						err);
				return (err);
			}

			if (softs->ipf_sync_debug > 7)
				printf("uiomove(data) %d bytes read\n",
					sh.sm_len);

			/* Dispatch on the table named in the header; other
			 * table values are skipped with err left at 0.
			 * NOTE(review): a non-zero err from the dispatch is
			 * only logged — the loop continues and the function
			 * can still return 0; confirm this is intended. */
			if (sh.sm_table == SMC_STATE)
				err = ipf_sync_state(softc, &sh, &data);
			else if (sh.sm_table == SMC_NAT)
				err = ipf_sync_nat(softc, &sh, &data);
			if (softs->ipf_sync_debug > 7)
				printf("[%d] Finished with error %d\n",
					sh.sm_num, err);

		} else {
			/* insufficient data, wait until next call */
			/* NOTE(review): the header of this record has
			 * already been consumed from the uio, so the writer
			 * must resend the whole record, not just the
			 * payload — confirm the userland side does this. */
			if (softs->ipf_sync_debug > 2)
				printf("uiomove(data) %s %d bytes, got %d\n",
					"insufficient data, need",
					sh.sm_len, (int)uio->uio_resid);
			IPFERROR(110007);
			return (EAGAIN);
		}
	}

	/* no more data */
	return (0);
}
546 
547 
/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_read                                               */
/* Returns:     int    - 0 == success, else error value.                    */
/* Parameters:  softc(I) - pointer to main ipf soft context                 */
/*              uio(O)   - pointer to information about where to store data */
/*                                                                          */
/* This function is called when a user program wants to read some data      */
/* for pending state/NAT updates.  If no data is available, the caller is   */
/* put to sleep, pending a wakeup from the "lower half" of this code.       */
/* ------------------------------------------------------------------------ */
int
ipf_sync_read(ipf_main_softc_t *softc, struct uio *uio)
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
	syncupdent_t *su;
	synclogent_t *sl;
	int err = 0;

	/* Require a 4-byte aligned read of at least 8 bytes. */
	if ((uio->uio_resid & 3) || (uio->uio_resid < 8)) {
		IPFERROR(110008);
		return (EINVAL);
	}

#  if defined(__NetBSD__) || defined(__FreeBSD__)
	uio->uio_rw = UIO_READ;
#  endif

	/*
	 * Block until either ring (log or update) holds unread entries.
	 * On Solaris the condvar wait keeps ipsl_mutex held; elsewhere the
	 * mutex is dropped around SLEEP() and the condition re-tested.
	 */
	MUTEX_ENTER(&softs->ipsl_mutex);
	while ((softs->sl_tail == softs->sl_idx) &&
	       (softs->su_tail == softs->su_idx)) {
#  if defined(_KERNEL)
#   if SOLARIS
		if (!cv_wait_sig(&softs->ipslwait, &softs->ipsl_mutex.ipf_lk)) {
			MUTEX_EXIT(&softs->ipsl_mutex);
			IPFERROR(110009);
			return (EINTR);
		}
#   else
		MUTEX_EXIT(&softs->ipsl_mutex);
		err = SLEEP(&softs->sl_tail, "ipl sleep");
		if (err) {
			IPFERROR(110012);
			return (EINTR);
		}
		MUTEX_ENTER(&softs->ipsl_mutex);
#   endif /* SOLARIS */
#  endif /* _KERNEL */
	}

	/*
	 * Copy out whole log (create/delete) entries first; the mutex is
	 * dropped across each UIOMOVE and retaken afterwards.
	 */
	while ((softs->sl_tail < softs->sl_idx) &&
	       (uio->uio_resid > sizeof(*sl))) {
		sl = softs->synclog + softs->sl_tail++;
		MUTEX_EXIT(&softs->ipsl_mutex);
		err = UIOMOVE(sl, sizeof(*sl), UIO_READ, uio);
		if (err != 0)
			goto goterror;
		MUTEX_ENTER(&softs->ipsl_mutex);
	}

	/* ... then whole update entries from the syncupd ring. */
	while ((softs->su_tail < softs->su_idx) &&
	       (uio->uio_resid > sizeof(*su))) {
		su = softs->syncupd + softs->su_tail;
		softs->su_tail++;
		MUTEX_EXIT(&softs->ipsl_mutex);
		err = UIOMOVE(su, sizeof(*su), UIO_READ, uio);
		if (err != 0)
			goto goterror;
		MUTEX_ENTER(&softs->ipsl_mutex);
		/* Detach the consumed update entry from its synclist so
		 * ipf_sync_del() will not try to clear it again. */
		if (su->sup_hdr.sm_sl != NULL)
			su->sup_hdr.sm_sl->sl_idx = -1;
	}
	/* Rewind each ring to the start once it is fully drained. */
	if (softs->sl_tail == softs->sl_idx)
		softs->sl_tail = softs->sl_idx = 0;
	if (softs->su_tail == softs->su_idx)
		softs->su_tail = softs->su_idx = 0;
	MUTEX_EXIT(&softs->ipsl_mutex);
goterror:
	/* Reached directly only from a failed UIOMOVE, at which point the
	 * mutex is not held. */
	return (err);
}
626 
627 
/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_state                                              */
/* Returns:     int    - 0 == success, else error value.                    */
/* Parameters:  softc(I) - pointer to main ipf soft context                 */
/*              sp(I)    - pointer to sync packet data header (host order)  */
/*              data(I)  - pointer to the record payload                    */
/*                                                                          */
/* Updates the state table according to information passed in the sync      */
/* header.  This function can create a new state entry or update one.       */
/* Deletion is left to the state structures being timed out correctly.      */
/*                                                                          */
/* Locking: both successful paths leave ipf_syncstate and ipf_state held;   */
/* they are released together at the common exit (the err == 0 block at     */
/* the bottom).  Error paths release whatever they acquired themselves.     */
/* ------------------------------------------------------------------------ */
static int
ipf_sync_state(ipf_main_softc_t *softc, synchdr_t *sp, void *data)
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
	synctcp_update_t su;
	ipstate_t *is, sn;
	synclist_t *sl;
	frentry_t *fr;
	u_int hv;
	int err = 0;

	/* Hash bucket from the record sequence number; the table size is a
	 * power of two so the mask selects a valid bucket. */
	hv = sp->sm_num & (softs->ipf_sync_state_tab_sz - 1);

	switch (sp->sm_cmd)
	{
	case SMC_CREATE :

		/* The payload must be a complete state image. */
		if (sp->sm_len != sizeof(sn)) {
			IPFERROR(110025);
			err = EINVAL;
			break;
		}
		bcopy(data, &sn, sizeof(sn));
		KMALLOC(is, ipstate_t *);
		if (is == NULL) {
			IPFERROR(110013);
			err = ENOMEM;
			break;
		}

		KMALLOC(sl, synclist_t *);
		if (sl == NULL) {
			IPFERROR(110014);
			err = ENOMEM;
			/* release the partial allocation */
			KFREE(is);
			break;
		}

		/* Copy only the fields from is_die onwards; everything
		 * before that offset is locally owned and zeroed. */
		bzero((char *)is, offsetof(ipstate_t, is_die));
		bcopy((char *)&sn.is_die, (char *)&is->is_die,
		      sizeof(*is) - offsetof(ipstate_t, is_die));
		ipf_sync_storder(0, is);

		/*
		 * We need to find the same rule on the slave as was used on
		 * the master to create this state entry.
		 */
		READ_ENTER(&softc->ipf_mutex);
		fr = ipf_getrulen(softc, IPL_LOGIPF, sn.is_group, sn.is_rulen);
		if (fr != NULL) {
			MUTEX_ENTER(&fr->fr_lock);
			fr->fr_ref++;
			fr->fr_statecnt++;
			MUTEX_EXIT(&fr->fr_lock);
		}
		RWLOCK_EXIT(&softc->ipf_mutex);

		if (softs->ipf_sync_debug > 4)
			printf("[%d] Filter rules = %p\n", sp->sm_num, fr);

		is->is_rule = fr;
		is->is_sync = sl;

		sl->sl_idx = -1;
		sl->sl_ips = is;
		/* Keep the whole header so later updates can be matched on
		 * sl_hdr.sm_num. */
		bcopy(sp, &sl->sl_hdr, sizeof(struct synchdr));

		WRITE_ENTER(&softs->ipf_syncstate);
		WRITE_ENTER(&softc->ipf_state);

		/* Insert at the head of the bucket's doubly linked chain. */
		sl->sl_pnext = softs->syncstatetab + hv;
		sl->sl_next = softs->syncstatetab[hv];
		if (softs->syncstatetab[hv] != NULL)
			softs->syncstatetab[hv]->sl_pnext = &sl->sl_next;
		softs->syncstatetab[hv] = sl;
		MUTEX_DOWNGRADE(&softs->ipf_syncstate);
		ipf_state_insert(softc, is, sp->sm_rev);
		/*
		 * Do not initialise the interface pointers for the state
		 * entry as the full complement of interface names may not
		 * be present.
		 *
		 * Put this state entry on its timeout queue.
		 */
		/*fr_setstatequeue(is, sp->sm_rev);*/
		break;

	case SMC_UPDATE :
		if (sp->sm_len != sizeof(su)) {
			IPFERROR(110026);
			err = EINVAL;
			break;
		}
		bcopy(data, &su, sizeof(su));

		if (softs->ipf_sync_debug > 4)
			printf("[%d] Update age %lu state %d/%d \n",
				sp->sm_num, su.stu_age, su.stu_state[0],
				su.stu_state[1]);

		/* Find the entry created earlier with this sequence number. */
		READ_ENTER(&softs->ipf_syncstate);
		for (sl = softs->syncstatetab[hv]; (sl != NULL);
		     sl = sl->sl_next)
			if (sl->sl_hdr.sm_num == sp->sm_num)
				break;
		if (sl == NULL) {
			if (softs->ipf_sync_debug > 1)
				printf("[%d] State not found - can't update\n",
					sp->sm_num);
			RWLOCK_EXIT(&softs->ipf_syncstate);
			IPFERROR(110015);
			err = ENOENT;
			break;
		}

		READ_ENTER(&softc->ipf_state);

		if (softs->ipf_sync_debug > 6)
			printf("[%d] Data from state v:%d p:%d cmd:%d table:%d rev:%d\n",
				sp->sm_num, sl->sl_hdr.sm_v, sl->sl_hdr.sm_p,
				sl->sl_hdr.sm_cmd, sl->sl_hdr.sm_table,
				sl->sl_hdr.sm_rev);

		is = sl->sl_ips;

		MUTEX_ENTER(&is->is_lock);
		switch (sp->sm_p)
		{
		case IPPROTO_TCP :
			/* XXX FV --- shouldn't we do ntohl/htonl???? XXX */
			is->is_send = su.stu_data[0].td_end;
			is->is_maxsend = su.stu_data[0].td_maxend;
			is->is_maxswin = su.stu_data[0].td_maxwin;
			is->is_state[0] = su.stu_state[0];
			is->is_dend = su.stu_data[1].td_end;
			is->is_maxdend = su.stu_data[1].td_maxend;
			is->is_maxdwin = su.stu_data[1].td_maxwin;
			is->is_state[1] = su.stu_state[1];
			break;
		default :
			/* only TCP state carries window update data */
			break;
		}

		if (softs->ipf_sync_debug > 6)
			printf("[%d] Setting timers for state\n", sp->sm_num);

		ipf_state_setqueue(softc, is, sp->sm_rev);

		MUTEX_EXIT(&is->is_lock);
		break;

	default :
		IPFERROR(110016);
		err = EINVAL;
		break;
	}

	/* Common exit: the successful paths above leave both locks held. */
	if (err == 0) {
		RWLOCK_EXIT(&softc->ipf_state);
		RWLOCK_EXIT(&softs->ipf_syncstate);
	}

	if (softs->ipf_sync_debug > 6)
		printf("[%d] Update completed with error %d\n",
			sp->sm_num, err);

	return (err);
}
808 
809 
/* ------------------------------------------------------------------------ */
/* Function:    ipf_sync_del                                                */
/* Returns:     Nil                                                         */
/* Parameters:  softs(I) - pointer to sync context structure                */
/*              sl(I)    - pointer to synclist object to delete             */
/*                                                                          */
/* Unlinks the object from its hash chain and, if a pending entry in the    */
/* syncupd ring still points at it, clears that back reference.  Callers    */
/* (ipf_sync_del_state/ipf_sync_del_nat) hold the table's write lock.       */
/* ------------------------------------------------------------------------ */
static void
ipf_sync_del(ipf_sync_softc_t *softs, synclist_t *sl)
{
	*sl->sl_pnext = sl->sl_next;
	if (sl->sl_next != NULL)
		sl->sl_next->sl_pnext = sl->sl_pnext;
	if (sl->sl_idx != -1)
		softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
}
826 
827 
828 /* ------------------------------------------------------------------------ */
829 /* Function:    ipf_sync_del_state                                          */
830 /* Returns:     Nil                                                         */
831 /* Parameters:  sl(I) - pointer to synclist object to delete                */
832 /*                                                                          */
833 /* Deletes an object from the synclist state table and free's its memory.   */
834 /* ------------------------------------------------------------------------ */
835 void
ipf_sync_del_state(void * arg,synclist_t * sl)836 ipf_sync_del_state(void *arg, synclist_t *sl)
837 {
838 	ipf_sync_softc_t *softs = arg;
839 
840 	WRITE_ENTER(&softs->ipf_syncstate);
841 	ipf_sync_del(softs, sl);
842 	RWLOCK_EXIT(&softs->ipf_syncstate);
843 	KFREE(sl);
844 }
845 
846 
847 /* ------------------------------------------------------------------------ */
848 /* Function:    ipf_sync_del_nat                                            */
849 /* Returns:     Nil                                                         */
850 /* Parameters:  sl(I) - pointer to synclist object to delete                */
851 /*                                                                          */
852 /* Deletes an object from the synclist nat table and free's its memory.     */
853 /* ------------------------------------------------------------------------ */
854 void
ipf_sync_del_nat(void * arg,synclist_t * sl)855 ipf_sync_del_nat(void *arg, synclist_t *sl)
856 {
857 	ipf_sync_softc_t *softs = arg;
858 
859 	WRITE_ENTER(&softs->ipf_syncnat);
860 	ipf_sync_del(softs, sl);
861 	RWLOCK_EXIT(&softs->ipf_syncnat);
862 	KFREE(sl);
863 }
864 
865 
866 /* ------------------------------------------------------------------------ */
867 /* Function:    ipf_sync_nat                                                */
868 /* Returns:     int    - 0 == success, else error value.                    */
869 /* Parameters:  sp(I)  - pointer to sync packet data header                 */
870 /*              uio(I) - pointer to user data for further information       */
871 /*                                                                          */
872 /* Updates the NAT  table according to information passed in the sync       */
873 /* header.  As required, more data is fetched from the uio structure but    */
874 /* varies depending on the contents of the sync header.  This function can  */
875 /* create a new NAT entry or update one.  Deletion is left to the NAT       */
876 /* structures being timed out correctly.                                    */
877 /* ------------------------------------------------------------------------ */
878 static int
ipf_sync_nat(ipf_main_softc_t * softc,synchdr_t * sp,void * data)879 ipf_sync_nat(ipf_main_softc_t *softc, synchdr_t *sp, void *data)
880 {
881 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
882 	syncupdent_t su;
883 	nat_t *n, *nat;
884 	synclist_t *sl;
885 	u_int hv = 0;
886 	int err = 0;
887 
888 	READ_ENTER(&softs->ipf_syncnat);
889 
890 	switch (sp->sm_cmd)
891 	{
892 	case SMC_CREATE :
893 		KMALLOC(n, nat_t *);
894 		if (n == NULL) {
895 			IPFERROR(110017);
896 			err = ENOMEM;
897 			break;
898 		}
899 
900 		KMALLOC(sl, synclist_t *);
901 		if (sl == NULL) {
902 			IPFERROR(110018);
903 			err = ENOMEM;
904 			KFREE(n);
905 			break;
906 		}
907 
908 		if (sp->sm_len != sizeof(*nat)) {
909 			IPFERROR(110027);
910 			err = EINVAL;
911 			break;
912 		}
913 		nat = (nat_t *)data;
914 		bzero((char *)n, offsetof(nat_t, nat_age));
915 		bcopy((char *)&nat->nat_age, (char *)&n->nat_age,
916 		      sizeof(*n) - offsetof(nat_t, nat_age));
917 		ipf_sync_natorder(0, n);
918 		n->nat_sync = sl;
919 		n->nat_rev = sl->sl_rev;
920 
921 		sl->sl_idx = -1;
922 		sl->sl_ipn = n;
923 		sl->sl_num = ntohl(sp->sm_num);
924 
925 		WRITE_ENTER(&softc->ipf_nat);
926 		sl->sl_pnext = softs->syncnattab + hv;
927 		sl->sl_next = softs->syncnattab[hv];
928 		if (softs->syncnattab[hv] != NULL)
929 			softs->syncnattab[hv]->sl_pnext = &sl->sl_next;
930 		softs->syncnattab[hv] = sl;
931 		(void) ipf_nat_insert(softc, softc->ipf_nat_soft, n);
932 		RWLOCK_EXIT(&softc->ipf_nat);
933 		break;
934 
935 	case SMC_UPDATE :
936 		if (sp->sm_len != sizeof(su)) {
937 			IPFERROR(110028);
938 			err = EINVAL;
939 			break;
940 		}
941 		bcopy(data, &su, sizeof(su));
942 
943 		for (sl = softs->syncnattab[hv]; (sl != NULL);
944 		     sl = sl->sl_next)
945 			if (sl->sl_hdr.sm_num == sp->sm_num)
946 				break;
947 		if (sl == NULL) {
948 			IPFERROR(110019);
949 			err = ENOENT;
950 			break;
951 		}
952 
953 		READ_ENTER(&softc->ipf_nat);
954 
955 		nat = sl->sl_ipn;
956 		nat->nat_rev = sl->sl_rev;
957 
958 		MUTEX_ENTER(&nat->nat_lock);
959 		ipf_nat_setqueue(softc, softc->ipf_nat_soft, nat);
960 		MUTEX_EXIT(&nat->nat_lock);
961 
962 		RWLOCK_EXIT(&softc->ipf_nat);
963 
964 		break;
965 
966 	default :
967 		IPFERROR(110020);
968 		err = EINVAL;
969 		break;
970 	}
971 
972 	RWLOCK_EXIT(&softs->ipf_syncnat);
973 	return (err);
974 }
975 
976 
977 /* ------------------------------------------------------------------------ */
978 /* Function:    ipf_sync_new                                                */
979 /* Returns:     synclist_t* - NULL == failure, else pointer to new synclist */
980 /*                            data structure.                               */
981 /* Parameters:  tab(I) - type of synclist_t to create                       */
982 /*              fin(I) - pointer to packet information                      */
983 /*              ptr(I) - pointer to owning object                           */
984 /*                                                                          */
985 /* Creates a new sync table entry and notifies any sleepers that it's there */
986 /* waiting to be processed.                                                 */
987 /* ------------------------------------------------------------------------ */
synclist_t *
ipf_sync_new(ipf_main_softc_t *softc, int tab, fr_info_t *fin, void *ptr)
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
	synclist_t *sl, *ss;
	synclogent_t *sle;
	u_int hv, sz;

	/* Refuse to create a new entry while the sync log is full. */
	if (softs->sl_idx == softs->ipf_sync_log_sz)
		return (NULL);
	KMALLOC(sl, synclist_t *);
	if (sl == NULL)
		return (NULL);

	MUTEX_ENTER(&softs->ipf_syncadd);
	/*
	 * Get a unique number for this synclist_t.  The number is only meant
	 * to be unique for the lifetime of the structure and may be reused
	 * later.  0 is reserved, so skip it when the counter wraps.
	 */
	softs->ipf_sync_num++;
	if (softs->ipf_sync_num == 0) {
		softs->ipf_sync_num = 1;
		softs->ipf_sync_wrap++;
	}

	/*
	 * Use the synch number of the object as the hash key.  Should end up
	 * with relatively even distribution over time.
	 * XXX - an attacker could launch a DoS attack, of sorts, if they are
	 * the only one causing new table entries by only keeping open every
	 * nth connection they make, where n is a value in the interval
	 * [0, SYNC_STATETABSZ-1].
	 */
	switch (tab)
	{
	case SMC_STATE :
		hv = softs->ipf_sync_num & (softs->ipf_sync_state_tab_sz - 1);
		/*
		 * Once the counter has wrapped at least once, make sure the
		 * chosen number is not still in use by an older entry.
		 */
		while (softs->ipf_sync_wrap != 0) {
			for (ss = softs->syncstatetab[hv]; ss; ss = ss->sl_next)
				if (ss->sl_hdr.sm_num == softs->ipf_sync_num)
					break;
			if (ss == NULL)
				break;
			softs->ipf_sync_num++;
			hv = softs->ipf_sync_num &
			     (softs->ipf_sync_state_tab_sz - 1);
		}
		/*
		 * Insert at the head of the hash chain.
		 * NOTE(review): unlike the insert in ipf_sync_nat(), the old
		 * head's sl_pnext is not updated here -- verify whether the
		 * back-pointer chain can go stale.
		 */
		sl->sl_pnext = softs->syncstatetab + hv;
		sl->sl_next = softs->syncstatetab[hv];
		softs->syncstatetab[hv] = sl;
		break;

	case SMC_NAT :
		hv = softs->ipf_sync_num & (softs->ipf_sync_nat_tab_sz - 1);
		/* Same wrap-avoidance scan as above, for the NAT table. */
		while (softs->ipf_sync_wrap != 0) {
			for (ss = softs->syncnattab[hv]; ss; ss = ss->sl_next)
				if (ss->sl_hdr.sm_num == softs->ipf_sync_num)
					break;
			if (ss == NULL)
				break;
			softs->ipf_sync_num++;
			hv = softs->ipf_sync_num &
			     (softs->ipf_sync_nat_tab_sz - 1);
		}
		sl->sl_pnext = softs->syncnattab + hv;
		sl->sl_next = softs->syncnattab[hv];
		softs->syncnattab[hv] = sl;
		break;

	default :
		/* Unknown table: entry is not linked anywhere. */
		break;
	}

	sl->sl_num = softs->ipf_sync_num;
	MUTEX_EXIT(&softs->ipf_syncadd);

	/*
	 * Fill in the wire header.  Presumably the sl_magic/sl_v/... fields
	 * alias sl_hdr, which is copied wholesale below -- verify against
	 * the synclist_t definition.
	 */
	sl->sl_magic = htonl(SYNHDRMAGIC);
	sl->sl_v = fin->fin_v;
	sl->sl_p = fin->fin_p;
	sl->sl_cmd = SMC_CREATE;
	sl->sl_idx = -1;
	sl->sl_table = tab;
	sl->sl_rev = fin->fin_rev;
	if (tab == SMC_STATE) {
		sl->sl_ips = ptr;
		sz = sizeof(*sl->sl_ips);
	} else if (tab == SMC_NAT) {
		sl->sl_ipn = ptr;
		sz = sizeof(*sl->sl_ipn);
	} else {
		ptr = NULL;
		sz = 0;
	}
	sl->sl_len = sz;

	/*
	 * Create the log entry to be read by a user daemon.  When it has been
	 * finished and put on the queue, send a signal to wakeup any waiters.
	 */
	MUTEX_ENTER(&softs->ipf_syncadd);
	sle = softs->synclog + softs->sl_idx++;
	bcopy((char *)&sl->sl_hdr, (char *)&sle->sle_hdr,
	      sizeof(sle->sle_hdr));
	sle->sle_hdr.sm_num = htonl(sle->sle_hdr.sm_num);
	sle->sle_hdr.sm_len = htonl(sle->sle_hdr.sm_len);
	if (ptr != NULL) {
		/* Copy the owning object and convert it to network order. */
		bcopy((char *)ptr, (char *)&sle->sle_un, sz);
		if (tab == SMC_STATE) {
			ipf_sync_storder(1, &sle->sle_un.sleu_ips);
		} else if (tab == SMC_NAT) {
			ipf_sync_natorder(1, &sle->sle_un.sleu_ipn);
		}
	}
	MUTEX_EXIT(&softs->ipf_syncadd);

	ipf_sync_wakeup(softc);
	return (sl);
}
1107 
1108 
1109 /* ------------------------------------------------------------------------ */
1110 /* Function:    ipf_sync_update                                             */
1111 /* Returns:     Nil                                                         */
1112 /* Parameters:  tab(I) - type of synclist_t to create                       */
1113 /*              fin(I) - pointer to packet information                      */
1114 /*              sl(I)  - pointer to synchronisation object                  */
1115 /*                                                                          */
/* For outbound packets only, create a sync update record for the user      */
1117 /* process to read.                                                         */
1118 /* ------------------------------------------------------------------------ */
void
ipf_sync_update(ipf_main_softc_t *softc, int tab, fr_info_t *fin,
	synclist_t *sl)
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
	synctcp_update_t *st;
	syncupdent_t *slu;
	ipstate_t *ips;
	nat_t *nat;
	ipfrwlock_t *lock;

	/* Only outbound packets with an attached sync entry are reported. */
	if (fin->fin_out == 0 || sl == NULL)
		return;

	/* Pick the lock protecting the table this entry belongs to. */
	if (tab == SMC_STATE) {
		lock = &softs->ipf_syncstate;
	} else {
		lock = &softs->ipf_syncnat;
	}

	READ_ENTER(lock);
	if (sl->sl_idx == -1) {
		/*
		 * First update for this entry: claim a slot in the update
		 * log and fill in its header once.
		 * NOTE(review): su_idx is not bounds-checked against the
		 * syncupd array size here -- confirm overflow cannot occur.
		 */
		MUTEX_ENTER(&softs->ipf_syncadd);
		slu = softs->syncupd + softs->su_idx;
		sl->sl_idx = softs->su_idx++;
		MUTEX_EXIT(&softs->ipf_syncadd);

		bcopy((char *)&sl->sl_hdr, (char *)&slu->sup_hdr,
		      sizeof(slu->sup_hdr));
		slu->sup_hdr.sm_magic = htonl(SYNHDRMAGIC);
		slu->sup_hdr.sm_sl = sl;
		slu->sup_hdr.sm_cmd = SMC_UPDATE;
		slu->sup_hdr.sm_table = tab;
		slu->sup_hdr.sm_num = htonl(sl->sl_num);
		slu->sup_hdr.sm_len = htonl(sizeof(struct synctcp_update));
		slu->sup_hdr.sm_rev = fin->fin_rev;
# if 0
		if (fin->fin_p == IPPROTO_TCP) {
			st->stu_len[0] = 0;
			st->stu_len[1] = 0;
		}
# endif
	} else
		slu = softs->syncupd + sl->sl_idx;

	/*
	 * Only TCP has complex timeouts, others just use default timeouts.
	 * For TCP, we only need to track the connection state and window.
	 */
	if (fin->fin_p == IPPROTO_TCP) {
		st = &slu->sup_tcp;
		if (tab == SMC_STATE) {
			/* Snapshot both directions of the TCP window. */
			ips = sl->sl_ips;
			st->stu_age = htonl(ips->is_die);
			st->stu_data[0].td_end = ips->is_send;
			st->stu_data[0].td_maxend = ips->is_maxsend;
			st->stu_data[0].td_maxwin = ips->is_maxswin;
			st->stu_state[0] = ips->is_state[0];
			st->stu_data[1].td_end = ips->is_dend;
			st->stu_data[1].td_maxend = ips->is_maxdend;
			st->stu_data[1].td_maxwin = ips->is_maxdwin;
			st->stu_state[1] = ips->is_state[1];
		} else if (tab == SMC_NAT) {
			/* NAT entries only carry their age/timeout. */
			nat = sl->sl_ipn;
			st->stu_age = htonl(nat->nat_age);
		}
	}
	RWLOCK_EXIT(lock);

	ipf_sync_wakeup(softc);
}
1190 
1191 
1192 /* ------------------------------------------------------------------------ */
1193 /* Function:    ipf_sync_flush_table                                        */
1194 /* Returns:     int - number of entries freed by flushing table             */
1195 /* Parameters:  tabsize(I) - size of the array pointed to by table          */
1196 /*              table(I)   - pointer to sync table to empty                 */
1197 /*                                                                          */
1198 /* Walk through a table of sync entries and free each one.  It is assumed   */
1199 /* that some lock is held so that nobody else tries to access the table     */
1200 /* during this cleanup.                                                     */
1201 /* ------------------------------------------------------------------------ */
1202 static int
ipf_sync_flush_table(ipf_sync_softc_t * softs,int tabsize,synclist_t ** table)1203 ipf_sync_flush_table(ipf_sync_softc_t *softs, int tabsize, synclist_t **table)
1204 {
1205 	synclist_t *sl;
1206 	int i, items;
1207 
1208 	items = 0;
1209 
1210 	for (i = 0; i < tabsize; i++) {
1211 		while ((sl = table[i]) != NULL) {
1212 			switch (sl->sl_table) {
1213 			case SMC_STATE :
1214 				if (sl->sl_ips != NULL)
1215 					sl->sl_ips->is_sync = NULL;
1216 				break;
1217 			case SMC_NAT :
1218 				if (sl->sl_ipn != NULL)
1219 					sl->sl_ipn->nat_sync = NULL;
1220 				break;
1221 			}
1222 			if (sl->sl_next != NULL)
1223 				sl->sl_next->sl_pnext = sl->sl_pnext;
1224 			table[i] = sl->sl_next;
1225 			if (sl->sl_idx != -1)
1226 				softs->syncupd[sl->sl_idx].sup_hdr.sm_sl = NULL;
1227 			KFREE(sl);
1228 			items++;
1229 		}
1230 	}
1231 
1232 	return (items);
1233 }
1234 
1235 
1236 /* ------------------------------------------------------------------------ */
1237 /* Function:    ipf_sync_ioctl                                              */
1238 /* Returns:     int - 0 == success, != 0 == failure                         */
1239 /* Parameters:  data(I) - pointer to ioctl data                             */
1240 /*              cmd(I)  - ioctl command integer                             */
1241 /*              mode(I) - file mode bits used with open                     */
1242 /*                                                                          */
1243 /* This function currently does not handle any ioctls and so just returns   */
1244 /* EINVAL on all occasions.                                                 */
1245 /* ------------------------------------------------------------------------ */
1246 int
ipf_sync_ioctl(ipf_main_softc_t * softc,caddr_t data,ioctlcmd_t cmd,int mode,int uid,void * ctx)1247 ipf_sync_ioctl(ipf_main_softc_t *softc, caddr_t data, ioctlcmd_t cmd,
1248 	int mode, int uid, void *ctx)
1249 {
1250 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
1251 	int error, i;
1252 	SPL_INT(s);
1253 
1254 	switch (cmd)
1255 	{
1256 	case SIOCIPFFL:
1257 		error = BCOPYIN(data, &i, sizeof(i));
1258 		if (error != 0) {
1259 			IPFERROR(110023);
1260 			error = EFAULT;
1261 			break;
1262 		}
1263 
1264 		switch (i)
1265 		{
1266 		case SMC_RLOG :
1267 			SPL_NET(s);
1268 			MUTEX_ENTER(&softs->ipsl_mutex);
1269 			i = (softs->sl_tail - softs->sl_idx) +
1270 			    (softs->su_tail - softs->su_idx);
1271 			softs->sl_idx = 0;
1272 			softs->su_idx = 0;
1273 			softs->sl_tail = 0;
1274 			softs->su_tail = 0;
1275 			MUTEX_EXIT(&softs->ipsl_mutex);
1276 			SPL_X(s);
1277 			break;
1278 
1279 		case SMC_NAT :
1280 			SPL_NET(s);
1281 			WRITE_ENTER(&softs->ipf_syncnat);
1282 			i = ipf_sync_flush_table(softs, SYNC_NATTABSZ,
1283 						 softs->syncnattab);
1284 			RWLOCK_EXIT(&softs->ipf_syncnat);
1285 			SPL_X(s);
1286 			break;
1287 
1288 		case SMC_STATE :
1289 			SPL_NET(s);
1290 			WRITE_ENTER(&softs->ipf_syncstate);
1291 			i = ipf_sync_flush_table(softs, SYNC_STATETABSZ,
1292 						 softs->syncstatetab);
1293 			RWLOCK_EXIT(&softs->ipf_syncstate);
1294 			SPL_X(s);
1295 			break;
1296 		}
1297 
1298 		error = BCOPYOUT(&i, data, sizeof(i));
1299 		if (error != 0) {
1300 			IPFERROR(110022);
1301 			error = EFAULT;
1302 		}
1303 		break;
1304 
1305 	default :
1306 		IPFERROR(110021);
1307 		error = EINVAL;
1308 		break;
1309 	}
1310 
1311 	return (error);
1312 }
1313 
1314 
1315 /* ------------------------------------------------------------------------ */
1316 /* Function:    ipf_sync_canread                                            */
1317 /* Returns:     int - 0 == success, != 0 == failure                         */
1318 /* Parameters:  Nil                                                         */
1319 /*                                                                          */
1320 /* This function provides input to the poll handler about whether or not    */
1321 /* there is data waiting to be read from the /dev/ipsync device.            */
1322 /* ------------------------------------------------------------------------ */
1323 int
ipf_sync_canread(void * arg)1324 ipf_sync_canread(void *arg)
1325 {
1326 	ipf_sync_softc_t *softs = arg;
1327 	return (!((softs->sl_tail == softs->sl_idx) &&
1328 		 (softs->su_tail == softs->su_idx)));
1329 }
1330 
1331 
1332 /* ------------------------------------------------------------------------ */
1333 /* Function:    ipf_sync_canwrite                                           */
1334 /* Returns:     int - 1 == can always write                                 */
1335 /* Parameters:  Nil                                                         */
1336 /*                                                                          */
1337 /* This function lets the poll handler know that it is always ready willing */
1338 /* to accept write events.                                                  */
1339 /* XXX Maybe this should return false if the sync table is full?            */
1340 /* ------------------------------------------------------------------------ */
int
ipf_sync_canwrite(void *arg)
{
	/* The sync device accepts write events unconditionally. */
	return (1);
}
1346 
1347 
1348 /* ------------------------------------------------------------------------ */
1349 /* Function:    ipf_sync_wakeup                                             */
1350 /* Parameters:  Nil                                                         */
1351 /* Returns:     Nil                                                         */
1352 /*                                                                          */
1353 /* This function implements the heuristics that decide how often to         */
1354 /* generate a poll wakeup for programs that are waiting for information     */
1355 /* about when they can do a read on /dev/ipsync.                            */
1356 /*                                                                          */
1357 /* There are three different considerations here:                           */
1358 /* - do not keep a program waiting too long: ipf_sync_wake_interval is the  */
1359 /*   maximum number of ipf ticks to let pass by;                            */
/* - do not let the queue of outstanding things to generate notifies for    */
1361 /*   get too full (ipf_sync_queue_high_wm is the high water mark);          */
1362 /* - do not let too many events get collapsed in before deciding that the   */
1363 /*   other host(s) need an update (ipf_sync_event_high_wm is the high water */
1364 /*   mark for this counter.)                                                */
1365 /* ------------------------------------------------------------------------ */
1366 static void
ipf_sync_wakeup(ipf_main_softc_t * softc)1367 ipf_sync_wakeup(ipf_main_softc_t *softc)
1368 {
1369 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
1370 
1371 	softs->ipf_sync_events++;
1372 	if ((softc->ipf_ticks >
1373 	    softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval) ||
1374 	    (softs->ipf_sync_events > softs->ipf_sync_event_high_wm) ||
1375 	    ((softs->sl_tail - softs->sl_idx) >
1376 	     softs->ipf_sync_queue_high_wm) ||
1377 	    ((softs->su_tail - softs->su_idx) >
1378 	     softs->ipf_sync_queue_high_wm)) {
1379 
1380 		ipf_sync_poll_wakeup(softc);
1381 	}
1382 }
1383 
1384 
1385 /* ------------------------------------------------------------------------ */
1386 /* Function:    ipf_sync_poll_wakeup                                        */
1387 /* Parameters:  Nil                                                         */
1388 /* Returns:     Nil                                                         */
1389 /*                                                                          */
1390 /* Deliver a poll wakeup and reset counters for two of the three heuristics */
1391 /* ------------------------------------------------------------------------ */
static void
ipf_sync_poll_wakeup(ipf_main_softc_t *softc)
{
	ipf_sync_softc_t *softs = softc->ipf_sync_soft;

	/* Reset the event counter and record when this wakeup fired so the
	 * heuristics in ipf_sync_wakeup()/ipf_sync_expire() start over. */
	softs->ipf_sync_events = 0;
	softs->ipf_sync_lastwakeup = softc->ipf_ticks;

# ifdef _KERNEL
#  if SOLARIS
	/* Solaris: signal any reader sleeping on the condvar, then notify
	 * pollers of the sync device. */
	MUTEX_ENTER(&softs->ipsl_mutex);
	cv_signal(&softs->ipslwait);
	MUTEX_EXIT(&softs->ipsl_mutex);
	pollwakeup(&softc->ipf_poll_head[IPL_LOGSYNC], POLLIN|POLLRDNORM);
#  else
	/* Other kernels: wake sleepers on sl_tail and post a poll event. */
	WAKEUP(&softs->sl_tail, 0);
	POLLWAKEUP(IPL_LOGSYNC);
#  endif
# endif
}
1412 
1413 
1414 /* ------------------------------------------------------------------------ */
1415 /* Function:    ipf_sync_expire                                             */
1416 /* Parameters:  Nil                                                         */
1417 /* Returns:     Nil                                                         */
1418 /*                                                                          */
/* This is the function called every ipf_tick.  It implements one of the    */
1420 /* three heuristics above *IF* there are events waiting.                    */
1421 /* ------------------------------------------------------------------------ */
1422 void
ipf_sync_expire(ipf_main_softc_t * softc)1423 ipf_sync_expire(ipf_main_softc_t *softc)
1424 {
1425 	ipf_sync_softc_t *softs = softc->ipf_sync_soft;
1426 
1427 	if ((softs->ipf_sync_events > 0) &&
1428 	    (softc->ipf_ticks >
1429 	     softs->ipf_sync_lastwakeup + softs->ipf_sync_wake_interval)) {
1430 		ipf_sync_poll_wakeup(softc);
1431 	}
1432 }
1433