/*
 * Copyright (C) 2012 by Darren Reed.
 *
 * See the IPFILTER.LICENCE file for details on licencing.
 */
#if defined(KERNEL) || defined(_KERNEL)
# undef KERNEL
# undef _KERNEL
# define KERNEL	1
# define _KERNEL	1
#endif
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/file.h>
#if !defined(_KERNEL)
# include <stdio.h>
# include <string.h>
# include <stdlib.h>
# define _KERNEL
# include <sys/uio.h>
# undef _KERNEL
#endif
#if defined(_KERNEL) && defined(__FreeBSD__)
# include <sys/filio.h>
# include <sys/fcntl.h>
#else
# include <sys/ioctl.h>
#endif
# include <sys/protosw.h>
#include <sys/socket.h>
#if defined(_KERNEL)
# include <sys/systm.h>
# if !defined(__SVR4)
#  include <sys/mbuf.h>
# endif
#endif
#if !defined(__SVR4)
# if defined(_KERNEL)
#  include <sys/kernel.h>
# endif
#else
# include <sys/byteorder.h>
# ifdef _KERNEL
#  include <sys/dditypes.h>
# endif
# include <sys/stream.h>
# include <sys/kmem.h>
#endif
#include <net/if.h>
#ifdef sun
# include <net/af.h>
#endif
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
# include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include "netinet/ip_compat.h"
#include <netinet/tcpip.h>
#include "netinet/ip_fil.h"
#include "netinet/ip_nat.h"
#include "netinet/ip_frag.h"
#include "netinet/ip_state.h"
#include "netinet/ip_auth.h"
#include "netinet/ip_lookup.h"
#include "netinet/ip_proxy.h"
#include "netinet/ip_sync.h"
/* END OF INCLUDES */


#ifdef USE_MUTEXES
static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
			     fr_info_t *, u_32_t, ipfr_t **,
			     ipfrwlock_t *);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
			       fr_info_t *, ipfr_t **, ipfrwlock_t *);
static void ipf_frag_deref(void *, ipfr_t **, ipfrwlock_t *);
static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
			 ipfr_t **, ipfrwlock_t *);
#else
static ipfr_t *ipfr_frag_new(ipf_main_softc_t *, ipf_frag_softc_t *,
			     fr_info_t *, u_32_t, ipfr_t **);
static ipfr_t *ipf_frag_lookup(ipf_main_softc_t *, ipf_frag_softc_t *,
			       fr_info_t *, ipfr_t **);
static void ipf_frag_deref(void *, ipfr_t **);
static int ipf_frag_next(ipf_main_softc_t *, ipftoken_t *, ipfgeniter_t *,
			 ipfr_t **);
#endif
static void ipf_frag_delete(ipf_main_softc_t *, ipfr_t *, ipfr_t ***);
static void ipf_frag_free(ipf_frag_softc_t *, ipfr_t *);

static frentry_t ipfr_block;

static ipftuneable_t ipf_frag_tuneables[] = {
	{ { (void *)offsetof(ipf_frag_softc_t, ipfr_size) },
		"frag_size",		1,	0x7fffffff,
		stsizeof(ipf_frag_softc_t, ipfr_size),
		IPFT_WRDISABLED,	NULL,	NULL },
	{ { (void *)offsetof(ipf_frag_softc_t, ipfr_ttl) },
		"frag_ttl",		1,	0x7fffffff,
		stsizeof(ipf_frag_softc_t, ipfr_ttl),
		0,			NULL,	NULL },
	{ { NULL },
		NULL,			0,	0,
		0,
		0,			NULL,	NULL }
};

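/*
 * FBUMP(x) increments the named field in the fragment statistics;
 * FBUMPD(x) does the same and also fires the DT() probe of the same name.
 */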
#define	FBUMP(x)	softf->ipfr_stats.x++
#define	FBUMPD(x)	do { softf->ipfr_stats.x++; DT(x); } while (0)


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_main_load */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: Nil */
/* */
/* Initialise the filter rule associated with blocked packets - everyone can */
/* use it. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_main_load(void)
{
	bzero((char *)&ipfr_block, sizeof(ipfr_block));
	ipfr_block.fr_flags = FR_BLOCK|FR_QUICK;
	ipfr_block.fr_ref = 1;

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_main_unload */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: Nil */
/* */
/* A no-op function that exists as a placeholder so that the flow in */
/* other functions is obvious. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_main_unload(void)
{
	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_soft_create */
/* Returns: void * - NULL = failure, else pointer to local context */
/* Parameters: softc(I) - pointer to soft context main structure */
/* */
/* Allocate a new soft context structure to track fragment related info. */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
void *
ipf_frag_soft_create(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf;

	KMALLOC(softf, ipf_frag_softc_t *);
	if (softf == NULL)
		return (NULL);

	bzero((char *)softf, sizeof(*softf));

	RWLOCK_INIT(&softf->ipfr_ipidfrag, "frag ipid lock");
	RWLOCK_INIT(&softf->ipfr_frag, "ipf fragment rwlock");
	RWLOCK_INIT(&softf->ipfr_natfrag, "ipf NAT fragment rwlock");

	softf->ipf_frag_tune = ipf_tune_array_copy(softf,
						   sizeof(ipf_frag_tuneables),
						   ipf_frag_tuneables);
	if (softf->ipf_frag_tune == NULL) {
		ipf_frag_soft_destroy(softc, softf);
		return (NULL);
	}
	if (ipf_tune_array_link(softc, softf->ipf_frag_tune) == -1) {
		ipf_frag_soft_destroy(softc, softf);
		return (NULL);
	}

	softf->ipfr_size = IPFT_SIZE;
	softf->ipfr_ttl = IPF_TTLVAL(60);
	softf->ipfr_lock = 1;
	softf->ipfr_tail = &softf->ipfr_list;
	softf->ipfr_nattail = &softf->ipfr_natlist;
	softf->ipfr_ipidtail = &softf->ipfr_ipidlist;

	return (softf);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_soft_destroy */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* arg(I) - pointer to local context to use */
/* */
/* Undo the work of ipf_frag_soft_create(): destroy the locks, unlink and */
/* free the tuneable array and free the soft context structure itself. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_soft_destroy(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	RW_DESTROY(&softf->ipfr_ipidfrag);
	RW_DESTROY(&softf->ipfr_frag);
	RW_DESTROY(&softf->ipfr_natfrag);

	if (softf->ipf_frag_tune != NULL) {
		ipf_tune_array_unlink(softc, softf->ipf_frag_tune);
		KFREES(softf->ipf_frag_tune, sizeof(ipf_frag_tuneables));
		softf->ipf_frag_tune = NULL;
	}

	KFREE(softf);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_soft_init */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: softc(I) - pointer to soft context main structure */
/* arg(I) - pointer to local context to use */
/* */
/* Initialise the hash tables for the fragment cache lookups. */
/* ------------------------------------------------------------------------ */
/*ARGSUSED*/
int
ipf_frag_soft_init(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	KMALLOCS(softf->ipfr_heads, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_heads == NULL)
		return (-1);

	bzero((char *)softf->ipfr_heads, softf->ipfr_size * sizeof(ipfr_t *));

	KMALLOCS(softf->ipfr_nattab, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_nattab == NULL)
		return (-2);

	bzero((char *)softf->ipfr_nattab, softf->ipfr_size * sizeof(ipfr_t *));

	KMALLOCS(softf->ipfr_ipidtab, ipfr_t **,
		 softf->ipfr_size * sizeof(ipfr_t *));
	if (softf->ipfr_ipidtab == NULL)
		return (-3);

	bzero((char *)softf->ipfr_ipidtab,
	      softf->ipfr_size * sizeof(ipfr_t *));

	softf->ipfr_lock = 0;
	softf->ipfr_inited = 1;

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_soft_fini */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: softc(I) - pointer to soft context main structure */
/* arg(I) - pointer to local context to use */
/* */
/* Free all memory allocated whilst running and from initialisation. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_soft_fini(ipf_main_softc_t *softc, void *arg)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_lock = 1;

	if (softf->ipfr_inited == 1) {
		ipf_frag_clear(softc);

		softf->ipfr_inited = 0;
	}

	if (softf->ipfr_heads != NULL)
		KFREES(softf->ipfr_heads,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_heads = NULL;

	if (softf->ipfr_nattab != NULL)
		KFREES(softf->ipfr_nattab,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_nattab = NULL;

	if (softf->ipfr_ipidtab != NULL)
		KFREES(softf->ipfr_ipidtab,
		       softf->ipfr_size * sizeof(ipfr_t *));
	softf->ipfr_ipidtab = NULL;

	return (0);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_setlock */
/* Returns: Nil */
/* Parameters: arg(I) - pointer to local context to use */
/* tmp(I) - new value for lock */
/* */
/* Stub function that allows for external manipulation of ipfr_lock */
/* ------------------------------------------------------------------------ */
void
ipf_frag_setlock(void *arg, int tmp)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_lock = tmp;
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_stats */
/* Returns: ipfrstat_t* - pointer to struct with current frag stats */
/* Parameters: arg(I) - pointer to local context to use */
/* */
/* Updates ipfr_stats with current information and returns a pointer to it */
/* ------------------------------------------------------------------------ */
ipfrstat_t *
ipf_frag_stats(void *arg)
{
	ipf_frag_softc_t *softf = arg;

	softf->ipfr_stats.ifs_table = softf->ipfr_heads;
	softf->ipfr_stats.ifs_nattab = softf->ipfr_nattab;
	return (&softf->ipfr_stats);
}


/* ------------------------------------------------------------------------ */
/* Function: ipfr_frag_new */
/* Returns: ipfr_t * - pointer to fragment cache state info or NULL */
/* Parameters: fin(I) - pointer to packet information */
/* table(I) - pointer to frag table to add to */
/* lock(I) - pointer to lock to get a write hold of */
/* */
/* Add a new entry to the fragment cache, registering it as having come */
/* through this box, with the result of the filter operation. */
/* */
/* If this function succeeds, it returns with a write lock held on "lock". */
/* If it fails, no lock is held on return. */
/* ------------------------------------------------------------------------ */
static ipfr_t *
ipfr_frag_new(ipf_main_softc_t *softc, ipf_frag_softc_t *softf,
	      fr_info_t *fin, u_32_t pass, ipfr_t *table[]
#ifdef USE_MUTEXES
	      , ipfrwlock_t *lock
#endif
	      )
{
	ipfr_t *fra, frag, *fran;
	u_int idx, off;
	frentry_t *fr;

	if (softf->ipfr_stats.ifs_inuse >= softf->ipfr_size) {
		FBUMPD(ifs_maximum);
		return (NULL);
	}

	if ((fin->fin_flx & (FI_FRAG|FI_BAD)) != FI_FRAG) {
		FBUMPD(ifs_newbad);
		return (NULL);
	}

	if (pass & FR_FRSTRICT) {
		if (fin->fin_off != 0) {
			FBUMPD(ifs_newrestrictnot0);
			return (NULL);
		}
	}

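	/*
	 * Build a hash key from the fields that identify a fragment set
	 * (protocol, IP id, source and destination addresses) and reduce
	 * it to an index into the fragment table.
	 */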
	memset(&frag, 0, sizeof(frag));
	frag.ipfr_v = fin->fin_v;
	idx = fin->fin_v;
	frag.ipfr_p = fin->fin_p;
	idx += fin->fin_p;
	frag.ipfr_id = fin->fin_id;
	idx += fin->fin_id;
	frag.ipfr_source = fin->fin_fi.fi_src;
	idx += frag.ipfr_src.s_addr;
	frag.ipfr_dest = fin->fin_fi.fi_dst;
	idx += frag.ipfr_dst.s_addr;
	frag.ipfr_ifp = fin->fin_ifp;
	idx *= 127;
	idx %= softf->ipfr_size;

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

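	/*
	 * For the first fragment (offset 0), record where its payload ends
	 * in units of 8 bytes so that later fragments which would overlap
	 * that region can be detected and flagged as bad.
	 */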
	off = fin->fin_off >> 3;
	if (off == 0) {
		char *ptr;
		int end;

#ifdef USE_INET6
		if (fin->fin_v == 6) {

			ptr = (char *)fin->fin_fraghdr +
			      sizeof(struct ip6_frag);
		} else
#endif
		{
			ptr = fin->fin_dp;
		}
		end = fin->fin_plen - (ptr - (char *)fin->fin_ip);
		frag.ipfr_firstend = end >> 3;
	} else {
		frag.ipfr_firstend = 0;
	}

	/*
	 * Allocate some memory if possible; if not, just record that we
	 * failed to do so.
	 */
	KMALLOC(fran, ipfr_t *);
	if (fran == NULL) {
		FBUMPD(ifs_nomem);
		return (NULL);
	}
	memset(fran, 0, sizeof(*fran));

	WRITE_ENTER(lock);

	/*
	 * First, make sure it isn't already there...
	 */
	for (fra = table[idx]; (fra != NULL); fra = fra->ipfr_hnext)
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp,
			  IPFR_CMPSZ)) {
			RWLOCK_EXIT(lock);
			FBUMPD(ifs_exists);
			KFREE(fran);
			return (NULL);
		}

	fra = fran;
	fran = NULL;
	fr = fin->fin_fr;
	fra->ipfr_rule = fr;
	if (fr != NULL) {
		MUTEX_ENTER(&fr->fr_lock);
		fr->fr_ref++;
		MUTEX_EXIT(&fr->fr_lock);
	}

	/*
	 * Insert the fragment into the fragment table, copying the struct
	 * used in the search with bcopy rather than reassigning each field.
	 * Set the ttl to the default.
	 */
	if ((fra->ipfr_hnext = table[idx]) != NULL)
		table[idx]->ipfr_hprev = &fra->ipfr_hnext;
	fra->ipfr_hprev = table + idx;
	fra->ipfr_data = NULL;
	table[idx] = fra;
	bcopy((char *)&frag.ipfr_ifp, (char *)&fra->ipfr_ifp, IPFR_CMPSZ);
	fra->ipfr_v = fin->fin_v;
	fra->ipfr_p = fin->fin_p;
	fra->ipfr_ttl = softc->ipf_ticks + softf->ipfr_ttl;
	fra->ipfr_firstend = frag.ipfr_firstend;

	/*
	 * Compute the offset at which the next fragment is expected to start.
	 */
	if (off == 0)
		fra->ipfr_seen0 = 1;
	fra->ipfr_off = off + (fin->fin_dlen >> 3);
	fra->ipfr_pass = pass;
	fra->ipfr_ref = 1;
	fra->ipfr_pkts = 1;
	fra->ipfr_bytes = fin->fin_plen;
	FBUMP(ifs_inuse);
	FBUMP(ifs_new);
	return (fra);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_new */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: fin(I) - pointer to packet information */
/* */
/* Add a new entry to the fragment cache table based on the current packet */
/* ------------------------------------------------------------------------ */
int
ipf_frag_new(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;

	if (softf->ipfr_lock != 0)
		return (-1);

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads,
			    &softc->ipf_frag);
#else
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_heads);
#endif
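	/*
	 * On success ipfr_frag_new() returns with the fragment write lock
	 * still held; link the new entry onto the tail of the packet list
	 * and then release the lock.
	 */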
	if (fra != NULL) {
		*softf->ipfr_tail = fra;
		fra->ipfr_prev = softf->ipfr_tail;
		softf->ipfr_tail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softc->ipf_frag);
	}
	return (fra ? 0 : -1);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_natnew */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: fin(I) - pointer to packet information */
/* nat(I) - pointer to NAT structure */
/* */
/* Create a new NAT fragment cache entry based on the current packet and */
/* the NAT structure for this "session". */
/* ------------------------------------------------------------------------ */
int
ipf_frag_natnew(ipf_main_softc_t *softc, fr_info_t *fin, u_32_t pass,
	nat_t *nat)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;

	if (softf->ipfr_lock != 0)
		return (0);

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab,
			    &softf->ipfr_natfrag);
#else
	fra = ipfr_frag_new(softc, softf, fin, pass, softf->ipfr_nattab);
#endif
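	/*
	 * Cross-link the new fragment entry and the NAT session so either
	 * side can find (and later clear) the other, append the entry to the
	 * NAT fragment list and drop the write lock that ipfr_frag_new()
	 * returned holding.
	 */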
	if (fra != NULL) {
		fra->ipfr_data = nat;
		nat->nat_data = fra;
		*softf->ipfr_nattail = fra;
		fra->ipfr_prev = softf->ipfr_nattail;
		softf->ipfr_nattail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softf->ipfr_natfrag);
		return (0);
	}
	return (-1);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_ipidnew */
/* Returns: int - 0 == success, -1 == error */
/* Parameters: fin(I) - pointer to packet information */
/* ipid(I) - new IP ID for this fragmented packet */
/* */
/* Create a new fragment cache entry for this packet and store, as a data */
/* pointer, the new IP ID value. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_ipidnew(fr_info_t *fin, u_32_t ipid)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;

	if (softf->ipfr_lock)
		return (0);

#ifdef USE_MUTEXES
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab,
			    &softf->ipfr_ipidfrag);
#else
	fra = ipfr_frag_new(softc, softf, fin, 0, softf->ipfr_ipidtab);
#endif
	if (fra != NULL) {
		fra->ipfr_data = (void *)(intptr_t)ipid;
		*softf->ipfr_ipidtail = fra;
		fra->ipfr_prev = softf->ipfr_ipidtail;
		softf->ipfr_ipidtail = &fra->ipfr_next;
		fra->ipfr_next = NULL;
		RWLOCK_EXIT(&softf->ipfr_ipidfrag);
	}
	return (fra ? 0 : -1);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_lookup */
/* Returns: ipfr_t * - pointer to ipfr_t structure if there's a */
/* matching entry in the frag table, else NULL */
/* Parameters: fin(I) - pointer to packet information */
/* table(I) - pointer to fragment cache table to search */
/* */
/* Check the fragment cache to see if there is already a record of this */
/* packet with its filter result known. */
/* */
/* If this function succeeds, it returns with a read lock held on "lock". */
/* If it fails, no lock is held on return. */
/* ------------------------------------------------------------------------ */
static ipfr_t *
ipf_frag_lookup(ipf_main_softc_t *softc, ipf_frag_softc_t *softf,
	fr_info_t *fin, ipfr_t *table[]
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
	)
{
	ipfr_t *f, frag;
	u_int idx;

	/*
	 * We don't want to let short packets match because they could be
	 * compromising the security of other rules that want to match on
	 * layer 4 fields (and can't because they have been fragmented off).
	 * Why do this check here? The counter acts as an indicator of this
	 * kind of attack, whereas if it was elsewhere, it wouldn't know if
	 * other matching packets had been seen.
	 */
	if (fin->fin_flx & FI_SHORT) {
		FBUMPD(ifs_short);
		return (NULL);
	}

	if ((fin->fin_flx & FI_BAD) != 0) {
		FBUMPD(ifs_bad);
		return (NULL);
	}

	/*
	 * For fragments, we record protocol, packet id, TOS and both IP#'s
	 * (these should all be the same for all fragments of a packet).
	 *
	 * Build up a hash value to index the table with.
	 */
	memset(&frag, 0, sizeof(frag));
	frag.ipfr_v = fin->fin_v;
	idx = fin->fin_v;
	frag.ipfr_p = fin->fin_p;
	idx += fin->fin_p;
	frag.ipfr_id = fin->fin_id;
	idx += fin->fin_id;
	frag.ipfr_source = fin->fin_fi.fi_src;
	idx += frag.ipfr_src.s_addr;
	frag.ipfr_dest = fin->fin_fi.fi_dst;
	idx += frag.ipfr_dst.s_addr;
	frag.ipfr_ifp = fin->fin_ifp;
	idx *= 127;
	idx %= softf->ipfr_size;

	frag.ipfr_optmsk = fin->fin_fi.fi_optmsk & IPF_OPTCOPY;
	frag.ipfr_secmsk = fin->fin_fi.fi_secmsk;
	frag.ipfr_auth = fin->fin_fi.fi_auth;

	READ_ENTER(lock);

	/*
	 * Check the table, being careful to only compare the right amount
	 * of data.
	 */
	for (f = table[idx]; f; f = f->ipfr_hnext) {
		if (!bcmp((char *)&frag.ipfr_ifp, (char *)&f->ipfr_ifp,
			  IPFR_CMPSZ)) {
			u_short off;

			/*
			 * XXX - We really need to be guarding against the
			 * retransmission of (src,dst,id,offset-range) here
			 * because a fragmented packet is never resent with
			 * the same IP ID# (or shouldn't).
			 */
			off = fin->fin_off >> 3;
			if (f->ipfr_seen0) {
				if (off == 0) {
					FBUMPD(ifs_retrans0);
					continue;
				}

				/*
				 * Case 3. See comment for frpr_fragment6.
				 */
				if ((f->ipfr_firstend != 0) &&
				    (off < f->ipfr_firstend)) {
					FBUMP(ifs_overlap);
					DT2(ifs_overlap, u_short, off,
					    ipfr_t *, f);
					DT3(ipf_fi_bad_ifs_overlap,
					    fr_info_t *, fin, u_short, off,
					    ipfr_t *, f);
					fin->fin_flx |= FI_BAD;
					break;
				}
			} else if (off == 0)
				f->ipfr_seen0 = 1;

			if (f != table[idx] && MUTEX_TRY_UPGRADE(lock)) {
				ipfr_t **fp;

				/*
				 * Move fragment info. to the top of the list
				 * to speed up searches. First, delink...
				 */
				fp = f->ipfr_hprev;
				(*fp) = f->ipfr_hnext;
				if (f->ipfr_hnext != NULL)
					f->ipfr_hnext->ipfr_hprev = fp;
				/*
				 * Then put back at the top of the chain.
				 */
				f->ipfr_hnext = table[idx];
				table[idx]->ipfr_hprev = &f->ipfr_hnext;
				f->ipfr_hprev = table + idx;
				table[idx] = f;
				MUTEX_DOWNGRADE(lock);
			}

			/*
			 * If we've followed the fragments, and this is the
			 * last (in order), shrink expiration time.
			 */
			if (off == f->ipfr_off) {
				f->ipfr_off = (fin->fin_dlen >> 3) + off;

				/*
				 * Well, we could shrink the expiration time
				 * but only if every fragment has been seen
				 * in order up to this, the last. ipfr_badorder
				 * is used here to count those out of order
				 * and if it is still 0 when we get to the last
				 * fragment then we can assume all of the
				 * fragments have been seen, in order.
				 */
#if 0
				/*
				 * Doing this properly requires moving it to
				 * the head of the list which is infeasible.
				 */
				if ((more == 0) && (f->ipfr_badorder == 0))
					f->ipfr_ttl = softc->ipf_ticks + 1;
#endif
			} else {
				f->ipfr_badorder++;
				FBUMPD(ifs_unordered);
				if (f->ipfr_pass & FR_FRSTRICT) {
					FBUMPD(ifs_strict);
					continue;
				}
			}
			f->ipfr_pkts++;
			f->ipfr_bytes += fin->fin_plen;
			FBUMP(ifs_hits);
			return (f);
		}
	}

	RWLOCK_EXIT(lock);
	FBUMP(ifs_miss);
	return (NULL);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_natknown */
/* Returns: nat_t* - pointer to 'parent' NAT structure if frag table */
/* match found, else NULL */
/* Parameters: fin(I) - pointer to packet information */
/* */
/* Functional interface for NAT lookups of the NAT fragment cache */
/* ------------------------------------------------------------------------ */
nat_t *
ipf_frag_natknown(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	nat_t *nat;
	ipfr_t *ipf;

	if ((softf->ipfr_lock) || !softf->ipfr_natlist)
		return (NULL);
#ifdef USE_MUTEXES
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab,
			      &softf->ipfr_natfrag);
#else
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_nattab);
#endif
	if (ipf != NULL) {
		nat = ipf->ipfr_data;
		/*
		 * This is the last fragment for this packet.
		 */
		if ((ipf->ipfr_ttl == softc->ipf_ticks + 1) && (nat != NULL)) {
			nat->nat_data = NULL;
			ipf->ipfr_data = NULL;
		}
		RWLOCK_EXIT(&softf->ipfr_natfrag);
	} else
		nat = NULL;
	return (nat);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_ipidknown */
/* Returns: u_32_t - IPv4 ID for this packet if match found, else */
/* return 0xffffffff to indicate no match. */
/* Parameters: fin(I) - pointer to packet information */
/* */
/* Functional interface for IP ID lookups of the IP ID fragment cache */
/* ------------------------------------------------------------------------ */
u_32_t
ipf_frag_ipidknown(fr_info_t *fin)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *ipf;
	u_32_t id;

	if (softf->ipfr_lock || !softf->ipfr_ipidlist)
		return (0xffffffff);

#ifdef USE_MUTEXES
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab,
			      &softf->ipfr_ipidfrag);
#else
	ipf = ipf_frag_lookup(softc, softf, fin, softf->ipfr_ipidtab);
#endif
	if (ipf != NULL) {
		id = (u_32_t)(intptr_t)ipf->ipfr_data;
		RWLOCK_EXIT(&softf->ipfr_ipidfrag);
	} else
		id = 0xffffffff;
	return (id);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_known */
/* Returns: frentry_t* - pointer to filter rule if a match is found in */
/* the frag cache table, else NULL. */
/* Parameters: fin(I) - pointer to packet information */
/* passp(O) - pointer to where to store rule flags returned */
/* */
/* Functional interface for normal lookups of the fragment cache. If a */
/* match is found, return the rule pointer and flags from the rule, except */
/* that if FR_LOGFIRST is set, reset FR_LOG. */
/* ------------------------------------------------------------------------ */
frentry_t *
ipf_frag_known(fr_info_t *fin, u_32_t *passp)
{
	ipf_main_softc_t *softc = fin->fin_main_soft;
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	frentry_t *fr = NULL;
	ipfr_t *fra;
	u_32_t pass;

	if ((softf->ipfr_lock) || (softf->ipfr_list == NULL))
		return (NULL);

#ifdef USE_MUTEXES
	fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads,
			      &softc->ipf_frag);
#else
	fra = ipf_frag_lookup(softc, softf, fin, softf->ipfr_heads);
#endif
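	/*
	 * If this fragment has been flagged as bad (for example it overlaps
	 * the first fragment), substitute the static block rule for the
	 * cached rule so the packet is dropped; otherwise use the rule that
	 * was recorded when the first fragment was seen.
	 */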
	if (fra != NULL) {
		if (fin->fin_flx & FI_BAD) {
			fr = &ipfr_block;
			fin->fin_reason = FRB_BADFRAG;
			DT2(ipf_frb_badfrag, fr_info_t *, fin, uint, fra);
		} else {
			fr = fra->ipfr_rule;
		}
		fin->fin_fr = fr;
		if (fr != NULL) {
			pass = fr->fr_flags;
			if ((pass & FR_KEEPSTATE) != 0) {
				fin->fin_flx |= FI_STATE;
				/*
				 * Reset the keep state flag here so that we
				 * don't try to add a new state entry because
				 * of a match here. That leads to blocking of
				 * the packet later because the add fails.
				 */
				pass &= ~FR_KEEPSTATE;
			}
			if ((pass & FR_LOGFIRST) != 0)
				pass &= ~(FR_LOGFIRST|FR_LOG);
			*passp = pass;
		}
		RWLOCK_EXIT(&softc->ipf_frag);
	}
	return (fr);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_natforget */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* ptr(I) - pointer to data structure */
/* */
/* Search through all of the fragment cache entries for NAT and wherever a */
/* pointer is found to match ptr, reset it to NULL. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_natforget(ipf_main_softc_t *softc, void *ptr)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fr;

	WRITE_ENTER(&softf->ipfr_natfrag);
	for (fr = softf->ipfr_natlist; fr; fr = fr->ipfr_next)
		if (fr->ipfr_data == ptr)
			fr->ipfr_data = NULL;
	RWLOCK_EXIT(&softf->ipfr_natfrag);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_delete */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* fra(I) - pointer to fragment structure to delete */
/* tail(IO) - pointer to the pointer to the tail of the frag */
/* list */
/* */
/* Remove a fragment cache table entry from the table & list. Also free */
/* the filter rule associated with it if, as a result of decreasing its */
/* reference count, it is no longer used. */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_delete(ipf_main_softc_t *softc, ipfr_t *fra, ipfr_t ***tail)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	if (fra->ipfr_next)
		fra->ipfr_next->ipfr_prev = fra->ipfr_prev;
	*fra->ipfr_prev = fra->ipfr_next;
	if (*tail == &fra->ipfr_next)
		*tail = fra->ipfr_prev;

	if (fra->ipfr_hnext)
		fra->ipfr_hnext->ipfr_hprev = fra->ipfr_hprev;
	*fra->ipfr_hprev = fra->ipfr_hnext;

	if (fra->ipfr_rule != NULL) {
		(void) ipf_derefrule(softc, &fra->ipfr_rule);
	}

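	/*
	 * Only free the entry if nothing else (e.g. an iterator) still holds
	 * a reference to it; otherwise the final ipf_frag_deref() frees it.
	 */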
	if (fra->ipfr_ref <= 0)
		ipf_frag_free(softf, fra);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_free */
/* Returns: Nil */
/* Parameters: softf(I) - pointer to fragment context information */
/* fra(I) - pointer to fragment structure to free */
/* */
/* Free up a fragment cache entry and bump relevant statistics. */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_free(ipf_frag_softc_t *softf, ipfr_t *fra)
{
	KFREE(fra);
	FBUMP(ifs_expire);
	softf->ipfr_stats.ifs_inuse--;
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_clear */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* */
/* Free memory in use by fragment state information kept. Do the normal */
/* fragment state stuff first and then the NAT-fragment table. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_clear(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t *fra;
	nat_t *nat;

	WRITE_ENTER(&softc->ipf_frag);
	while ((fra = softf->ipfr_list) != NULL) {
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_tail);
	}
	softf->ipfr_tail = &softf->ipfr_list;
	RWLOCK_EXIT(&softc->ipf_frag);

	WRITE_ENTER(&softc->ipf_nat);
	WRITE_ENTER(&softf->ipfr_natfrag);
	while ((fra = softf->ipfr_natlist) != NULL) {
		nat = fra->ipfr_data;
		if (nat != NULL) {
			if (nat->nat_data == fra)
				nat->nat_data = NULL;
		}
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
	}
	softf->ipfr_nattail = &softf->ipfr_natlist;
	RWLOCK_EXIT(&softf->ipfr_natfrag);
	RWLOCK_EXIT(&softc->ipf_nat);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_expire */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* */
/* Expire entries in the fragment cache table that have been there too long */
/* ------------------------------------------------------------------------ */
void
ipf_frag_expire(ipf_main_softc_t *softc)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;
	ipfr_t **fp, *fra;
	nat_t *nat;
	SPL_INT(s);

	if (softf->ipfr_lock)
		return;

	SPL_NET(s);
	WRITE_ENTER(&softc->ipf_frag);
	/*
	 * Go through the entire table, looking for entries to expire,
	 * which is indicated by the ttl being less than or equal to ipf_ticks.
	 */
	for (fp = &softf->ipfr_list; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > softc->ipf_ticks)
			break;
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_tail);
	}
	RWLOCK_EXIT(&softc->ipf_frag);

	WRITE_ENTER(&softf->ipfr_ipidfrag);
	for (fp = &softf->ipfr_ipidlist; ((fra = *fp) != NULL); ) {
		if (fra->ipfr_ttl > softc->ipf_ticks)
			break;
		fra->ipfr_ref--;
		ipf_frag_delete(softc, fra, &softf->ipfr_ipidtail);
	}
	RWLOCK_EXIT(&softf->ipfr_ipidfrag);

	/*
	 * Same again for the NAT table, except that if the structure also
	 * still points to a NAT structure, and the NAT structure points back
	 * at the one to be freed, NULL the reference from the NAT struct.
	 * NOTE: We need to grab both locks early, and in this order, so as
	 * to prevent a deadlock if both try to expire at the same time.
	 * The extra if() statement here is because it locks out all NAT
	 * operations - no need to do that if there are no entries in this
	 * list, right?
	 */
	if (softf->ipfr_natlist != NULL) {
		WRITE_ENTER(&softc->ipf_nat);
		WRITE_ENTER(&softf->ipfr_natfrag);
		for (fp = &softf->ipfr_natlist; ((fra = *fp) != NULL); ) {
			if (fra->ipfr_ttl > softc->ipf_ticks)
				break;
			nat = fra->ipfr_data;
			if (nat != NULL) {
				if (nat->nat_data == fra)
					nat->nat_data = NULL;
			}
			fra->ipfr_ref--;
			ipf_frag_delete(softc, fra, &softf->ipfr_nattail);
		}
		RWLOCK_EXIT(&softf->ipfr_natfrag);
		RWLOCK_EXIT(&softc->ipf_nat);
	}
	SPL_X(s);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_pkt_next */
/* Returns: int - 0 == success, else error */
/* Parameters: softc(I) - pointer to soft context main structure */
/* token(I) - pointer to token information for this caller */
/* itp(I) - pointer to generic iterator from caller */
/* */
/* This function is used to step through the fragment cache list used for */
/* filter rules. The hard work is done by the more generic ipf_frag_next. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_pkt_next(ipf_main_softc_t *softc, ipftoken_t *token,
	ipfgeniter_t *itp)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

#ifdef USE_MUTEXES
	return (ipf_frag_next(softc, token, itp, &softf->ipfr_list,
			      &softf->ipfr_frag));
#else
	return (ipf_frag_next(softc, token, itp, &softf->ipfr_list));
#endif
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_nat_next */
/* Returns: int - 0 == success, else error */
/* Parameters: softc(I) - pointer to soft context main structure */
/* token(I) - pointer to token information for this caller */
/* itp(I) - pointer to generic iterator from caller */
/* */
/* This function is used to step through the fragment cache list used for */
/* NAT. The hard work is done by the more generic ipf_frag_next. */
/* ------------------------------------------------------------------------ */
int
ipf_frag_nat_next(ipf_main_softc_t *softc, ipftoken_t *token,
	ipfgeniter_t *itp)
{
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

#ifdef USE_MUTEXES
	return (ipf_frag_next(softc, token, itp, &softf->ipfr_natlist,
			      &softf->ipfr_natfrag));
#else
	return (ipf_frag_next(softc, token, itp, &softf->ipfr_natlist));
#endif
}

/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_next */
/* Returns: int - 0 == success, else error */
/* Parameters: softc(I) - pointer to soft context main structure */
/* token(I) - pointer to token information for this caller */
/* itp(I) - pointer to generic iterator from caller */
/* top(I) - top of the fragment list */
/* lock(I) - fragment cache lock */
/* */
/* This function is used to iterate through the list of entries in the */
/* fragment cache. It increases the reference count on the one currently */
/* being returned so that the caller can come back and resume from it later.*/
/* */
/* This function is used for both the NAT fragment cache as well as the ipf */
/* fragment cache - hence the reason for passing in top and lock. */
/* ------------------------------------------------------------------------ */
static int
ipf_frag_next(ipf_main_softc_t *softc, ipftoken_t *token, ipfgeniter_t *itp,
	ipfr_t **top
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
	)
{
	ipfr_t *frag, *next, zero;
	int error = 0;

	if (itp->igi_data == NULL) {
		IPFERROR(20001);
		return (EFAULT);
	}

	if (itp->igi_nitems != 1) {
		IPFERROR(20003);
		return (EFAULT);
	}

	frag = token->ipt_data;

	READ_ENTER(lock);

	if (frag == NULL)
		next = *top;
	else
		next = frag->ipfr_next;

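	/*
	 * Take a reference on the entry being returned so iteration can
	 * resume from it later; if the list is exhausted, copy out a zeroed
	 * record instead and mark the token as complete.
	 */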
	if (next != NULL) {
		ATOMIC_INC(next->ipfr_ref);
		token->ipt_data = next;
	} else {
		bzero(&zero, sizeof(zero));
		next = &zero;
		token->ipt_data = NULL;
	}
	if (next->ipfr_next == NULL)
		ipf_token_mark_complete(token);

	RWLOCK_EXIT(lock);

	error = COPYOUT(next, itp->igi_data, sizeof(*next));
	if (error != 0)
		IPFERROR(20002);

	if (frag != NULL) {
#ifdef USE_MUTEXES
		ipf_frag_deref(softc, &frag, lock);
#else
		ipf_frag_deref(softc, &frag);
#endif
	}
	return (error);
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_pkt_deref */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* data(I) - pointer to frag cache pointer */
/* */
/* This function is the external interface for dropping a reference to a */
/* fragment cache entry used by filter rules. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_pkt_deref(ipf_main_softc_t *softc, void *data)
{
	ipfr_t **frp = data;

#ifdef USE_MUTEXES
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_frag);
#else
	ipf_frag_deref(softc->ipf_frag_soft, frp);
#endif
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_nat_deref */
/* Returns: Nil */
/* Parameters: softc(I) - pointer to soft context main structure */
/* data(I) - pointer to frag cache pointer */
/* */
/* This function is the external interface for dropping a reference to a */
/* fragment cache entry used by NAT table entries. */
/* ------------------------------------------------------------------------ */
void
ipf_frag_nat_deref(ipf_main_softc_t *softc, void *data)
{
	ipfr_t **frp = data;

#ifdef USE_MUTEXES
	ipf_frag_softc_t *softf = softc->ipf_frag_soft;

	ipf_frag_deref(softc->ipf_frag_soft, frp, &softf->ipfr_natfrag);
#else
	ipf_frag_deref(softc->ipf_frag_soft, frp);
#endif
}


/* ------------------------------------------------------------------------ */
/* Function: ipf_frag_deref */
/* Returns: Nil */
/* Parameters: frp(IO) - pointer to fragment structure to dereference */
/* lock(I) - lock associated with the fragment */
/* */
/* This function dereferences a fragment structure (ipfr_t). The pointer */
/* passed in will always be reset back to NULL, even if the structure is */
/* not freed, to enforce the notion that the caller is no longer entitled */
/* to use the pointer it is dropping the reference to. */
/* ------------------------------------------------------------------------ */
static void
ipf_frag_deref(void *arg, ipfr_t **frp
#ifdef USE_MUTEXES
	, ipfrwlock_t *lock
#endif
	)
{
	ipf_frag_softc_t *softf = arg;
	ipfr_t *fra;

	fra = *frp;
	*frp = NULL;

	WRITE_ENTER(lock);
	fra->ipfr_ref--;
	if (fra->ipfr_ref <= 0)
		ipf_frag_free(softf, fra);
	RWLOCK_EXIT(lock);
}