/*	$FreeBSD$ */
/*	$NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $	*/

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
  MTX_DEF);

static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
static int pfil_chain_remove(pfil_chain_t *, pfil_func_t, void *);

LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define	V_pfil_head_list	VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);
#define	V_pfil_lock	VNET(pfil_lock)

/*
 * pfil_run_hooks() runs the specified packet filter hook chain.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, struct inpcb *inp)
{
	struct rm_priotracker rmpt;
	struct packet_filter_hook *pfh;
	struct mbuf *m = *mp;
	int rv = 0;

	PFIL_RLOCK(ph, &rmpt);
	KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
	for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
	     pfh = TAILQ_NEXT(pfh, pfil_chain)) {
		if (pfh->pfil_func != NULL) {
			rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
			    inp);
			if (rv != 0 || m == NULL)
				break;
		}
	}
	PFIL_RUNLOCK(ph, &rmpt);
	*mp = m;
	return (rv);
}
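
/*
 * Illustrative sketch (not part of this file): what a hook of pfil_func_t
 * shape might look like and how the loop above treats its result.  Returning
 * a non-zero value, or setting *mp to NULL, ends the walk over the chain; a
 * hook that decides to drop the packet is expected to free the mbuf itself.
 * The names example_hook, example_softc and example_should_drop below are
 * hypothetical.
 *
 *	static int
 *	example_hook(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir,
 *	    struct inpcb *inp)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		if (dir == PFIL_IN && example_should_drop(sc, *mp)) {
 *			m_freem(*mp);
 *			*mp = NULL;
 *			return (EACCES);
 *		}
 *		return (0);
 *	}
 */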

static struct packet_filter_hook *
pfil_chain_get(int dir, struct pfil_head *ph)
{

	if (dir == PFIL_IN)
		return (TAILQ_FIRST(&ph->ph_in));
	else if (dir == PFIL_OUT)
		return (TAILQ_FIRST(&ph->ph_out));
	else
		return (NULL);
}

/*
 * pfil_try_rlock() acquires the rm reader lock for the specified head
 * if it can be taken without blocking.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires the rm reader lock for the specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases the reader lock for the specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires the writer lock for the specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

	PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases the writer lock for the specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

	PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns a non-zero value if the current thread holds
 * the exclusive lock on the specified head.
 */
int
pfil_wowned(struct pfil_head *ph)
{

	return (PFIL_WOWNED(ph));
}

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
	struct pfil_head *lph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
		if (ph->ph_type == lph->ph_type &&
		    ph->ph_un.phu_val == lph->ph_un.phu_val) {
			PFIL_HEADLIST_UNLOCK();
			return (EEXIST);
		}
	}
	PFIL_LOCK_INIT(ph);
	ph->ph_nhooks = 0;
	TAILQ_INIT(&ph->ph_in);
	TAILQ_INIT(&ph->ph_out);
	LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	return (0);
}
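
/*
 * Illustrative sketch (not part of this file): how a protocol might set up
 * and register its head, assuming the PFIL_TYPE_AF key and the ph_af alias
 * declared in net/pfil.h.  The name example_pfil_hook is hypothetical.
 *
 *	static struct pfil_head example_pfil_hook;
 *
 *	example_pfil_hook.ph_type = PFIL_TYPE_AF;
 *	example_pfil_hook.ph_af = AF_INET;
 *	if (pfil_head_register(&example_pfil_hook) != 0)
 *		printf("%s: WARNING: unable to register pfil head\n",
 *		    __func__);
 */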

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
	struct packet_filter_hook *pfh, *pfnext;

	PFIL_HEADLIST_LOCK();
	LIST_REMOVE(ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	PFIL_LOCK_DESTROY(ph);
	return (0);
}

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
	struct pfil_head *ph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
		if (ph->ph_type == type && ph->ph_un.phu_val == val)
			break;
	PFIL_HEADLIST_UNLOCK();
	return (ph);
}

/*
 * pfil_add_hook() adds a function to the packet filter hook chain.  The
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	struct packet_filter_hook *pfh1 = NULL;
	struct packet_filter_hook *pfh2 = NULL;
	int err;

	if (flags & PFIL_IN) {
		pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh1 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	if (flags & PFIL_OUT) {
		pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh2 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		pfh1->pfil_func = func;
		pfh1->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
		if (err)
			goto locked_error;
		ph->ph_nhooks++;
	}
	if (flags & PFIL_OUT) {
		pfh2->pfil_func = func;
		pfh2->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
		if (err) {
			if (flags & PFIL_IN) {
				/*
				 * pfil_chain_remove() frees the hook it
				 * unlinks, so forget pfh1 and fix up the
				 * hook count before unwinding.
				 */
				pfil_chain_remove(&ph->ph_in, func, arg);
				pfh1 = NULL;
				ph->ph_nhooks--;
			}
			goto locked_error;
		}
		ph->ph_nhooks++;
	}
	PFIL_WUNLOCK(ph);
	return (0);
locked_error:
	PFIL_WUNLOCK(ph);
error:
	if (pfh1 != NULL)
		free(pfh1, M_IFADDR);
	if (pfh2 != NULL)
		free(pfh2, M_IFADDR);
	return (err);
}
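
/*
 * Illustrative sketch (not part of this file): how a filter module might
 * look up the IPv4 head and attach one hook for both directions, assuming
 * PFIL_TYPE_AF from net/pfil.h and the hypothetical example_hook callback
 * (with its sc argument) sketched above after pfil_run_hooks().
 *
 *	struct pfil_head *ph;
 *	int error;
 *
 *	ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
 *	if (ph == NULL)
 *		return (ENOENT);
 *	error = pfil_add_hook(example_hook, sc, PFIL_IN | PFIL_OUT |
 *	    PFIL_WAITOK, ph);
 */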

/*
 * pfil_remove_hook() removes a specific function from the packet filter hook
 * chain.
 */
int
pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	int err = 0;

	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		err = pfil_chain_remove(&ph->ph_in, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	if ((err == 0) && (flags & PFIL_OUT)) {
		err = pfil_chain_remove(&ph->ph_out, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	PFIL_WUNLOCK(ph);
	return (err);
}
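
/*
 * Illustrative sketch (not part of this file): on unload, a module would
 * detach with the same function pointer, argument, and direction flags it
 * passed to pfil_add_hook(), e.g.:
 *
 *	(void)pfil_remove_hook(example_hook, sc, PFIL_IN | PFIL_OUT, ph);
 */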

/*
 * Internal: Add a new pfil hook into a hook chain.
 */
static int
pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
	struct packet_filter_hook *pfh;

	/*
	 * First make sure the hook is not already there.
	 */
	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == pfh1->pfil_func &&
		    pfh->pfil_arg == pfh1->pfil_arg)
			return (EEXIST);

	/*
	 * Insert the input list in reverse order of the output list so that
	 * the same path is followed in or out of the kernel.
	 */
	if (flags & PFIL_IN)
		TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
	else
		TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
	return (0);
}

/*
 * Internal: Remove a pfil hook from a hook chain.
 */
static int
pfil_chain_remove(pfil_chain_t *chain, pfil_func_t func, void *arg)
{
	struct packet_filter_hook *pfh;

	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
			TAILQ_REMOVE(chain, pfh, pfil_chain);
			free(pfh, M_IFADDR);
			return (0);
		}
	return (ENOENT);
}

/*
 * Per-vnet initialization, performed for every vnet instance (including
 * the first, of course).
 */
static int
vnet_pfil_init(const void *unused)
{

	LIST_INIT(&V_pfil_head_list);
	PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
	return (0);
}

/*
 * Called for the removal of each vnet instance.
 */
static int
vnet_pfil_uninit(const void *unused)
{

	KASSERT(LIST_EMPTY(&V_pfil_head_list),
	    ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
	PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
	return (0);
}

/* Define startup order. */
#define	PFIL_SYSINIT_ORDER	SI_SUB_PROTO_BEGIN
#define	PFIL_MODEVENT_ORDER	(SI_ORDER_FIRST) /* Slot in here on boot. */
#define	PFIL_VNET_ORDER		(PFIL_MODEVENT_ORDER + 2) /* Later still. */

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pfil_init, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
    vnet_pfil_uninit, NULL);