xref: /linux/net/unix/garbage.c (revision 99b5aa3c10c7cff1e97239fda93649222fc12d25)
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		at the beginning and unmark the non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() in the situation where we passed the
 *		descriptor via such a socket and then closed it (the
 *		descriptor). That would happen on each unix_gc() until the
 *		accept(). Since the struct file in question would go to the
 *		free list and might be reused... That might be the reason
 *		for random oopses in filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with the root in a dummy + a pointer (gc_current) to one
 *		of the nodes. The stack is represented as the path from
 *		gc_current to the dummy. Unmark now means "add to tree".
 *		Push == "make it a son of gc_current". Pop == "move gc_current
 *		to the parent". We keep only pointers to the parents
 *		(->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queue scanning.
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

#define GC_HEAD		((struct sock *)(-1))
#define GC_ORPHAN	((struct sock *)(-3))

static struct sock *gc_current = GC_HEAD; /* stack of objects to mark */
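
/*
 *	Encoding of the per-socket ->gc_tree field:
 *
 *	  GC_ORPHAN	socket is marked, i.e. not (yet) proven reachable
 *	  anything else	socket is in the "reachable" tree; the value is a
 *			pointer to its parent (GC_HEAD for the dummy root)
 *
 *	The mark stack is simply the chain of ->gc_tree parent pointers
 *	hanging off gc_current.  For example, pushing sockets A and then B
 *	(names purely illustrative) leaves:
 *
 *	  gc_current == B, unix_sk(B)->gc_tree == A,
 *	  unix_sk(A)->gc_tree == GC_HEAD
 *
 *	so pop_stack() only has to follow one ->gc_tree link.
 */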

atomic_t unix_tot_inflight = ATOMIC_INIT(0); /* total number of AF_UNIX files in flight */


static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 *	Socket ?
	 */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 *	PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}
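
/*
 *	unix_get_socket() is the glue between the fd-passing machinery and
 *	the GC: given a struct file (typically one found in an in-flight
 *	SCM_RIGHTS array), it returns the AF_UNIX socket behind it, or NULL
 *	if the file is not an AF_UNIX socket.  Being static, its only users
 *	are the in-flight accounting and the mark phase below.
 */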

/*
 *	Keep an in-flight count for the file descriptor if it refers
 *	to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		atomic_inc(&unix_sk(s)->inflight);
		atomic_inc(&unix_tot_inflight);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		atomic_dec(&unix_sk(s)->inflight);
		atomic_dec(&unix_tot_inflight);
	}
}
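
/*
 *	In this kernel the callers are the SCM_RIGHTS helpers in af_unix.c
 *	(unix_attach_fds() and unix_detach_fds()): the sender bumps the
 *	in-flight counts once per descriptor when an skb carrying an fd
 *	array is queued, and they are dropped again when the skb is
 *	received or destroyed.
 */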


/*
 *	Garbage Collector Support Functions
 */

static inline struct sock *pop_stack(void)
{
	struct sock *p = gc_current;
	gc_current = unix_sk(p)->gc_tree;
	return p;
}

static inline int empty_stack(void)
{
	return gc_current == GC_HEAD;
}

static void maybe_unmark_and_push(struct sock *x)
{
	struct unix_sock *u = unix_sk(x);

	if (u->gc_tree != GC_ORPHAN)
		return;
	sock_hold(x);
	u->gc_tree = gc_current;
	gc_current = x;
}
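
/*
 *	Note the reference counting: maybe_unmark_and_push() takes a
 *	reference with sock_hold() when it unmarks a socket, so nothing on
 *	the mark stack can be freed under us; the matching sock_put() is
 *	done in unix_gc() once the socket has been popped and its receive
 *	queue scanned.
 */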


/* The external entry point: unix_gc() */

void unix_gc(void)
{
	static DEFINE_MUTEX(unix_gc_sem);
	int i;
	struct sock *s;
	struct sk_buff_head hitlist;
	struct sk_buff *skb;

	/*
	 *	Avoid a recursive GC.
	 */

	if (!mutex_trylock(&unix_gc_sem))
		return;

	spin_lock(&unix_table_lock);

	forall_unix_sockets(i, s)
	{
		unix_sk(s)->gc_tree = GC_ORPHAN;
	}
	/*
	 *	Everything is now marked
	 */

	/* Invariant to be maintained:
		- everything unmarked is either:
		-- (a) on the stack, or
		-- (b) has all of its children unmarked
		- everything on the stack is always unmarked
		- nothing is ever pushed onto the stack twice, because:
		-- nothing previously unmarked is ever pushed on the stack
	 */
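
	/*
	 *	The last point is what bounds the mark phase below: a socket
	 *	is pushed only while it is still marked (->gc_tree ==
	 *	GC_ORPHAN) and is unmarked as part of the push, so every
	 *	socket enters the stack at most once and the mark loop
	 *	performs at most one pop per socket in the table.
	 */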

	/*
	 *	Push root set
	 */

	forall_unix_sockets(i, s)
	{
		int open_count = 0;

		/*
		 *	If not all instances of the descriptor are
		 *	in flight, the socket is still in use.
		 *
		 *	Special case: when socket s is embryonic, it may be
		 *	hashed but not yet in the queue of a listening socket.
		 *	In this case (see unix_create1()) we set an artificial
		 *	negative inflight counter to close the race window.
		 *	It is a trick, of course, and a dirty one.
		 */
		if (s->sk_socket && s->sk_socket->file)
			open_count = file_count(s->sk_socket->file);
		if (open_count > atomic_read(&unix_sk(s)->inflight))
			maybe_unmark_and_push(s);
	}
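
	/*
	 *	A concrete example of the test above: if one descriptor for
	 *	this socket's file is open in some process and one copy is
	 *	sitting in an un-received SCM_RIGHTS message, file_count() is
	 *	2 while ->inflight is 1, so the socket is directly reachable
	 *	from user space and belongs in the root set.  If every
	 *	remaining reference is in flight (count equal to ->inflight),
	 *	user space cannot reach the socket directly, and it survives
	 *	only if the mark phase finds it from a root.
	 */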

	/*
	 *	Mark phase
	 */

	while (!empty_stack())
	{
		struct sock *x = pop_stack();
		struct sock *sk;

		spin_lock(&x->sk_receive_queue.lock);
		skb = skb_peek(&x->sk_receive_queue);

		/*
		 *	Loop through the whole receive queue
		 */

		while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) {
			/*
			 *	Do we have file descriptors ?
			 */
			if (UNIXCB(skb).fp)
			{
				/*
				 *	Process the descriptors of this socket
				 */
				int nfd = UNIXCB(skb).fp->count;
				struct file **fp = UNIXCB(skb).fp->fp;
				while (nfd--)
				{
					/*
					 *	Get the socket the fd matches
					 *	if it indeed does so
					 */
					if ((sk = unix_get_socket(*fp++)) != NULL)
					{
						maybe_unmark_and_push(sk);
					}
				}
			}
			/* We have to scan not-yet-accepted ones too */
			if (x->sk_state == TCP_LISTEN)
				maybe_unmark_and_push(skb->sk);
			skb = skb->next;
		}
		spin_unlock(&x->sk_receive_queue.lock);
		sock_put(x);
	}
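
	/*
	 *	Mark phase complete: every socket reachable from the root set,
	 *	either through in-flight SCM_RIGHTS descriptors or through the
	 *	queues of listening sockets, now has a real parent pointer in
	 *	->gc_tree.  Anything still equal to GC_ORPHAN is garbage, kept
	 *	alive only by descriptor references travelling inside an
	 *	unreachable cycle; its queued SCM_RIGHTS skbs go on the
	 *	hitlist below.
	 */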

	skb_queue_head_init(&hitlist);

	forall_unix_sockets(i, s)
	{
		struct unix_sock *u = unix_sk(s);

		if (u->gc_tree == GC_ORPHAN) {
			struct sk_buff *nextsk;

			spin_lock(&s->sk_receive_queue.lock);
			skb = skb_peek(&s->sk_receive_queue);
			while (skb &&
			       skb != (struct sk_buff *)&s->sk_receive_queue) {
				nextsk = skb->next;
				/*
				 *	Do we have file descriptors ?
				 */
				if (UNIXCB(skb).fp) {
					__skb_unlink(skb,
						     &s->sk_receive_queue);
					__skb_queue_tail(&hitlist, skb);
				}
				skb = nextsk;
			}
			spin_unlock(&s->sk_receive_queue.lock);
		}
		u->gc_tree = GC_ORPHAN;
	}
	spin_unlock(&unix_table_lock);

	/*
	 *	Here we are. Hitlist is filled. Die.
	 */

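	/*
	 *	Note that the purge happens only after unix_table_lock has
	 *	been dropped: freeing the hitlist skbs releases the struct
	 *	file references attached to them, and the resulting fput()
	 *	may block (see the history at the top of this file), which
	 *	must not happen under a spinlock.
	 */
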
	__skb_queue_purge(&hitlist);
	mutex_unlock(&unix_gc_sem);
}