xref: /linux/net/unix/garbage.c (revision fb72014d98afd51e85aab9c061344ef32d615606)
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file
 *		in question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
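
/*
 * Illustrative user-space sketch (not part of this file; send_fd() is only
 * shorthand for sendmsg() with an SCM_RIGHTS control message, not a real
 * API): the kind of garbage this collector exists for is a reference cycle
 * that user space can no longer reach, e.g.
 *
 *	int a[2], b[2];
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, a);
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, b);
 *	send_fd(a[1], b[0]);	-- b[0] is now in flight, queued on a[0]
 *	send_fd(b[1], a[0]);	-- a[0] is now in flight, queued on b[0]
 *	close(a[0]); close(a[1]);
 *	close(b[0]); close(b[1]);
 *
 * After the close()s the only references left to a[0]'s and b[0]'s files
 * are the in-flight ones they hold on each other's receive queues, so
 * neither socket can be released through a normal fput() and unix_gc()
 * has to detect and break the cycle.
 */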

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

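/*
 * gc_inflight_list holds every unix_sock that currently has at least one
 * in-flight reference (linked through unix_sock->link); during a
 * collection, those with no external references are moved over to
 * gc_candidates.  Both lists and unix_tot_inflight are updated under
 * unix_gc_lock.
 */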
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

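/*
 * Map a struct file back to the AF_UNIX socket it refers to; returns NULL
 * if the file is not a PF_UNIX socket.
 */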
static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 *	Socket ?
	 */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 *	PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 *	Keep count of how many times a file descriptor is in flight,
 *	provided it refers to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

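/*
 * Drop one in-flight reference for the file descriptor's socket; once the
 * in-flight count reaches zero the socket leaves gc_inflight_list.
 */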
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}

static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
	return (struct sk_buff *)&sk->sk_receive_queue;
}

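/*
 * Iterate over a receive queue; the next pointer is sampled before the
 * loop body runs, so the current skb may safely be unlinked (e.g. moved
 * onto a hitlist) while walking the queue.
 */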
#define receive_queue_for_each_skb(sk, next, skb) \
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)

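/*
 * Walk the receive queue of 'x'; for every SCM_RIGHTS skb on it, apply
 * 'func' to each attached descriptor that is a garbage collection
 * candidate.  When 'hitlist' is non-NULL, skbs referencing at least one
 * candidate are unlinked from the queue and gathered on the hitlist.
 */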
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket this fd refers to,
				 *	if it refers to one at all
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (u->gc_candidate) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

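/*
 * Run scan_inflight() on 'x' itself or, if 'x' is a listening socket, on
 * every queued but not yet accept()'ed socket (the embryos), since those
 * receive queues can hold in-flight descriptors too.
 */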
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

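/*
 * Per-socket callbacks handed to scan_children() by unix_gc() below, used
 * to adjust the in-flight counters while the candidate set is narrowed.
 */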
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (u->gc_maybe_cycle)
		list_move_tail(&u->link, &gc_candidates);
}

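/*
 * gc_in_progress is true while unix_gc() runs; wait_for_unix_gc() lets
 * callers sleep until any collection currently running has finished.
 */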
static bool gc_in_progress = false;

void wait_for_unix_gc(void)
{
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
			u->gc_maybe_cycle = 1;
		}
	}

	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates
	 * which still have remaining references.  Do this recursively,
	 * so that only those which form cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			u->gc_maybe_cycle = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		u->gc_candidate = 0;
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

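	/*
	 * Purge the hitlist with unix_gc_lock dropped: freeing these skbs
	 * releases the descriptors they carry, which ends up back in
	 * unix_notinflight() (taking unix_gc_lock again) and in fput(),
	 * which may block.
	 */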
	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
389