xref: /linux/net/unix/garbage.c (revision 7df9c24625b9981779afb8fcdbe2bb4765e61147)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * NET3:	Garbage Collector For AF_UNIX sockets
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Garbage Collector:
51da177e4SLinus Torvalds  *	Copyright (C) Barak A. Pearlmutter.
61da177e4SLinus Torvalds  *	Released under the GPL version 2 or later.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
91da177e4SLinus Torvalds  * If it doesn't work, blame me; it worked when Barak sent it.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Assumptions:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  *  - object w/ a bit
141da177e4SLinus Torvalds  *  - free list
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  * Current optimizations:
171da177e4SLinus Torvalds  *
181da177e4SLinus Torvalds  *  - explicit stack instead of recursion
191da177e4SLinus Torvalds  *  - tail recurse on first born instead of immediate push/pop
201da177e4SLinus Torvalds  *  - we gather the stuff that should not be killed into a tree,
211da177e4SLinus Torvalds  *    and the stack is just a path from the root to the current pointer.
221da177e4SLinus Torvalds  *
231da177e4SLinus Torvalds  *  Future optimizations:
241da177e4SLinus Torvalds  *
251da177e4SLinus Torvalds  *  - don't just push entire root set; process in place
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *	This program is free software; you can redistribute it and/or
281da177e4SLinus Torvalds  *	modify it under the terms of the GNU General Public License
291da177e4SLinus Torvalds  *	as published by the Free Software Foundation; either version
301da177e4SLinus Torvalds  *	2 of the License, or (at your option) any later version.
311da177e4SLinus Torvalds  *
321da177e4SLinus Torvalds  *  Fixes:
331da177e4SLinus Torvalds  *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
341da177e4SLinus Torvalds  *					Cope with changing max_files.
351da177e4SLinus Torvalds  *	Al Viro		11 Oct 1998
361da177e4SLinus Torvalds  *		The graph may have cycles. That is, we can send the descriptor
371da177e4SLinus Torvalds  *		of foo to bar and vice versa (see the sketch below this comment).
381da177e4SLinus Torvalds  *		The current code chokes on that. Fix: move the SCM_RIGHTS ones
391da177e4SLinus Torvalds  *		into a separate list and then skb_free() them all instead of
401da177e4SLinus Torvalds  *		doing explicit fput's. Another problem: since fput() may block,
411da177e4SLinus Torvalds  *		somebody may create a new unix_socket while we are in the middle
421da177e4SLinus Torvalds  *		of the sweep phase. Fix: invert the logic wrt MARKED. Mark
431da177e4SLinus Torvalds  *		everything at the beginning and unmark the non-junk ones.
441da177e4SLinus Torvalds  *
451da177e4SLinus Torvalds  *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
461da177e4SLinus Torvalds  *		sent to connect()'ed but still not accept()'ed sockets.
471da177e4SLinus Torvalds  *		Fixed. The old code had a slightly different problem here:
481da177e4SLinus Torvalds  *		an extra fput() in the situation where we passed the descriptor
491da177e4SLinus Torvalds  *		via such a socket and then closed it (the descriptor). That
501da177e4SLinus Torvalds  *		would happen on each unix_gc() until the accept(). Since the
511da177e4SLinus Torvalds  *		struct file in question would go to the free list and might be
521da177e4SLinus Torvalds  *		reused... That might be the reason for random oopses on
531da177e4SLinus Torvalds  *		filp_close() in unrelated processes.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  *	AV		28 Feb 1999
561da177e4SLinus Torvalds  *		Kill the explicit allocation of stack. Now we keep the tree
571da177e4SLinus Torvalds  *		with root in dummy + pointer (gc_current) to one of the nodes.
581da177e4SLinus Torvalds  *		Stack is represented as path from gc_current to dummy. Unmark
591da177e4SLinus Torvalds  *		now means "add to tree". Push == "make it a son of gc_current".
601da177e4SLinus Torvalds  *		Pop == "move gc_current to parent". We keep only pointers to
611da177e4SLinus Torvalds  *		parents (->gc_tree).
621da177e4SLinus Torvalds  *	AV		1 Mar 1999
631da177e4SLinus Torvalds  *		Damn. Added missing check for ->dead in listen queues scanning.
641da177e4SLinus Torvalds  *
651fd05ba5SMiklos Szeredi  *	Miklos Szeredi 25 Jun 2007
661fd05ba5SMiklos Szeredi  *		Reimplement with a cycle collecting algorithm. This should
671fd05ba5SMiklos Szeredi  *		solve several problems with the previous code, like being racy
681fd05ba5SMiklos Szeredi  *		wrt receive and holding up unrelated socket operations.
691da177e4SLinus Torvalds  */
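
/* For illustration, a minimal and purely hypothetical userspace sketch of the
 * cycle mentioned in the changelog above; it is not part of this file and
 * assumes only the standard socketpair()/sendmsg()/SCM_RIGHTS API.  Each end
 * of a socketpair sends its own descriptor to the other end and is then
 * closed, so the only remaining reference to either socket's file is the
 * SCM_RIGHTS message sitting in the other socket's receive queue.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void send_fd(int via, int fd)
{
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} ctl;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctl.buf,
		.msg_controllen = sizeof(ctl.buf),
	};
	struct cmsghdr *cmsg;

	memset(ctl.buf, 0, sizeof(ctl.buf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	sendmsg(via, &msg, 0);
}

int main(void)
{
	int s[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, s);
	send_fd(s[0], s[0]);	/* s[0] is now also referenced from s[1]'s queue */
	send_fd(s[1], s[1]);	/* s[1] is now also referenced from s[0]'s queue */
	close(s[0]);
	close(s[1]);
	/* Neither socket is reachable from user space any more, yet each one
	 * is pinned by the message queued on the other: only unix_gc() below
	 * can detect and break this cycle.
	 */
	return 0;
}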
701da177e4SLinus Torvalds 
711da177e4SLinus Torvalds #include <linux/kernel.h>
721da177e4SLinus Torvalds #include <linux/string.h>
731da177e4SLinus Torvalds #include <linux/socket.h>
741da177e4SLinus Torvalds #include <linux/un.h>
751da177e4SLinus Torvalds #include <linux/net.h>
761da177e4SLinus Torvalds #include <linux/fs.h>
771da177e4SLinus Torvalds #include <linux/skbuff.h>
781da177e4SLinus Torvalds #include <linux/netdevice.h>
791da177e4SLinus Torvalds #include <linux/file.h>
801da177e4SLinus Torvalds #include <linux/proc_fs.h>
814a3e2f71SArjan van de Ven #include <linux/mutex.h>
825f23b734Sdann frazier #include <linux/wait.h>
831da177e4SLinus Torvalds 
841da177e4SLinus Torvalds #include <net/sock.h>
851da177e4SLinus Torvalds #include <net/af_unix.h>
861da177e4SLinus Torvalds #include <net/scm.h>
87c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h>
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds /* Internal data structures and random procedures: */
901da177e4SLinus Torvalds 
911fd05ba5SMiklos Szeredi static LIST_HEAD(gc_inflight_list);
921fd05ba5SMiklos Szeredi static LIST_HEAD(gc_candidates);
931fd05ba5SMiklos Szeredi static DEFINE_SPINLOCK(unix_gc_lock);
945f23b734Sdann frazier static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
951da177e4SLinus Torvalds 
969305cfa4SPavel Emelyanov unsigned int unix_tot_inflight;
971da177e4SLinus Torvalds 
9825888e30SEric Dumazet struct sock *unix_get_socket(struct file *filp)
991da177e4SLinus Torvalds {
1001da177e4SLinus Torvalds 	struct sock *u_sock = NULL;
101496ad9aaSAl Viro 	struct inode *inode = file_inode(filp);
1021da177e4SLinus Torvalds 
103d1ab39f1SJason Eastman 	/* Socket ? */
104326be7b4SAl Viro 	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
1051da177e4SLinus Torvalds 		struct socket *sock = SOCKET_I(inode);
1061da177e4SLinus Torvalds 		struct sock *s = sock->sk;
1071da177e4SLinus Torvalds 
108d1ab39f1SJason Eastman 		/* PF_UNIX ? */
1091da177e4SLinus Torvalds 		if (s && sock->ops && sock->ops->family == PF_UNIX)
1101da177e4SLinus Torvalds 			u_sock = s;
1111da177e4SLinus Torvalds 	}
1121da177e4SLinus Torvalds 	return u_sock;
1131da177e4SLinus Torvalds }
1141da177e4SLinus Torvalds 
115d1ab39f1SJason Eastman /* Keep track of how many times a file descriptor is in flight, if it
1161da177e4SLinus Torvalds  * refers to an AF_UNIX socket.
1171da177e4SLinus Torvalds  */
1181da177e4SLinus Torvalds 
119415e3d3eSHannes Frederic Sowa void unix_inflight(struct user_struct *user, struct file *fp)
1201da177e4SLinus Torvalds {
1211da177e4SLinus Torvalds 	struct sock *s = unix_get_socket(fp);
122d1ab39f1SJason Eastman 
123712f4aadSwilly tarreau 	spin_lock(&unix_gc_lock);
124712f4aadSwilly tarreau 
1251da177e4SLinus Torvalds 	if (s) {
1261fd05ba5SMiklos Szeredi 		struct unix_sock *u = unix_sk(s);
127d1ab39f1SJason Eastman 
128516e0cc5SAl Viro 		if (atomic_long_inc_return(&u->inflight) == 1) {
1291fd05ba5SMiklos Szeredi 			BUG_ON(!list_empty(&u->link));
1301fd05ba5SMiklos Szeredi 			list_add_tail(&u->link, &gc_inflight_list);
1311fd05ba5SMiklos Szeredi 		} else {
1321fd05ba5SMiklos Szeredi 			BUG_ON(list_empty(&u->link));
1331fd05ba5SMiklos Szeredi 		}
1349305cfa4SPavel Emelyanov 		unix_tot_inflight++;
1351da177e4SLinus Torvalds 	}
136415e3d3eSHannes Frederic Sowa 	user->unix_inflight++;
137712f4aadSwilly tarreau 	spin_unlock(&unix_gc_lock);
1381da177e4SLinus Torvalds }
1391da177e4SLinus Torvalds 
140415e3d3eSHannes Frederic Sowa void unix_notinflight(struct user_struct *user, struct file *fp)
1411da177e4SLinus Torvalds {
1421da177e4SLinus Torvalds 	struct sock *s = unix_get_socket(fp);
143d1ab39f1SJason Eastman 
144712f4aadSwilly tarreau 	spin_lock(&unix_gc_lock);
145712f4aadSwilly tarreau 
1461da177e4SLinus Torvalds 	if (s) {
1471fd05ba5SMiklos Szeredi 		struct unix_sock *u = unix_sk(s);
148d1ab39f1SJason Eastman 
149*7df9c246SAndrey Ulanov 		BUG_ON(!atomic_long_read(&u->inflight));
1501fd05ba5SMiklos Szeredi 		BUG_ON(list_empty(&u->link));
151d1ab39f1SJason Eastman 
152516e0cc5SAl Viro 		if (atomic_long_dec_and_test(&u->inflight))
1531fd05ba5SMiklos Szeredi 			list_del_init(&u->link);
1549305cfa4SPavel Emelyanov 		unix_tot_inflight--;
1551da177e4SLinus Torvalds 	}
156415e3d3eSHannes Frederic Sowa 	user->unix_inflight--;
157712f4aadSwilly tarreau 	spin_unlock(&unix_gc_lock);
1581da177e4SLinus Torvalds }
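
/* A hypothetical sketch (not the real af_unix sender code) of how the two
 * helpers above are meant to be paired: every file attached to an outgoing
 * SCM_RIGHTS message is counted in flight while the message sits in a
 * receive queue, and counted back out when the message is consumed or
 * destroyed.  "fpl" has the same count/fp layout as the UNIXCB(skb).fp list
 * walked by scan_inflight() below.
 */
static void example_account_fds(struct user_struct *user,
				struct scm_fp_list *fpl, bool attach)
{
	int i;

	for (i = 0; i < fpl->count; i++) {
		if (attach)
			unix_inflight(user, fpl->fp[i]);
		else
			unix_notinflight(user, fpl->fp[i]);
	}
}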
1591da177e4SLinus Torvalds 
1605c80f1aeSPavel Emelyanov static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
1611fd05ba5SMiklos Szeredi 			  struct sk_buff_head *hitlist)
1621da177e4SLinus Torvalds {
1631da177e4SLinus Torvalds 	struct sk_buff *skb;
1641fd05ba5SMiklos Szeredi 	struct sk_buff *next;
1651da177e4SLinus Torvalds 
1661da177e4SLinus Torvalds 	spin_lock(&x->sk_receive_queue.lock);
167a2f3be17SIlpo Järvinen 	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
168d1ab39f1SJason Eastman 		/* Do we have file descriptors ? */
1691fd05ba5SMiklos Szeredi 		if (UNIXCB(skb).fp) {
1701fd05ba5SMiklos Szeredi 			bool hit = false;
171d1ab39f1SJason Eastman 			/* Process the descriptors of this socket */
1721da177e4SLinus Torvalds 			int nfd = UNIXCB(skb).fp->count;
1731da177e4SLinus Torvalds 			struct file **fp = UNIXCB(skb).fp->fp;
174d1ab39f1SJason Eastman 
1751fd05ba5SMiklos Szeredi 			while (nfd--) {
176d1ab39f1SJason Eastman 				/* Get the AF_UNIX socket this fd refers to, if any */
1771fd05ba5SMiklos Szeredi 				struct sock *sk = unix_get_socket(*fp++);
178d1ab39f1SJason Eastman 
1791fd05ba5SMiklos Szeredi 				if (sk) {
1806209344fSMiklos Szeredi 					struct unix_sock *u = unix_sk(sk);
1816209344fSMiklos Szeredi 
182d1ab39f1SJason Eastman 					/* Ignore non-candidates; they could
1836209344fSMiklos Szeredi 					 * have been added to the queues after
1846209344fSMiklos Szeredi 					 * starting the garbage collection
1856209344fSMiklos Szeredi 					 */
18660bc851aSEric Dumazet 					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
1871fd05ba5SMiklos Szeredi 						hit = true;
188d1ab39f1SJason Eastman 
1896209344fSMiklos Szeredi 						func(u);
1906209344fSMiklos Szeredi 					}
1911da177e4SLinus Torvalds 				}
1921da177e4SLinus Torvalds 			}
1931fd05ba5SMiklos Szeredi 			if (hit && hitlist != NULL) {
1941fd05ba5SMiklos Szeredi 				__skb_unlink(skb, &x->sk_receive_queue);
1951fd05ba5SMiklos Szeredi 				__skb_queue_tail(hitlist, skb);
1961da177e4SLinus Torvalds 			}
1971fd05ba5SMiklos Szeredi 		}
1981da177e4SLinus Torvalds 	}
1991da177e4SLinus Torvalds 	spin_unlock(&x->sk_receive_queue.lock);
2001da177e4SLinus Torvalds }
2011da177e4SLinus Torvalds 
2025c80f1aeSPavel Emelyanov static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
2031fd05ba5SMiklos Szeredi 			  struct sk_buff_head *hitlist)
2041da177e4SLinus Torvalds {
205d1ab39f1SJason Eastman 	if (x->sk_state != TCP_LISTEN) {
2061fd05ba5SMiklos Szeredi 		scan_inflight(x, func, hitlist);
207d1ab39f1SJason Eastman 	} else {
2081fd05ba5SMiklos Szeredi 		struct sk_buff *skb;
2091fd05ba5SMiklos Szeredi 		struct sk_buff *next;
2101fd05ba5SMiklos Szeredi 		struct unix_sock *u;
2111fd05ba5SMiklos Szeredi 		LIST_HEAD(embryos);
2121da177e4SLinus Torvalds 
213d1ab39f1SJason Eastman 		/* For a listening socket, collect the queued embryos
2141fd05ba5SMiklos Szeredi 		 * and perform a scan on them as well.
2151da177e4SLinus Torvalds 		 */
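		/* (An embryo is a socket created by an incoming connect()
		 * that has not been accept()ed yet: it has no struct file of
		 * its own and is reachable as skb->sk of an skb queued on the
		 * listener's receive queue, which is what the walk below
		 * picks up.)
		 */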
2161fd05ba5SMiklos Szeredi 		spin_lock(&x->sk_receive_queue.lock);
217a2f3be17SIlpo Järvinen 		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
2181fd05ba5SMiklos Szeredi 			u = unix_sk(skb->sk);
2191da177e4SLinus Torvalds 
220d1ab39f1SJason Eastman 			/* An embryo cannot be in-flight, so it's safe
2211fd05ba5SMiklos Szeredi 			 * to use the list link.
2221fd05ba5SMiklos Szeredi 			 */
2231fd05ba5SMiklos Szeredi 			BUG_ON(!list_empty(&u->link));
2241fd05ba5SMiklos Szeredi 			list_add_tail(&u->link, &embryos);
2251fd05ba5SMiklos Szeredi 		}
2261fd05ba5SMiklos Szeredi 		spin_unlock(&x->sk_receive_queue.lock);
2271fd05ba5SMiklos Szeredi 
2281fd05ba5SMiklos Szeredi 		while (!list_empty(&embryos)) {
2291fd05ba5SMiklos Szeredi 			u = list_entry(embryos.next, struct unix_sock, link);
2301fd05ba5SMiklos Szeredi 			scan_inflight(&u->sk, func, hitlist);
2311fd05ba5SMiklos Szeredi 			list_del_init(&u->link);
2321fd05ba5SMiklos Szeredi 		}
2331fd05ba5SMiklos Szeredi 	}
2341fd05ba5SMiklos Szeredi }
2351fd05ba5SMiklos Szeredi 
2365c80f1aeSPavel Emelyanov static void dec_inflight(struct unix_sock *usk)
2371fd05ba5SMiklos Szeredi {
238516e0cc5SAl Viro 	atomic_long_dec(&usk->inflight);
2391fd05ba5SMiklos Szeredi }
2401fd05ba5SMiklos Szeredi 
2415c80f1aeSPavel Emelyanov static void inc_inflight(struct unix_sock *usk)
2421fd05ba5SMiklos Szeredi {
243516e0cc5SAl Viro 	atomic_long_inc(&usk->inflight);
2441fd05ba5SMiklos Szeredi }
2451fd05ba5SMiklos Szeredi 
2465c80f1aeSPavel Emelyanov static void inc_inflight_move_tail(struct unix_sock *u)
2471fd05ba5SMiklos Szeredi {
248516e0cc5SAl Viro 	atomic_long_inc(&u->inflight);
249d1ab39f1SJason Eastman 	/* If this still might be part of a cycle, move it to the end
2506209344fSMiklos Szeredi 	 * of the list, so that it's checked even if it was already
2516209344fSMiklos Szeredi 	 * passed over
2521fd05ba5SMiklos Szeredi 	 */
25360bc851aSEric Dumazet 	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
2541fd05ba5SMiklos Szeredi 		list_move_tail(&u->link, &gc_candidates);
2551fd05ba5SMiklos Szeredi }
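
/* The three helpers above are the callbacks that unix_gc() below feeds to
 * scan_children(): dec_inflight() subtracts the references that a candidate's
 * queues hold on other candidates, inc_inflight_move_tail() restores them for
 * sockets that turn out to be reachable (re-queueing still-suspect ones for
 * another pass), and inc_inflight() restores them for the true garbage just
 * before its skbs are moved to the hitlist, so the counters stay consistent
 * when those skbs are finally freed.
 */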
2561fd05ba5SMiklos Szeredi 
257505e907dSFabian Frederick static bool gc_in_progress;
2589915672dSEric Dumazet #define UNIX_INFLIGHT_TRIGGER_GC 16000
2591fd05ba5SMiklos Szeredi 
2605f23b734Sdann frazier void wait_for_unix_gc(void)
2615f23b734Sdann frazier {
262d1ab39f1SJason Eastman 	/* If the number of in-flight sockets is insane,
2639915672dSEric Dumazet 	 * force a garbage collection right now.
2649915672dSEric Dumazet 	 */
2659915672dSEric Dumazet 	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
2669915672dSEric Dumazet 		unix_gc();
2675f23b734Sdann frazier 	wait_event(unix_gc_wait, gc_in_progress == false);
2685f23b734Sdann frazier }
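
/* wait_for_unix_gc() is meant to be called from the sendmsg paths before more
 * descriptors are put in flight, so that a burst of SCM_RIGHTS traffic cannot
 * outrun the garbage collector indefinitely.
 */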
2695f23b734Sdann frazier 
2705f23b734Sdann frazier /* The external entry point: unix_gc() */
2715f23b734Sdann frazier void unix_gc(void)
2725f23b734Sdann frazier {
2731fd05ba5SMiklos Szeredi 	struct unix_sock *u;
2741fd05ba5SMiklos Szeredi 	struct unix_sock *next;
2751fd05ba5SMiklos Szeredi 	struct sk_buff_head hitlist;
2761fd05ba5SMiklos Szeredi 	struct list_head cursor;
2776209344fSMiklos Szeredi 	LIST_HEAD(not_cycle_list);
2781fd05ba5SMiklos Szeredi 
2791fd05ba5SMiklos Szeredi 	spin_lock(&unix_gc_lock);
2801fd05ba5SMiklos Szeredi 
2811fd05ba5SMiklos Szeredi 	/* Avoid a recursive GC. */
2821fd05ba5SMiklos Szeredi 	if (gc_in_progress)
2831fd05ba5SMiklos Szeredi 		goto out;
2841fd05ba5SMiklos Szeredi 
2851fd05ba5SMiklos Szeredi 	gc_in_progress = true;
286d1ab39f1SJason Eastman 	/* First, select candidates for garbage collection.  Only
2871fd05ba5SMiklos Szeredi 	 * in-flight sockets are considered, and from those only the ones
2881fd05ba5SMiklos Szeredi 	 * which don't have any external reference.
2891fd05ba5SMiklos Szeredi 	 *
2901fd05ba5SMiklos Szeredi 	 * Holding unix_gc_lock will protect these candidates from
2911fd05ba5SMiklos Szeredi 	 * being detached, and hence from gaining an external
2926209344fSMiklos Szeredi 	 * reference.  Since there are no possible receivers, all
2936209344fSMiklos Szeredi 	 * buffers currently on the candidates' queues stay there
2946209344fSMiklos Szeredi 	 * during the garbage collection.
2956209344fSMiklos Szeredi 	 *
2966209344fSMiklos Szeredi 	 * We also know that no new candidate can be added onto the
2976209344fSMiklos Szeredi 	 * receive queues.  Other, non candidate sockets _can_ be
2986209344fSMiklos Szeredi 	 * added to queue, so we must make sure only to touch
2996209344fSMiklos Szeredi 	 * candidates.
3001fd05ba5SMiklos Szeredi 	 */
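	/* Worked example of the test below: a socket whose file has
	 * file_count == 2 with both references coming from SCM_RIGHTS
	 * messages queued on receive queues (inflight == 2) has no user-space
	 * fd left, can only be reached through those queued messages, and so
	 * becomes a candidate.  With file_count == 3 and inflight == 2 an fd
	 * still exists somewhere and the socket is skipped.
	 */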
3011fd05ba5SMiklos Szeredi 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
302516e0cc5SAl Viro 		long total_refs;
303516e0cc5SAl Viro 		long inflight_refs;
3041fd05ba5SMiklos Szeredi 
3051fd05ba5SMiklos Szeredi 		total_refs = file_count(u->sk.sk_socket->file);
306516e0cc5SAl Viro 		inflight_refs = atomic_long_read(&u->inflight);
3071fd05ba5SMiklos Szeredi 
3081fd05ba5SMiklos Szeredi 		BUG_ON(inflight_refs < 1);
3091fd05ba5SMiklos Szeredi 		BUG_ON(total_refs < inflight_refs);
3101fd05ba5SMiklos Szeredi 		if (total_refs == inflight_refs) {
3111fd05ba5SMiklos Szeredi 			list_move_tail(&u->link, &gc_candidates);
31260bc851aSEric Dumazet 			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
31360bc851aSEric Dumazet 			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
3141fd05ba5SMiklos Szeredi 		}
3151fd05ba5SMiklos Szeredi 	}
3161fd05ba5SMiklos Szeredi 
317d1ab39f1SJason Eastman 	/* Now remove all internal in-flight references to children of
3181fd05ba5SMiklos Szeredi 	 * the candidates.
3191fd05ba5SMiklos Szeredi 	 */
3201fd05ba5SMiklos Szeredi 	list_for_each_entry(u, &gc_candidates, link)
3211fd05ba5SMiklos Szeredi 		scan_children(&u->sk, dec_inflight, NULL);
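	/* In the socketpair cycle sketched near the top of this file, both
	 * sockets are candidates and each holds the only in-flight reference
	 * to the other, so both inflight counts drop to zero here.
	 */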
3221fd05ba5SMiklos Szeredi 
323d1ab39f1SJason Eastman 	/* Restore the references for children of all candidates
3241fd05ba5SMiklos Szeredi 	 * which still have remaining references.  Do this recursively, so
3251fd05ba5SMiklos Szeredi 	 * only those remain, which form cyclic references.
3261fd05ba5SMiklos Szeredi 	 *
3271fd05ba5SMiklos Szeredi 	 * Use a "cursor" link, to make the list traversal safe, even
3281fd05ba5SMiklos Szeredi 	 * though elements might be moved about.
3291fd05ba5SMiklos Szeredi 	 */
3301fd05ba5SMiklos Szeredi 	list_add(&cursor, &gc_candidates);
3311fd05ba5SMiklos Szeredi 	while (cursor.next != &gc_candidates) {
3321fd05ba5SMiklos Szeredi 		u = list_entry(cursor.next, struct unix_sock, link);
3331fd05ba5SMiklos Szeredi 
3341fd05ba5SMiklos Szeredi 		/* Move cursor to after the current position. */
3351fd05ba5SMiklos Szeredi 		list_move(&cursor, &u->link);
3361fd05ba5SMiklos Szeredi 
337516e0cc5SAl Viro 		if (atomic_long_read(&u->inflight) > 0) {
3386209344fSMiklos Szeredi 			list_move_tail(&u->link, &not_cycle_list);
33960bc851aSEric Dumazet 			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
3401fd05ba5SMiklos Szeredi 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
3411fd05ba5SMiklos Szeredi 		}
3421fd05ba5SMiklos Szeredi 	}
3431fd05ba5SMiklos Szeredi 	list_del(&cursor);
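	/* A candidate that still had a positive inflight count above was
	 * referenced by something that is not itself garbage, so it cannot be
	 * part of a dead cycle and has been moved to not_cycle_list.  In the
	 * pure socketpair cycle nothing is restored and both sockets stay on
	 * gc_candidates.
	 */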
3441fd05ba5SMiklos Szeredi 
345*7df9c246SAndrey Ulanov 	/* Now gc_candidates contains only garbage.  Restore original
346*7df9c246SAndrey Ulanov 	 * inflight counters for these as well, and remove the skbuffs
347*7df9c246SAndrey Ulanov 	 * which are creating the cycle(s).
348*7df9c246SAndrey Ulanov 	 */
349*7df9c246SAndrey Ulanov 	skb_queue_head_init(&hitlist);
350*7df9c246SAndrey Ulanov 	list_for_each_entry(u, &gc_candidates, link)
351*7df9c246SAndrey Ulanov 		scan_children(&u->sk, inc_inflight, &hitlist);
352*7df9c246SAndrey Ulanov 
353d1ab39f1SJason Eastman 	/* not_cycle_list contains those sockets which do not make up a
3546209344fSMiklos Szeredi 	 * cycle.  Restore these to the inflight list.
3556209344fSMiklos Szeredi 	 */
3566209344fSMiklos Szeredi 	while (!list_empty(&not_cycle_list)) {
3576209344fSMiklos Szeredi 		u = list_entry(not_cycle_list.next, struct unix_sock, link);
35860bc851aSEric Dumazet 		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
3596209344fSMiklos Szeredi 		list_move_tail(&u->link, &gc_inflight_list);
3606209344fSMiklos Szeredi 	}
3616209344fSMiklos Szeredi 
3621fd05ba5SMiklos Szeredi 	spin_unlock(&unix_gc_lock);
3631fd05ba5SMiklos Szeredi 
3641fd05ba5SMiklos Szeredi 	/* Here we are. Hitlist is filled. Die. */
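	/* Purging the hitlist drops the last references to the files that
	 * made up the cycle(s), which finally lets the sockets be released.
	 * It is done with unix_gc_lock dropped because the skb destructors
	 * wind up back in unix_notinflight(), which takes the lock itself.
	 */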
3651da177e4SLinus Torvalds 	__skb_queue_purge(&hitlist);
3661fd05ba5SMiklos Szeredi 
3671fd05ba5SMiklos Szeredi 	spin_lock(&unix_gc_lock);
3681fd05ba5SMiklos Szeredi 
3691fd05ba5SMiklos Szeredi 	/* All candidates should have been detached by now. */
3701fd05ba5SMiklos Szeredi 	BUG_ON(!list_empty(&gc_candidates));
3711fd05ba5SMiklos Szeredi 	gc_in_progress = false;
3725f23b734Sdann frazier 	wake_up(&unix_gc_wait);
3731fd05ba5SMiklos Szeredi 
3741fd05ba5SMiklos Szeredi  out:
3751fd05ba5SMiklos Szeredi 	spin_unlock(&unix_gc_lock);
3761da177e4SLinus Torvalds }
377