xref: /linux/net/unix/garbage.c (revision 97af84a6bba2ab2b9c704c08e67de3b5ea551bb2)
1a85036f6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * NET3:	Garbage Collector For AF_UNIX sockets
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Garbage Collector:
61da177e4SLinus Torvalds  *	Copyright (C) Barak A. Pearlmutter.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
91da177e4SLinus Torvalds  * If it doesn't work blame me, it worked when Barak sent it.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Assumptions:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  *  - object w/ a bit
141da177e4SLinus Torvalds  *  - free list
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  * Current optimizations:
171da177e4SLinus Torvalds  *
181da177e4SLinus Torvalds  *  - explicit stack instead of recursion
191da177e4SLinus Torvalds  *  - tail recurse on first born instead of immediate push/pop
201da177e4SLinus Torvalds  *  - we gather the stuff that should not be killed into tree
211da177e4SLinus Torvalds  *    and stack is just a path from root to the current pointer.
221da177e4SLinus Torvalds  *
231da177e4SLinus Torvalds  *  Future optimizations:
241da177e4SLinus Torvalds  *
251da177e4SLinus Torvalds  *  - don't just push entire root set; process in place
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *  Fixes:
281da177e4SLinus Torvalds  *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
291da177e4SLinus Torvalds  *					Cope with changing max_files.
301da177e4SLinus Torvalds  *	Al Viro		11 Oct 1998
311da177e4SLinus Torvalds  *		Graph may have cycles. That is, we can send the descriptor
321da177e4SLinus Torvalds  *		of foo to bar and vice versa. Current code chokes on that.
331da177e4SLinus Torvalds  *		Fix: move SCM_RIGHTS ones into the separate list and then
341da177e4SLinus Torvalds  *		skb_free() them all instead of doing explicit fput's.
351da177e4SLinus Torvalds  *		Another problem: since fput() may block somebody may
361da177e4SLinus Torvalds  *		create a new unix_socket when we are in the middle of sweep
371da177e4SLinus Torvalds  *		phase. Fix: revert the logic wrt MARKED. Mark everything
381da177e4SLinus Torvalds  *		upon the beginning and unmark non-junk ones.
391da177e4SLinus Torvalds  *
401da177e4SLinus Torvalds  *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
411da177e4SLinus Torvalds  *		sent to connect()'ed but still not accept()'ed sockets.
421da177e4SLinus Torvalds  *		Fixed. Old code had slightly different problem here:
431da177e4SLinus Torvalds  *		extra fput() in situation when we passed the descriptor via
441da177e4SLinus Torvalds  *		such socket and closed it (descriptor). That would happen on
451da177e4SLinus Torvalds  *		each unix_gc() until the accept(). Since the struct file in
461da177e4SLinus Torvalds  *		question would go to the free list and might be reused...
471da177e4SLinus Torvalds  *		That might be the reason of random oopses on filp_close()
481da177e4SLinus Torvalds  *		in unrelated processes.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  *	AV		28 Feb 1999
511da177e4SLinus Torvalds  *		Kill the explicit allocation of stack. Now we keep the tree
521da177e4SLinus Torvalds  *		with root in dummy + pointer (gc_current) to one of the nodes.
531da177e4SLinus Torvalds  *		Stack is represented as path from gc_current to dummy. Unmark
541da177e4SLinus Torvalds  *		now means "add to tree". Push == "make it a son of gc_current".
551da177e4SLinus Torvalds  *		Pop == "move gc_current to parent". We keep only pointers to
561da177e4SLinus Torvalds  *		parents (->gc_tree).
571da177e4SLinus Torvalds  *	AV		1 Mar 1999
581da177e4SLinus Torvalds  *		Damn. Added missing check for ->dead in listen queues scanning.
591da177e4SLinus Torvalds  *
601fd05ba5SMiklos Szeredi  *	Miklos Szeredi 25 Jun 2007
611fd05ba5SMiklos Szeredi  *		Reimplement with a cycle collecting algorithm. This should
621fd05ba5SMiklos Szeredi  *		solve several problems with the previous code, like being racy
631fd05ba5SMiklos Szeredi  *		wrt receive and holding up unrelated socket operations.
641da177e4SLinus Torvalds  */
651da177e4SLinus Torvalds 
661da177e4SLinus Torvalds #include <linux/kernel.h>
671da177e4SLinus Torvalds #include <linux/string.h>
681da177e4SLinus Torvalds #include <linux/socket.h>
691da177e4SLinus Torvalds #include <linux/un.h>
701da177e4SLinus Torvalds #include <linux/net.h>
711da177e4SLinus Torvalds #include <linux/fs.h>
721da177e4SLinus Torvalds #include <linux/skbuff.h>
731da177e4SLinus Torvalds #include <linux/netdevice.h>
741da177e4SLinus Torvalds #include <linux/file.h>
751da177e4SLinus Torvalds #include <linux/proc_fs.h>
764a3e2f71SArjan van de Ven #include <linux/mutex.h>
775f23b734Sdann frazier #include <linux/wait.h>
781da177e4SLinus Torvalds 
791da177e4SLinus Torvalds #include <net/sock.h>
801da177e4SLinus Torvalds #include <net/af_unix.h>
811da177e4SLinus Torvalds #include <net/scm.h>
82c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h>
831da177e4SLinus Torvalds 
84f4e65870SJens Axboe #include "scm.h"
85f4e65870SJens Axboe 
861da177e4SLinus Torvalds /* Internal data structures and random procedures: */
871da177e4SLinus Torvalds 
881fd05ba5SMiklos Szeredi static LIST_HEAD(gc_candidates);
895f23b734Sdann frazier static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
901da177e4SLinus Torvalds 
915c80f1aeSPavel Emelyanov static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
921fd05ba5SMiklos Szeredi 			  struct sk_buff_head *hitlist)
931da177e4SLinus Torvalds {
941da177e4SLinus Torvalds 	struct sk_buff *skb;
951fd05ba5SMiklos Szeredi 	struct sk_buff *next;
961da177e4SLinus Torvalds 
971da177e4SLinus Torvalds 	spin_lock(&x->sk_receive_queue.lock);
98a2f3be17SIlpo Järvinen 	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
99d1ab39f1SJason Eastman 		/* Do we have file descriptors ? */
1001fd05ba5SMiklos Szeredi 		if (UNIXCB(skb).fp) {
1011fd05ba5SMiklos Szeredi 			bool hit = false;
102d1ab39f1SJason Eastman 			/* Process the descriptors of this socket */
1031da177e4SLinus Torvalds 			int nfd = UNIXCB(skb).fp->count;
1041da177e4SLinus Torvalds 			struct file **fp = UNIXCB(skb).fp->fp;
105d1ab39f1SJason Eastman 
1061fd05ba5SMiklos Szeredi 			while (nfd--) {
107d1ab39f1SJason Eastman 				/* Get the socket the fd matches if it indeed does so */
1081fd05ba5SMiklos Szeredi 				struct sock *sk = unix_get_socket(*fp++);
109d1ab39f1SJason Eastman 
1101fd05ba5SMiklos Szeredi 				if (sk) {
1116209344fSMiklos Szeredi 					struct unix_sock *u = unix_sk(sk);
1126209344fSMiklos Szeredi 
113d1ab39f1SJason Eastman 					/* Ignore non-candidates, they could
1146209344fSMiklos Szeredi 					 * have been added to the queues after
1156209344fSMiklos Szeredi 					 * starting the garbage collection
1166209344fSMiklos Szeredi 					 */
11760bc851aSEric Dumazet 					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
1181fd05ba5SMiklos Szeredi 						hit = true;
119d1ab39f1SJason Eastman 
1206209344fSMiklos Szeredi 						func(u);
1216209344fSMiklos Szeredi 					}
1221da177e4SLinus Torvalds 				}
1231da177e4SLinus Torvalds 			}
1241fd05ba5SMiklos Szeredi 			if (hit && hitlist != NULL) {
1251fd05ba5SMiklos Szeredi 				__skb_unlink(skb, &x->sk_receive_queue);
1261fd05ba5SMiklos Szeredi 				__skb_queue_tail(hitlist, skb);
1271da177e4SLinus Torvalds 			}
1281fd05ba5SMiklos Szeredi 		}
1291da177e4SLinus Torvalds 	}
1301da177e4SLinus Torvalds 	spin_unlock(&x->sk_receive_queue.lock);
1311da177e4SLinus Torvalds }
1321da177e4SLinus Torvalds 
1335c80f1aeSPavel Emelyanov static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
1341fd05ba5SMiklos Szeredi 			  struct sk_buff_head *hitlist)
1351da177e4SLinus Torvalds {
136d1ab39f1SJason Eastman 	if (x->sk_state != TCP_LISTEN) {
1371fd05ba5SMiklos Szeredi 		scan_inflight(x, func, hitlist);
138d1ab39f1SJason Eastman 	} else {
1391fd05ba5SMiklos Szeredi 		struct sk_buff *skb;
1401fd05ba5SMiklos Szeredi 		struct sk_buff *next;
1411fd05ba5SMiklos Szeredi 		struct unix_sock *u;
1421fd05ba5SMiklos Szeredi 		LIST_HEAD(embryos);
1431da177e4SLinus Torvalds 
144d1ab39f1SJason Eastman 		/* For a listening socket collect the queued embryos
1451fd05ba5SMiklos Szeredi 		 * and perform a scan on them as well.
1461da177e4SLinus Torvalds 		 */
1471fd05ba5SMiklos Szeredi 		spin_lock(&x->sk_receive_queue.lock);
148a2f3be17SIlpo Järvinen 		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
1491fd05ba5SMiklos Szeredi 			u = unix_sk(skb->sk);
1501da177e4SLinus Torvalds 
151d1ab39f1SJason Eastman 			/* An embryo cannot be in-flight, so it's safe
1521fd05ba5SMiklos Szeredi 			 * to use the list link.
1531fd05ba5SMiklos Szeredi 			 */
1541fd05ba5SMiklos Szeredi 			BUG_ON(!list_empty(&u->link));
1551fd05ba5SMiklos Szeredi 			list_add_tail(&u->link, &embryos);
1561fd05ba5SMiklos Szeredi 		}
1571fd05ba5SMiklos Szeredi 		spin_unlock(&x->sk_receive_queue.lock);
1581fd05ba5SMiklos Szeredi 
1591fd05ba5SMiklos Szeredi 		while (!list_empty(&embryos)) {
1601fd05ba5SMiklos Szeredi 			u = list_entry(embryos.next, struct unix_sock, link);
1611fd05ba5SMiklos Szeredi 			scan_inflight(&u->sk, func, hitlist);
1621fd05ba5SMiklos Szeredi 			list_del_init(&u->link);
1631fd05ba5SMiklos Szeredi 		}
1641fd05ba5SMiklos Szeredi 	}
1651fd05ba5SMiklos Szeredi }
1661fd05ba5SMiklos Szeredi 
/* Drop one in-flight reference from a candidate's child; used as the
 * scan callback in unix_gc()'s first pass (called with unix_gc_lock held).
 */
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}
1711fd05ba5SMiklos Szeredi 
/* Restore one in-flight reference; used as the scan callback when
 * unix_gc() re-credits the remaining (garbage) candidates before
 * purging their queues (called with unix_gc_lock held).
 */
static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}
1761fd05ba5SMiklos Szeredi 
1775c80f1aeSPavel Emelyanov static void inc_inflight_move_tail(struct unix_sock *u)
1781fd05ba5SMiklos Szeredi {
179*97af84a6SKuniyuki Iwashima 	u->inflight++;
180*97af84a6SKuniyuki Iwashima 
181d1ab39f1SJason Eastman 	/* If this still might be part of a cycle, move it to the end
1826209344fSMiklos Szeredi 	 * of the list, so that it's checked even if it was already
1836209344fSMiklos Szeredi 	 * passed over
1841fd05ba5SMiklos Szeredi 	 */
18560bc851aSEric Dumazet 	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
1861fd05ba5SMiklos Szeredi 		list_move_tail(&u->link, &gc_candidates);
1871fd05ba5SMiklos Szeredi }
1881fd05ba5SMiklos Szeredi 
189505e907dSFabian Frederick static bool gc_in_progress;
1909915672dSEric Dumazet #define UNIX_INFLIGHT_TRIGGER_GC 16000
1911fd05ba5SMiklos Szeredi 
/* Throttle senders while garbage collection is needed: kick off a
 * collection when the number of in-flight sockets is excessive, then
 * sleep until no collection is in progress.
 */
void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight() and gc_in_progress().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	/* Block until any concurrent (or just-started) GC run completes. */
	wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
}
2045f23b734Sdann frazier 
2055f23b734Sdann frazier /* The external entry point: unix_gc() */
/* Cycle-collecting garbage collector for AF_UNIX sockets.
 *
 * Phases (all list surgery done under unix_gc_lock):
 *   1. Candidate selection: in-flight sockets whose only references
 *      are the in-flight ones (total_refs == u->inflight).
 *   2. Subtract each candidate's internal references to children
 *      (dec_inflight), then restore them recursively for candidates
 *      that still have external references (inc_inflight_move_tail);
 *      what remains in gc_candidates is purely cyclic garbage.
 *   3. Re-credit the garbage's counters (inc_inflight), detach the
 *      cycle-forming skbs onto a hitlist, and purge it outside the
 *      lock.
 */
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;

		total_refs = file_count(u->sk.sk_socket->file);

		BUG_ON(!u->inflight);
		BUG_ON(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		/* Non-zero inflight here means some external reference
		 * survived the subtraction pass: not part of a cycle.
		 */
		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/* Drop the lock: purging may sleep/reenter via fput paths. */
	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean its registered files, ignore all io_uring
	 * originated skbs. It's fine as io_uring doesn't keep references to
	 * other io_uring instances and so killing all other files in the cycle
	 * will put all io_uring references forcing it to go through normal
	 * release.path eventually putting registered files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files, just push them back to
	 * the inflight list
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	/* Release any senders throttled in wait_for_unix_gc(). */
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
335