xref: /linux/net/unix/garbage.c (revision 11498715f266a3fb4caabba9dd575636cbcaa8f1)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
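/* A minimal userspace sketch (illustration only, not part of this
 * file; the CMSG plumbing is elided) of how an unreachable cycle
 * forms:
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	... build an SCM_RIGHTS control message carrying sv[0] ...
 *	sendmsg(sv[1], &msg, 0);   - sv[0] is now "in flight"
 *	close(sv[0]);
 *	close(sv[1]);              - no descriptor references either
 *				     socket any more, yet the queued skb
 *				     still pins sv[0]'s struct file:
 *				     garbage for this collector
 */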

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

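/* Sockets suspected of being unreachable garbage.  Entries are moved
 * here from the global gc_inflight_list (which lives in scm.c next to
 * unix_gc_lock) and are either returned there or purged by __unix_gc().
 */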
static LIST_HEAD(gc_candidates);

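/* Walk the receive queue of @x and apply @func to every unix socket
 * referenced by a queued SCM_RIGHTS message, provided it is still a
 * GC candidate.  If @hitlist is non-NULL, each skb whose descriptors
 * hit a candidate is unlinked onto @hitlist for later purging.
 */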
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd refers to, if it is
				 * indeed a socket.
				 */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates; they could have been
				 * added to the queues after the garbage
				 * collection started.
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

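/* As scan_inflight(), except that for a listening socket the receive
 * queue holds connection embryos rather than data, so the embryos'
 * own receive queues are scanned instead.
 */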
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket, collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

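/* Counter adjustments applied via scan_children() by the phases of
 * __unix_gc() below.  All of them run with unix_gc_lock held, which
 * is what makes the plain (non-atomic) u->inflight updates safe.
 */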
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;

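/* The collector itself, run from a workqueue.  In outline:
 *
 *  1. Candidate selection: move every in-flight socket whose file
 *     reference count is fully accounted for by in-flight references
 *     onto gc_candidates.
 *  2. Decrement each candidate's counter for every reference held in
 *     skbs queued on candidates.
 *  3. Candidates whose counter stays non-zero are reachable from the
 *     outside; restore them, and everything reachable from them,
 *     iteratively.
 *  4. Whatever remains is kept alive only by its own cycle(s); unlink
 *     the skbs forming those cycles onto a hitlist and purge them.
 */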
static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct unix_sock *u, *next;
	LIST_HEAD(not_cycle_list);
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;

		total_refs = file_count(u->sk.sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates
	 * which have remaining references.  Do this recursively, so
	 * only those remain which form cyclic references.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore the original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

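	/* Purging must happen without unix_gc_lock held: freeing each
	 * skb drops its file references via fput(), and the resulting
	 * unix_notinflight() calls take unix_gc_lock themselves.
	 */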
	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

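/* Kick the collector asynchronously.  Running the actual collection
 * from a workqueue keeps unix_gc() itself non-blocking for callers;
 * wait_for_unix_gc() below is where senders may be made to wait.
 */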
void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

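/* With SCM_MAX_FD being 253, UNIX_INFLIGHT_SANE_USER allows a user
 * roughly eight full SCM_RIGHTS messages' worth (2024 descriptors) of
 * not-yet-received sockets before wait_for_unix_gc() starts throttling.
 */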
#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collection right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who keep sending AF_UNIX sockets
	 * while their previously sent sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
3168b90a9f8SKuniyuki Iwashima }
317