// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree
 *    and the stack is just a path from the root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket when we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark
 *		everything upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() in the situation when we passed the
 *		descriptor via such a socket and closed it (the descriptor).
 *		That would happen on each unix_gc() until the accept().
 *		Since the struct file in question would go to the free list
 *		and might be reused... That might be the reason for random
 *		oopses on filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

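/* Illustration (userspace sketch, not part of this file): such a cycle
 * can be made with nothing but a socketpair.  Send one end's fd across
 * the pair as SCM_RIGHTS ancillary data and close both descriptors:
 *
 *	int fd[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, fd);
 *	// sendmsg() fd[1] over the pair as SCM_RIGHTS...
 *	close(fd[0]);
 *	close(fd[1]);
 *
 * The struct file behind fd[1] is now held only by the skb sitting in
 * its own receive queue: a reference cycle that plain refcounting can
 * never free, which is why this collector exists.
 */
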
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

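/* Sockets being considered by the current GC pass.  Candidates are
 * moved here off the global gc_inflight_list; both lists are protected
 * by unix_gc_lock (declared in af_unix.h).
 */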
static LIST_HEAD(gc_candidates);

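/* Walk x's receive queue and apply func to every GC-candidate unix
 * socket referenced by an SCM_RIGHTS skb.  If hitlist is non-NULL,
 * skbs that reference at least one candidate are unlinked from the
 * queue and collected on hitlist for later disposal.
 */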
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket this fd refers to, if it is indeed a socket */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates; they could have been
				 * added to the queues after starting the
				 * garbage collection.
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

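/* Like scan_inflight(), but for a listening socket scan the receive
 * queues of its not-yet-accept()'ed children (embryos) instead, since
 * SCM_RIGHTS sent to a connect()'ed but not yet accept()'ed socket
 * sit there.
 */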
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket, collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

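/* Callbacks handed to scan_children() by the collector below:
 * dec_inflight() subtracts the references that candidates' queued skbs
 * hold on a socket, and the inc_*() variants restore them once the
 * socket is known to be reachable from outside.
 */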
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

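/* True from the moment a GC pass is queued in unix_gc() until
 * __unix_gc() finishes; accessed locklessly, hence the
 * READ_ONCE()/WRITE_ONCE() pairing.
 */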
static bool gc_in_progress;

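/* The collector proper.  In outline:
 *
 *   1. Move every in-flight socket whose only file references are the
 *      in-flight ones onto gc_candidates.
 *   2. Subtract, for each candidate, the references held by skbs
 *      queued on other candidates.
 *   3. Any candidate whose count is still non-zero is reachable from
 *      outside; restore it (and, transitively, everything it can
 *      reach) via the cursor walk below.
 *   4. What remains on gc_candidates is garbage cycles: restore their
 *      counters and free the skbs that form the cycles.
 */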
static void __unix_gc(struct work_struct *work)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;

		total_refs = file_count(u->sk.sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * have remaining references.  Do this recursively, so only
	 * those which form cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean up its registered files, so ignore all
	 * io_uring-originated skbs.  This is fine, as io_uring doesn't keep
	 * references to other io_uring instances; killing all other files
	 * in the cycle will put all io_uring references, forcing it through
	 * the normal release path and eventually putting the registered
	 * files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files; just push them back to
	 * the inflight list.
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

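/* Kick a GC pass on the system_unbound workqueue.  unix_gc() itself
 * returns before the pass has run; callers that must observe a
 * finished pass go through wait_for_unix_gc(), which can flush_work()
 * the pending pass.
 */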
void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

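/* Called on the sendmsg() path before SCM_RIGHTS fds are queued.
 * @fpl describes the files about to be sent (it may be NULL) and is
 * used to decide whether this particular sender must wait for an
 * in-progress GC pass to finish.
 */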
void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}
338