xref: /linux/net/unix/garbage.c (revision 1971d13ffa84a551d29a81fdf5b5ec5be166ac83)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push the entire root set; process it in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. The old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file
 *		in question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		The stack is represented as the path from gc_current to dummy.
 *		Unmark now means "add to tree". Push == "make it a son of
 *		gc_current". Pop == "move gc_current to parent". We keep only
 *		pointers to parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead when scanning listen queues.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle-collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

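/* unix_get_socket - if @filp refers to an AF_UNIX socket (and was not
 * opened with O_PATH), return its unix_sock; otherwise return NULL.
 */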
struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}

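/* Global GC state: unix_gc_lock protects the counters and lists below.
 * unix_tot_inflight is the total number of AF_UNIX file descriptors
 * currently in flight, gc_inflight_list links every unix_sock with a
 * nonzero inflight count, and gc_candidates temporarily holds sockets
 * suspected of forming unreachable cycles while a collection runs.
 */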
DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
static LIST_HEAD(gc_inflight_list);

/* Keep track of how many times the file descriptor is in flight
 * if it refers to an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		if (!u->inflight) {
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			WARN_ON_ONCE(list_empty(&u->link));
		}
		u->inflight++;

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);

	spin_unlock(&unix_gc_lock);
}

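/* Drop one in-flight reference to @filp, undoing unix_inflight().  The
 * socket leaves gc_inflight_list once its inflight count reaches zero.
 */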
void unix_notinflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(list_empty(&u->link));

		u->inflight--;
		if (!u->inflight)
			list_del_init(&u->link);

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);

	spin_unlock(&unix_gc_lock);
}

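/* Walk @x's receive queue and apply @func to every GC candidate whose
 * file descriptor is carried in an SCM_RIGHTS skb queued there.  If
 * @hitlist is non-NULL, skbs that reference at least one candidate are
 * unlinked from the queue and collected on @hitlist.
 */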
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket this fd refers to, if it is indeed a socket */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates, they could have been added
				 * to the queues after starting the garbage collection
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

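/* For a non-listening socket, scan its own receive queue; for a
 * listening socket, scan the receive queues of its not-yet-accepted
 * children (embryos) instead, as SCM_RIGHTS may already be queued
 * there.
 */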
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

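/* Callbacks passed to scan_children() by __unix_gc() below: they adjust
 * a candidate's inflight count while the collector removes and then
 * restores the internal (in-flight) references between candidates.
 */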
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;

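/* The actual collector, run from a workqueue.  In outline:
 *
 *  1. Pick candidates: in-flight sockets whose only remaining file
 *     references are the in-flight ones.
 *  2. Subtract the references that candidates hold on each other.  Any
 *     candidate left with a nonzero count is still externally reachable:
 *     move it to not_cycle_list and give back the references it holds,
 *     so the sockets it points to are re-checked in turn.
 *  3. Whatever is still on gc_candidates is unreachable garbage: restore
 *     its counters, collect the cycle-creating skbs on a hitlist and
 *     purge them after dropping unix_gc_lock.
 */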
static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct unix_sock *u, *next;
	LIST_HEAD(not_cycle_list);
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to the queue, so we must make sure only to touch
	 * candidates.
	 *
	 * Embryos, though never candidates themselves, affect which
	 * candidates are reachable by the garbage collector.  Before
	 * being added to a listener's queue, an embryo may already
	 * receive data carrying SCM_RIGHTS, potentially making the
	 * passed socket a candidate that is not yet reachable by the
	 * collector.  It becomes reachable once the embryo is
	 * enqueued.  Therefore, we must ensure that no SCM-laden
	 * embryo appears in a (candidate) listener's queue between
	 * consecutive scan_children() calls.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		struct sock *sk = &u->sk;
		long total_refs;

		total_refs = file_count(sk->sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

			if (sk->sk_state == TCP_LISTEN) {
				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
				unix_state_unlock(sk);
			}
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates that
	 * still have remaining references.  Do this recursively, so
	 * that only the candidates which form cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

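/* Kick off a garbage collection run asynchronously.  gc_in_progress is
 * set before queuing the work so concurrent callers can detect and wait
 * for a collection that is already pending (see wait_for_unix_gc()).
 */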
void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

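/* Throttling hook for senders of SCM_RIGHTS: kick off a collection once
 * unix_tot_inflight crosses UNIX_INFLIGHT_TRIGGER_GC, and make a sender
 * whose user already has an excessive number of fds in flight wait for
 * the pending collection to finish before queuing more.
 */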
void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collection right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}
410