xref: /linux/net/unix/garbage.c (revision 860a9bed265146b10311bcadbbcef59c3af4454d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NET3:	Garbage Collector For AF_UNIX sockets
4  *
5  * Garbage Collector:
6  *	Copyright (C) Barak A. Pearlmutter.
7  *
8  * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
9  * If it doesn't work blame me, it worked when Barak sent it.
10  *
11  * Assumptions:
12  *
13  *  - object w/ a bit
14  *  - free list
15  *
16  * Current optimizations:
17  *
18  *  - explicit stack instead of recursion
19  *  - tail recurse on first born instead of immediate push/pop
20  *  - we gather the stuff that should not be killed into a tree
21  *    and the stack is just a path from root to the current pointer.
22  *
23  *  Future optimizations:
24  *
25  *  - don't just push entire root set; process in place
26  *
27  *  Fixes:
28  *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
29  *					Cope with changing max_files.
30  *	Al Viro		11 Oct 1998
31  *		Graph may have cycles. That is, we can send the descriptor
32  *		of foo to bar and vice versa. Current code chokes on that.
33  *		Fix: move SCM_RIGHTS ones into the separate list and then
34  *		skb_free() them all instead of doing explicit fput's.
35  *		Another problem: since fput() may block, somebody may
36  *		create a new unix_socket when we are in the middle of the sweep
37  *		phase. Fix: revert the logic wrt MARKED. Mark everything
38  *		upon the beginning and unmark non-junk ones.
39  *
40  *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
41  *		sent to connect()'ed but still not accept()'ed sockets.
42  *		Fixed. Old code had a slightly different problem here:
43  *		an extra fput() in the situation when we passed the descriptor
44  *		via such a socket and closed it (the descriptor). That would
45  *		happen on each unix_gc() until the accept(). Since the struct
46  *		file in question would go to the free list and might be
47  *		reused... That might be the reason for random oopses on
48  *		filp_close() in unrelated processes.
49  *
50  *	AV		28 Feb 1999
51  *		Kill the explicit allocation of stack. Now we keep the tree
52  *		with root in dummy + pointer (gc_current) to one of the nodes.
53  *		Stack is represented as path from gc_current to dummy. Unmark
54  *		now means "add to tree". Push == "make it a son of gc_current".
55  *		Pop == "move gc_current to parent". We keep only pointers to
56  *		parents (->gc_tree).
57  *	AV		1 Mar 1999
58  *		Damn. Added missing check for ->dead in listen queues scanning.
59  *
60  *	Miklos Szeredi 25 Jun 2007
61  *		Reimplement with a cycle collecting algorithm. This should
62  *		solve several problems with the previous code, like being racy
63  *		wrt receive and holding up unrelated socket operations.
64  */
65 
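/* High-level picture of the data structures below:
 *
 * Each inflight AF_UNIX socket (one whose fd sits in some receive queue as
 * SCM_RIGHTS data) is represented by a struct unix_vertex, and each queued
 * copy of such an fd by a struct unix_edge from the passed socket
 * (predecessor) to the receiving socket (successor).  Garbage is any
 * strongly connected component (SCC) of this graph that is referenced only
 * from inside itself; it is found by the iterative Tarjan walk in
 * __unix_walk_scc() and reclaimed by purging the queued skbs.
 *
 * A minimal userspace illustration of how such garbage arises (a sketch,
 * not code from this file):
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send sv[1]'s own fd through sv[0] as SCM_RIGHTS;
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * The skb left on sv[1]'s receive queue holds the last reference to
 * sv[1]'s file: the socket is unreachable from userspace but is not freed
 * until the collector drops that skb.
 */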
66 #include <linux/kernel.h>
67 #include <linux/string.h>
68 #include <linux/socket.h>
69 #include <linux/un.h>
70 #include <linux/net.h>
71 #include <linux/fs.h>
72 #include <linux/skbuff.h>
73 #include <linux/netdevice.h>
74 #include <linux/file.h>
75 #include <linux/proc_fs.h>
76 #include <linux/mutex.h>
77 #include <linux/wait.h>
78 
79 #include <net/sock.h>
80 #include <net/af_unix.h>
81 #include <net/scm.h>
82 #include <net/tcp_states.h>
83 
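/* Return the unix_sock behind @filp if it is an AF_UNIX socket usable for
 * I/O, otherwise NULL.  Descriptors opened with O_PATH (FMODE_PATH) cannot
 * be used for I/O on the socket and are therefore not treated as carrying
 * an AF_UNIX socket here.
 */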
84 struct unix_sock *unix_get_socket(struct file *filp)
85 {
86 	struct inode *inode = file_inode(filp);
87 
88 	/* Socket ? */
89 	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
90 		struct socket *sock = SOCKET_I(inode);
91 		const struct proto_ops *ops;
92 		struct sock *sk = sock->sk;
93 
94 		ops = READ_ONCE(sock->ops);
95 
96 		/* PF_UNIX ? */
97 		if (sk && ops && ops->family == PF_UNIX)
98 			return unix_sk(sk);
99 	}
100 
101 	return NULL;
102 }
103 
104 static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
105 {
106 	/* If an embryo socket has an fd queued on it,
107 	 * the listener indirectly holds the fd's refcnt.
108 	 */
109 	if (edge->successor->listener)
110 		return unix_sk(edge->successor->listener)->vertex;
111 
112 	return edge->successor->vertex;
113 }
114 
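/* unix_graph_maybe_cyclic == false means the current graph cannot contain
 * a cycle, so __unix_gc() returns without walking anything.
 * unix_graph_grouped == true means nothing that could affect the grouping
 * has changed since the last full walk, so the SCCs recorded via
 * ->scc_entry are still valid and unix_walk_scc_fast() can skip the DFS.
 */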
115 static bool unix_graph_maybe_cyclic;
116 static bool unix_graph_grouped;
117 
118 static void unix_update_graph(struct unix_vertex *vertex)
119 {
120 	/* If the receiver socket is not inflight, no cyclic
121 	 * reference could be formed.
122 	 */
123 	if (!vertex)
124 		return;
125 
126 	unix_graph_maybe_cyclic = true;
127 	unix_graph_grouped = false;
128 }
129 
130 static LIST_HEAD(unix_unvisited_vertices);
131 
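/* A vertex's index doubles as its colour: unix_vertex_unvisited_index
 * means "not visited yet", unix_vertex_grouped_index means "already placed
 * in an SCC", and anything >= UNIX_VERTEX_INDEX_START is a live Tarjan
 * index, i.e. the vertex is currently on vertex_stack.  The two mark
 * values are swapped after every full walk (see unix_walk_scc()) so the
 * vertices never need re-initialisation between GC runs.
 */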
132 enum unix_vertex_index {
133 	UNIX_VERTEX_INDEX_MARK1,
134 	UNIX_VERTEX_INDEX_MARK2,
135 	UNIX_VERTEX_INDEX_START,
136 };
137 
138 static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;
139 
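/* Attach a vertex to the predecessor on its first outgoing edge, taking
 * one of the vertices preallocated in unix_prepare_fpl().  ->out_degree
 * counts how many copies of this socket's fd are currently in flight.
 */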
140 static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
141 {
142 	struct unix_vertex *vertex = edge->predecessor->vertex;
143 
144 	if (!vertex) {
145 		vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
146 		vertex->index = unix_vertex_unvisited_index;
147 		vertex->out_degree = 0;
148 		INIT_LIST_HEAD(&vertex->edges);
149 		INIT_LIST_HEAD(&vertex->scc_entry);
150 
151 		list_move_tail(&vertex->entry, &unix_unvisited_vertices);
152 		edge->predecessor->vertex = vertex;
153 	}
154 
155 	vertex->out_degree++;
156 	list_add_tail(&edge->vertex_entry, &vertex->edges);
157 
158 	unix_update_graph(unix_edge_successor(edge));
159 }
160 
161 static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
162 {
163 	struct unix_vertex *vertex = edge->predecessor->vertex;
164 
165 	unix_update_graph(unix_edge_successor(edge));
166 
167 	list_del(&edge->vertex_entry);
168 	vertex->out_degree--;
169 
170 	if (!vertex->out_degree) {
171 		edge->predecessor->vertex = NULL;
172 		list_move_tail(&vertex->entry, &fpl->vertices);
173 	}
174 }
175 
176 static void unix_free_vertices(struct scm_fp_list *fpl)
177 {
178 	struct unix_vertex *vertex, *next_vertex;
179 
180 	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
181 		list_del(&vertex->entry);
182 		kfree(vertex);
183 	}
184 }
185 
186 static DEFINE_SPINLOCK(unix_gc_lock);
187 unsigned int unix_tot_inflight;
188 
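/* Called when an skb carrying SCM_RIGHTS fds is about to be queued to
 * @receiver: record one edge per inflight AF_UNIX fd and bump the
 * per-user and global inflight counters.  Preallocated vertices that
 * turned out to be unnecessary (the predecessor already had one) are
 * freed again at the end.
 */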
189 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
190 {
191 	int i = 0, j = 0;
192 
193 	spin_lock(&unix_gc_lock);
194 
195 	if (!fpl->count_unix)
196 		goto out;
197 
198 	do {
199 		struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
200 		struct unix_edge *edge;
201 
202 		if (!inflight)
203 			continue;
204 
205 		edge = fpl->edges + i++;
206 		edge->predecessor = inflight;
207 		edge->successor = receiver;
208 
209 		unix_add_edge(fpl, edge);
210 	} while (i < fpl->count_unix);
211 
212 	receiver->scm_stat.nr_unix_fds += fpl->count_unix;
213 	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
214 out:
215 	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
216 
217 	spin_unlock(&unix_gc_lock);
218 
219 	fpl->inflight = true;
220 
221 	unix_free_vertices(fpl);
222 }
223 
224 void unix_del_edges(struct scm_fp_list *fpl)
225 {
226 	struct unix_sock *receiver;
227 	int i = 0;
228 
229 	spin_lock(&unix_gc_lock);
230 
231 	if (!fpl->count_unix)
232 		goto out;
233 
234 	do {
235 		struct unix_edge *edge = fpl->edges + i++;
236 
237 		unix_del_edge(fpl, edge);
238 	} while (i < fpl->count_unix);
239 
240 	receiver = fpl->edges[0].successor;
241 	receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
242 	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
243 out:
244 	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
245 
246 	spin_unlock(&unix_gc_lock);
247 
248 	fpl->inflight = false;
249 }
250 
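/* Called from the accept() path.  From now on the socket is no longer
 * represented by its listener's vertex (see unix_edge_successor()), so
 * drop ->listener; if fds are already queued on it, the shape of the
 * graph effectively changes, hence the unix_update_graph() call under
 * the lock.
 */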
251 void unix_update_edges(struct unix_sock *receiver)
252 {
253 	/* nr_unix_fds is only updated under unix_state_lock().
254 	 * If it's 0 here, the embryo socket is not part of the
255 	 * inflight graph, and GC will not see it, so no lock needed.
256 	 */
257 	if (!receiver->scm_stat.nr_unix_fds) {
258 		receiver->listener = NULL;
259 	} else {
260 		spin_lock(&unix_gc_lock);
261 		unix_update_graph(unix_sk(receiver->listener)->vertex);
262 		receiver->listener = NULL;
263 		spin_unlock(&unix_gc_lock);
264 	}
265 }
266 
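/* Preallocate the worst case: one vertex per AF_UNIX fd in the message
 * plus the edge array, so that unix_add_edges() never has to allocate
 * while holding unix_gc_lock.  Vertices that end up unused are freed by
 * unix_free_vertices() from unix_add_edges() or unix_destroy_fpl().
 */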
267 int unix_prepare_fpl(struct scm_fp_list *fpl)
268 {
269 	struct unix_vertex *vertex;
270 	int i;
271 
272 	if (!fpl->count_unix)
273 		return 0;
274 
275 	for (i = 0; i < fpl->count_unix; i++) {
276 		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
277 		if (!vertex)
278 			goto err;
279 
280 		list_add(&vertex->entry, &fpl->vertices);
281 	}
282 
283 	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
284 				    GFP_KERNEL_ACCOUNT);
285 	if (!fpl->edges)
286 		goto err;
287 
288 	return 0;
289 
290 err:
291 	unix_free_vertices(fpl);
292 	return -ENOMEM;
293 }
294 
295 void unix_destroy_fpl(struct scm_fp_list *fpl)
296 {
297 	if (fpl->inflight)
298 		unix_del_edges(fpl);
299 
300 	kvfree(fpl->edges);
301 	unix_free_vertices(fpl);
302 }
303 
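/* A vertex in a finalised SCC is dead iff
 *   (a) every receiver of its fd lies in the same SCC, and
 *   (b) the file's refcount equals ->out_degree, i.e. every remaining
 *       reference to the file is an inflight copy inside the SCC.
 * Then nothing outside the SCC can ever reach it again.  For example, if
 * two close()d sockets each hold the other's fd in their receive queue,
 * both files have file_count() == 1 == out_degree, so the SCC is dead.
 */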
304 static bool unix_vertex_dead(struct unix_vertex *vertex)
305 {
306 	struct unix_edge *edge;
307 	struct unix_sock *u;
308 	long total_ref;
309 
310 	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
311 		struct unix_vertex *next_vertex = unix_edge_successor(edge);
312 
313 		/* The vertex's fd can be received by a non-inflight socket. */
314 		if (!next_vertex)
315 			return false;
316 
317 		/* The vertex's fd can be received by an inflight socket in
318 		 * another SCC.
319 		 */
320 		if (next_vertex->scc_index != vertex->scc_index)
321 			return false;
322 	}
323 
324 	/* No receiver exists outside the same SCC. */
325 
326 	edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
327 	u = edge->predecessor;
328 	total_ref = file_count(u->sk.sk_socket->file);
329 
330 	/* If not close()d, total_ref > out_degree. */
331 	if (total_ref != vertex->out_degree)
332 		return false;
333 
334 	return true;
335 }
336 
337 enum unix_recv_queue_lock_class {
338 	U_RECVQ_LOCK_NORMAL,
339 	U_RECVQ_LOCK_EMBRYO,
340 };
341 
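/* Strip a dead SCC of everything that keeps it alive: splice every skb
 * sitting on the member sockets' receive queues onto @hitlist.  For a
 * listener, the embryo sockets' queues are drained as well (their queued
 * fds are what forms the cycle), with the embryo queue lock nested inside
 * the listener's.  The out-of-band skb pins an extra reference, so it is
 * dropped here too.  The skbs on @hitlist are freed later, outside
 * unix_gc_lock.
 */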
342 static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
343 {
344 	struct unix_vertex *vertex;
345 
346 	list_for_each_entry_reverse(vertex, scc, scc_entry) {
347 		struct sk_buff_head *queue;
348 		struct unix_edge *edge;
349 		struct unix_sock *u;
350 
351 		edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
352 		u = edge->predecessor;
353 		queue = &u->sk.sk_receive_queue;
354 
355 		spin_lock(&queue->lock);
356 
357 		if (u->sk.sk_state == TCP_LISTEN) {
358 			struct sk_buff *skb;
359 
360 			skb_queue_walk(queue, skb) {
361 				struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
362 
363 				/* Lock order is listener -> embryo; the inversion never happens. */
364 				spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO);
365 				skb_queue_splice_init(embryo_queue, hitlist);
366 				spin_unlock(&embryo_queue->lock);
367 			}
368 		} else {
369 			skb_queue_splice_init(queue, hitlist);
370 
371 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
372 			if (u->oob_skb) {
373 				kfree_skb(u->oob_skb);
374 				u->oob_skb = NULL;
375 			}
376 #endif
377 		}
378 
379 		spin_unlock(&queue->lock);
380 	}
381 }
382 
383 static bool unix_scc_cyclic(struct list_head *scc)
384 {
385 	struct unix_vertex *vertex;
386 	struct unix_edge *edge;
387 
388 	/* SCC containing multiple vertices ? */
389 	if (!list_is_singular(scc))
390 		return true;
391 
392 	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
393 
394 	/* Self-reference or an embryo-listener cycle ? */
395 	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
396 		if (unix_edge_successor(edge) == vertex)
397 			return true;
398 	}
399 
400 	return false;
401 }
402 
403 static LIST_HEAD(unix_visited_vertices);
404 static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
405 
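/* Iterative Tarjan's SCC algorithm.  vertex_stack is the usual Tarjan
 * stack; edge_stack replaces recursion, each entry remembering the edge
 * being followed so that the parent can be restored at prev_vertex.
 * ->scc_index plays the role of "lowlink": it is propagated as a minimum
 * along back/cross edges, and a vertex whose ->index still equals its
 * ->scc_index once its edges are exhausted is an SCC root and finalises
 * every vertex above it on vertex_stack.
 */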
406 static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
407 			    struct sk_buff_head *hitlist)
408 {
409 	LIST_HEAD(vertex_stack);
410 	struct unix_edge *edge;
411 	LIST_HEAD(edge_stack);
412 
413 next_vertex:
414 	/* Push vertex to vertex_stack and mark it as on-stack
415 	 * (index >= UNIX_VERTEX_INDEX_START).
416 	 * The vertex will be popped when finalising SCC later.
417 	 */
418 	list_add(&vertex->scc_entry, &vertex_stack);
419 
420 	vertex->index = *last_index;
421 	vertex->scc_index = *last_index;
422 	(*last_index)++;
423 
424 	/* Explore neighbour vertices (receivers of the current vertex's fd). */
425 	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
426 		struct unix_vertex *next_vertex = unix_edge_successor(edge);
427 
428 		if (!next_vertex)
429 			continue;
430 
431 		if (next_vertex->index == unix_vertex_unvisited_index) {
432 			/* Iterative deepening depth first search
433 			 *
434 			 *   1. Push a forward edge to edge_stack and set
435 			 *      the successor to vertex for the next iteration.
436 			 */
437 			list_add(&edge->stack_entry, &edge_stack);
438 
439 			vertex = next_vertex;
440 			goto next_vertex;
441 
442 			/*   2. Pop the edge directed to the current vertex
443 			 *      and restore the ancestor for backtracking.
444 			 */
445 prev_vertex:
446 			edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
447 			list_del_init(&edge->stack_entry);
448 
449 			next_vertex = vertex;
450 			vertex = edge->predecessor->vertex;
451 
452 			/* If the successor has a smaller scc_index, the two vertices
453 			 * are in the same SCC, so propagate the smaller scc_index
454 			 * to skip SCC finalisation.
455 			 */
456 			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
457 		} else if (next_vertex->index != unix_vertex_grouped_index) {
458 			/* Loop detected by a back/cross edge.
459 			 *
460 			 * The successor is on vertex_stack, so the two vertices are in
461 			 * the same SCC.  If the successor has a smaller scc_index,
462 			 * propagate it to skip SCC finalisation.
463 			 */
464 			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
465 		} else {
466 			/* The successor was already grouped as another SCC */
467 		}
468 	}
469 
470 	if (vertex->index == vertex->scc_index) {
471 		struct list_head scc;
472 		bool scc_dead = true;
473 
474 		/* SCC finalised.
475 		 *
476 		 * If the scc_index was not updated, all the vertices above on
477 		 * vertex_stack are in the same SCC.  Group them using scc_entry.
478 		 */
479 		__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
480 
481 		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
482 			/* Don't restart DFS from this vertex in unix_walk_scc(). */
483 			list_move_tail(&vertex->entry, &unix_visited_vertices);
484 
485 			/* Mark vertex as off-stack. */
486 			vertex->index = unix_vertex_grouped_index;
487 
488 			if (scc_dead)
489 				scc_dead = unix_vertex_dead(vertex);
490 		}
491 
492 		if (scc_dead)
493 			unix_collect_skb(&scc, hitlist);
494 		else if (!unix_graph_maybe_cyclic)
495 			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
496 
497 		list_del(&scc);
498 	}
499 
500 	/* Need backtracking ? */
501 	if (!list_empty(&edge_stack))
502 		goto prev_vertex;
503 }
504 
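/* Full walk: run Tarjan from every still-unvisited vertex, collect dead
 * SCCs into @hitlist, then swap the visited/unvisited lists and the two
 * mark values so the vertices are immediately valid for the next run.
 * unix_graph_grouped notes that the grouping can be reused until an edge
 * changes.
 */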
505 static void unix_walk_scc(struct sk_buff_head *hitlist)
506 {
507 	unsigned long last_index = UNIX_VERTEX_INDEX_START;
508 
509 	unix_graph_maybe_cyclic = false;
510 
511 	/* Visit every vertex exactly once.
512 	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
513 	 */
514 	while (!list_empty(&unix_unvisited_vertices)) {
515 		struct unix_vertex *vertex;
516 
517 		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
518 		__unix_walk_scc(vertex, &last_index, hitlist);
519 	}
520 
521 	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
522 	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
523 
524 	unix_graph_grouped = true;
525 }
526 
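/* Fast path used while unix_graph_grouped is true: the ->scc_entry lists
 * built by the last full walk are still exact SCCs, so only the deadness
 * check has to be repeated, without another DFS.
 */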
527 static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
528 {
529 	while (!list_empty(&unix_unvisited_vertices)) {
530 		struct unix_vertex *vertex;
531 		struct list_head scc;
532 		bool scc_dead = true;
533 
534 		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
535 		list_add(&scc, &vertex->scc_entry);
536 
537 		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
538 			list_move_tail(&vertex->entry, &unix_visited_vertices);
539 
540 			if (scc_dead)
541 				scc_dead = unix_vertex_dead(vertex);
542 		}
543 
544 		if (scc_dead)
545 			unix_collect_skb(&scc, hitlist);
546 
547 		list_del(&scc);
548 	}
549 
550 	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
551 }
552 
553 static bool gc_in_progress;
554 
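/* Runs from the system_unbound workqueue.  The walk happens under
 * unix_gc_lock, but the collected skbs are purged only after the lock is
 * dropped: freeing them tears down the attached scm_fp_list, which goes
 * back through unix_destroy_fpl() -> unix_del_edges() and takes
 * unix_gc_lock itself.
 */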
555 static void __unix_gc(struct work_struct *work)
556 {
557 	struct sk_buff_head hitlist;
558 
559 	spin_lock(&unix_gc_lock);
560 
561 	if (!unix_graph_maybe_cyclic) {
562 		spin_unlock(&unix_gc_lock);
563 		goto skip_gc;
564 	}
565 
566 	__skb_queue_head_init(&hitlist);
567 
568 	if (unix_graph_grouped)
569 		unix_walk_scc_fast(&hitlist);
570 	else
571 		unix_walk_scc(&hitlist);
572 
573 	spin_unlock(&unix_gc_lock);
574 
575 	__skb_queue_purge(&hitlist);
576 skip_gc:
577 	WRITE_ONCE(gc_in_progress, false);
578 }
579 
580 static DECLARE_WORK(unix_gc_work, __unix_gc);
581 
582 void unix_gc(void)
583 {
584 	WRITE_ONCE(gc_in_progress, true);
585 	queue_work(system_unbound_wq, &unix_gc_work);
586 }
587 
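/* Thresholds for wait_for_unix_gc(): once more than 16000 AF_UNIX fds are
 * in flight in total, an asynchronous GC is kicked off; a single user with
 * more than SCM_MAX_FD * 8 fds still undelivered additionally waits for
 * any running GC to finish before sending more.
 */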
588 #define UNIX_INFLIGHT_TRIGGER_GC 16000
589 #define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
590 
591 void wait_for_unix_gc(struct scm_fp_list *fpl)
592 {
593 	/* If the number of inflight sockets is insane,
594 	 * force a garbage collect right now.
595 	 *
596 	 * Paired with the WRITE_ONCE() in unix_add_edges(),
597 	 * unix_del_edges(), and __unix_gc().
598 	 */
599 	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
600 	    !READ_ONCE(gc_in_progress))
601 		unix_gc();
602 
603 	/* Penalise users who want to send AF_UNIX sockets
604 	 * but whose sockets have not been received yet.
605 	 */
606 	if (!fpl || !fpl->count_unix ||
607 	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
608 		return;
609 
610 	if (READ_ONCE(gc_in_progress))
611 		flush_work(&unix_gc_work);
612 }
613