// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}
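/* The GC below models inflight fds as a directed graph: each AF_UNIX
 * socket that is currently being passed in an SCM_RIGHTS message owns
 * one struct unix_vertex, and each inflight fd owns one struct
 * unix_edge pointing from the socket being passed (predecessor) to its
 * receiver (successor).  A socket is garbage iff the only references
 * left to it come from skbs queued within its own Strongly Connected
 * Component, which unix_walk_scc() detects and unix_collect_skb()
 * purges.
 */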
static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
	/* If an embryo socket has an fd,
	 * the listener indirectly holds the fd's refcnt.
	 */
	if (edge->successor->listener)
		return unix_sk(edge->successor->listener)->vertex;

	return edge->successor->vertex;
}

static bool unix_graph_maybe_cyclic;
static bool unix_graph_grouped;

static void unix_update_graph(struct unix_vertex *vertex)
{
	/* If the receiver socket is not inflight, no cyclic
	 * reference could be formed.
	 */
	if (!vertex)
		return;

	unix_graph_maybe_cyclic = true;
	unix_graph_grouped = false;
}

static LIST_HEAD(unix_unvisited_vertices);

enum unix_vertex_index {
	UNIX_VERTEX_INDEX_MARK1,
	UNIX_VERTEX_INDEX_MARK2,
	UNIX_VERTEX_INDEX_START,
};

static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;

static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	if (!vertex) {
		vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
		vertex->index = unix_vertex_unvisited_index;
		vertex->out_degree = 0;
		INIT_LIST_HEAD(&vertex->edges);
		INIT_LIST_HEAD(&vertex->scc_entry);

		list_move_tail(&vertex->entry, &unix_unvisited_vertices);
		edge->predecessor->vertex = vertex;
	}

	vertex->out_degree++;
	list_add_tail(&edge->vertex_entry, &vertex->edges);

	unix_update_graph(unix_edge_successor(edge));
}

static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	unix_update_graph(unix_edge_successor(edge));

	list_del(&edge->vertex_entry);
	vertex->out_degree--;

	if (!vertex->out_degree) {
		edge->predecessor->vertex = NULL;
		list_move_tail(&vertex->entry, &fpl->vertices);
	}
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex, *next_vertex;

	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
		list_del(&vertex->entry);
		kfree(vertex);
	}
}

static DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;

void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
	int i = 0, j = 0;

	spin_lock(&unix_gc_lock);

	if (!fpl->count_unix)
		goto out;

	do {
		struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
		struct unix_edge *edge;

		if (!inflight)
			continue;

		edge = fpl->edges + i++;
		edge->predecessor = inflight;
		edge->successor = receiver;

		unix_add_edge(fpl, edge);
	} while (i < fpl->count_unix);

	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);

	spin_unlock(&unix_gc_lock);

	fpl->inflight = true;

	unix_free_vertices(fpl);
}

void unix_del_edges(struct scm_fp_list *fpl)
{
	int i = 0;

	spin_lock(&unix_gc_lock);

	if (!fpl->count_unix)
		goto out;

	do {
		struct unix_edge *edge = fpl->edges + i++;

		unix_del_edge(fpl, edge);
	} while (i < fpl->count_unix);

	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);

	spin_unlock(&unix_gc_lock);

	fpl->inflight = false;
}
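/* When an embryo socket is accept()ed, the fds queued for it no longer
 * belong to the listener (see unix_edge_successor()), so any grouping
 * computed earlier may be stale.  unix_update_edges() detaches the
 * embryo from its listener and marks the graph as possibly cyclic and
 * in need of regrouping, all under unix_gc_lock.
 */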
void unix_update_edges(struct unix_sock *receiver)
{
	spin_lock(&unix_gc_lock);
	unix_update_graph(unix_sk(receiver->listener)->vertex);
	receiver->listener = NULL;
	spin_unlock(&unix_gc_lock);
}

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

	for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

		list_add(&vertex->entry, &fpl->vertices);
	}

	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
				    GFP_KERNEL_ACCOUNT);
	if (!fpl->edges)
		goto err;

	return 0;

err:
	unix_free_vertices(fpl);
	return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
	if (fpl->inflight)
		unix_del_edges(fpl);

	kvfree(fpl->edges);
	unix_free_vertices(fpl);
}

static bool unix_vertex_dead(struct unix_vertex *vertex)
{
	struct unix_edge *edge;
	struct unix_sock *u;
	long total_ref;

	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		struct unix_vertex *next_vertex = unix_edge_successor(edge);

		/* The vertex's fd can be received by a non-inflight socket. */
		if (!next_vertex)
			return false;

		/* The vertex's fd can be received by an inflight socket in
		 * another SCC.
		 */
		if (next_vertex->scc_index != vertex->scc_index)
			return false;
	}

	/* No receiver exists out of the same SCC. */

	edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
	u = edge->predecessor;
	total_ref = file_count(u->sk.sk_socket->file);

	/* If not close()d, total_ref > out_degree. */
	if (total_ref != vertex->out_degree)
		return false;

	return true;
}
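/* Worked example of the deadness check above (illustration only, where
 * send_fd() stands for a hypothetical helper passing an fd via
 * SCM_RIGHTS): userspace creates a socketpair, passes each end over
 * the other, and closes both fds:
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	send_fd(sv[1], sv[0]);
 *	send_fd(sv[0], sv[1]);
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * Now each struct file is referenced only by the skb sitting in the
 * peer's receive queue: file_count() == out_degree == 1 for both
 * vertices and every successor is in the same SCC, so
 * unix_vertex_dead() reports the whole SCC as garbage.
 */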
enum unix_recv_queue_lock_class {
	U_RECVQ_LOCK_NORMAL,
	U_RECVQ_LOCK_EMBRYO,
};

static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{
	struct unix_vertex *vertex;

	list_for_each_entry_reverse(vertex, scc, scc_entry) {
		struct sk_buff_head *queue;
		struct unix_edge *edge;
		struct unix_sock *u;

		edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
		u = edge->predecessor;
		queue = &u->sk.sk_receive_queue;

		spin_lock(&queue->lock);

		if (u->sk.sk_state == TCP_LISTEN) {
			struct sk_buff *skb;

			skb_queue_walk(queue, skb) {
				struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;

				/* listener -> embryo order, the inversion never happens. */
				spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO);
				skb_queue_splice_init(embryo_queue, hitlist);
				spin_unlock(&embryo_queue->lock);
			}
		} else {
			skb_queue_splice_init(queue, hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
			if (u->oob_skb) {
				kfree_skb(u->oob_skb);
				u->oob_skb = NULL;
			}
#endif
		}

		spin_unlock(&queue->lock);
	}
}

static bool unix_scc_cyclic(struct list_head *scc)
{
	struct unix_vertex *vertex;
	struct unix_edge *edge;

	/* SCC containing multiple vertices ? */
	if (!list_is_singular(scc))
		return true;

	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

	/* Self-reference or an embryo-listener circle ? */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		if (unix_edge_successor(edge) == vertex)
			return true;
	}

	return false;
}

static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;

static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
			    struct sk_buff_head *hitlist)
{
	LIST_HEAD(vertex_stack);
	struct unix_edge *edge;
	LIST_HEAD(edge_stack);

next_vertex:
	/* Push vertex to vertex_stack and mark it as on-stack
	 * (index >= UNIX_VERTEX_INDEX_START).
	 * The vertex will be popped when finalising SCC later.
	 */
	list_add(&vertex->scc_entry, &vertex_stack);

	vertex->index = *last_index;
	vertex->scc_index = *last_index;
	(*last_index)++;

	/* Explore neighbour vertices (receivers of the current vertex's fd). */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		struct unix_vertex *next_vertex = unix_edge_successor(edge);

		if (!next_vertex)
			continue;

		if (next_vertex->index == unix_vertex_unvisited_index) {
			/* Iterative (non-recursive) depth-first search
			 *
			 * 1. Push a forward edge to edge_stack and set
			 *    the successor to vertex for the next iteration.
			 */
			list_add(&edge->stack_entry, &edge_stack);

			vertex = next_vertex;
			goto next_vertex;

			/* 2. Pop the edge directed to the current vertex
			 *    and restore the ancestor for backtracking.
			 */
prev_vertex:
			edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
			list_del_init(&edge->stack_entry);

			next_vertex = vertex;
			vertex = edge->predecessor->vertex;

			/* If the successor has a smaller scc_index, two vertices
			 * are in the same SCC, so propagate the smaller scc_index
			 * to skip SCC finalisation.
			 */
			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else if (next_vertex->index != unix_vertex_grouped_index) {
			/* Loop detected by a back/cross edge.
			 *
			 * The successor is on vertex_stack, so two vertices are in
			 * the same SCC. If the successor has a smaller scc_index,
			 * propagate it to skip SCC finalisation.
			 */
			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else {
			/* The successor was already grouped as another SCC */
		}
	}

	if (vertex->index == vertex->scc_index) {
		struct list_head scc;
		bool scc_dead = true;

		/* SCC finalised.
		 *
		 * If the scc_index was not updated, all the vertices above on
		 * vertex_stack are in the same SCC. Group them using scc_entry.
		 */
		__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);

		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
			/* Don't restart DFS from this vertex in unix_walk_scc(). */
			list_move_tail(&vertex->entry, &unix_visited_vertices);

			/* Mark vertex as off-stack. */
			vertex->index = unix_vertex_grouped_index;

			if (scc_dead)
				scc_dead = unix_vertex_dead(vertex);
		}

		if (scc_dead)
			unix_collect_skb(&scc, hitlist);
		else if (!unix_graph_maybe_cyclic)
			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

		list_del(&scc);
	}

	/* Need backtracking ? */
	if (!list_empty(&edge_stack))
		goto prev_vertex;
}
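/* __unix_walk_scc() above is Tarjan's strongly connected components
 * algorithm with the recursion unrolled onto edge_stack; scc_index
 * plays the role of the classic lowlink value, and a vertex whose
 * scc_index still equals its own index once its neighbours are
 * exhausted is the root of an SCC.  unix_walk_scc() below restarts
 * the DFS for every vertex not yet visited; once the grouping is
 * complete and the graph has not changed since, unix_walk_scc_fast()
 * reuses it and skips the DFS altogether.
 */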
static void unix_walk_scc(struct sk_buff_head *hitlist)
{
	unsigned long last_index = UNIX_VERTEX_INDEX_START;

	unix_graph_maybe_cyclic = false;

	/* Visit every vertex exactly once.
	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
	 */
	while (!list_empty(&unix_unvisited_vertices)) {
		struct unix_vertex *vertex;

		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
		__unix_walk_scc(vertex, &last_index, hitlist);
	}

	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);

	unix_graph_grouped = true;
}

static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{
	while (!list_empty(&unix_unvisited_vertices)) {
		struct unix_vertex *vertex;
		struct list_head scc;
		bool scc_dead = true;

		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
		list_add(&scc, &vertex->scc_entry);

		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
			list_move_tail(&vertex->entry, &unix_visited_vertices);

			if (scc_dead)
				scc_dead = unix_vertex_dead(vertex);
		}

		if (scc_dead)
			unix_collect_skb(&scc, hitlist);

		list_del(&scc);
	}

	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}

static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;

	spin_lock(&unix_gc_lock);

	if (!unix_graph_maybe_cyclic) {
		spin_unlock(&unix_gc_lock);
		goto skip_gc;
	}

	__skb_queue_head_init(&hitlist);

	if (unix_graph_grouped)
		unix_walk_scc_fast(&hitlist);
	else
		unix_walk_scc(&hitlist);

	spin_unlock(&unix_gc_lock);

	__skb_queue_purge(&hitlist);
skip_gc:
	WRITE_ONCE(gc_in_progress, false);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_add_edges(),
	 * unix_del_edges(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}
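/* A note on the thresholds above: UNIX_INFLIGHT_TRIGGER_GC kicks the
 * worker once the system-wide inflight count exceeds 16000, while
 * UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8, i.e. 2024 with SCM_MAX_FD
 * being 253) forces a single user with that many unreceived AF_UNIX
 * fds to wait for the GC to finish before sending more, giving
 * backpressure against leaky or malicious senders.
 */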