// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
        struct inode *inode = file_inode(filp);

        /* Socket ? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                const struct proto_ops *ops;
                struct sock *sk = sock->sk;

                ops = READ_ONCE(sock->ops);

                /* PF_UNIX ? */
                if (sk && ops && ops->family == PF_UNIX)
                        return unix_sk(sk);
        }

        return NULL;
}
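
/* In-flight AF_UNIX fds form a directed graph: every socket whose fd is
 * in flight owns a struct unix_vertex, and each SCM_RIGHTS fd is a
 * struct unix_edge from that socket's vertex (predecessor) to the
 * receiving socket (successor).
 */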
static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
        /* If an embryo socket has a fd,
         * the listener indirectly holds the fd's refcnt.
         */
        if (edge->successor->listener)
                return unix_sk(edge->successor->listener)->vertex;

        return edge->successor->vertex;
}

static LIST_HEAD(unix_unvisited_vertices);

enum unix_vertex_index {
        UNIX_VERTEX_INDEX_MARK1,
        UNIX_VERTEX_INDEX_MARK2,
        UNIX_VERTEX_INDEX_START,
};

static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;

static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
        struct unix_vertex *vertex = edge->predecessor->vertex;

        if (!vertex) {
                vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
                vertex->index = unix_vertex_unvisited_index;
                vertex->out_degree = 0;
                INIT_LIST_HEAD(&vertex->edges);

                list_move_tail(&vertex->entry, &unix_unvisited_vertices);
                edge->predecessor->vertex = vertex;
        }

        vertex->out_degree++;
        list_add_tail(&edge->vertex_entry, &vertex->edges);
}

static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
        struct unix_vertex *vertex = edge->predecessor->vertex;

        list_del(&edge->vertex_entry);
        vertex->out_degree--;

        if (!vertex->out_degree) {
                edge->predecessor->vertex = NULL;
                list_move_tail(&vertex->entry, &fpl->vertices);
        }
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex, *next_vertex;

        list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
                list_del(&vertex->entry);
                kfree(vertex);
        }
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;

void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
        int i = 0, j = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
                struct unix_edge *edge;

                if (!inflight)
                        continue;

                edge = fpl->edges + i++;
                edge->predecessor = inflight;
                edge->successor = receiver;

                unix_add_edge(fpl, edge);
        } while (i < fpl->count_unix);

        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);

        spin_unlock(&unix_gc_lock);

        fpl->inflight = true;

        unix_free_vertices(fpl);
}

void unix_del_edges(struct scm_fp_list *fpl)
{
        int i = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_edge *edge = fpl->edges + i++;

                unix_del_edge(fpl, edge);
        } while (i < fpl->count_unix);

        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);

        spin_unlock(&unix_gc_lock);

        fpl->inflight = false;
}

void unix_update_edges(struct unix_sock *receiver)
{
        spin_lock(&unix_gc_lock);
        receiver->listener = NULL;
        spin_unlock(&unix_gc_lock);
}
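
/* Graph bookkeeping for an scm_fp_list follows this order:
 *
 *   unix_prepare_fpl()  - preallocate vertices and edges (done before
 *                         taking unix_gc_lock, which is a spinlock)
 *   unix_add_edges()    - skb queued: link the edges into the graph
 *   unix_del_edges()    - skb consumed: unlink the edges again
 *   unix_destroy_fpl()  - teardown: drop edges still in flight and free
 *                         the preallocated memory
 */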
int unix_prepare_fpl(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex;
        int i;

        if (!fpl->count_unix)
                return 0;

        for (i = 0; i < fpl->count_unix; i++) {
                vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
                if (!vertex)
                        goto err;

                list_add(&vertex->entry, &fpl->vertices);
        }

        fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
                                    GFP_KERNEL_ACCOUNT);
        if (!fpl->edges)
                goto err;

        return 0;

err:
        unix_free_vertices(fpl);
        return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
        if (fpl->inflight)
                unix_del_edges(fpl);

        kvfree(fpl->edges);
        unix_free_vertices(fpl);
}

static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
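
/* Group every vertex reachable from @vertex into strongly connected
 * components.  This is essentially Tarjan's SCC algorithm made iterative:
 * instead of recursing, forward edges are pushed on edge_stack and the
 * prev_vertex label below acts as the return point when backtracking.
 */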
static void __unix_walk_scc(struct unix_vertex *vertex)
{
        unsigned long index = UNIX_VERTEX_INDEX_START;
        LIST_HEAD(vertex_stack);
        struct unix_edge *edge;
        LIST_HEAD(edge_stack);

next_vertex:
        /* Push vertex to vertex_stack and mark it as on-stack
         * (index >= UNIX_VERTEX_INDEX_START).
         * The vertex will be popped when finalising SCC later.
         */
        list_add(&vertex->scc_entry, &vertex_stack);

        vertex->index = index;
        vertex->lowlink = index;
        index++;

        /* Explore neighbour vertices (receivers of the current vertex's fd). */
        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                struct unix_vertex *next_vertex = unix_edge_successor(edge);

                if (!next_vertex)
                        continue;

                if (next_vertex->index == unix_vertex_unvisited_index) {
                        /* Iterative deepening depth first search
                         *
                         *   1. Push a forward edge to edge_stack and set
                         *      the successor to vertex for the next iteration.
                         */
                        list_add(&edge->stack_entry, &edge_stack);

                        vertex = next_vertex;
                        goto next_vertex;

                        /*   2. Pop the edge directed to the current vertex
                         *      and restore the ancestor for backtracking.
                         */
prev_vertex:
                        edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
                        list_del_init(&edge->stack_entry);

                        next_vertex = vertex;
                        vertex = edge->predecessor->vertex;

                        /* If the successor has a smaller lowlink, two vertices
                         * are in the same SCC, so propagate the smaller lowlink
                         * to skip SCC finalisation.
                         */
                        vertex->lowlink = min(vertex->lowlink, next_vertex->lowlink);
                } else if (next_vertex->index != unix_vertex_grouped_index) {
                        /* Loop detected by a back/cross edge.
                         *
                         * The successor is on vertex_stack, so two vertices are
                         * in the same SCC. If the successor has a smaller index,
                         * propagate it to skip SCC finalisation.
                         */
                        vertex->lowlink = min(vertex->lowlink, next_vertex->index);
                } else {
                        /* The successor was already grouped as another SCC */
                }
        }

        if (vertex->index == vertex->lowlink) {
                struct list_head scc;

                /* SCC finalised.
                 *
                 * If the lowlink was not updated, all the vertices above on
                 * vertex_stack are in the same SCC. Group them using scc_entry.
                 */
                __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);

                list_for_each_entry_reverse(vertex, &scc, scc_entry) {
                        /* Don't restart DFS from this vertex in unix_walk_scc(). */
                        list_move_tail(&vertex->entry, &unix_visited_vertices);

                        /* Mark vertex as off-stack. */
                        vertex->index = unix_vertex_grouped_index;
                }

                list_del(&scc);
        }

        /* Need backtracking ? */
        if (!list_empty(&edge_stack))
                goto prev_vertex;
}

static void unix_walk_scc(void)
{
        /* Visit every vertex exactly once.
         * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
         */
        while (!list_empty(&unix_unvisited_vertices)) {
                struct unix_vertex *vertex;

                vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
                __unix_walk_scc(vertex);
        }

        list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
        swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
}

static LIST_HEAD(gc_candidates);
static LIST_HEAD(gc_inflight_list);

/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *filp)
{
        struct unix_sock *u = unix_get_socket(filp);

        spin_lock(&unix_gc_lock);

        if (u) {
                if (!u->inflight) {
                        WARN_ON_ONCE(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        WARN_ON_ONCE(list_empty(&u->link));
                }
                u->inflight++;
        }

        spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *filp)
{
        struct unix_sock *u = unix_get_socket(filp);

        spin_lock(&unix_gc_lock);

        if (u) {
                WARN_ON_ONCE(!u->inflight);
                WARN_ON_ONCE(list_empty(&u->link));

                u->inflight--;
                if (!u->inflight)
                        list_del_init(&u->link);
        }

        spin_unlock(&unix_gc_lock);
}
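
/* Walk the receive queue of @x and invoke @func on each queued fd that
 * refers to a GC-candidate unix socket.  When @hitlist is given, skbs
 * whose SCM_RIGHTS reference a candidate are moved onto it so the caller
 * can purge them.
 */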
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                /* Do we have file descriptors ? */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /* Process the descriptors of this socket */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;

                        while (nfd--) {
                                /* Get the socket the fd matches if it indeed does so */
                                struct unix_sock *u = unix_get_socket(*fp++);

                                /* Ignore non-candidates, they could have been added
                                 * to the queues after starting the garbage collection
                                 */
                                if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                                        hit = true;

                                        func(u);
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN) {
                scan_inflight(x, func, hitlist);
        } else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /* For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);

                        /* An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        WARN_ON_ONCE(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}

static void dec_inflight(struct unix_sock *usk)
{
        usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
        usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        u->inflight++;

        /* If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
        if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
                list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
        struct sk_buff_head hitlist;
        struct unix_sock *u, *next;
        LIST_HEAD(not_cycle_list);
        struct list_head cursor;

        spin_lock(&unix_gc_lock);

        unix_walk_scc();

        /* First, select candidates for garbage collection. Only
         * in-flight sockets are considered, and from those only ones
         * which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference. Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues. Other, non candidate sockets _can_ be
         * added to queue, so we must make sure only to touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;

                total_refs = file_count(u->sk.sk_socket->file);

                WARN_ON_ONCE(!u->inflight);
                WARN_ON_ONCE(total_refs < u->inflight);
                if (total_refs == u->inflight) {
                        list_move_tail(&u->link, &gc_candidates);
                        __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                }
        }

        /* Now remove all internal in-flight reference to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /* Restore the references for children of all candidates,
         * which have remaining references. Do this recursively, so
         * only those remain, which form cyclic references.
         *
         * Use a "cursor" link, to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (u->inflight) {
                        list_move_tail(&u->link, &not_cycle_list);
                        __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);

        /* Now gc_candidates contains only garbage. Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link) {
                scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
                if (u->oob_skb) {
                        kfree_skb(u->oob_skb);
                        u->oob_skb = NULL;
                }
#endif
        }

        /* not_cycle_list contains those sockets which do not make up a
         * cycle. Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                list_move_tail(&u->link, &gc_inflight_list);
        }

        spin_unlock(&unix_gc_lock);

        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        WARN_ON_ONCE(!list_empty(&gc_candidates));

        /* Paired with READ_ONCE() in wait_for_unix_gc(). */
        WRITE_ONCE(gc_in_progress, false);

        spin_unlock(&unix_gc_lock);
}
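
/* The actual collection runs from a workqueue: unix_gc() only marks
 * gc_in_progress and queues unix_gc_work, while wait_for_unix_gc() may
 * flush the work to throttle users with too many fds still in flight.
 */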
static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
        WRITE_ONCE(gc_in_progress, true);
        queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
        /* If number of inflight sockets is insane,
         * force a garbage collect right now.
         *
         * Paired with the WRITE_ONCE() in unix_add_edges(),
         * unix_del_edges(), unix_gc(), and __unix_gc().
         */
        if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
            !READ_ONCE(gc_in_progress))
                unix_gc();

        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
        if (!fpl || !fpl->count_unix ||
            READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;

        if (READ_ONCE(gc_in_progress))
                flush_work(&unix_gc_work);
}