1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NET3: Garbage Collector For AF_UNIX sockets 4 * 5 * Garbage Collector: 6 * Copyright (C) Barak A. Pearlmutter. 7 * 8 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem. 9 * If it doesn't work blame me, it worked when Barak sent it. 10 * 11 * Assumptions: 12 * 13 * - object w/ a bit 14 * - free list 15 * 16 * Current optimizations: 17 * 18 * - explicit stack instead of recursion 19 * - tail recurse on first born instead of immediate push/pop 20 * - we gather the stuff that should not be killed into tree 21 * and stack is just a path from root to the current pointer. 22 * 23 * Future optimizations: 24 * 25 * - don't just push entire root set; process in place 26 * 27 * Fixes: 28 * Alan Cox 07 Sept 1997 Vmalloc internal stack as needed. 29 * Cope with changing max_files. 30 * Al Viro 11 Oct 1998 31 * Graph may have cycles. That is, we can send the descriptor 32 * of foo to bar and vice versa. Current code chokes on that. 33 * Fix: move SCM_RIGHTS ones into the separate list and then 34 * skb_free() them all instead of doing explicit fput's. 35 * Another problem: since fput() may block somebody may 36 * create a new unix_socket when we are in the middle of sweep 37 * phase. Fix: revert the logic wrt MARKED. Mark everything 38 * upon the beginning and unmark non-junk ones. 39 * 40 * [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS 41 * sent to connect()'ed but still not accept()'ed sockets. 42 * Fixed. Old code had slightly different problem here: 43 * extra fput() in situation when we passed the descriptor via 44 * such socket and closed it (descriptor). That would happen on 45 * each unix_gc() until the accept(). Since the struct file in 46 * question would go to the free list and might be reused... 47 * That might be the reason of random oopses on filp_close() 48 * in unrelated processes. 49 * 50 * AV 28 Feb 1999 51 * Kill the explicit allocation of stack. Now we keep the tree 52 * with root in dummy + pointer (gc_current) to one of the nodes. 53 * Stack is represented as path from gc_current to dummy. Unmark 54 * now means "add to tree". Push == "make it a son of gc_current". 55 * Pop == "move gc_current to parent". We keep only pointers to 56 * parents (->gc_tree). 57 * AV 1 Mar 1999 58 * Damn. Added missing check for ->dead in listen queues scanning. 59 * 60 * Miklos Szeredi 25 Jun 2007 61 * Reimplement with a cycle collecting algorithm. This should 62 * solve several problems with the previous code, like being racy 63 * wrt receive and holding up unrelated socket operations. 64 */ 65 66 #include <linux/kernel.h> 67 #include <linux/string.h> 68 #include <linux/socket.h> 69 #include <linux/un.h> 70 #include <linux/net.h> 71 #include <linux/fs.h> 72 #include <linux/skbuff.h> 73 #include <linux/netdevice.h> 74 #include <linux/file.h> 75 #include <linux/proc_fs.h> 76 #include <linux/mutex.h> 77 #include <linux/wait.h> 78 79 #include <net/sock.h> 80 #include <net/af_unix.h> 81 #include <net/scm.h> 82 #include <net/tcp_states.h> 83 84 struct unix_sock *unix_get_socket(struct file *filp) 85 { 86 struct inode *inode = file_inode(filp); 87 88 /* Socket ? */ 89 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { 90 struct socket *sock = SOCKET_I(inode); 91 const struct proto_ops *ops; 92 struct sock *sk = sock->sk; 93 94 ops = READ_ONCE(sock->ops); 95 96 /* PF_UNIX ? */ 97 if (sk && ops && ops->family == PF_UNIX) 98 return unix_sk(sk); 99 } 100 101 return NULL; 102 } 103 104 static LIST_HEAD(unix_unvisited_vertices); 105 106 enum unix_vertex_index { 107 UNIX_VERTEX_INDEX_UNVISITED, 108 UNIX_VERTEX_INDEX_START, 109 }; 110 111 static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge) 112 { 113 struct unix_vertex *vertex = edge->predecessor->vertex; 114 115 if (!vertex) { 116 vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry); 117 vertex->out_degree = 0; 118 INIT_LIST_HEAD(&vertex->edges); 119 120 list_move_tail(&vertex->entry, &unix_unvisited_vertices); 121 edge->predecessor->vertex = vertex; 122 } 123 124 vertex->out_degree++; 125 list_add_tail(&edge->vertex_entry, &vertex->edges); 126 } 127 128 static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge) 129 { 130 struct unix_vertex *vertex = edge->predecessor->vertex; 131 132 list_del(&edge->vertex_entry); 133 vertex->out_degree--; 134 135 if (!vertex->out_degree) { 136 edge->predecessor->vertex = NULL; 137 list_move_tail(&vertex->entry, &fpl->vertices); 138 } 139 } 140 141 static void unix_free_vertices(struct scm_fp_list *fpl) 142 { 143 struct unix_vertex *vertex, *next_vertex; 144 145 list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) { 146 list_del(&vertex->entry); 147 kfree(vertex); 148 } 149 } 150 151 DEFINE_SPINLOCK(unix_gc_lock); 152 unsigned int unix_tot_inflight; 153 154 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver) 155 { 156 int i = 0, j = 0; 157 158 spin_lock(&unix_gc_lock); 159 160 if (!fpl->count_unix) 161 goto out; 162 163 do { 164 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); 165 struct unix_edge *edge; 166 167 if (!inflight) 168 continue; 169 170 edge = fpl->edges + i++; 171 edge->predecessor = inflight; 172 edge->successor = receiver; 173 174 unix_add_edge(fpl, edge); 175 } while (i < fpl->count_unix); 176 177 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix); 178 out: 179 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count); 180 181 spin_unlock(&unix_gc_lock); 182 183 fpl->inflight = true; 184 185 unix_free_vertices(fpl); 186 } 187 188 void unix_del_edges(struct scm_fp_list *fpl) 189 { 190 int i = 0; 191 192 spin_lock(&unix_gc_lock); 193 194 if (!fpl->count_unix) 195 goto out; 196 197 do { 198 struct unix_edge *edge = fpl->edges + i++; 199 200 unix_del_edge(fpl, edge); 201 } while (i < fpl->count_unix); 202 203 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix); 204 out: 205 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count); 206 207 spin_unlock(&unix_gc_lock); 208 209 fpl->inflight = false; 210 } 211 212 int unix_prepare_fpl(struct scm_fp_list *fpl) 213 { 214 struct unix_vertex *vertex; 215 int i; 216 217 if (!fpl->count_unix) 218 return 0; 219 220 for (i = 0; i < fpl->count_unix; i++) { 221 vertex = kmalloc(sizeof(*vertex), GFP_KERNEL); 222 if (!vertex) 223 goto err; 224 225 list_add(&vertex->entry, &fpl->vertices); 226 } 227 228 fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges), 229 GFP_KERNEL_ACCOUNT); 230 if (!fpl->edges) 231 goto err; 232 233 return 0; 234 235 err: 236 unix_free_vertices(fpl); 237 return -ENOMEM; 238 } 239 240 void unix_destroy_fpl(struct scm_fp_list *fpl) 241 { 242 if (fpl->inflight) 243 unix_del_edges(fpl); 244 245 kvfree(fpl->edges); 246 unix_free_vertices(fpl); 247 } 248 249 static LIST_HEAD(unix_visited_vertices); 250 251 static void __unix_walk_scc(struct unix_vertex *vertex) 252 { 253 unsigned long index = UNIX_VERTEX_INDEX_START; 254 struct unix_edge *edge; 255 LIST_HEAD(edge_stack); 256 257 next_vertex: 258 vertex->index = index; 259 index++; 260 261 /* Explore neighbour vertices (receivers of the current vertex's fd). */ 262 list_for_each_entry(edge, &vertex->edges, vertex_entry) { 263 struct unix_vertex *next_vertex = edge->successor->vertex; 264 265 if (!next_vertex) 266 continue; 267 268 if (next_vertex->index == UNIX_VERTEX_INDEX_UNVISITED) { 269 /* Iterative deepening depth first search 270 * 271 * 1. Push a forward edge to edge_stack and set 272 * the successor to vertex for the next iteration. 273 */ 274 list_add(&edge->stack_entry, &edge_stack); 275 276 vertex = next_vertex; 277 goto next_vertex; 278 279 /* 2. Pop the edge directed to the current vertex 280 * and restore the ancestor for backtracking. 281 */ 282 prev_vertex: 283 edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry); 284 list_del_init(&edge->stack_entry); 285 286 vertex = edge->predecessor->vertex; 287 } 288 } 289 290 /* Don't restart DFS from this vertex in unix_walk_scc(). */ 291 list_move_tail(&vertex->entry, &unix_visited_vertices); 292 293 /* Need backtracking ? */ 294 if (!list_empty(&edge_stack)) 295 goto prev_vertex; 296 } 297 298 static void unix_walk_scc(void) 299 { 300 struct unix_vertex *vertex; 301 302 list_for_each_entry(vertex, &unix_unvisited_vertices, entry) 303 vertex->index = UNIX_VERTEX_INDEX_UNVISITED; 304 305 /* Visit every vertex exactly once. 306 * __unix_walk_scc() moves visited vertices to unix_visited_vertices. 307 */ 308 while (!list_empty(&unix_unvisited_vertices)) { 309 vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); 310 __unix_walk_scc(vertex); 311 } 312 313 list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices); 314 } 315 316 static LIST_HEAD(gc_candidates); 317 static LIST_HEAD(gc_inflight_list); 318 319 /* Keep the number of times in flight count for the file 320 * descriptor if it is for an AF_UNIX socket. 321 */ 322 void unix_inflight(struct user_struct *user, struct file *filp) 323 { 324 struct unix_sock *u = unix_get_socket(filp); 325 326 spin_lock(&unix_gc_lock); 327 328 if (u) { 329 if (!u->inflight) { 330 WARN_ON_ONCE(!list_empty(&u->link)); 331 list_add_tail(&u->link, &gc_inflight_list); 332 } else { 333 WARN_ON_ONCE(list_empty(&u->link)); 334 } 335 u->inflight++; 336 } 337 338 spin_unlock(&unix_gc_lock); 339 } 340 341 void unix_notinflight(struct user_struct *user, struct file *filp) 342 { 343 struct unix_sock *u = unix_get_socket(filp); 344 345 spin_lock(&unix_gc_lock); 346 347 if (u) { 348 WARN_ON_ONCE(!u->inflight); 349 WARN_ON_ONCE(list_empty(&u->link)); 350 351 u->inflight--; 352 if (!u->inflight) 353 list_del_init(&u->link); 354 } 355 356 spin_unlock(&unix_gc_lock); 357 } 358 359 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), 360 struct sk_buff_head *hitlist) 361 { 362 struct sk_buff *skb; 363 struct sk_buff *next; 364 365 spin_lock(&x->sk_receive_queue.lock); 366 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { 367 /* Do we have file descriptors ? */ 368 if (UNIXCB(skb).fp) { 369 bool hit = false; 370 /* Process the descriptors of this socket */ 371 int nfd = UNIXCB(skb).fp->count; 372 struct file **fp = UNIXCB(skb).fp->fp; 373 374 while (nfd--) { 375 /* Get the socket the fd matches if it indeed does so */ 376 struct unix_sock *u = unix_get_socket(*fp++); 377 378 /* Ignore non-candidates, they could have been added 379 * to the queues after starting the garbage collection 380 */ 381 if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { 382 hit = true; 383 384 func(u); 385 } 386 } 387 if (hit && hitlist != NULL) { 388 __skb_unlink(skb, &x->sk_receive_queue); 389 __skb_queue_tail(hitlist, skb); 390 } 391 } 392 } 393 spin_unlock(&x->sk_receive_queue.lock); 394 } 395 396 static void scan_children(struct sock *x, void (*func)(struct unix_sock *), 397 struct sk_buff_head *hitlist) 398 { 399 if (x->sk_state != TCP_LISTEN) { 400 scan_inflight(x, func, hitlist); 401 } else { 402 struct sk_buff *skb; 403 struct sk_buff *next; 404 struct unix_sock *u; 405 LIST_HEAD(embryos); 406 407 /* For a listening socket collect the queued embryos 408 * and perform a scan on them as well. 409 */ 410 spin_lock(&x->sk_receive_queue.lock); 411 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { 412 u = unix_sk(skb->sk); 413 414 /* An embryo cannot be in-flight, so it's safe 415 * to use the list link. 416 */ 417 WARN_ON_ONCE(!list_empty(&u->link)); 418 list_add_tail(&u->link, &embryos); 419 } 420 spin_unlock(&x->sk_receive_queue.lock); 421 422 while (!list_empty(&embryos)) { 423 u = list_entry(embryos.next, struct unix_sock, link); 424 scan_inflight(&u->sk, func, hitlist); 425 list_del_init(&u->link); 426 } 427 } 428 } 429 430 static void dec_inflight(struct unix_sock *usk) 431 { 432 usk->inflight--; 433 } 434 435 static void inc_inflight(struct unix_sock *usk) 436 { 437 usk->inflight++; 438 } 439 440 static void inc_inflight_move_tail(struct unix_sock *u) 441 { 442 u->inflight++; 443 444 /* If this still might be part of a cycle, move it to the end 445 * of the list, so that it's checked even if it was already 446 * passed over 447 */ 448 if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) 449 list_move_tail(&u->link, &gc_candidates); 450 } 451 452 static bool gc_in_progress; 453 454 static void __unix_gc(struct work_struct *work) 455 { 456 struct sk_buff_head hitlist; 457 struct unix_sock *u, *next; 458 LIST_HEAD(not_cycle_list); 459 struct list_head cursor; 460 461 spin_lock(&unix_gc_lock); 462 463 unix_walk_scc(); 464 465 /* First, select candidates for garbage collection. Only 466 * in-flight sockets are considered, and from those only ones 467 * which don't have any external reference. 468 * 469 * Holding unix_gc_lock will protect these candidates from 470 * being detached, and hence from gaining an external 471 * reference. Since there are no possible receivers, all 472 * buffers currently on the candidates' queues stay there 473 * during the garbage collection. 474 * 475 * We also know that no new candidate can be added onto the 476 * receive queues. Other, non candidate sockets _can_ be 477 * added to queue, so we must make sure only to touch 478 * candidates. 479 */ 480 list_for_each_entry_safe(u, next, &gc_inflight_list, link) { 481 long total_refs; 482 483 total_refs = file_count(u->sk.sk_socket->file); 484 485 WARN_ON_ONCE(!u->inflight); 486 WARN_ON_ONCE(total_refs < u->inflight); 487 if (total_refs == u->inflight) { 488 list_move_tail(&u->link, &gc_candidates); 489 __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); 490 __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); 491 } 492 } 493 494 /* Now remove all internal in-flight reference to children of 495 * the candidates. 496 */ 497 list_for_each_entry(u, &gc_candidates, link) 498 scan_children(&u->sk, dec_inflight, NULL); 499 500 /* Restore the references for children of all candidates, 501 * which have remaining references. Do this recursively, so 502 * only those remain, which form cyclic references. 503 * 504 * Use a "cursor" link, to make the list traversal safe, even 505 * though elements might be moved about. 506 */ 507 list_add(&cursor, &gc_candidates); 508 while (cursor.next != &gc_candidates) { 509 u = list_entry(cursor.next, struct unix_sock, link); 510 511 /* Move cursor to after the current position. */ 512 list_move(&cursor, &u->link); 513 514 if (u->inflight) { 515 list_move_tail(&u->link, ¬_cycle_list); 516 __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); 517 scan_children(&u->sk, inc_inflight_move_tail, NULL); 518 } 519 } 520 list_del(&cursor); 521 522 /* Now gc_candidates contains only garbage. Restore original 523 * inflight counters for these as well, and remove the skbuffs 524 * which are creating the cycle(s). 525 */ 526 skb_queue_head_init(&hitlist); 527 list_for_each_entry(u, &gc_candidates, link) { 528 scan_children(&u->sk, inc_inflight, &hitlist); 529 530 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 531 if (u->oob_skb) { 532 kfree_skb(u->oob_skb); 533 u->oob_skb = NULL; 534 } 535 #endif 536 } 537 538 /* not_cycle_list contains those sockets which do not make up a 539 * cycle. Restore these to the inflight list. 540 */ 541 while (!list_empty(¬_cycle_list)) { 542 u = list_entry(not_cycle_list.next, struct unix_sock, link); 543 __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); 544 list_move_tail(&u->link, &gc_inflight_list); 545 } 546 547 spin_unlock(&unix_gc_lock); 548 549 /* Here we are. Hitlist is filled. Die. */ 550 __skb_queue_purge(&hitlist); 551 552 spin_lock(&unix_gc_lock); 553 554 /* All candidates should have been detached by now. */ 555 WARN_ON_ONCE(!list_empty(&gc_candidates)); 556 557 /* Paired with READ_ONCE() in wait_for_unix_gc(). */ 558 WRITE_ONCE(gc_in_progress, false); 559 560 spin_unlock(&unix_gc_lock); 561 } 562 563 static DECLARE_WORK(unix_gc_work, __unix_gc); 564 565 void unix_gc(void) 566 { 567 WRITE_ONCE(gc_in_progress, true); 568 queue_work(system_unbound_wq, &unix_gc_work); 569 } 570 571 #define UNIX_INFLIGHT_TRIGGER_GC 16000 572 #define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8) 573 574 void wait_for_unix_gc(struct scm_fp_list *fpl) 575 { 576 /* If number of inflight sockets is insane, 577 * force a garbage collect right now. 578 * 579 * Paired with the WRITE_ONCE() in unix_inflight(), 580 * unix_notinflight(), and __unix_gc(). 581 */ 582 if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && 583 !READ_ONCE(gc_in_progress)) 584 unix_gc(); 585 586 /* Penalise users who want to send AF_UNIX sockets 587 * but whose sockets have not been received yet. 588 */ 589 if (!fpl || !fpl->count_unix || 590 READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER) 591 return; 592 593 if (READ_ONCE(gc_in_progress)) 594 flush_work(&unix_gc_work); 595 } 596