// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket when we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		extra fput() in the situation when we passed the descriptor
 *		via such a socket and closed it (the descriptor). That would
 *		happen on each unix_gc() until the accept(). Since the struct
 *		file in question would go to the free list and might be
 *		reused... That might be the reason for random oopses on
 *		filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as the path from gc_current to dummy.
 *		Unmark now means "add to tree". Push == "make it a son of
 *		gc_current". Pop == "move gc_current to parent". We keep
 *		only pointers to parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
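/* Every AF_UNIX socket whose fd is in flight in an SCM_RIGHTS message owns
 * a unix_vertex, and each such message adds a unix_edge from that socket
 * (predecessor) to the socket receiving it (successor).  unix_add_edges()
 * and unix_del_edges() below keep this directed graph in sync as fds are
 * queued and consumed, and unix_walk_scc() later groups the vertices into
 * strongly connected components so the collector only does real work when
 * a cyclic reference can actually exist (unix_graph_maybe_cyclic).
 */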
struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}

static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
	/* If an embryo socket has a fd,
	 * the listener indirectly holds the fd's refcnt.
	 */
	if (edge->successor->listener)
		return unix_sk(edge->successor->listener)->vertex;

	return edge->successor->vertex;
}

static bool unix_graph_maybe_cyclic;
static bool unix_graph_grouped;

static void unix_update_graph(struct unix_vertex *vertex)
{
	/* If the receiver socket is not inflight, no cyclic
	 * reference could be formed.
	 */
	if (!vertex)
		return;

	unix_graph_maybe_cyclic = true;
	unix_graph_grouped = false;
}

static LIST_HEAD(unix_unvisited_vertices);

enum unix_vertex_index {
	UNIX_VERTEX_INDEX_MARK1,
	UNIX_VERTEX_INDEX_MARK2,
	UNIX_VERTEX_INDEX_START,
};

static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;

static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	if (!vertex) {
		vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
		vertex->index = unix_vertex_unvisited_index;
		vertex->out_degree = 0;
		INIT_LIST_HEAD(&vertex->edges);
		INIT_LIST_HEAD(&vertex->scc_entry);

		list_move_tail(&vertex->entry, &unix_unvisited_vertices);
		edge->predecessor->vertex = vertex;
	}

	vertex->out_degree++;
	list_add_tail(&edge->vertex_entry, &vertex->edges);

	unix_update_graph(unix_edge_successor(edge));
}

static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	unix_update_graph(unix_edge_successor(edge));

	list_del(&edge->vertex_entry);
	vertex->out_degree--;

	if (!vertex->out_degree) {
		edge->predecessor->vertex = NULL;
		list_move_tail(&vertex->entry, &fpl->vertices);
	}
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex, *next_vertex;

	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
		list_del(&vertex->entry);
		kfree(vertex);
	}
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;

void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
	int i = 0, j = 0;

	spin_lock(&unix_gc_lock);

	if (!fpl->count_unix)
		goto out;

	do {
		struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
		struct unix_edge *edge;

		if (!inflight)
			continue;

		edge = fpl->edges + i++;
		edge->predecessor = inflight;
		edge->successor = receiver;

		unix_add_edge(fpl, edge);
	} while (i < fpl->count_unix);

	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);

	spin_unlock(&unix_gc_lock);

	fpl->inflight = true;

	unix_free_vertices(fpl);
}
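/* unix_del_edges() reverses unix_add_edges() once the message carrying the
 * fds is received or the skb holding it is freed: each edge is unlinked
 * from its predecessor's vertex, and a vertex whose out_degree drops to
 * zero is handed back to the scm_fp_list so it can be freed later by
 * unix_free_vertices().
 */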
void unix_del_edges(struct scm_fp_list *fpl)
{
	int i = 0;

	spin_lock(&unix_gc_lock);

	if (!fpl->count_unix)
		goto out;

	do {
		struct unix_edge *edge = fpl->edges + i++;

		unix_del_edge(fpl, edge);
	} while (i < fpl->count_unix);

	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);

	spin_unlock(&unix_gc_lock);

	fpl->inflight = false;
}

void unix_update_edges(struct unix_sock *receiver)
{
	spin_lock(&unix_gc_lock);
	unix_update_graph(unix_sk(receiver->listener)->vertex);
	receiver->listener = NULL;
	spin_unlock(&unix_gc_lock);
}

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

	for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

		list_add(&vertex->entry, &fpl->vertices);
	}

	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
				    GFP_KERNEL_ACCOUNT);
	if (!fpl->edges)
		goto err;

	return 0;

err:
	unix_free_vertices(fpl);
	return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
	if (fpl->inflight)
		unix_del_edges(fpl);

	kvfree(fpl->edges);
	unix_free_vertices(fpl);
}

static bool unix_scc_cyclic(struct list_head *scc)
{
	struct unix_vertex *vertex;
	struct unix_edge *edge;

	/* SCC containing multiple vertices ? */
	if (!list_is_singular(scc))
		return true;

	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

	/* Self-reference or an embryo-listener circle ? */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		if (unix_edge_successor(edge) == vertex)
			return true;
	}

	return false;
}

static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
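/* __unix_walk_scc() is an iterative, Tarjan-style depth-first search from
 * @vertex.  vertex->index records the visit order and doubles as the
 * on-stack marker, vertex->scc_index tracks the smallest index reachable
 * from the vertex, and edge_stack replaces recursion so a deep graph
 * cannot overflow the kernel stack.  Vertices that belong to the same
 * strongly connected component end up linked together via scc_entry.
 */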
static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index)
{
	LIST_HEAD(vertex_stack);
	struct unix_edge *edge;
	LIST_HEAD(edge_stack);

next_vertex:
	/* Push vertex to vertex_stack and mark it as on-stack
	 * (index >= UNIX_VERTEX_INDEX_START).
	 * The vertex will be popped when finalising SCC later.
	 */
	list_add(&vertex->scc_entry, &vertex_stack);

	vertex->index = *last_index;
	vertex->scc_index = *last_index;
	(*last_index)++;

	/* Explore neighbour vertices (receivers of the current vertex's fd). */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		struct unix_vertex *next_vertex = unix_edge_successor(edge);

		if (!next_vertex)
			continue;

		if (next_vertex->index == unix_vertex_unvisited_index) {
			/* Iterative deepening depth first search
			 *
			 *   1. Push a forward edge to edge_stack and set
			 *      the successor to vertex for the next iteration.
			 */
			list_add(&edge->stack_entry, &edge_stack);

			vertex = next_vertex;
			goto next_vertex;

			/*   2. Pop the edge directed to the current vertex
			 *      and restore the ancestor for backtracking.
			 */
prev_vertex:
			edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
			list_del_init(&edge->stack_entry);

			next_vertex = vertex;
			vertex = edge->predecessor->vertex;

			/* If the successor has a smaller scc_index, two vertices
			 * are in the same SCC, so propagate the smaller scc_index
			 * to skip SCC finalisation.
			 */
			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else if (next_vertex->index != unix_vertex_grouped_index) {
			/* Loop detected by a back/cross edge.
			 *
			 * The successor is on vertex_stack, so two vertices are in
			 * the same SCC.  If the successor has a smaller scc_index,
			 * propagate it to skip SCC finalisation.
			 */
			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else {
			/* The successor was already grouped as another SCC */
		}
	}

	if (vertex->index == vertex->scc_index) {
		struct list_head scc;

		/* SCC finalised.
		 *
		 * If the scc_index was not updated, all the vertices above on
		 * vertex_stack are in the same SCC.  Group them using scc_entry.
		 */
		__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);

		list_for_each_entry_reverse(vertex, &scc, scc_entry) {
			/* Don't restart DFS from this vertex in unix_walk_scc(). */
			list_move_tail(&vertex->entry, &unix_visited_vertices);

			/* Mark vertex as off-stack. */
			vertex->index = unix_vertex_grouped_index;
		}

		if (!unix_graph_maybe_cyclic)
			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

		list_del(&scc);
	}

	/* Need backtracking ? */
	if (!list_empty(&edge_stack))
		goto prev_vertex;
}

static void unix_walk_scc(void)
{
	unsigned long last_index = UNIX_VERTEX_INDEX_START;

	unix_graph_maybe_cyclic = false;

	/* Visit every vertex exactly once.
	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
	 */
	while (!list_empty(&unix_unvisited_vertices)) {
		struct unix_vertex *vertex;

		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
		__unix_walk_scc(vertex, &last_index);
	}

	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);

	unix_graph_grouped = true;
}

static void unix_walk_scc_fast(void)
{
	while (!list_empty(&unix_unvisited_vertices)) {
		struct unix_vertex *vertex;
		struct list_head scc;

		vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
		list_add(&scc, &vertex->scc_entry);

		list_for_each_entry_reverse(vertex, &scc, scc_entry)
			list_move_tail(&vertex->entry, &unix_visited_vertices);

		list_del(&scc);
	}

	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}
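/* Everything below is the older cycle-collecting garbage collector added by
 * Miklos Szeredi in 2007: sockets whose struct file is referenced only by
 * in-flight SCM_RIGHTS messages are gathered on gc_candidates, the references
 * they hold on each other are subtracted and then selectively restored, and
 * whatever still forms a cycle has its queued skbs purged.  The SCC walk
 * above is used to skip this pass entirely when unix_graph_maybe_cyclic
 * says no cycle can exist.
 */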
452 */ 453 void unix_inflight(struct user_struct *user, struct file *filp) 454 { 455 struct unix_sock *u = unix_get_socket(filp); 456 457 spin_lock(&unix_gc_lock); 458 459 if (u) { 460 if (!u->inflight) { 461 WARN_ON_ONCE(!list_empty(&u->link)); 462 list_add_tail(&u->link, &gc_inflight_list); 463 } else { 464 WARN_ON_ONCE(list_empty(&u->link)); 465 } 466 u->inflight++; 467 } 468 469 spin_unlock(&unix_gc_lock); 470 } 471 472 void unix_notinflight(struct user_struct *user, struct file *filp) 473 { 474 struct unix_sock *u = unix_get_socket(filp); 475 476 spin_lock(&unix_gc_lock); 477 478 if (u) { 479 WARN_ON_ONCE(!u->inflight); 480 WARN_ON_ONCE(list_empty(&u->link)); 481 482 u->inflight--; 483 if (!u->inflight) 484 list_del_init(&u->link); 485 } 486 487 spin_unlock(&unix_gc_lock); 488 } 489 490 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), 491 struct sk_buff_head *hitlist) 492 { 493 struct sk_buff *skb; 494 struct sk_buff *next; 495 496 spin_lock(&x->sk_receive_queue.lock); 497 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { 498 /* Do we have file descriptors ? */ 499 if (UNIXCB(skb).fp) { 500 bool hit = false; 501 /* Process the descriptors of this socket */ 502 int nfd = UNIXCB(skb).fp->count; 503 struct file **fp = UNIXCB(skb).fp->fp; 504 505 while (nfd--) { 506 /* Get the socket the fd matches if it indeed does so */ 507 struct unix_sock *u = unix_get_socket(*fp++); 508 509 /* Ignore non-candidates, they could have been added 510 * to the queues after starting the garbage collection 511 */ 512 if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { 513 hit = true; 514 515 func(u); 516 } 517 } 518 if (hit && hitlist != NULL) { 519 __skb_unlink(skb, &x->sk_receive_queue); 520 __skb_queue_tail(hitlist, skb); 521 } 522 } 523 } 524 spin_unlock(&x->sk_receive_queue.lock); 525 } 526 527 static void scan_children(struct sock *x, void (*func)(struct unix_sock *), 528 struct sk_buff_head *hitlist) 529 { 530 if (x->sk_state != TCP_LISTEN) { 531 scan_inflight(x, func, hitlist); 532 } else { 533 struct sk_buff *skb; 534 struct sk_buff *next; 535 struct unix_sock *u; 536 LIST_HEAD(embryos); 537 538 /* For a listening socket collect the queued embryos 539 * and perform a scan on them as well. 540 */ 541 spin_lock(&x->sk_receive_queue.lock); 542 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { 543 u = unix_sk(skb->sk); 544 545 /* An embryo cannot be in-flight, so it's safe 546 * to use the list link. 
547 */ 548 WARN_ON_ONCE(!list_empty(&u->link)); 549 list_add_tail(&u->link, &embryos); 550 } 551 spin_unlock(&x->sk_receive_queue.lock); 552 553 while (!list_empty(&embryos)) { 554 u = list_entry(embryos.next, struct unix_sock, link); 555 scan_inflight(&u->sk, func, hitlist); 556 list_del_init(&u->link); 557 } 558 } 559 } 560 561 static void dec_inflight(struct unix_sock *usk) 562 { 563 usk->inflight--; 564 } 565 566 static void inc_inflight(struct unix_sock *usk) 567 { 568 usk->inflight++; 569 } 570 571 static void inc_inflight_move_tail(struct unix_sock *u) 572 { 573 u->inflight++; 574 575 /* If this still might be part of a cycle, move it to the end 576 * of the list, so that it's checked even if it was already 577 * passed over 578 */ 579 if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) 580 list_move_tail(&u->link, &gc_candidates); 581 } 582 583 static bool gc_in_progress; 584 585 static void __unix_gc(struct work_struct *work) 586 { 587 struct sk_buff_head hitlist; 588 struct unix_sock *u, *next; 589 LIST_HEAD(not_cycle_list); 590 struct list_head cursor; 591 592 spin_lock(&unix_gc_lock); 593 594 if (!unix_graph_maybe_cyclic) 595 goto skip_gc; 596 597 if (unix_graph_grouped) 598 unix_walk_scc_fast(); 599 else 600 unix_walk_scc(); 601 602 /* First, select candidates for garbage collection. Only 603 * in-flight sockets are considered, and from those only ones 604 * which don't have any external reference. 605 * 606 * Holding unix_gc_lock will protect these candidates from 607 * being detached, and hence from gaining an external 608 * reference. Since there are no possible receivers, all 609 * buffers currently on the candidates' queues stay there 610 * during the garbage collection. 611 * 612 * We also know that no new candidate can be added onto the 613 * receive queues. Other, non candidate sockets _can_ be 614 * added to queue, so we must make sure only to touch 615 * candidates. 616 */ 617 list_for_each_entry_safe(u, next, &gc_inflight_list, link) { 618 long total_refs; 619 620 total_refs = file_count(u->sk.sk_socket->file); 621 622 WARN_ON_ONCE(!u->inflight); 623 WARN_ON_ONCE(total_refs < u->inflight); 624 if (total_refs == u->inflight) { 625 list_move_tail(&u->link, &gc_candidates); 626 __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); 627 __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); 628 } 629 } 630 631 /* Now remove all internal in-flight reference to children of 632 * the candidates. 633 */ 634 list_for_each_entry(u, &gc_candidates, link) 635 scan_children(&u->sk, dec_inflight, NULL); 636 637 /* Restore the references for children of all candidates, 638 * which have remaining references. Do this recursively, so 639 * only those remain, which form cyclic references. 640 * 641 * Use a "cursor" link, to make the list traversal safe, even 642 * though elements might be moved about. 643 */ 644 list_add(&cursor, &gc_candidates); 645 while (cursor.next != &gc_candidates) { 646 u = list_entry(cursor.next, struct unix_sock, link); 647 648 /* Move cursor to after the current position. */ 649 list_move(&cursor, &u->link); 650 651 if (u->inflight) { 652 list_move_tail(&u->link, ¬_cycle_list); 653 __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); 654 scan_children(&u->sk, inc_inflight_move_tail, NULL); 655 } 656 } 657 list_del(&cursor); 658 659 /* Now gc_candidates contains only garbage. Restore original 660 * inflight counters for these as well, and remove the skbuffs 661 * which are creating the cycle(s). 
662 */ 663 skb_queue_head_init(&hitlist); 664 list_for_each_entry(u, &gc_candidates, link) { 665 scan_children(&u->sk, inc_inflight, &hitlist); 666 667 #if IS_ENABLED(CONFIG_AF_UNIX_OOB) 668 if (u->oob_skb) { 669 kfree_skb(u->oob_skb); 670 u->oob_skb = NULL; 671 } 672 #endif 673 } 674 675 /* not_cycle_list contains those sockets which do not make up a 676 * cycle. Restore these to the inflight list. 677 */ 678 while (!list_empty(¬_cycle_list)) { 679 u = list_entry(not_cycle_list.next, struct unix_sock, link); 680 __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); 681 list_move_tail(&u->link, &gc_inflight_list); 682 } 683 684 spin_unlock(&unix_gc_lock); 685 686 /* Here we are. Hitlist is filled. Die. */ 687 __skb_queue_purge(&hitlist); 688 689 spin_lock(&unix_gc_lock); 690 691 /* All candidates should have been detached by now. */ 692 WARN_ON_ONCE(!list_empty(&gc_candidates)); 693 skip_gc: 694 /* Paired with READ_ONCE() in wait_for_unix_gc(). */ 695 WRITE_ONCE(gc_in_progress, false); 696 697 spin_unlock(&unix_gc_lock); 698 } 699 700 static DECLARE_WORK(unix_gc_work, __unix_gc); 701 702 void unix_gc(void) 703 { 704 WRITE_ONCE(gc_in_progress, true); 705 queue_work(system_unbound_wq, &unix_gc_work); 706 } 707 708 #define UNIX_INFLIGHT_TRIGGER_GC 16000 709 #define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8) 710 711 void wait_for_unix_gc(struct scm_fp_list *fpl) 712 { 713 /* If number of inflight sockets is insane, 714 * force a garbage collect right now. 715 * 716 * Paired with the WRITE_ONCE() in unix_inflight(), 717 * unix_notinflight(), and __unix_gc(). 718 */ 719 if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && 720 !READ_ONCE(gc_in_progress)) 721 unix_gc(); 722 723 /* Penalise users who want to send AF_UNIX sockets 724 * but whose sockets have not been received yet. 725 */ 726 if (!fpl || !fpl->count_unix || 727 READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER) 728 return; 729 730 if (READ_ONCE(gc_in_progress)) 731 flush_work(&unix_gc_work); 732 } 733