// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket
 * problem.  If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree, and
 *    the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push the entire root set; process it in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles.  That is, we can send the descriptor
 *		of foo to bar and vice versa.  Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase.  Fix: revert the logic wrt MARKED.  Mark
 *		everything upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH!  New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed.  Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor).  That would
 *		happen on each unix_gc() until the accept().  Since the
 *		struct file in question would go to the free list and might
 *		be reused... that might be the reason for random oopses in
 *		filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack.  Now we keep the
 *		tree with its root in a dummy node + a pointer (gc_current)
 *		to one of the nodes.  The stack is represented as the path
 *		from gc_current to the dummy.  Unmark now means "add to the
 *		tree".  Push == "make it a son of gc_current".  Pop ==
 *		"move gc_current to parent".  We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn.  Added the missing check for ->dead in listen queue
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle-collecting algorithm.  This should
 *		solve several problems with the previous code, like being
 *		racy wrt receive and holding up unrelated socket operations.
 */
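
/* The collector below only consumes the in-flight accounting state
 * (unix_inflight()/unix_notinflight(), unix_tot_inflight,
 * gc_inflight_list and unix_gc_lock); the producers of that state
 * live outside this file and are pulled in through the headers and
 * "scm.h" below.
 */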

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
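
/* Like scan_inflight(), but if 'x' is a listening socket, scan the
 * receive queues of its embryo sockets instead: fds can already sit
 * queued on a connect()'ed but not yet accept()'ed socket, and those
 * are reachable through 'x'.
 */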
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}
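
/* These three are the 'func' callbacks handed to scan_children() by
 * the marking passes in unix_gc() below; each one adjusts the
 * in-flight counter of a candidate child socket found in a receive
 * queue.
 */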
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collect right now.
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight() and unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}
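
/* The collection itself, in outline:
 *
 *  1. Move every in-flight socket whose file reference count equals
 *     its in-flight count onto gc_candidates: nothing but queued
 *     SCM_RIGHTS references keeps it alive.
 *  2. Subtract the references the candidates hold on each other's
 *     children, then add them back, recursively, for every candidate
 *     that is still externally reachable.
 *  3. Whatever remains on gc_candidates is cyclic garbage; unlink the
 *     skbs carrying the cycle-forming fds onto a hitlist and purge it.
 */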
/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * have remaining references.  Do this recursively, so that
	 * only those sockets remain which form cyclic references.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);
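
	/* Every candidate that was externally reachable has been moved
	 * off to not_cycle_list by the loop above, so anything still
	 * on gc_candidates is referenced only by other leftovers: the
	 * cycles themselves.
	 */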

	/* Now gc_candidates contains only garbage.  Restore the
	 * original inflight counters for these as well, and remove
	 * the skbuffs which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up
	 * a cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* We need io_uring to clean up its registered files, so ignore
	 * all io_uring originated skbs.  That's fine, as io_uring
	 * doesn't keep references to other io_uring instances; killing
	 * all the other files in the cycle will put all the io_uring
	 * references, forcing it to go through the normal release path
	 * and eventually put its registered files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->destructor == io_uring_destruct_scm) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are.  Hitlist is filled.  Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring registered files; just push them
	 * back to the inflight list.
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

out:
	spin_unlock(&unix_gc_lock);
}