// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex, *next_vertex;

	list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
		list_del(&vertex->entry);
		kfree(vertex);
	}
}

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

	for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

		list_add(&vertex->entry, &fpl->vertices);
	}

	return 0;

err:
	unix_free_vertices(fpl);
	return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
	unix_free_vertices(fpl);
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
static LIST_HEAD(gc_inflight_list);

/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		if (!u->inflight) {
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			WARN_ON_ONCE(list_empty(&u->link));
		}
		u->inflight++;

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);

	spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(list_empty(&u->link));

		u->inflight--;
		if (!u->inflight)
			list_del_init(&u->link);

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);

	spin_unlock(&unix_gc_lock);
}

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates, they could have been added
				 * to the queues after starting the garbage collection
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;

static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct unix_sock *u, *next;
	LIST_HEAD(not_cycle_list);
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;

		total_refs = file_count(u->sk.sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references. Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}
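
/* Illustrative userspace sketch (not kernel code; kept only as a comment):
 * the kind of in-flight reference cycle the garbage collector above exists
 * to reclaim.  Each end of a socketpair queues its own descriptor on the
 * peer via SCM_RIGHTS, then both descriptors are closed, so each struct
 * file stays referenced only by an skb sitting on the other socket's
 * receive queue.  send_fd() is simply the usual sendmsg() + SCM_RIGHTS
 * control-message boilerplate.
 *
 *	static void send_fd(int sock, int fd)
 *	{
 *		char byte = 0;
 *		struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *		union {
 *			struct cmsghdr hdr;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} ctl = {};
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = ctl.buf,
 *			.msg_controllen = sizeof(ctl.buf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		sendmsg(sock, &msg, 0);
 *	}
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send_fd(sv[0], sv[0]);	(sv[0]'s file now sits in sv[1]'s queue)
 *	send_fd(sv[1], sv[1]);	(sv[1]'s file now sits in sv[0]'s queue)
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * After both close() calls no descriptor table references either socket,
 * yet each file keeps a reference count of one because of the skb queued
 * on its peer.  file_count() equals the inflight count for both, so
 * __unix_gc() selects them as candidates, finds no external references
 * remain, and frees the skbs that form the cycle via the hitlist.
 */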