xref: /linux/net/unix/af_unix.c (revision cfc4ca8986bb1f6182da6cd7bb57f228590b4643)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NET4:	Implementation of BSD Unix domain sockets.
4  *
5  * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *
7  * Fixes:
8  *		Linus Torvalds	:	Assorted bug cures.
9  *		Niibe Yutaka	:	async I/O support.
10  *		Carsten Paeth	:	PF_UNIX check, address fixes.
11  *		Alan Cox	:	Limit size of allocated blocks.
12  *		Alan Cox	:	Fixed the stupid socketpair bug.
13  *		Alan Cox	:	BSD compatibility fine tuning.
14  *		Alan Cox	:	Fixed a bug in connect when interrupted.
15  *		Alan Cox	:	Sorted out a proper draft version of
16  *					file descriptor passing hacked up from
17  *					Mike Shaver's work.
18  *		Marty Leisner	:	Fixes to fd passing
19  *		Nick Nevin	:	recvmsg bugfix.
20  *		Alan Cox	:	Started proper garbage collector
21  *		Heiko Eißfeldt	:	Missing verify_area check
22  *		Alan Cox	:	Started POSIXisms
23  *		Andreas Schwab	:	Replace inode by dentry for proper
24  *					reference counting
25  *		Kirk Petersen	:	Made this a module
26  *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
27  *					Lots of bug fixes.
28  *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
29  *					by the above two patches.
30  *	     Andrea Arcangeli	:	If possible we block in connect(2)
31  *					if the max backlog of the listen socket
32  *					has been reached. This won't break
33  *					old apps and it will avoid a huge amount
34  *					of hashed socks (this is for unix_gc()
35  *					performance reasons).
36  *					Security fix that limits the max
37  *					number of socks to 2*max_files and
38  *					the number of skb queueable in the
39  *					dgram receiver.
40  *		Artur Skawina   :	Hash function optimizations
41  *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
42  *	      Malcolm Beattie   :	Set peercred for socketpair
43  *	     Michal Ostrowski   :       Module initialization cleanup.
44  *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
45  *	     				the core infrastructure is doing that
46  *	     				for all net proto families now (2.5.69+)
47  *
48  * Known differences from reference BSD that was tested:
49  *
50  *	[TO FIX]
51  *	ECONNREFUSED is not returned from one end of a connected() socket to the
52  *		other the moment one end closes.
53  *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
54  *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
55  *	[NOT TO FIX]
56  *	accept() returns a path name even if the connecting socket has closed
57  *		in the meantime (BSD loses the path and gives up).
58  *	accept() returns 0 length path for an unbound connector. BSD returns 16
59  *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60  *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
61  *	BSD af_unix apparently has connect forgetting to block properly.
62  *		(need to check this with the POSIX spec in detail)
63  *
64  * Differences from 2.0.0-11-... (ANK)
65  *	Bug fixes and improvements.
66  *		- client shutdown killed server socket.
67  *		- removed all useless cli/sti pairs.
68  *
69  *	Semantic changes/extensions.
70  *		- generic control message passing.
71  *		- SCM_CREDENTIALS control message.
72  *		- "Abstract" (not FS based) socket bindings.
73  *		  Abstract names are sequences of bytes (not zero terminated)
74  *		  started by 0, so that this name space does not intersect
75  *		  with BSD names.
76  */
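
/* Illustration (not part of this file): the two bind() address forms
 * described above, as seen from userspace.  An abstract name starts
 * with a 0 byte, is not NUL-terminated, and its length is conveyed
 * only via the addrlen argument:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	strcpy(a.sun_path, "/tmp/mysock");		(filesystem name)
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	memcpy(a.sun_path, "\0mysock", 7);		(abstract name)
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 7);
 */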
77 
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79 
80 #include <linux/bpf-cgroup.h>
81 #include <linux/btf_ids.h>
82 #include <linux/dcache.h>
83 #include <linux/errno.h>
84 #include <linux/fcntl.h>
85 #include <linux/file.h>
86 #include <linux/filter.h>
87 #include <linux/fs.h>
88 #include <linux/fs_struct.h>
89 #include <linux/init.h>
90 #include <linux/kernel.h>
91 #include <linux/mount.h>
92 #include <linux/namei.h>
93 #include <linux/net.h>
94 #include <linux/pidfs.h>
95 #include <linux/poll.h>
96 #include <linux/proc_fs.h>
97 #include <linux/sched/signal.h>
98 #include <linux/security.h>
99 #include <linux/seq_file.h>
100 #include <linux/skbuff.h>
101 #include <linux/slab.h>
102 #include <linux/socket.h>
103 #include <linux/splice.h>
104 #include <linux/string.h>
105 #include <linux/uaccess.h>
106 #include <net/af_unix.h>
107 #include <net/net_namespace.h>
108 #include <net/scm.h>
109 #include <net/tcp_states.h>
110 #include <uapi/linux/sockios.h>
111 #include <uapi/linux/termios.h>
112 
113 #include "af_unix.h"
114 
115 static atomic_long_t unix_nr_socks;
116 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
117 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
118 
119 /* SMP locking strategy:
120  *    hash table is protected with spinlock.
121  *    each socket state is protected by separate spinlock.
122  */
123 #ifdef CONFIG_PROVE_LOCKING
124 #define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))
125 
126 static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
127 				  const struct lockdep_map *b)
128 {
129 	return cmp_ptr(a, b);
130 }
131 
132 static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
133 				  const struct lockdep_map *_b)
134 {
135 	const struct unix_sock *a, *b;
136 
137 	a = container_of(_a, struct unix_sock, lock.dep_map);
138 	b = container_of(_b, struct unix_sock, lock.dep_map);
139 
140 	if (a->sk.sk_state == TCP_LISTEN) {
141 		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
142 		 *
143 		 *   1. a is TCP_LISTEN.
144 		 *   2. b is not a.
145 		 *   3. concurrent connect(b -> a) must fail.
146 		 *
147 		 * Except for 2. & 3., the b's state can be any possible
148 		 * value due to concurrent connect() or listen().
149 		 *
150 		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
151 		 * be expressed as lock_cmp_fn.
152 		 */
153 		switch (b->sk.sk_state) {
154 		case TCP_CLOSE:
155 		case TCP_ESTABLISHED:
156 		case TCP_LISTEN:
157 			return -1;
158 		default:
159 			/* Invalid case. */
160 			return 0;
161 		}
162 	}
163 
164 	/* Should never happen.  Just to be symmetric. */
165 	if (b->sk.sk_state == TCP_LISTEN) {
166 		switch (a->sk.sk_state) {
167 		case TCP_CLOSE:
168 		case TCP_ESTABLISHED:
169 			return 1;
170 		default:
171 			return 0;
172 		}
173 	}
174 
175 	/* unix_state_double_lock(): ascending address order. */
176 	return cmp_ptr(a, b);
177 }
178 
179 static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
180 				  const struct lockdep_map *_b)
181 {
182 	const struct sock *a, *b;
183 
184 	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
185 	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
186 
187 	/* unix_collect_skb(): listener -> embryo order. */
188 	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
189 		return -1;
190 
191 	/* Should never happen.  Just to be symmetric. */
192 	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
193 		return 1;
194 
195 	return 0;
196 }
197 #endif
198 
199 static unsigned int unix_unbound_hash(struct sock *sk)
200 {
201 	unsigned long hash = (unsigned long)sk;
202 
203 	hash ^= hash >> 16;
204 	hash ^= hash >> 8;
205 	hash ^= sk->sk_type;
206 
207 	return hash & UNIX_HASH_MOD;
208 }
209 
210 static unsigned int unix_bsd_hash(struct inode *i)
211 {
212 	return i->i_ino & UNIX_HASH_MOD;
213 }
214 
215 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
216 				       int addr_len, int type)
217 {
218 	__wsum csum = csum_partial(sunaddr, addr_len, 0);
219 	unsigned int hash;
220 
221 	hash = (__force unsigned int)csum_fold(csum);
222 	hash ^= hash >> 8;
223 	hash ^= type;
224 
225 	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
226 }
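
/* Note (derived from the three hash functions above): unbound and
 * pathname-bound sockets hash into buckets [0, UNIX_HASH_MOD] of
 * net->unx.table, while abstract sockets hash into
 * [UNIX_HASH_MOD + 1, 2 * UNIX_HASH_MOD + 1], so the two name spaces
 * never share a bucket.  Pathname-bound sockets are additionally
 * indexed by inode in bsd_socket_buckets for unix_find_socket_byinode().
 */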
227 
228 static void unix_table_double_lock(struct net *net,
229 				   unsigned int hash1, unsigned int hash2)
230 {
231 	if (hash1 == hash2) {
232 		spin_lock(&net->unx.table.locks[hash1]);
233 		return;
234 	}
235 
236 	if (hash1 > hash2)
237 		swap(hash1, hash2);
238 
239 	spin_lock(&net->unx.table.locks[hash1]);
240 	spin_lock(&net->unx.table.locks[hash2]);
241 }
242 
243 static void unix_table_double_unlock(struct net *net,
244 				     unsigned int hash1, unsigned int hash2)
245 {
246 	if (hash1 == hash2) {
247 		spin_unlock(&net->unx.table.locks[hash1]);
248 		return;
249 	}
250 
251 	spin_unlock(&net->unx.table.locks[hash1]);
252 	spin_unlock(&net->unx.table.locks[hash2]);
253 }
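
/* Taking the two bucket locks in ascending index order above imposes a
 * single global order on all lockers, so two tasks needing the same
 * pair of buckets can never deadlock by acquiring them in opposite
 * order (the classic ABBA case).
 */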
254 
255 #ifdef CONFIG_SECURITY_NETWORK
256 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
257 {
258 	UNIXCB(skb).secid = scm->secid;
259 }
260 
261 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
262 {
263 	scm->secid = UNIXCB(skb).secid;
264 }
265 
266 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
267 {
268 	return (scm->secid == UNIXCB(skb).secid);
269 }
270 #else
271 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
272 { }
273 
274 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
275 { }
276 
277 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
278 {
279 	return true;
280 }
281 #endif /* CONFIG_SECURITY_NETWORK */
282 
283 static inline int unix_may_send(struct sock *sk, struct sock *osk)
284 {
285 	return !unix_peer(osk) || unix_peer(osk) == sk;
286 }
287 
288 static inline int unix_recvq_full_lockless(const struct sock *sk)
289 {
290 	return skb_queue_len_lockless(&sk->sk_receive_queue) > READ_ONCE(sk->sk_max_ack_backlog);
291 }
292 
293 struct sock *unix_peer_get(struct sock *s)
294 {
295 	struct sock *peer;
296 
297 	unix_state_lock(s);
298 	peer = unix_peer(s);
299 	if (peer)
300 		sock_hold(peer);
301 	unix_state_unlock(s);
302 	return peer;
303 }
304 EXPORT_SYMBOL_GPL(unix_peer_get);
305 
306 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
307 					     int addr_len)
308 {
309 	struct unix_address *addr;
310 
311 	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
312 	if (!addr)
313 		return NULL;
314 
315 	refcount_set(&addr->refcnt, 1);
316 	addr->len = addr_len;
317 	memcpy(addr->name, sunaddr, addr_len);
318 
319 	return addr;
320 }
321 
322 static inline void unix_release_addr(struct unix_address *addr)
323 {
324 	if (refcount_dec_and_test(&addr->refcnt))
325 		kfree(addr);
326 }
327 
328 /*
329  *	Check unix socket name:
330  *		- should not be zero length.
331  *		- if it does not start with zero, it should be NUL-terminated (FS object)
332  *		- if it starts with zero, it is an abstract name.
333  */
334 
335 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
336 {
337 	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
338 	    addr_len > sizeof(*sunaddr))
339 		return -EINVAL;
340 
341 	if (sunaddr->sun_family != AF_UNIX)
342 		return -EINVAL;
343 
344 	return 0;
345 }
346 
347 static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
348 {
349 	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
350 	short offset = offsetof(struct sockaddr_storage, __data);
351 
352 	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
353 
354 	/* This may look like an off by one error but it is a bit more
355 	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
356 	 * sun_path[108] doesn't as such exist.  However in kernel space
357 	 * we are guaranteed that it is a valid memory location in our
358 	 * kernel address buffer because syscall functions always pass
359 	 * a pointer of struct sockaddr_storage which has a bigger buffer
360 	 * than 108.  Also, we must terminate sun_path for strlen() in
361 	 * getname_kernel().
362 	 */
363 	addr->__data[addr_len - offset] = 0;
364 
365 	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
366 	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
367 	 * know the actual buffer.
368 	 */
369 	return strlen(addr->__data) + offset + 1;
370 }
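
/* Worked example (illustrative): for a bind() to "/tmp/sock" where
 * userspace passed addr_len = offsetof(struct sockaddr_un, sun_path) + 9,
 * i.e. without a trailing NUL, unix_mkname_bsd() stores a 0 at
 * __data[9] and returns offsetof(...) + strlen("/tmp/sock") + 1, the
 * length including the terminator that getname_kernel() relies on.
 */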
371 
372 static void __unix_remove_socket(struct sock *sk)
373 {
374 	sk_del_node_init(sk);
375 }
376 
377 static void __unix_insert_socket(struct net *net, struct sock *sk)
378 {
379 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
380 	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
381 }
382 
383 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
384 				 struct unix_address *addr, unsigned int hash)
385 {
386 	__unix_remove_socket(sk);
387 	smp_store_release(&unix_sk(sk)->addr, addr);
388 
389 	sk->sk_hash = hash;
390 	__unix_insert_socket(net, sk);
391 }
392 
393 static void unix_remove_socket(struct net *net, struct sock *sk)
394 {
395 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
396 	__unix_remove_socket(sk);
397 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
398 }
399 
400 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
401 {
402 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
403 	__unix_insert_socket(net, sk);
404 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
405 }
406 
407 static void unix_insert_bsd_socket(struct sock *sk)
408 {
409 	spin_lock(&bsd_socket_locks[sk->sk_hash]);
410 	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
411 	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
412 }
413 
414 static void unix_remove_bsd_socket(struct sock *sk)
415 {
416 	if (!hlist_unhashed(&sk->sk_bind_node)) {
417 		spin_lock(&bsd_socket_locks[sk->sk_hash]);
418 		__sk_del_bind_node(sk);
419 		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
420 
421 		sk_node_init(&sk->sk_bind_node);
422 	}
423 }
424 
425 static struct sock *__unix_find_socket_byname(struct net *net,
426 					      struct sockaddr_un *sunname,
427 					      int len, unsigned int hash)
428 {
429 	struct sock *s;
430 
431 	sk_for_each(s, &net->unx.table.buckets[hash]) {
432 		struct unix_sock *u = unix_sk(s);
433 
434 		if (u->addr->len == len &&
435 		    !memcmp(u->addr->name, sunname, len))
436 			return s;
437 	}
438 	return NULL;
439 }
440 
441 static inline struct sock *unix_find_socket_byname(struct net *net,
442 						   struct sockaddr_un *sunname,
443 						   int len, unsigned int hash)
444 {
445 	struct sock *s;
446 
447 	spin_lock(&net->unx.table.locks[hash]);
448 	s = __unix_find_socket_byname(net, sunname, len, hash);
449 	if (s)
450 		sock_hold(s);
451 	spin_unlock(&net->unx.table.locks[hash]);
452 	return s;
453 }
454 
455 static struct sock *unix_find_socket_byinode(struct inode *i)
456 {
457 	unsigned int hash = unix_bsd_hash(i);
458 	struct sock *s;
459 
460 	spin_lock(&bsd_socket_locks[hash]);
461 	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
462 		struct dentry *dentry = unix_sk(s)->path.dentry;
463 
464 		if (dentry && d_backing_inode(dentry) == i) {
465 			sock_hold(s);
466 			spin_unlock(&bsd_socket_locks[hash]);
467 			return s;
468 		}
469 	}
470 	spin_unlock(&bsd_socket_locks[hash]);
471 	return NULL;
472 }
473 
474 /* Support code for asymmetrically connected dgram sockets
475  *
476  * If a datagram socket is connected to a socket not itself connected
477  * to the first socket (eg, /dev/log), clients may only enqueue more
478  * messages if the present receive queue of the server socket is not
479  * "too large". This means there's a second writeability condition
480  * poll and sendmsg need to test. The dgram recv code will do a wake
481  * up on the peer_wait wait queue of a socket upon reception of a
482  * datagram which needs to be propagated to sleeping would-be writers
483  * since these might not have sent anything so far. This can't be
484  * accomplished via poll_wait because the lifetime of the server
485  * socket might be less than that of its clients if these break their
486  * association with it or if the server socket is closed while clients
487  * are still connected to it and there's no way to inform "a polling
488  * implementation" that it should let go of a certain wait queue.
489  *
490  * In order to propagate a wake up, a wait_queue_entry_t of the client
491  * socket is enqueued on the peer_wait queue of the server socket
492  * whose wake function does a wake_up on the ordinary client socket
493  * wait queue. This connection is established whenever a write (or
494  * poll for write) hits the flow control condition and is broken when
495  * the association to the server socket is dissolved or after a wake up
496  * was relayed.
497  */
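
/* Sketch of the relay described above, for the usual /dev/log-style
 * setup where many clients connect to one server:
 *
 *   client sendmsg()/poll(): unix_dgram_peer_wake_me() sees the queue
 *       is full and enqueues the client's peer_wake entry on the
 *       server's peer_wait queue.
 *   server recvmsg(): wakes peer_wait, which runs
 *       unix_dgram_peer_wake_relay(); that dequeues the entry and wakes
 *       the client's own wait queue so its poll()/write() can retry.
 */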
498 
499 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
500 				      void *key)
501 {
502 	struct unix_sock *u;
503 	wait_queue_head_t *u_sleep;
504 
505 	u = container_of(q, struct unix_sock, peer_wake);
506 
507 	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
508 			    q);
509 	u->peer_wake.private = NULL;
510 
511 	/* relaying can only happen while the wq still exists */
512 	u_sleep = sk_sleep(&u->sk);
513 	if (u_sleep)
514 		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
515 
516 	return 0;
517 }
518 
519 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
520 {
521 	struct unix_sock *u, *u_other;
522 	int rc;
523 
524 	u = unix_sk(sk);
525 	u_other = unix_sk(other);
526 	rc = 0;
527 	spin_lock(&u_other->peer_wait.lock);
528 
529 	if (!u->peer_wake.private) {
530 		u->peer_wake.private = other;
531 		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
532 
533 		rc = 1;
534 	}
535 
536 	spin_unlock(&u_other->peer_wait.lock);
537 	return rc;
538 }
539 
540 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
541 					    struct sock *other)
542 {
543 	struct unix_sock *u, *u_other;
544 
545 	u = unix_sk(sk);
546 	u_other = unix_sk(other);
547 	spin_lock(&u_other->peer_wait.lock);
548 
549 	if (u->peer_wake.private == other) {
550 		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
551 		u->peer_wake.private = NULL;
552 	}
553 
554 	spin_unlock(&u_other->peer_wait.lock);
555 }
556 
557 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
558 						   struct sock *other)
559 {
560 	unix_dgram_peer_wake_disconnect(sk, other);
561 	wake_up_interruptible_poll(sk_sleep(sk),
562 				   EPOLLOUT |
563 				   EPOLLWRNORM |
564 				   EPOLLWRBAND);
565 }
566 
567 /* preconditions:
568  *	- unix_peer(sk) == other
569  *	- association is stable
570  */
571 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
572 {
573 	int connected;
574 
575 	connected = unix_dgram_peer_wake_connect(sk, other);
576 
577 	/* If other is SOCK_DEAD, we want to make sure we signal
578 	 * POLLOUT, such that a subsequent write() can get a
579 	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
580 	 * to other and it's full, we will hang waiting for POLLOUT.
581 	 */
582 	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
583 		return 1;
584 
585 	if (connected)
586 		unix_dgram_peer_wake_disconnect(sk, other);
587 
588 	return 0;
589 }
590 
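/* A socket counts as writable while less than a quarter of its send
 * buffer is consumed by in-flight skbs, i.e. wmem_alloc * 4 <= sndbuf.
 */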
591 static int unix_writable(const struct sock *sk, unsigned char state)
592 {
593 	return state != TCP_LISTEN &&
594 		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
595 }
596 
597 static void unix_write_space(struct sock *sk)
598 {
599 	struct socket_wq *wq;
600 
601 	rcu_read_lock();
602 	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
603 		wq = rcu_dereference(sk->sk_wq);
604 		if (skwq_has_sleeper(wq))
605 			wake_up_interruptible_sync_poll(&wq->wait,
606 				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
607 		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
608 	}
609 	rcu_read_unlock();
610 }
611 
612 /* When a dgram socket disconnects (or changes its peer), we clear its receive
613  * queue of packets that arrived from the previous peer. First, this allows
614  * flow control based only on wmem_alloc; second, a sk connected to a peer
615  * may receive messages only from that peer. */
616 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
617 {
618 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
619 		skb_queue_purge_reason(&sk->sk_receive_queue,
620 				       SKB_DROP_REASON_UNIX_DISCONNECT);
621 
622 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
623 
624 		/* If one link of a bidirectional dgram pipe is disconnected,
625 		 * we signal an error. Messages are lost. Do not do this
626 		 * when the peer was not connected to us.
627 		 */
628 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
629 			WRITE_ONCE(other->sk_err, ECONNRESET);
630 			sk_error_report(other);
631 		}
632 	}
633 }
634 
635 static void unix_sock_destructor(struct sock *sk)
636 {
637 	struct unix_sock *u = unix_sk(sk);
638 
639 	skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);
640 
641 	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
642 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
643 	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
644 	if (!sock_flag(sk, SOCK_DEAD)) {
645 		pr_info("Attempt to release alive unix socket: %p\n", sk);
646 		return;
647 	}
648 
649 	if (sk->sk_peer_pid)
650 		pidfs_put_pid(sk->sk_peer_pid);
651 
652 	if (u->addr)
653 		unix_release_addr(u->addr);
654 
655 	atomic_long_dec(&unix_nr_socks);
656 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
657 #ifdef UNIX_REFCNT_DEBUG
658 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
659 		atomic_long_read(&unix_nr_socks));
660 #endif
661 }
662 
663 static void unix_release_sock(struct sock *sk, int embrion)
664 {
665 	struct unix_sock *u = unix_sk(sk);
666 	struct sock *skpair;
667 	struct sk_buff *skb;
668 	struct path path;
669 	int state;
670 
671 	unix_remove_socket(sock_net(sk), sk);
672 	unix_remove_bsd_socket(sk);
673 
674 	/* Clear state */
675 	unix_state_lock(sk);
676 	sock_orphan(sk);
677 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
678 	path	     = u->path;
679 	u->path.dentry = NULL;
680 	u->path.mnt = NULL;
681 	state = sk->sk_state;
682 	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
683 
684 	skpair = unix_peer(sk);
685 	unix_peer(sk) = NULL;
686 
687 	unix_state_unlock(sk);
688 
689 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
690 	u->oob_skb = NULL;
691 #endif
692 
693 	wake_up_interruptible_all(&u->peer_wait);
694 
695 	if (skpair != NULL) {
696 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
697 			unix_state_lock(skpair);
698 			/* No more writes */
699 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
700 			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
701 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
702 			unix_state_unlock(skpair);
703 			skpair->sk_state_change(skpair);
704 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
705 		}
706 
707 		unix_dgram_peer_wake_disconnect(sk, skpair);
708 		sock_put(skpair); /* It may now die */
709 	}
710 
711 	/* Try to flush out this socket. Throw out buffers at least */
712 
713 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
714 		if (state == TCP_LISTEN)
715 			unix_release_sock(skb->sk, 1);
716 
717 		/* passed fds are erased in the kfree_skb hook */
718 		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
719 	}
720 
721 	if (path.dentry)
722 		path_put(&path);
723 
724 	sock_put(sk);
725 
726 	/* ---- Socket is dead now and most probably destroyed ---- */
727 
728 	/*
729 	 * Fixme: BSD difference: In BSD all sockets connected to us get
730 	 *	  ECONNRESET and we die on the spot. In Linux we behave
731 	 *	  like files and pipes do and wait for the last
732 	 *	  dereference.
733 	 *
734 	 * Can't we simply set sock->err?
735 	 *
736 	 *	  What the above comment does talk about? --ANK(980817)
737 	 */
738 
739 	if (READ_ONCE(unix_tot_inflight))
740 		unix_gc();		/* Garbage collect fds */
741 }
742 
743 struct unix_peercred {
744 	struct pid *peer_pid;
745 	const struct cred *peer_cred;
746 };
747 
748 static inline int prepare_peercred(struct unix_peercred *peercred)
749 {
750 	struct pid *pid;
751 	int err;
752 
753 	pid = task_tgid(current);
754 	err = pidfs_register_pid(pid);
755 	if (likely(!err)) {
756 		peercred->peer_pid = get_pid(pid);
757 		peercred->peer_cred = get_current_cred();
758 	}
759 	return err;
760 }
761 
762 static void drop_peercred(struct unix_peercred *peercred)
763 {
764 	const struct cred *cred = NULL;
765 	struct pid *pid = NULL;
766 
767 	might_sleep();
768 
769 	swap(peercred->peer_pid, pid);
770 	swap(peercred->peer_cred, cred);
771 
772 	pidfs_put_pid(pid);
773 	put_pid(pid);
774 	put_cred(cred);
775 }
776 
777 static inline void init_peercred(struct sock *sk,
778 				 const struct unix_peercred *peercred)
779 {
780 	sk->sk_peer_pid = peercred->peer_pid;
781 	sk->sk_peer_cred = peercred->peer_cred;
782 }
783 
784 static void update_peercred(struct sock *sk, struct unix_peercred *peercred)
785 {
786 	const struct cred *old_cred;
787 	struct pid *old_pid;
788 
789 	spin_lock(&sk->sk_peer_lock);
790 	old_pid = sk->sk_peer_pid;
791 	old_cred = sk->sk_peer_cred;
792 	init_peercred(sk, peercred);
793 	spin_unlock(&sk->sk_peer_lock);
794 
795 	peercred->peer_pid = old_pid;
796 	peercred->peer_cred = old_cred;
797 }
798 
799 static void copy_peercred(struct sock *sk, struct sock *peersk)
800 {
801 	lockdep_assert_held(&unix_sk(peersk)->lock);
802 
803 	spin_lock(&sk->sk_peer_lock);
804 	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
805 	pidfs_get_pid(sk->sk_peer_pid);
806 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
807 	spin_unlock(&sk->sk_peer_lock);
808 }
809 
810 static bool unix_may_passcred(const struct sock *sk)
811 {
812 	return sk->sk_scm_credentials || sk->sk_scm_pidfd;
813 }
814 
815 static int unix_listen(struct socket *sock, int backlog)
816 {
817 	int err;
818 	struct sock *sk = sock->sk;
819 	struct unix_sock *u = unix_sk(sk);
820 	struct unix_peercred peercred = {};
821 
822 	err = -EOPNOTSUPP;
823 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
824 		goto out;	/* Only stream/seqpacket sockets accept */
825 	err = -EINVAL;
826 	if (!READ_ONCE(u->addr))
827 		goto out;	/* No listens on an unbound socket */
828 	err = prepare_peercred(&peercred);
829 	if (err)
830 		goto out;
831 	unix_state_lock(sk);
832 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
833 		goto out_unlock;
834 	if (backlog > sk->sk_max_ack_backlog)
835 		wake_up_interruptible_all(&u->peer_wait);
836 	sk->sk_max_ack_backlog	= backlog;
837 	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
838 
839 	/* set credentials so connect can copy them */
840 	update_peercred(sk, &peercred);
841 	err = 0;
842 
843 out_unlock:
844 	unix_state_unlock(sk);
845 	drop_peercred(&peercred);
846 out:
847 	return err;
848 }
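
/* Note: unix_listen() wakes peer_wait when the backlog grows because a
 * connect() sleeping in unix_wait_for_peer() (see below) on a full
 * accept queue may now be able to proceed.
 */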
849 
850 static int unix_release(struct socket *);
851 static int unix_bind(struct socket *, struct sockaddr *, int);
852 static int unix_stream_connect(struct socket *, struct sockaddr *,
853 			       int addr_len, int flags);
854 static int unix_socketpair(struct socket *, struct socket *);
855 static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
856 static int unix_getname(struct socket *, struct sockaddr *, int);
857 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
858 static __poll_t unix_dgram_poll(struct file *, struct socket *,
859 				    poll_table *);
860 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
861 #ifdef CONFIG_COMPAT
862 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
863 #endif
864 static int unix_shutdown(struct socket *, int);
865 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
866 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
867 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
868 				       struct pipe_inode_info *, size_t size,
869 				       unsigned int flags);
870 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
871 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
872 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
873 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
874 static int unix_dgram_connect(struct socket *, struct sockaddr *,
875 			      int, int);
876 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
877 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
878 				  int);
879 
880 #ifdef CONFIG_PROC_FS
881 static int unix_count_nr_fds(struct sock *sk)
882 {
883 	struct sk_buff *skb;
884 	struct unix_sock *u;
885 	int nr_fds = 0;
886 
887 	spin_lock(&sk->sk_receive_queue.lock);
888 	skb = skb_peek(&sk->sk_receive_queue);
889 	while (skb) {
890 		u = unix_sk(skb->sk);
891 		nr_fds += atomic_read(&u->scm_stat.nr_fds);
892 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
893 	}
894 	spin_unlock(&sk->sk_receive_queue.lock);
895 
896 	return nr_fds;
897 }
898 
899 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
900 {
901 	struct sock *sk = sock->sk;
902 	unsigned char s_state;
903 	struct unix_sock *u;
904 	int nr_fds = 0;
905 
906 	if (sk) {
907 		s_state = READ_ONCE(sk->sk_state);
908 		u = unix_sk(sk);
909 
910 		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
911 		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
912 		 * SOCK_DGRAM is ordinary. So, no lock is needed.
913 		 */
914 		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
915 			nr_fds = atomic_read(&u->scm_stat.nr_fds);
916 		else if (s_state == TCP_LISTEN)
917 			nr_fds = unix_count_nr_fds(sk);
918 
919 		seq_printf(m, "scm_fds: %u\n", nr_fds);
920 	}
921 }
922 #else
923 #define unix_show_fdinfo NULL
924 #endif
925 
926 static const struct proto_ops unix_stream_ops = {
927 	.family =	PF_UNIX,
928 	.owner =	THIS_MODULE,
929 	.release =	unix_release,
930 	.bind =		unix_bind,
931 	.connect =	unix_stream_connect,
932 	.socketpair =	unix_socketpair,
933 	.accept =	unix_accept,
934 	.getname =	unix_getname,
935 	.poll =		unix_poll,
936 	.ioctl =	unix_ioctl,
937 #ifdef CONFIG_COMPAT
938 	.compat_ioctl =	unix_compat_ioctl,
939 #endif
940 	.listen =	unix_listen,
941 	.shutdown =	unix_shutdown,
942 	.sendmsg =	unix_stream_sendmsg,
943 	.recvmsg =	unix_stream_recvmsg,
944 	.read_skb =	unix_stream_read_skb,
945 	.mmap =		sock_no_mmap,
946 	.splice_read =	unix_stream_splice_read,
947 	.set_peek_off =	sk_set_peek_off,
948 	.show_fdinfo =	unix_show_fdinfo,
949 };
950 
951 static const struct proto_ops unix_dgram_ops = {
952 	.family =	PF_UNIX,
953 	.owner =	THIS_MODULE,
954 	.release =	unix_release,
955 	.bind =		unix_bind,
956 	.connect =	unix_dgram_connect,
957 	.socketpair =	unix_socketpair,
958 	.accept =	sock_no_accept,
959 	.getname =	unix_getname,
960 	.poll =		unix_dgram_poll,
961 	.ioctl =	unix_ioctl,
962 #ifdef CONFIG_COMPAT
963 	.compat_ioctl =	unix_compat_ioctl,
964 #endif
965 	.listen =	sock_no_listen,
966 	.shutdown =	unix_shutdown,
967 	.sendmsg =	unix_dgram_sendmsg,
968 	.read_skb =	unix_read_skb,
969 	.recvmsg =	unix_dgram_recvmsg,
970 	.mmap =		sock_no_mmap,
971 	.set_peek_off =	sk_set_peek_off,
972 	.show_fdinfo =	unix_show_fdinfo,
973 };
974 
975 static const struct proto_ops unix_seqpacket_ops = {
976 	.family =	PF_UNIX,
977 	.owner =	THIS_MODULE,
978 	.release =	unix_release,
979 	.bind =		unix_bind,
980 	.connect =	unix_stream_connect,
981 	.socketpair =	unix_socketpair,
982 	.accept =	unix_accept,
983 	.getname =	unix_getname,
984 	.poll =		unix_dgram_poll,
985 	.ioctl =	unix_ioctl,
986 #ifdef CONFIG_COMPAT
987 	.compat_ioctl =	unix_compat_ioctl,
988 #endif
989 	.listen =	unix_listen,
990 	.shutdown =	unix_shutdown,
991 	.sendmsg =	unix_seqpacket_sendmsg,
992 	.recvmsg =	unix_seqpacket_recvmsg,
993 	.mmap =		sock_no_mmap,
994 	.set_peek_off =	sk_set_peek_off,
995 	.show_fdinfo =	unix_show_fdinfo,
996 };
997 
998 static void unix_close(struct sock *sk, long timeout)
999 {
1000 	/* Nothing to do here, unix socket does not need a ->close().
1001 	 * This is merely for sockmap.
1002 	 */
1003 }
1004 
1005 static bool unix_bpf_bypass_getsockopt(int level, int optname)
1006 {
1007 	if (level == SOL_SOCKET) {
1008 		switch (optname) {
1009 		case SO_PEERPIDFD:
1010 			return true;
1011 		default:
1012 			return false;
1013 		}
1014 	}
1015 
1016 	return false;
1017 }
1018 
1019 struct proto unix_dgram_proto = {
1020 	.name			= "UNIX",
1021 	.owner			= THIS_MODULE,
1022 	.obj_size		= sizeof(struct unix_sock),
1023 	.close			= unix_close,
1024 	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
1025 #ifdef CONFIG_BPF_SYSCALL
1026 	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
1027 #endif
1028 };
1029 
1030 struct proto unix_stream_proto = {
1031 	.name			= "UNIX-STREAM",
1032 	.owner			= THIS_MODULE,
1033 	.obj_size		= sizeof(struct unix_sock),
1034 	.close			= unix_close,
1035 	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
1036 #ifdef CONFIG_BPF_SYSCALL
1037 	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
1038 #endif
1039 };
1040 
1041 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
1042 {
1043 	struct unix_sock *u;
1044 	struct sock *sk;
1045 	int err;
1046 
1047 	atomic_long_inc(&unix_nr_socks);
1048 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
1049 		err = -ENFILE;
1050 		goto err;
1051 	}
1052 
1053 	if (type == SOCK_STREAM)
1054 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
1055 	else /* dgram and seqpacket */
1056 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
1057 
1058 	if (!sk) {
1059 		err = -ENOMEM;
1060 		goto err;
1061 	}
1062 
1063 	sock_init_data(sock, sk);
1064 
1065 	sk->sk_scm_rights	= 1;
1066 	sk->sk_hash		= unix_unbound_hash(sk);
1067 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
1068 	sk->sk_write_space	= unix_write_space;
1069 	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
1070 	sk->sk_destruct		= unix_sock_destructor;
1071 	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
1072 
1073 	u = unix_sk(sk);
1074 	u->listener = NULL;
1075 	u->vertex = NULL;
1076 	u->path.dentry = NULL;
1077 	u->path.mnt = NULL;
1078 	spin_lock_init(&u->lock);
1079 	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
1080 	mutex_init(&u->iolock); /* single task reading lock */
1081 	mutex_init(&u->bindlock); /* single task binding lock */
1082 	init_waitqueue_head(&u->peer_wait);
1083 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
1084 	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
1085 	unix_insert_unbound_socket(net, sk);
1086 
1087 	sock_prot_inuse_add(net, sk->sk_prot, 1);
1088 
1089 	return sk;
1090 
1091 err:
1092 	atomic_long_dec(&unix_nr_socks);
1093 	return ERR_PTR(err);
1094 }
1095 
1096 static int unix_create(struct net *net, struct socket *sock, int protocol,
1097 		       int kern)
1098 {
1099 	struct sock *sk;
1100 
1101 	if (protocol && protocol != PF_UNIX)
1102 		return -EPROTONOSUPPORT;
1103 
1104 	sock->state = SS_UNCONNECTED;
1105 
1106 	switch (sock->type) {
1107 	case SOCK_STREAM:
1108 		sock->ops = &unix_stream_ops;
1109 		break;
1110 		/*
1111 		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
1112 		 *	nothing uses it.
1113 		 */
1114 	case SOCK_RAW:
1115 		sock->type = SOCK_DGRAM;
1116 		fallthrough;
1117 	case SOCK_DGRAM:
1118 		sock->ops = &unix_dgram_ops;
1119 		break;
1120 	case SOCK_SEQPACKET:
1121 		sock->ops = &unix_seqpacket_ops;
1122 		break;
1123 	default:
1124 		return -ESOCKTNOSUPPORT;
1125 	}
1126 
1127 	sk = unix_create1(net, sock, kern, sock->type);
1128 	if (IS_ERR(sk))
1129 		return PTR_ERR(sk);
1130 
1131 	return 0;
1132 }
1133 
1134 static int unix_release(struct socket *sock)
1135 {
1136 	struct sock *sk = sock->sk;
1137 
1138 	if (!sk)
1139 		return 0;
1140 
1141 	sk->sk_prot->close(sk, 0);
1142 	unix_release_sock(sk, 0);
1143 	sock->sk = NULL;
1144 
1145 	return 0;
1146 }
1147 
1148 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1149 				  int type, int flags)
1150 {
1151 	struct inode *inode;
1152 	struct path path;
1153 	struct sock *sk;
1154 	int err;
1155 
1156 	unix_mkname_bsd(sunaddr, addr_len);
1157 
1158 	if (flags & SOCK_COREDUMP) {
1159 		const struct cred *cred;
1160 		struct cred *kcred;
1161 		struct path root;
1162 
1163 		kcred = prepare_kernel_cred(&init_task);
1164 		if (!kcred) {
1165 			err = -ENOMEM;
1166 			goto fail;
1167 		}
1168 
1169 		task_lock(&init_task);
1170 		get_fs_root(init_task.fs, &root);
1171 		task_unlock(&init_task);
1172 
1173 		cred = override_creds(kcred);
1174 		err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path,
1175 				      LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS |
1176 				      LOOKUP_NO_MAGICLINKS, &path);
1177 		put_cred(revert_creds(cred));
1178 		path_put(&root);
1179 		if (err)
1180 			goto fail;
1181 	} else {
1182 		err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1183 		if (err)
1184 			goto fail;
1185 
1186 		err = path_permission(&path, MAY_WRITE);
1187 		if (err)
1188 			goto path_put;
1189 	}
1190 
1191 	err = -ECONNREFUSED;
1192 	inode = d_backing_inode(path.dentry);
1193 	if (!S_ISSOCK(inode->i_mode))
1194 		goto path_put;
1195 
1196 	sk = unix_find_socket_byinode(inode);
1197 	if (!sk)
1198 		goto path_put;
1199 
1200 	err = -EPROTOTYPE;
1201 	if (sk->sk_type == type)
1202 		touch_atime(&path);
1203 	else
1204 		goto sock_put;
1205 
1206 	path_put(&path);
1207 
1208 	return sk;
1209 
1210 sock_put:
1211 	sock_put(sk);
1212 path_put:
1213 	path_put(&path);
1214 fail:
1215 	return ERR_PTR(err);
1216 }
1217 
1218 static struct sock *unix_find_abstract(struct net *net,
1219 				       struct sockaddr_un *sunaddr,
1220 				       int addr_len, int type)
1221 {
1222 	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1223 	struct dentry *dentry;
1224 	struct sock *sk;
1225 
1226 	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1227 	if (!sk)
1228 		return ERR_PTR(-ECONNREFUSED);
1229 
1230 	dentry = unix_sk(sk)->path.dentry;
1231 	if (dentry)
1232 		touch_atime(&unix_sk(sk)->path);
1233 
1234 	return sk;
1235 }
1236 
1237 static struct sock *unix_find_other(struct net *net,
1238 				    struct sockaddr_un *sunaddr,
1239 				    int addr_len, int type, int flags)
1240 {
1241 	struct sock *sk;
1242 
1243 	if (sunaddr->sun_path[0])
1244 		sk = unix_find_bsd(sunaddr, addr_len, type, flags);
1245 	else
1246 		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1247 
1248 	return sk;
1249 }
1250 
1251 static int unix_autobind(struct sock *sk)
1252 {
1253 	struct unix_sock *u = unix_sk(sk);
1254 	unsigned int new_hash, old_hash;
1255 	struct net *net = sock_net(sk);
1256 	struct unix_address *addr;
1257 	u32 lastnum, ordernum;
1258 	int err;
1259 
1260 	err = mutex_lock_interruptible(&u->bindlock);
1261 	if (err)
1262 		return err;
1263 
1264 	if (u->addr)
1265 		goto out;
1266 
1267 	err = -ENOMEM;
1268 	addr = kzalloc(sizeof(*addr) +
1269 		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1270 	if (!addr)
1271 		goto out;
1272 
1273 	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1274 	addr->name->sun_family = AF_UNIX;
1275 	refcount_set(&addr->refcnt, 1);
1276 
1277 	old_hash = sk->sk_hash;
1278 	ordernum = get_random_u32();
1279 	lastnum = ordernum & 0xFFFFF;
1280 retry:
1281 	ordernum = (ordernum + 1) & 0xFFFFF;
1282 	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1283 
1284 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1285 	unix_table_double_lock(net, old_hash, new_hash);
1286 
1287 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1288 		unix_table_double_unlock(net, old_hash, new_hash);
1289 
1290 		/* __unix_find_socket_byname() may take a long time if many names
1291 		 * are already in use.
1292 		 */
1293 		cond_resched();
1294 
1295 		if (ordernum == lastnum) {
1296 			/* Give up if all names seem to be in use. */
1297 			err = -ENOSPC;
1298 			unix_release_addr(addr);
1299 			goto out;
1300 		}
1301 
1302 		goto retry;
1303 	}
1304 
1305 	__unix_set_addr_hash(net, sk, addr, new_hash);
1306 	unix_table_double_unlock(net, old_hash, new_hash);
1307 	err = 0;
1308 
1309 out:	mutex_unlock(&u->bindlock);
1310 	return err;
1311 }
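
/* Example (illustrative): autobind assigns an abstract name made of a
 * 0 byte followed by five lowercase hex digits, so addr->len is
 * offsetof(struct sockaddr_un, sun_path) + 6.  Userspace triggers it
 * explicitly with bind(fd, &sa, sizeof(sa_family_t)) or implicitly on
 * the first connect()/sendmsg() of an unbound socket that asserted
 * SO_PASSCRED or SO_PASSPIDFD.
 */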
1312 
1313 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1314 			 int addr_len)
1315 {
1316 	umode_t mode = S_IFSOCK |
1317 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1318 	struct unix_sock *u = unix_sk(sk);
1319 	unsigned int new_hash, old_hash;
1320 	struct net *net = sock_net(sk);
1321 	struct mnt_idmap *idmap;
1322 	struct unix_address *addr;
1323 	struct dentry *dentry;
1324 	struct path parent;
1325 	int err;
1326 
1327 	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1328 	addr = unix_create_addr(sunaddr, addr_len);
1329 	if (!addr)
1330 		return -ENOMEM;
1331 
1332 	/*
1333 	 * Get the parent directory, calculate the hash for last
1334 	 * component.
1335 	 */
1336 	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1337 	if (IS_ERR(dentry)) {
1338 		err = PTR_ERR(dentry);
1339 		goto out;
1340 	}
1341 
1342 	/*
1343 	 * All right, let's create it.
1344 	 */
1345 	idmap = mnt_idmap(parent.mnt);
1346 	err = security_path_mknod(&parent, dentry, mode, 0);
1347 	if (!err)
1348 		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1349 	if (err)
1350 		goto out_path;
1351 	err = mutex_lock_interruptible(&u->bindlock);
1352 	if (err)
1353 		goto out_unlink;
1354 	if (u->addr)
1355 		goto out_unlock;
1356 
1357 	old_hash = sk->sk_hash;
1358 	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1359 	unix_table_double_lock(net, old_hash, new_hash);
1360 	u->path.mnt = mntget(parent.mnt);
1361 	u->path.dentry = dget(dentry);
1362 	__unix_set_addr_hash(net, sk, addr, new_hash);
1363 	unix_table_double_unlock(net, old_hash, new_hash);
1364 	unix_insert_bsd_socket(sk);
1365 	mutex_unlock(&u->bindlock);
1366 	done_path_create(&parent, dentry);
1367 	return 0;
1368 
1369 out_unlock:
1370 	mutex_unlock(&u->bindlock);
1371 	err = -EINVAL;
1372 out_unlink:
1373 	/* failed after successful mknod?  unlink what we'd created... */
1374 	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1375 out_path:
1376 	done_path_create(&parent, dentry);
1377 out:
1378 	unix_release_addr(addr);
1379 	return err == -EEXIST ? -EADDRINUSE : err;
1380 }
1381 
1382 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1383 			      int addr_len)
1384 {
1385 	struct unix_sock *u = unix_sk(sk);
1386 	unsigned int new_hash, old_hash;
1387 	struct net *net = sock_net(sk);
1388 	struct unix_address *addr;
1389 	int err;
1390 
1391 	addr = unix_create_addr(sunaddr, addr_len);
1392 	if (!addr)
1393 		return -ENOMEM;
1394 
1395 	err = mutex_lock_interruptible(&u->bindlock);
1396 	if (err)
1397 		goto out;
1398 
1399 	if (u->addr) {
1400 		err = -EINVAL;
1401 		goto out_mutex;
1402 	}
1403 
1404 	old_hash = sk->sk_hash;
1405 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1406 	unix_table_double_lock(net, old_hash, new_hash);
1407 
1408 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1409 		goto out_spin;
1410 
1411 	__unix_set_addr_hash(net, sk, addr, new_hash);
1412 	unix_table_double_unlock(net, old_hash, new_hash);
1413 	mutex_unlock(&u->bindlock);
1414 	return 0;
1415 
1416 out_spin:
1417 	unix_table_double_unlock(net, old_hash, new_hash);
1418 	err = -EADDRINUSE;
1419 out_mutex:
1420 	mutex_unlock(&u->bindlock);
1421 out:
1422 	unix_release_addr(addr);
1423 	return err;
1424 }
1425 
1426 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1427 {
1428 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1429 	struct sock *sk = sock->sk;
1430 	int err;
1431 
1432 	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1433 	    sunaddr->sun_family == AF_UNIX)
1434 		return unix_autobind(sk);
1435 
1436 	err = unix_validate_addr(sunaddr, addr_len);
1437 	if (err)
1438 		return err;
1439 
1440 	if (sunaddr->sun_path[0])
1441 		err = unix_bind_bsd(sk, sunaddr, addr_len);
1442 	else
1443 		err = unix_bind_abstract(sk, sunaddr, addr_len);
1444 
1445 	return err;
1446 }
1447 
1448 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1449 {
1450 	if (unlikely(sk1 == sk2) || !sk2) {
1451 		unix_state_lock(sk1);
1452 		return;
1453 	}
1454 
1455 	if (sk1 > sk2)
1456 		swap(sk1, sk2);
1457 
1458 	unix_state_lock(sk1);
1459 	unix_state_lock(sk2);
1460 }
1461 
1462 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1463 {
1464 	if (unlikely(sk1 == sk2) || !sk2) {
1465 		unix_state_unlock(sk1);
1466 		return;
1467 	}
1468 	unix_state_unlock(sk1);
1469 	unix_state_unlock(sk2);
1470 }
1471 
1472 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1473 			      int alen, int flags)
1474 {
1475 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1476 	struct sock *sk = sock->sk;
1477 	struct sock *other;
1478 	int err;
1479 
1480 	err = -EINVAL;
1481 	if (alen < offsetofend(struct sockaddr, sa_family))
1482 		goto out;
1483 
1484 	if (addr->sa_family != AF_UNSPEC) {
1485 		err = unix_validate_addr(sunaddr, alen);
1486 		if (err)
1487 			goto out;
1488 
1489 		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1490 		if (err)
1491 			goto out;
1492 
1493 		if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) {
1494 			err = unix_autobind(sk);
1495 			if (err)
1496 				goto out;
1497 		}
1498 
1499 restart:
1500 		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0);
1501 		if (IS_ERR(other)) {
1502 			err = PTR_ERR(other);
1503 			goto out;
1504 		}
1505 
1506 		unix_state_double_lock(sk, other);
1507 
1508 		/* Apparently VFS overslept socket death. Retry. */
1509 		if (sock_flag(other, SOCK_DEAD)) {
1510 			unix_state_double_unlock(sk, other);
1511 			sock_put(other);
1512 			goto restart;
1513 		}
1514 
1515 		err = -EPERM;
1516 		if (!unix_may_send(sk, other))
1517 			goto out_unlock;
1518 
1519 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1520 		if (err)
1521 			goto out_unlock;
1522 
1523 		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1524 		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
1525 	} else {
1526 		/*
1527 		 *	1003.1g breaking connected state with AF_UNSPEC
1528 		 */
1529 		other = NULL;
1530 		unix_state_double_lock(sk, other);
1531 	}
1532 
1533 	/*
1534 	 * If it was connected, reconnect.
1535 	 */
1536 	if (unix_peer(sk)) {
1537 		struct sock *old_peer = unix_peer(sk);
1538 
1539 		unix_peer(sk) = other;
1540 		if (!other)
1541 			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
1542 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1543 
1544 		unix_state_double_unlock(sk, other);
1545 
1546 		if (other != old_peer) {
1547 			unix_dgram_disconnected(sk, old_peer);
1548 
1549 			unix_state_lock(old_peer);
1550 			if (!unix_peer(old_peer))
1551 				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
1552 			unix_state_unlock(old_peer);
1553 		}
1554 
1555 		sock_put(old_peer);
1556 	} else {
1557 		unix_peer(sk) = other;
1558 		unix_state_double_unlock(sk, other);
1559 	}
1560 
1561 	return 0;
1562 
1563 out_unlock:
1564 	unix_state_double_unlock(sk, other);
1565 	sock_put(other);
1566 out:
1567 	return err;
1568 }
1569 
1570 static long unix_wait_for_peer(struct sock *other, long timeo)
1571 {
1572 	struct unix_sock *u = unix_sk(other);
1573 	int sched;
1574 	DEFINE_WAIT(wait);
1575 
1576 	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1577 
1578 	sched = !sock_flag(other, SOCK_DEAD) &&
1579 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1580 		unix_recvq_full_lockless(other);
1581 
1582 	unix_state_unlock(other);
1583 
1584 	if (sched)
1585 		timeo = schedule_timeout(timeo);
1586 
1587 	finish_wait(&u->peer_wait, &wait);
1588 	return timeo;
1589 }
1590 
1591 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1592 			       int addr_len, int flags)
1593 {
1594 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1595 	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1596 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1597 	struct unix_peercred peercred = {};
1598 	struct net *net = sock_net(sk);
1599 	struct sk_buff *skb = NULL;
1600 	unsigned char state;
1601 	long timeo;
1602 	int err;
1603 
1604 	err = unix_validate_addr(sunaddr, addr_len);
1605 	if (err)
1606 		goto out;
1607 
1608 	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1609 	if (err)
1610 		goto out;
1611 
1612 	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
1613 		err = unix_autobind(sk);
1614 		if (err)
1615 			goto out;
1616 	}
1617 
1618 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1619 
1620 	/* First of all allocate resources.
1621 	 * If we do it after the state is locked,
1622 	 * we will have to recheck everything again in any case.
1623 	 */
1624 
1625 	/* create new sock for complete connection */
1626 	newsk = unix_create1(net, NULL, 0, sock->type);
1627 	if (IS_ERR(newsk)) {
1628 		err = PTR_ERR(newsk);
1629 		goto out;
1630 	}
1631 
1632 	err = prepare_peercred(&peercred);
1633 	if (err)
1634 		goto out;
1635 
1636 	/* Allocate skb for sending to listening sock */
1637 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1638 	if (!skb) {
1639 		err = -ENOMEM;
1640 		goto out_free_sk;
1641 	}
1642 
1643 restart:
1644 	/*  Find listening sock. */
1645 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags);
1646 	if (IS_ERR(other)) {
1647 		err = PTR_ERR(other);
1648 		goto out_free_skb;
1649 	}
1650 
1651 	unix_state_lock(other);
1652 
1653 	/* Apparently VFS overslept socket death. Retry. */
1654 	if (sock_flag(other, SOCK_DEAD)) {
1655 		unix_state_unlock(other);
1656 		sock_put(other);
1657 		goto restart;
1658 	}
1659 
1660 	if (other->sk_state != TCP_LISTEN ||
1661 	    other->sk_shutdown & RCV_SHUTDOWN) {
1662 		err = -ECONNREFUSED;
1663 		goto out_unlock;
1664 	}
1665 
1666 	if (unix_recvq_full_lockless(other)) {
1667 		if (!timeo) {
1668 			err = -EAGAIN;
1669 			goto out_unlock;
1670 		}
1671 
1672 		timeo = unix_wait_for_peer(other, timeo);
1673 		sock_put(other);
1674 
1675 		err = sock_intr_errno(timeo);
1676 		if (signal_pending(current))
1677 			goto out_free_skb;
1678 
1679 		goto restart;
1680 	}
1681 
1682 	/* Self-connect and simultaneous connect are eliminated
1683 	 * by rejecting a TCP_LISTEN socket to avoid deadlock.
1684 	 */
1685 	state = READ_ONCE(sk->sk_state);
1686 	if (unlikely(state != TCP_CLOSE)) {
1687 		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1688 		goto out_unlock;
1689 	}
1690 
1691 	unix_state_lock(sk);
1692 
1693 	if (unlikely(sk->sk_state != TCP_CLOSE)) {
1694 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1695 		unix_state_unlock(sk);
1696 		goto out_unlock;
1697 	}
1698 
1699 	err = security_unix_stream_connect(sk, other, newsk);
1700 	if (err) {
1701 		unix_state_unlock(sk);
1702 		goto out_unlock;
1703 	}
1704 
1705 	/* The way is open! Quickly set all the necessary fields... */
1706 
1707 	sock_hold(sk);
1708 	unix_peer(newsk) = sk;
1709 	newsk->sk_state = TCP_ESTABLISHED;
1710 	newsk->sk_type = sk->sk_type;
1711 	newsk->sk_scm_recv_flags = other->sk_scm_recv_flags;
1712 	init_peercred(newsk, &peercred);
1713 
1714 	newu = unix_sk(newsk);
1715 	newu->listener = other;
1716 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1717 	otheru = unix_sk(other);
1718 
1719 	/* copy address information from listening to new sock
1720 	 *
1721 	 * The contents of *(otheru->addr) and otheru->path
1722 	 * are seen fully set up here, since we have found
1723 	 * otheru in hash under its lock.  Insertion into the
1724 	 * hash chain we'd found it in had been done in an
1725 	 * earlier critical area protected by the chain's lock,
1726 	 * the same one where we'd set *(otheru->addr) contents,
1727 	 * as well as otheru->path and otheru->addr itself.
1728 	 *
1729 	 * Using smp_store_release() here to set newu->addr
1730 	 * is enough to make those stores, as well as stores
1731 	 * to newu->path visible to anyone who gets newu->addr
1732 	 * by smp_load_acquire().  IOW, the same guarantees
1733 	 * as for unix_sock instances bound in unix_bind() or
1734 	 * in unix_autobind().
1735 	 */
1736 	if (otheru->path.dentry) {
1737 		path_get(&otheru->path);
1738 		newu->path = otheru->path;
1739 	}
1740 	refcount_inc(&otheru->addr->refcnt);
1741 	smp_store_release(&newu->addr, otheru->addr);
1742 
1743 	/* Set credentials */
1744 	copy_peercred(sk, other);
1745 
1746 	sock->state	= SS_CONNECTED;
1747 	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1748 	sock_hold(newsk);
1749 
1750 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1751 	unix_peer(sk)	= newsk;
1752 
1753 	unix_state_unlock(sk);
1754 
1755 	/* take ten and send info to listening sock */
1756 	spin_lock(&other->sk_receive_queue.lock);
1757 	__skb_queue_tail(&other->sk_receive_queue, skb);
1758 	spin_unlock(&other->sk_receive_queue.lock);
1759 	unix_state_unlock(other);
1760 	other->sk_data_ready(other);
1761 	sock_put(other);
1762 	return 0;
1763 
1764 out_unlock:
1765 	unix_state_unlock(other);
1766 	sock_put(other);
1767 out_free_skb:
1768 	consume_skb(skb);
1769 out_free_sk:
1770 	unix_release_sock(newsk, 0);
1771 out:
1772 	drop_peercred(&peercred);
1773 	return err;
1774 }
1775 
1776 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1777 {
1778 	struct unix_peercred ska_peercred = {}, skb_peercred = {};
1779 	struct sock *ska = socka->sk, *skb = sockb->sk;
1780 	int err;
1781 
1782 	err = prepare_peercred(&ska_peercred);
1783 	if (err)
1784 		return err;
1785 
1786 	err = prepare_peercred(&skb_peercred);
1787 	if (err) {
1788 		drop_peercred(&ska_peercred);
1789 		return err;
1790 	}
1791 
1792 	/* Join our sockets back to back */
1793 	sock_hold(ska);
1794 	sock_hold(skb);
1795 	unix_peer(ska) = skb;
1796 	unix_peer(skb) = ska;
1797 	init_peercred(ska, &ska_peercred);
1798 	init_peercred(skb, &skb_peercred);
1799 
1800 	ska->sk_state = TCP_ESTABLISHED;
1801 	skb->sk_state = TCP_ESTABLISHED;
1802 	socka->state  = SS_CONNECTED;
1803 	sockb->state  = SS_CONNECTED;
1804 	return 0;
1805 }
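
/* Usage note (illustrative): this backs socketpair(2), e.g.
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *
 * which yields two already-connected, unnamed sockets whose peer
 * credentials were captured above at creation time.
 */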
1806 
1807 static int unix_accept(struct socket *sock, struct socket *newsock,
1808 		       struct proto_accept_arg *arg)
1809 {
1810 	struct sock *sk = sock->sk;
1811 	struct sk_buff *skb;
1812 	struct sock *tsk;
1813 
1814 	arg->err = -EOPNOTSUPP;
1815 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1816 		goto out;
1817 
1818 	arg->err = -EINVAL;
1819 	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
1820 		goto out;
1821 
1822 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1823 	 * so no locks are necessary.
1824 	 */
1825 
1826 	skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1827 				&arg->err);
1828 	if (!skb) {
1829 		/* This means receive shutdown. */
1830 		if (arg->err == 0)
1831 			arg->err = -EINVAL;
1832 		goto out;
1833 	}
1834 
1835 	tsk = skb->sk;
1836 	skb_free_datagram(sk, skb);
1837 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1838 
1839 	/* attach accepted sock to socket */
1840 	unix_state_lock(tsk);
1841 	unix_update_edges(unix_sk(tsk));
1842 	newsock->state = SS_CONNECTED;
1843 	sock_graft(tsk, newsock);
1844 	unix_state_unlock(tsk);
1845 	return 0;
1846 
1847 out:
1848 	return arg->err;
1849 }
1850 
1851 
1852 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1853 {
1854 	struct sock *sk = sock->sk;
1855 	struct unix_address *addr;
1856 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1857 	int err = 0;
1858 
1859 	if (peer) {
1860 		sk = unix_peer_get(sk);
1861 
1862 		err = -ENOTCONN;
1863 		if (!sk)
1864 			goto out;
1865 		err = 0;
1866 	} else {
1867 		sock_hold(sk);
1868 	}
1869 
1870 	addr = smp_load_acquire(&unix_sk(sk)->addr);
1871 	if (!addr) {
1872 		sunaddr->sun_family = AF_UNIX;
1873 		sunaddr->sun_path[0] = 0;
1874 		err = offsetof(struct sockaddr_un, sun_path);
1875 	} else {
1876 		err = addr->len;
1877 		memcpy(sunaddr, addr->name, addr->len);
1878 
1879 		if (peer)
1880 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1881 					       CGROUP_UNIX_GETPEERNAME);
1882 		else
1883 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1884 					       CGROUP_UNIX_GETSOCKNAME);
1885 	}
1886 	sock_put(sk);
1887 out:
1888 	return err;
1889 }
1890 
1891 /* The "user->unix_inflight" variable is protected by the garbage
1892  * collection lock, and we just read it locklessly here. If you go
1893  * over the limit, there might be a tiny race in actually noticing
1894  * it across threads. Tough.
1895  */
1896 static inline bool too_many_unix_fds(struct task_struct *p)
1897 {
1898 	struct user_struct *user = current_user();
1899 
1900 	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1901 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1902 	return false;
1903 }
1904 
1905 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1906 {
1907 	if (too_many_unix_fds(current))
1908 		return -ETOOMANYREFS;
1909 
1910 	UNIXCB(skb).fp = scm->fp;
1911 	scm->fp = NULL;
1912 
1913 	if (unix_prepare_fpl(UNIXCB(skb).fp))
1914 		return -ENOMEM;
1915 
1916 	return 0;
1917 }
1918 
1919 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1920 {
1921 	scm->fp = UNIXCB(skb).fp;
1922 	UNIXCB(skb).fp = NULL;
1923 
1924 	unix_destroy_fpl(scm->fp);
1925 }
1926 
1927 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1928 {
1929 	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1930 }
1931 
1932 static void unix_destruct_scm(struct sk_buff *skb)
1933 {
1934 	struct scm_cookie scm;
1935 
1936 	memset(&scm, 0, sizeof(scm));
1937 	scm.pid  = UNIXCB(skb).pid;
1938 	if (UNIXCB(skb).fp)
1939 		unix_detach_fds(&scm, skb);
1940 
1941 	/* Alas, it calls VFS */
1942 	/* So fscking what? fput() has been SMP-safe since last summer */
1943 	scm_destroy(&scm);
1944 	sock_wfree(skb);
1945 }
1946 
1947 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1948 {
1949 	int err = 0;
1950 
1951 	UNIXCB(skb).pid = get_pid(scm->pid);
1952 	UNIXCB(skb).uid = scm->creds.uid;
1953 	UNIXCB(skb).gid = scm->creds.gid;
1954 	UNIXCB(skb).fp = NULL;
1955 	unix_get_secdata(scm, skb);
1956 	if (scm->fp && send_fds)
1957 		err = unix_attach_fds(scm, skb);
1958 
1959 	skb->destructor = unix_destruct_scm;
1960 	return err;
1961 }
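
/* Illustrative userspace counterpart (a sketch; fd_to_pass and sock_fd are
 * hypothetical): the scm->fp list attached above originates from an
 * SCM_RIGHTS control message like this one:
 *
 *	char data = 'x', cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_RIGHTS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &mh, 0);	// may fail with ETOOMANYREFS, see above
 */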
1962 
1963 /*
1964  * Some apps rely on write() giving SCM_CREDENTIALS.
1965  * We include credentials if the source or destination socket
1966  * asserted SOCK_PASSCRED.
1967  */
1968 static void unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
1969 				 const struct sock *other)
1970 {
1971 	if (UNIXCB(skb).pid)
1972 		return;
1973 
1974 	if (unix_may_passcred(sk) || unix_may_passcred(other)) {
1975 		UNIXCB(skb).pid = get_pid(task_tgid(current));
1976 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1977 	}
1978 }
1979 
1980 static bool unix_skb_scm_eq(struct sk_buff *skb,
1981 			    struct scm_cookie *scm)
1982 {
1983 	return UNIXCB(skb).pid == scm->pid &&
1984 	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1985 	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1986 	       unix_secdata_eq(scm, skb);
1987 }
1988 
1989 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1990 {
1991 	struct scm_fp_list *fp = UNIXCB(skb).fp;
1992 	struct unix_sock *u = unix_sk(sk);
1993 
1994 	if (unlikely(fp && fp->count)) {
1995 		atomic_add(fp->count, &u->scm_stat.nr_fds);
1996 		unix_add_edges(fp, u);
1997 	}
1998 }
1999 
2000 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
2001 {
2002 	struct scm_fp_list *fp = UNIXCB(skb).fp;
2003 	struct unix_sock *u = unix_sk(sk);
2004 
2005 	if (unlikely(fp && fp->count)) {
2006 		atomic_sub(fp->count, &u->scm_stat.nr_fds);
2007 		unix_del_edges(fp);
2008 	}
2009 }
2010 
2011 /*
2012  *	Send AF_UNIX data.
2013  */
2014 
2015 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
2016 			      size_t len)
2017 {
2018 	struct sock *sk = sock->sk, *other = NULL;
2019 	struct unix_sock *u = unix_sk(sk);
2020 	struct scm_cookie scm;
2021 	struct sk_buff *skb;
2022 	int data_len = 0;
2023 	int sk_locked;
2024 	long timeo;
2025 	int err;
2026 
2027 	err = scm_send(sock, msg, &scm, false);
2028 	if (err < 0)
2029 		return err;
2030 
2031 	wait_for_unix_gc(scm.fp);
2032 
2033 	if (msg->msg_flags & MSG_OOB) {
2034 		err = -EOPNOTSUPP;
2035 		goto out;
2036 	}
2037 
2038 	if (msg->msg_namelen) {
2039 		err = unix_validate_addr(msg->msg_name, msg->msg_namelen);
2040 		if (err)
2041 			goto out;
2042 
2043 		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
2044 							    msg->msg_name,
2045 							    &msg->msg_namelen,
2046 							    NULL);
2047 		if (err)
2048 			goto out;
2049 	}
2050 
2051 	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
2052 		err = unix_autobind(sk);
2053 		if (err)
2054 			goto out;
2055 	}
2056 
2057 	if (len > READ_ONCE(sk->sk_sndbuf) - 32) {
2058 		err = -EMSGSIZE;
2059 		goto out;
2060 	}
2061 
2062 	if (len > SKB_MAX_ALLOC) {
2063 		data_len = min_t(size_t,
2064 				 len - SKB_MAX_ALLOC,
2065 				 MAX_SKB_FRAGS * PAGE_SIZE);
2066 		data_len = PAGE_ALIGN(data_len);
2067 
2068 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2069 	}
2070 
2071 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2072 				   msg->msg_flags & MSG_DONTWAIT, &err,
2073 				   PAGE_ALLOC_COSTLY_ORDER);
2074 	if (!skb)
2075 		goto out;
2076 
2077 	err = unix_scm_to_skb(&scm, skb, true);
2078 	if (err < 0)
2079 		goto out_free;
2080 
2081 	skb_put(skb, len - data_len);
2082 	skb->data_len = data_len;
2083 	skb->len = len;
2084 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2085 	if (err)
2086 		goto out_free;
2087 
2088 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2089 
2090 	if (msg->msg_namelen) {
2091 lookup:
2092 		other = unix_find_other(sock_net(sk), msg->msg_name,
2093 					msg->msg_namelen, sk->sk_type, 0);
2094 		if (IS_ERR(other)) {
2095 			err = PTR_ERR(other);
2096 			goto out_free;
2097 		}
2098 	} else {
2099 		other = unix_peer_get(sk);
2100 		if (!other) {
2101 			err = -ENOTCONN;
2102 			goto out_free;
2103 		}
2104 	}
2105 
2106 	if (sk_filter(other, skb) < 0) {
2107 		/* Toss the packet but do not return any error to the sender */
2108 		err = len;
2109 		goto out_sock_put;
2110 	}
2111 
2112 restart:
2113 	sk_locked = 0;
2114 	unix_state_lock(other);
2115 restart_locked:
2116 
2117 	if (!unix_may_send(sk, other)) {
2118 		err = -EPERM;
2119 		goto out_unlock;
2120 	}
2121 
2122 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2123 		/* Check with 1003.1g - what should a datagram error return? */
2124 
2125 		unix_state_unlock(other);
2126 
2127 		if (sk->sk_type == SOCK_SEQPACKET) {
2128 			/* We get here only when racing with unix_release_sock(),
2129 			 * which is clearing @other. Never change the state to
2130 			 * TCP_CLOSE here, unlike the SOCK_DGRAM path below.
2131 			 */
2132 			err = -EPIPE;
2133 			goto out_sock_put;
2134 		}
2135 
2136 		if (!sk_locked)
2137 			unix_state_lock(sk);
2138 
2139 		if (unix_peer(sk) == other) {
2140 			unix_peer(sk) = NULL;
2141 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2142 
2143 			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2144 			unix_state_unlock(sk);
2145 
2146 			unix_dgram_disconnected(sk, other);
2147 			sock_put(other);
2148 			err = -ECONNREFUSED;
2149 			goto out_sock_put;
2150 		}
2151 
2152 		unix_state_unlock(sk);
2153 
2154 		if (!msg->msg_namelen) {
2155 			err = -ECONNRESET;
2156 			goto out_sock_put;
2157 		}
2158 
2159 		sock_put(other);
2160 		goto lookup;
2161 	}
2162 
2163 	if (other->sk_shutdown & RCV_SHUTDOWN) {
2164 		err = -EPIPE;
2165 		goto out_unlock;
2166 	}
2167 
2168 	if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2169 		err = -EPERM;
2170 		goto out_unlock;
2171 	}
2172 
2173 	if (sk->sk_type != SOCK_SEQPACKET) {
2174 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2175 		if (err)
2176 			goto out_unlock;
2177 	}
2178 
2179 	/* other == sk && unix_peer(other) != sk if
2180 	 * - unix_peer(sk) == NULL and the destination address is bound to sk
2181 	 * - unix_peer(sk) == sk at lookup time but disconnected before the lock
2182 	 */
2183 	if (other != sk &&
2184 	    unlikely(unix_peer(other) != sk &&
2185 	    unix_recvq_full_lockless(other))) {
2186 		if (timeo) {
2187 			timeo = unix_wait_for_peer(other, timeo);
2188 
2189 			err = sock_intr_errno(timeo);
2190 			if (signal_pending(current))
2191 				goto out_sock_put;
2192 
2193 			goto restart;
2194 		}
2195 
2196 		if (!sk_locked) {
2197 			unix_state_unlock(other);
2198 			unix_state_double_lock(sk, other);
2199 		}
2200 
2201 		if (unix_peer(sk) != other ||
2202 		    unix_dgram_peer_wake_me(sk, other)) {
2203 			err = -EAGAIN;
2204 			sk_locked = 1;
2205 			goto out_unlock;
2206 		}
2207 
2208 		if (!sk_locked) {
2209 			sk_locked = 1;
2210 			goto restart_locked;
2211 		}
2212 	}
2213 
2214 	if (unlikely(sk_locked))
2215 		unix_state_unlock(sk);
2216 
2217 	if (sock_flag(other, SOCK_RCVTSTAMP))
2218 		__net_timestamp(skb);
2219 
2220 	unix_maybe_add_creds(skb, sk, other);
2221 	scm_stat_add(other, skb);
2222 	skb_queue_tail(&other->sk_receive_queue, skb);
2223 	unix_state_unlock(other);
2224 	other->sk_data_ready(other);
2225 	sock_put(other);
2226 	scm_destroy(&scm);
2227 	return len;
2228 
2229 out_unlock:
2230 	if (sk_locked)
2231 		unix_state_unlock(sk);
2232 	unix_state_unlock(other);
2233 out_sock_put:
2234 	sock_put(other);
2235 out_free:
2236 	consume_skb(skb);
2237 out:
2238 	scm_destroy(&scm);
2239 	return err;
2240 }
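
/* Illustrative userspace sketch of the datagram path above (the socket
 * path name is hypothetical):
 *
 *	struct sockaddr_un to = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *
 *	strncpy(to.sun_path, "/run/dgram.sock", sizeof(to.sun_path) - 1);
 *	// Fails with EMSGSIZE when len > sndbuf - 32; blocks (or fails
 *	// with EAGAIN) while a connected receiver's queue is full.
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&to, sizeof(to));
 */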
2241 
2242 /* We use paged skbs for stream sockets, limiting occupancy to 32768
2243  * bytes, with a minimum of a full page.
2244  */
2245 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
2246 
2247 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2248 static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
2249 		     struct scm_cookie *scm, bool fds_sent)
2250 {
2251 	struct unix_sock *ousk = unix_sk(other);
2252 	struct sk_buff *skb;
2253 	int err;
2254 
2255 	skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2256 
2257 	if (!skb)
2258 		return err;
2259 
2260 	err = unix_scm_to_skb(scm, skb, !fds_sent);
2261 	if (err < 0)
2262 		goto out;
2263 
2264 	skb_put(skb, 1);
2265 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2266 
2267 	if (err)
2268 		goto out;
2269 
2270 	unix_state_lock(other);
2271 
2272 	if (sock_flag(other, SOCK_DEAD) ||
2273 	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2274 		err = -EPIPE;
2275 		goto out_unlock;
2276 	}
2277 
2278 	if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2279 		err = -EPERM;
2280 		goto out_unlock;
2281 	}
2282 
2283 	unix_maybe_add_creds(skb, sk, other);
2284 	scm_stat_add(other, skb);
2285 
2286 	spin_lock(&other->sk_receive_queue.lock);
2287 	WRITE_ONCE(ousk->oob_skb, skb);
2288 	__skb_queue_tail(&other->sk_receive_queue, skb);
2289 	spin_unlock(&other->sk_receive_queue.lock);
2290 
2291 	sk_send_sigurg(other);
2292 	unix_state_unlock(other);
2293 	other->sk_data_ready(other);
2294 
2295 	return 0;
2296 out_unlock:
2297 	unix_state_unlock(other);
2298 out:
2299 	consume_skb(skb);
2300 	return err;
2301 }
2302 #endif
2303 
2304 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2305 			       size_t len)
2306 {
2307 	struct sock *sk = sock->sk;
2308 	struct sk_buff *skb = NULL;
2309 	struct sock *other = NULL;
2310 	struct scm_cookie scm;
2311 	bool fds_sent = false;
2312 	int err, sent = 0;
2313 
2314 	err = scm_send(sock, msg, &scm, false);
2315 	if (err < 0)
2316 		return err;
2317 
2318 	wait_for_unix_gc(scm.fp);
2319 
2320 	if (msg->msg_flags & MSG_OOB) {
2321 		err = -EOPNOTSUPP;
2322 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2323 		if (len)
2324 			len--;
2325 		else
2326 #endif
2327 			goto out_err;
2328 	}
2329 
2330 	if (msg->msg_namelen) {
2331 		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2332 		goto out_err;
2333 	} else {
2334 		other = unix_peer(sk);
2335 		if (!other) {
2336 			err = -ENOTCONN;
2337 			goto out_err;
2338 		}
2339 	}
2340 
2341 	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2342 		goto out_pipe;
2343 
2344 	while (sent < len) {
2345 		int size = len - sent;
2346 		int data_len;
2347 
2348 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2349 			skb = sock_alloc_send_pskb(sk, 0, 0,
2350 						   msg->msg_flags & MSG_DONTWAIT,
2351 						   &err, 0);
2352 		} else {
2353 			/* Keep two messages in the pipe so it schedules better */
2354 			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2355 
2356 			/* allow fallback to order-0 allocations */
2357 			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2358 
2359 			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2360 
2361 			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2362 
2363 			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2364 						   msg->msg_flags & MSG_DONTWAIT, &err,
2365 						   get_order(UNIX_SKB_FRAGS_SZ));
2366 		}
2367 		if (!skb)
2368 			goto out_err;
2369 
2370 		/* Only send the fds in the first buffer */
2371 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2372 		if (err < 0)
2373 			goto out_free;
2374 
2375 		fds_sent = true;
2376 
2377 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2378 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2379 			err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2380 						   sk->sk_allocation);
2381 			if (err < 0)
2382 				goto out_free;
2383 
2384 			size = err;
2385 			refcount_add(size, &sk->sk_wmem_alloc);
2386 		} else {
2387 			skb_put(skb, size - data_len);
2388 			skb->data_len = data_len;
2389 			skb->len = size;
2390 			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2391 			if (err)
2392 				goto out_free;
2393 		}
2394 
2395 		unix_state_lock(other);
2396 
2397 		if (sock_flag(other, SOCK_DEAD) ||
2398 		    (other->sk_shutdown & RCV_SHUTDOWN))
2399 			goto out_pipe_unlock;
2400 
2401 		if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2402 			unix_state_unlock(other);
2403 			err = -EPERM;
2404 			goto out_free;
2405 		}
2406 
2407 		unix_maybe_add_creds(skb, sk, other);
2408 		scm_stat_add(other, skb);
2409 		skb_queue_tail(&other->sk_receive_queue, skb);
2410 		unix_state_unlock(other);
2411 		other->sk_data_ready(other);
2412 		sent += size;
2413 	}
2414 
2415 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2416 	if (msg->msg_flags & MSG_OOB) {
2417 		err = queue_oob(sk, msg, other, &scm, fds_sent);
2418 		if (err)
2419 			goto out_err;
2420 		sent++;
2421 	}
2422 #endif
2423 
2424 	scm_destroy(&scm);
2425 
2426 	return sent;
2427 
2428 out_pipe_unlock:
2429 	unix_state_unlock(other);
2430 out_pipe:
2431 	if (!sent && !(msg->msg_flags & MSG_NOSIGNAL))
2432 		send_sig(SIGPIPE, current, 0);
2433 	err = -EPIPE;
2434 out_free:
2435 	consume_skb(skb);
2436 out_err:
2437 	scm_destroy(&scm);
2438 	return sent ? : err;
2439 }
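
/* Illustrative userspace sketch: with MSG_OOB only the final byte is
 * queued out-of-band (via queue_oob() above); everything before it is
 * sent as ordinary stream data:
 *
 *	send(fd, "abc", 3, MSG_OOB);	// 'a', 'b' inline; 'c' is the OOB byte
 */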
2440 
2441 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2442 				  size_t len)
2443 {
2444 	int err;
2445 	struct sock *sk = sock->sk;
2446 
2447 	err = sock_error(sk);
2448 	if (err)
2449 		return err;
2450 
2451 	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2452 		return -ENOTCONN;
2453 
2454 	if (msg->msg_namelen)
2455 		msg->msg_namelen = 0;
2456 
2457 	return unix_dgram_sendmsg(sock, msg, len);
2458 }
2459 
2460 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2461 				  size_t size, int flags)
2462 {
2463 	struct sock *sk = sock->sk;
2464 
2465 	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2466 		return -ENOTCONN;
2467 
2468 	return unix_dgram_recvmsg(sock, msg, size, flags);
2469 }
2470 
2471 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2472 {
2473 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2474 
2475 	if (addr) {
2476 		msg->msg_namelen = addr->len;
2477 		memcpy(msg->msg_name, addr->name, addr->len);
2478 	}
2479 }
2480 
2481 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2482 			 int flags)
2483 {
2484 	struct scm_cookie scm;
2485 	struct socket *sock = sk->sk_socket;
2486 	struct unix_sock *u = unix_sk(sk);
2487 	struct sk_buff *skb, *last;
2488 	long timeo;
2489 	int skip;
2490 	int err;
2491 
2492 	err = -EOPNOTSUPP;
2493 	if (flags & MSG_OOB)
2494 		goto out;
2495 
2496 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2497 
2498 	do {
2499 		mutex_lock(&u->iolock);
2500 
2501 		skip = sk_peek_offset(sk, flags);
2502 		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2503 					      &skip, &err, &last);
2504 		if (skb) {
2505 			if (!(flags & MSG_PEEK))
2506 				scm_stat_del(sk, skb);
2507 			break;
2508 		}
2509 
2510 		mutex_unlock(&u->iolock);
2511 
2512 		if (err != -EAGAIN)
2513 			break;
2514 	} while (timeo &&
2515 		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2516 					      &err, &timeo, last));
2517 
2518 	if (!skb) { /* implies iolock unlocked */
2519 		unix_state_lock(sk);
2520 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2521 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2522 		    (sk->sk_shutdown & RCV_SHUTDOWN))
2523 			err = 0;
2524 		unix_state_unlock(sk);
2525 		goto out;
2526 	}
2527 
2528 	if (wq_has_sleeper(&u->peer_wait))
2529 		wake_up_interruptible_sync_poll(&u->peer_wait,
2530 						EPOLLOUT | EPOLLWRNORM |
2531 						EPOLLWRBAND);
2532 
2533 	if (msg->msg_name) {
2534 		unix_copy_addr(msg, skb->sk);
2535 
2536 		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2537 						      msg->msg_name,
2538 						      &msg->msg_namelen);
2539 	}
2540 
2541 	if (size > skb->len - skip)
2542 		size = skb->len - skip;
2543 	else if (size < skb->len - skip)
2544 		msg->msg_flags |= MSG_TRUNC;
2545 
2546 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2547 	if (err)
2548 		goto out_free;
2549 
2550 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2551 		__sock_recv_timestamp(msg, sk, skb);
2552 
2553 	memset(&scm, 0, sizeof(scm));
2554 
2555 	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2556 	unix_set_secdata(&scm, skb);
2557 
2558 	if (!(flags & MSG_PEEK)) {
2559 		if (UNIXCB(skb).fp)
2560 			unix_detach_fds(&scm, skb);
2561 
2562 		sk_peek_offset_bwd(sk, skb->len);
2563 	} else {
2564 		/* It is questionable what to do on PEEK. We could:
2565 		 * - not return fds: good, but too simple 8)
2566 		 * - return fds, and not return them on read (the old
2567 		 *   strategy, apparently wrong)
2568 		 * - clone fds (chosen for now, as the most universal
2569 		 *   solution)
2570 		 *
2571 		 * POSIX 1003.1g does not actually define this clearly
2572 		 * at all. POSIX 1003.1g doesn't define a lot of things
2573 		 * clearly, however!
2574 		 */
2576 
2577 		sk_peek_offset_fwd(sk, size);
2578 
2579 		if (UNIXCB(skb).fp)
2580 			unix_peek_fds(&scm, skb);
2581 	}
2582 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2583 
2584 	scm_recv_unix(sock, msg, &scm, flags);
2585 
2586 out_free:
2587 	skb_free_datagram(sk, skb);
2588 	mutex_unlock(&u->iolock);
2589 out:
2590 	return err;
2591 }
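
/* Illustrative userspace sketch: MSG_PEEK plus SO_PEEK_OFF drives the
 * sk_peek_offset() bookkeeping above, so successive peeks advance through
 * the queued data instead of re-reading it:
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, len, MSG_PEEK);	// peek; the offset moves forward
 *	recv(fd, buf, len, MSG_PEEK);	// peeks the following data
 *	recv(fd, buf, len, 0);		// consume; the offset is rewound
 */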
2592 
2593 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2594 			      int flags)
2595 {
2596 	struct sock *sk = sock->sk;
2597 
2598 #ifdef CONFIG_BPF_SYSCALL
2599 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2600 
2601 	if (prot != &unix_dgram_proto)
2602 		return prot->recvmsg(sk, msg, size, flags, NULL);
2603 #endif
2604 	return __unix_dgram_recvmsg(sk, msg, size, flags);
2605 }
2606 
2607 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2608 {
2609 	struct unix_sock *u = unix_sk(sk);
2610 	struct sk_buff *skb;
2611 	int err;
2612 
2613 	mutex_lock(&u->iolock);
2614 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2615 	mutex_unlock(&u->iolock);
2616 	if (!skb)
2617 		return err;
2618 
2619 	return recv_actor(sk, skb);
2620 }
2621 
2622 /*
2623  *	Sleep until more data has arrived. But check for races.
2624  */
2625 static long unix_stream_data_wait(struct sock *sk, long timeo,
2626 				  struct sk_buff *last, unsigned int last_len,
2627 				  bool freezable)
2628 {
2629 	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2630 	struct sk_buff *tail;
2631 	DEFINE_WAIT(wait);
2632 
2633 	unix_state_lock(sk);
2634 
2635 	for (;;) {
2636 		prepare_to_wait(sk_sleep(sk), &wait, state);
2637 
2638 		tail = skb_peek_tail(&sk->sk_receive_queue);
2639 		if (tail != last ||
2640 		    (tail && tail->len != last_len) ||
2641 		    sk->sk_err ||
2642 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2643 		    signal_pending(current) ||
2644 		    !timeo)
2645 			break;
2646 
2647 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2648 		unix_state_unlock(sk);
2649 		timeo = schedule_timeout(timeo);
2650 		unix_state_lock(sk);
2651 
2652 		if (sock_flag(sk, SOCK_DEAD))
2653 			break;
2654 
2655 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2656 	}
2657 
2658 	finish_wait(sk_sleep(sk), &wait);
2659 	unix_state_unlock(sk);
2660 	return timeo;
2661 }
2662 
2663 static unsigned int unix_skb_len(const struct sk_buff *skb)
2664 {
2665 	return skb->len - UNIXCB(skb).consumed;
2666 }
2667 
2668 struct unix_stream_read_state {
2669 	int (*recv_actor)(struct sk_buff *, int, int,
2670 			  struct unix_stream_read_state *);
2671 	struct socket *socket;
2672 	struct msghdr *msg;
2673 	struct pipe_inode_info *pipe;
2674 	size_t size;
2675 	int flags;
2676 	unsigned int splice_flags;
2677 };
2678 
2679 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2680 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2681 {
2682 	struct socket *sock = state->socket;
2683 	struct sock *sk = sock->sk;
2684 	struct unix_sock *u = unix_sk(sk);
2685 	int chunk = 1;
2686 	struct sk_buff *oob_skb;
2687 
2688 	mutex_lock(&u->iolock);
2689 	unix_state_lock(sk);
2690 	spin_lock(&sk->sk_receive_queue.lock);
2691 
2692 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2693 		spin_unlock(&sk->sk_receive_queue.lock);
2694 		unix_state_unlock(sk);
2695 		mutex_unlock(&u->iolock);
2696 		return -EINVAL;
2697 	}
2698 
2699 	oob_skb = u->oob_skb;
2700 
2701 	if (!(state->flags & MSG_PEEK))
2702 		WRITE_ONCE(u->oob_skb, NULL);
2703 
2704 	spin_unlock(&sk->sk_receive_queue.lock);
2705 	unix_state_unlock(sk);
2706 
2707 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2708 
2709 	if (!(state->flags & MSG_PEEK))
2710 		UNIXCB(oob_skb).consumed += 1;
2711 
2712 	mutex_unlock(&u->iolock);
2713 
2714 	if (chunk < 0)
2715 		return -EFAULT;
2716 
2717 	state->msg->msg_flags |= MSG_OOB;
2718 	return 1;
2719 }
2720 
2721 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2722 				  int flags, int copied)
2723 {
2724 	struct sk_buff *read_skb = NULL, *unread_skb = NULL;
2725 	struct unix_sock *u = unix_sk(sk);
2726 
2727 	if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
2728 		return skb;
2729 
2730 	spin_lock(&sk->sk_receive_queue.lock);
2731 
2732 	if (!unix_skb_len(skb)) {
2733 		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
2734 			skb = NULL;
2735 		} else if (flags & MSG_PEEK) {
2736 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2737 		} else {
2738 			read_skb = skb;
2739 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2740 			__skb_unlink(read_skb, &sk->sk_receive_queue);
2741 		}
2742 
2743 		if (!skb)
2744 			goto unlock;
2745 	}
2746 
2747 	if (skb != u->oob_skb)
2748 		goto unlock;
2749 
2750 	if (copied) {
2751 		skb = NULL;
2752 	} else if (!(flags & MSG_PEEK)) {
2753 		WRITE_ONCE(u->oob_skb, NULL);
2754 
2755 		if (!sock_flag(sk, SOCK_URGINLINE)) {
2756 			__skb_unlink(skb, &sk->sk_receive_queue);
2757 			unread_skb = skb;
2758 			skb = skb_peek(&sk->sk_receive_queue);
2759 		}
2760 	} else if (!sock_flag(sk, SOCK_URGINLINE)) {
2761 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
2762 	}
2763 
2764 unlock:
2765 	spin_unlock(&sk->sk_receive_queue.lock);
2766 
2767 	consume_skb(read_skb);
2768 	kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2769 
2770 	return skb;
2771 }
2772 #endif
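
/* Illustrative userspace sketch of the OOB receive paths above:
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);	// unix_stream_recv_urg()
 *
 * n == 1 on success; -1 with EINVAL if SO_OOBINLINE is set or no OOB byte
 * is pending. With SO_OOBINLINE the byte is instead left in the normal
 * data stream (the manage_oob() handling above).
 */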
2773 
2774 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2775 {
2776 	struct unix_sock *u = unix_sk(sk);
2777 	struct sk_buff *skb;
2778 	int err;
2779 
2780 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2781 		return -ENOTCONN;
2782 
2783 	mutex_lock(&u->iolock);
2784 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2785 	mutex_unlock(&u->iolock);
2786 	if (!skb)
2787 		return err;
2788 
2789 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2790 	if (unlikely(skb == READ_ONCE(u->oob_skb))) {
2791 		bool drop = false;
2792 
2793 		unix_state_lock(sk);
2794 
2795 		if (sock_flag(sk, SOCK_DEAD)) {
2796 			unix_state_unlock(sk);
2797 			kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
2798 			return -ECONNRESET;
2799 		}
2800 
2801 		spin_lock(&sk->sk_receive_queue.lock);
2802 		if (likely(skb == u->oob_skb)) {
2803 			WRITE_ONCE(u->oob_skb, NULL);
2804 			drop = true;
2805 		}
2806 		spin_unlock(&sk->sk_receive_queue.lock);
2807 
2808 		unix_state_unlock(sk);
2809 
2810 		if (drop) {
2811 			kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2812 			return -EAGAIN;
2813 		}
2814 	}
2815 #endif
2816 
2817 	return recv_actor(sk, skb);
2818 }
2819 
2820 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2821 				    bool freezable)
2822 {
2823 	struct scm_cookie scm;
2824 	struct socket *sock = state->socket;
2825 	struct sock *sk = sock->sk;
2826 	struct unix_sock *u = unix_sk(sk);
2827 	int copied = 0;
2828 	int flags = state->flags;
2829 	int noblock = flags & MSG_DONTWAIT;
2830 	bool check_creds = false;
2831 	int target;
2832 	int err = 0;
2833 	long timeo;
2834 	int skip;
2835 	size_t size = state->size;
2836 	unsigned int last_len;
2837 
2838 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2839 		err = -EINVAL;
2840 		goto out;
2841 	}
2842 
2843 	if (unlikely(flags & MSG_OOB)) {
2844 		err = -EOPNOTSUPP;
2845 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2846 		err = unix_stream_recv_urg(state);
2847 #endif
2848 		goto out;
2849 	}
2850 
2851 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2852 	timeo = sock_rcvtimeo(sk, noblock);
2853 
2854 	memset(&scm, 0, sizeof(scm));
2855 
2856 	/* Lock the socket to prevent queue disordering
2857 	 * while we sleep copying data to the message.
2858 	 */
2859 	mutex_lock(&u->iolock);
2860 
2861 	skip = max(sk_peek_offset(sk, flags), 0);
2862 
2863 	do {
2864 		struct sk_buff *skb, *last;
2865 		int chunk;
2866 
2867 redo:
2868 		unix_state_lock(sk);
2869 		if (sock_flag(sk, SOCK_DEAD)) {
2870 			err = -ECONNRESET;
2871 			goto unlock;
2872 		}
2873 		last = skb = skb_peek(&sk->sk_receive_queue);
2874 		last_len = last ? last->len : 0;
2875 
2876 again:
2877 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2878 		if (skb) {
2879 			skb = manage_oob(skb, sk, flags, copied);
2880 			if (!skb && copied) {
2881 				unix_state_unlock(sk);
2882 				break;
2883 			}
2884 		}
2885 #endif
2886 		if (skb == NULL) {
2887 			if (copied >= target)
2888 				goto unlock;
2889 
2890 			/*
2891 			 *	POSIX 1003.1g mandates this order.
2892 			 */
2893 
2894 			err = sock_error(sk);
2895 			if (err)
2896 				goto unlock;
2897 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2898 				goto unlock;
2899 
2900 			unix_state_unlock(sk);
2901 			if (!timeo) {
2902 				err = -EAGAIN;
2903 				break;
2904 			}
2905 
2906 			mutex_unlock(&u->iolock);
2907 
2908 			timeo = unix_stream_data_wait(sk, timeo, last,
2909 						      last_len, freezable);
2910 
2911 			if (signal_pending(current)) {
2912 				err = sock_intr_errno(timeo);
2913 				scm_destroy(&scm);
2914 				goto out;
2915 			}
2916 
2917 			mutex_lock(&u->iolock);
2918 			goto redo;
2919 unlock:
2920 			unix_state_unlock(sk);
2921 			break;
2922 		}
2923 
2924 		while (skip >= unix_skb_len(skb)) {
2925 			skip -= unix_skb_len(skb);
2926 			last = skb;
2927 			last_len = skb->len;
2928 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2929 			if (!skb)
2930 				goto again;
2931 		}
2932 
2933 		unix_state_unlock(sk);
2934 
2935 		if (check_creds) {
2936 			/* Never glue messages from different writers */
2937 			if (!unix_skb_scm_eq(skb, &scm))
2938 				break;
2939 		} else if (unix_may_passcred(sk)) {
2940 			/* Copy credentials */
2941 			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2942 			unix_set_secdata(&scm, skb);
2943 			check_creds = true;
2944 		}
2945 
2946 		/* Copy address just once */
2947 		if (state->msg && state->msg->msg_name) {
2948 			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2949 					 state->msg->msg_name);
2950 			unix_copy_addr(state->msg, skb->sk);
2951 
2952 			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2953 							      state->msg->msg_name,
2954 							      &state->msg->msg_namelen);
2955 
2956 			sunaddr = NULL;
2957 		}
2958 
2959 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2960 		chunk = state->recv_actor(skb, skip, chunk, state);
2961 		if (chunk < 0) {
2962 			if (copied == 0)
2963 				copied = -EFAULT;
2964 			break;
2965 		}
2966 		copied += chunk;
2967 		size -= chunk;
2968 
2969 		/* Mark read part of skb as used */
2970 		if (!(flags & MSG_PEEK)) {
2971 			UNIXCB(skb).consumed += chunk;
2972 
2973 			sk_peek_offset_bwd(sk, chunk);
2974 
2975 			if (UNIXCB(skb).fp) {
2976 				scm_stat_del(sk, skb);
2977 				unix_detach_fds(&scm, skb);
2978 			}
2979 
2980 			if (unix_skb_len(skb))
2981 				break;
2982 
2983 			skb_unlink(skb, &sk->sk_receive_queue);
2984 			consume_skb(skb);
2985 
2986 			if (scm.fp)
2987 				break;
2988 		} else {
2989 			/* It is questionable; see the note in __unix_dgram_recvmsg().
2990 			 */
2991 			if (UNIXCB(skb).fp)
2992 				unix_peek_fds(&scm, skb);
2993 
2994 			sk_peek_offset_fwd(sk, chunk);
2995 
2996 			if (UNIXCB(skb).fp)
2997 				break;
2998 
2999 			skip = 0;
3000 			last = skb;
3001 			last_len = skb->len;
3002 			unix_state_lock(sk);
3003 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
3004 			if (skb)
3005 				goto again;
3006 			unix_state_unlock(sk);
3007 			break;
3008 		}
3009 	} while (size);
3010 
3011 	mutex_unlock(&u->iolock);
3012 	if (state->msg)
3013 		scm_recv_unix(sock, state->msg, &scm, flags);
3014 	else
3015 		scm_destroy(&scm);
3016 out:
3017 	return copied ? : err;
3018 }
3019 
3020 static int unix_stream_read_actor(struct sk_buff *skb,
3021 				  int skip, int chunk,
3022 				  struct unix_stream_read_state *state)
3023 {
3024 	int ret;
3025 
3026 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
3027 				    state->msg, chunk);
3028 	return ret ?: chunk;
3029 }
3030 
3031 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
3032 			  size_t size, int flags)
3033 {
3034 	struct unix_stream_read_state state = {
3035 		.recv_actor = unix_stream_read_actor,
3036 		.socket = sk->sk_socket,
3037 		.msg = msg,
3038 		.size = size,
3039 		.flags = flags
3040 	};
3041 
3042 	return unix_stream_read_generic(&state, true);
3043 }
3044 
3045 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
3046 			       size_t size, int flags)
3047 {
3048 	struct unix_stream_read_state state = {
3049 		.recv_actor = unix_stream_read_actor,
3050 		.socket = sock,
3051 		.msg = msg,
3052 		.size = size,
3053 		.flags = flags
3054 	};
3055 
3056 #ifdef CONFIG_BPF_SYSCALL
3057 	struct sock *sk = sock->sk;
3058 	const struct proto *prot = READ_ONCE(sk->sk_prot);
3059 
3060 	if (prot != &unix_stream_proto)
3061 		return prot->recvmsg(sk, msg, size, flags, NULL);
3062 #endif
3063 	return unix_stream_read_generic(&state, true);
3064 }
3065 
3066 static int unix_stream_splice_actor(struct sk_buff *skb,
3067 				    int skip, int chunk,
3068 				    struct unix_stream_read_state *state)
3069 {
3070 	return skb_splice_bits(skb, state->socket->sk,
3071 			       UNIXCB(skb).consumed + skip,
3072 			       state->pipe, chunk, state->splice_flags);
3073 }
3074 
3075 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
3076 				       struct pipe_inode_info *pipe,
3077 				       size_t size, unsigned int flags)
3078 {
3079 	struct unix_stream_read_state state = {
3080 		.recv_actor = unix_stream_splice_actor,
3081 		.socket = sock,
3082 		.pipe = pipe,
3083 		.size = size,
3084 		.splice_flags = flags,
3085 	};
3086 
3087 	if (unlikely(*ppos))
3088 		return -ESPIPE;
3089 
3090 	if (sock->file->f_flags & O_NONBLOCK ||
3091 	    flags & SPLICE_F_NONBLOCK)
3092 		state.flags = MSG_DONTWAIT;
3093 
3094 	return unix_stream_read_generic(&state, false);
3095 }
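
/* Illustrative userspace sketch: splice(2) from a unix stream socket into
 * a pipe goes through the actor above without copying through userspace:
 *
 *	int p[2];
 *
 *	pipe(p);
 *	splice(sock_fd, NULL, p[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */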
3096 
3097 static int unix_shutdown(struct socket *sock, int mode)
3098 {
3099 	struct sock *sk = sock->sk;
3100 	struct sock *other;
3101 
3102 	if (mode < SHUT_RD || mode > SHUT_RDWR)
3103 		return -EINVAL;
3104 	/* This maps:
3105 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3106 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3107 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3108 	 */
3109 	++mode;
3110 
3111 	unix_state_lock(sk);
3112 	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3113 	other = unix_peer(sk);
3114 	if (other)
3115 		sock_hold(other);
3116 	unix_state_unlock(sk);
3117 	sk->sk_state_change(sk);
3118 
3119 	if (other &&
3120 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3121 
3122 		int peer_mode = 0;
3123 		const struct proto *prot = READ_ONCE(other->sk_prot);
3124 
3125 		if (prot->unhash)
3126 			prot->unhash(other);
3127 		if (mode & RCV_SHUTDOWN)
3128 			peer_mode |= SEND_SHUTDOWN;
3129 		if (mode & SEND_SHUTDOWN)
3130 			peer_mode |= RCV_SHUTDOWN;
3131 		unix_state_lock(other);
3132 		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3133 		unix_state_unlock(other);
3134 		other->sk_state_change(other);
3135 		if (peer_mode == SHUTDOWN_MASK)
3136 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3137 		else if (peer_mode & RCV_SHUTDOWN)
3138 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3139 	}
3140 	if (other)
3141 		sock_put(other);
3142 
3143 	return 0;
3144 }
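
/* Illustrative userspace sketch: SHUT_WR on one end sets RCV_SHUTDOWN on
 * the peer via the peer_mode mapping above, which reads as EOF there:
 *
 *	shutdown(a, SHUT_WR);
 *	read(b, buf, sizeof(buf));	// returns 0 once the queue drains
 */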
3145 
3146 long unix_inq_len(struct sock *sk)
3147 {
3148 	struct sk_buff *skb;
3149 	long amount = 0;
3150 
3151 	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
3152 		return -EINVAL;
3153 
3154 	spin_lock(&sk->sk_receive_queue.lock);
3155 	if (sk->sk_type == SOCK_STREAM ||
3156 	    sk->sk_type == SOCK_SEQPACKET) {
3157 		skb_queue_walk(&sk->sk_receive_queue, skb)
3158 			amount += unix_skb_len(skb);
3159 	} else {
3160 		skb = skb_peek(&sk->sk_receive_queue);
3161 		if (skb)
3162 			amount = skb->len;
3163 	}
3164 	spin_unlock(&sk->sk_receive_queue.lock);
3165 
3166 	return amount;
3167 }
3168 EXPORT_SYMBOL_GPL(unix_inq_len);
3169 
3170 long unix_outq_len(struct sock *sk)
3171 {
3172 	return sk_wmem_alloc_get(sk);
3173 }
3174 EXPORT_SYMBOL_GPL(unix_outq_len);
3175 
3176 static int unix_open_file(struct sock *sk)
3177 {
3178 	struct path path;
3179 	struct file *f;
3180 	int fd;
3181 
3182 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3183 		return -EPERM;
3184 
3185 	if (!smp_load_acquire(&unix_sk(sk)->addr))
3186 		return -ENOENT;
3187 
3188 	path = unix_sk(sk)->path;
3189 	if (!path.dentry)
3190 		return -ENOENT;
3191 
3192 	path_get(&path);
3193 
3194 	fd = get_unused_fd_flags(O_CLOEXEC);
3195 	if (fd < 0)
3196 		goto out;
3197 
3198 	f = dentry_open(&path, O_PATH, current_cred());
3199 	if (IS_ERR(f)) {
3200 		put_unused_fd(fd);
3201 		fd = PTR_ERR(f);
3202 		goto out;
3203 	}
3204 
3205 	fd_install(fd, f);
3206 out:
3207 	path_put(&path);
3208 
3209 	return fd;
3210 }
3211 
3212 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3213 {
3214 	struct sock *sk = sock->sk;
3215 	long amount = 0;
3216 	int err;
3217 
3218 	switch (cmd) {
3219 	case SIOCOUTQ:
3220 		amount = unix_outq_len(sk);
3221 		err = put_user(amount, (int __user *)arg);
3222 		break;
3223 	case SIOCINQ:
3224 		amount = unix_inq_len(sk);
3225 		if (amount < 0)
3226 			err = amount;
3227 		else
3228 			err = put_user(amount, (int __user *)arg);
3229 		break;
3230 	case SIOCUNIXFILE:
3231 		err = unix_open_file(sk);
3232 		break;
3233 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3234 	case SIOCATMARK:
3235 		{
3236 			struct unix_sock *u = unix_sk(sk);
3237 			struct sk_buff *skb;
3238 			int answ = 0;
3239 
3240 			mutex_lock(&u->iolock);
3241 
3242 			skb = skb_peek(&sk->sk_receive_queue);
3243 			if (skb) {
3244 				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
3245 				struct sk_buff *next_skb;
3246 
3247 				next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
3248 
3249 				if (skb == oob_skb ||
3250 				    (!unix_skb_len(skb) &&
3251 				     (!oob_skb || next_skb == oob_skb)))
3252 					answ = 1;
3253 			}
3254 
3255 			mutex_unlock(&u->iolock);
3256 
3257 			err = put_user(answ, (int __user *)arg);
3258 		}
3259 		break;
3260 #endif
3261 	default:
3262 		err = -ENOIOCTLCMD;
3263 		break;
3264 	}
3265 	return err;
3266 }
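
/* Illustrative userspace sketch of the ioctls handled above:
 *
 *	int val, pfd;
 *
 *	ioctl(fd, SIOCINQ, &val);	// bytes queued for reading
 *	ioctl(fd, SIOCATMARK, &val);	// 1 when the next byte is the OOB mark
 *	pfd = ioctl(fd, SIOCUNIXFILE, 0);	// O_PATH fd; needs CAP_NET_ADMIN
 */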
3267 
3268 #ifdef CONFIG_COMPAT
3269 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3270 {
3271 	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3272 }
3273 #endif
3274 
3275 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3276 {
3277 	struct sock *sk = sock->sk;
3278 	unsigned char state;
3279 	__poll_t mask;
3280 	u8 shutdown;
3281 
3282 	sock_poll_wait(file, sock, wait);
3283 	mask = 0;
3284 	shutdown = READ_ONCE(sk->sk_shutdown);
3285 	state = READ_ONCE(sk->sk_state);
3286 
3287 	/* exceptional events? */
3288 	if (READ_ONCE(sk->sk_err))
3289 		mask |= EPOLLERR;
3290 	if (shutdown == SHUTDOWN_MASK)
3291 		mask |= EPOLLHUP;
3292 	if (shutdown & RCV_SHUTDOWN)
3293 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3294 
3295 	/* readable? */
3296 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3297 		mask |= EPOLLIN | EPOLLRDNORM;
3298 	if (sk_is_readable(sk))
3299 		mask |= EPOLLIN | EPOLLRDNORM;
3300 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3301 	if (READ_ONCE(unix_sk(sk)->oob_skb))
3302 		mask |= EPOLLPRI;
3303 #endif
3304 
3305 	/* Connection-based sockets need to check for termination and startup */
3306 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3307 	    state == TCP_CLOSE)
3308 		mask |= EPOLLHUP;
3309 
3310 	/*
3311 	 * We set writable also when the other side has shut down the
3312 	 * connection. This prevents stuck sockets.
3313 	 */
3314 	if (unix_writable(sk, state))
3315 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3316 
3317 	return mask;
3318 }
3319 
3320 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3321 				    poll_table *wait)
3322 {
3323 	struct sock *sk = sock->sk, *other;
3324 	unsigned int writable;
3325 	unsigned char state;
3326 	__poll_t mask;
3327 	u8 shutdown;
3328 
3329 	sock_poll_wait(file, sock, wait);
3330 	mask = 0;
3331 	shutdown = READ_ONCE(sk->sk_shutdown);
3332 	state = READ_ONCE(sk->sk_state);
3333 
3334 	/* exceptional events? */
3335 	if (READ_ONCE(sk->sk_err) ||
3336 	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3337 		mask |= EPOLLERR |
3338 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3339 
3340 	if (shutdown & RCV_SHUTDOWN)
3341 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3342 	if (shutdown == SHUTDOWN_MASK)
3343 		mask |= EPOLLHUP;
3344 
3345 	/* readable? */
3346 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3347 		mask |= EPOLLIN | EPOLLRDNORM;
3348 	if (sk_is_readable(sk))
3349 		mask |= EPOLLIN | EPOLLRDNORM;
3350 
3351 	/* Connection-based sockets need to check for termination and startup */
3352 	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3353 		mask |= EPOLLHUP;
3354 
3355 	/* No write status requested, avoid expensive OUT tests. */
3356 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3357 		return mask;
3358 
3359 	writable = unix_writable(sk, state);
3360 	if (writable) {
3361 		unix_state_lock(sk);
3362 
3363 		other = unix_peer(sk);
3364 		if (other && unix_peer(other) != sk &&
3365 		    unix_recvq_full_lockless(other) &&
3366 		    unix_dgram_peer_wake_me(sk, other))
3367 			writable = 0;
3368 
3369 		unix_state_unlock(sk);
3370 	}
3371 
3372 	if (writable)
3373 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3374 	else
3375 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3376 
3377 	return mask;
3378 }
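
/* Illustrative userspace sketch: a connected datagram sender polling for
 * POLLOUT stays blocked while the peer's receive queue is full (the
 * unix_dgram_peer_wake_me() test above):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);	// wakes when the peer drains its queue
 */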
3379 
3380 #ifdef CONFIG_PROC_FS
3381 
3382 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3383 
3384 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3385 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3386 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
3387 
3388 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3389 {
3390 	unsigned long offset = get_offset(*pos);
3391 	unsigned long bucket = get_bucket(*pos);
3392 	unsigned long count = 0;
3393 	struct sock *sk;
3394 
3395 	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3396 	     sk; sk = sk_next(sk)) {
3397 		if (++count == offset)
3398 			break;
3399 	}
3400 
3401 	return sk;
3402 }
3403 
3404 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3405 {
3406 	unsigned long bucket = get_bucket(*pos);
3407 	struct net *net = seq_file_net(seq);
3408 	struct sock *sk;
3409 
3410 	while (bucket < UNIX_HASH_SIZE) {
3411 		spin_lock(&net->unx.table.locks[bucket]);
3412 
3413 		sk = unix_from_bucket(seq, pos);
3414 		if (sk)
3415 			return sk;
3416 
3417 		spin_unlock(&net->unx.table.locks[bucket]);
3418 
3419 		*pos = set_bucket_offset(++bucket, 1);
3420 	}
3421 
3422 	return NULL;
3423 }
3424 
3425 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3426 				  loff_t *pos)
3427 {
3428 	unsigned long bucket = get_bucket(*pos);
3429 
3430 	sk = sk_next(sk);
3431 	if (sk)
3432 		return sk;
3433 
3435 	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3436 
3437 	*pos = set_bucket_offset(++bucket, 1);
3438 
3439 	return unix_get_first(seq, pos);
3440 }
3441 
3442 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3443 {
3444 	if (!*pos)
3445 		return SEQ_START_TOKEN;
3446 
3447 	return unix_get_first(seq, pos);
3448 }
3449 
3450 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3451 {
3452 	++*pos;
3453 
3454 	if (v == SEQ_START_TOKEN)
3455 		return unix_get_first(seq, pos);
3456 
3457 	return unix_get_next(seq, v, pos);
3458 }
3459 
3460 static void unix_seq_stop(struct seq_file *seq, void *v)
3461 {
3462 	struct sock *sk = v;
3463 
3464 	if (sk)
3465 		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3466 }
3467 
3468 static int unix_seq_show(struct seq_file *seq, void *v)
3469 {
3471 	if (v == SEQ_START_TOKEN)
3472 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3473 			 "Inode Path\n");
3474 	else {
3475 		struct sock *s = v;
3476 		struct unix_sock *u = unix_sk(s);
3477 		unix_state_lock(s);
3478 
3479 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3480 			s,
3481 			refcount_read(&s->sk_refcnt),
3482 			0,
3483 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3484 			s->sk_type,
3485 			s->sk_socket ?
3486 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3487 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3488 			sock_i_ino(s));
3489 
3490 		if (u->addr) {	/* under a hash table lock here */
3491 			int i, len;
3492 			seq_putc(seq, ' ');
3493 
3494 			i = 0;
3495 			len = u->addr->len -
3496 				offsetof(struct sockaddr_un, sun_path);
3497 			if (u->addr->name->sun_path[0]) {
3498 				len--;
3499 			} else {
3500 				seq_putc(seq, '@');
3501 				i++;
3502 			}
3503 			for ( ; i < len; i++)
3504 				seq_putc(seq, u->addr->name->sun_path[i] ?:
3505 					 '@');
3506 		}
3507 		unix_state_unlock(s);
3508 		seq_putc(seq, '\n');
3509 	}
3510 
3511 	return 0;
3512 }
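
/* The resulting /proc/net/unix format, one line per socket (the values
 * below are illustrative; this one is a listening stream socket):
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	ffff8d2e: 00000002 00000000 00010000 0001 01 12345 /run/demo.sock
 */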
3513 
3514 static const struct seq_operations unix_seq_ops = {
3515 	.start  = unix_seq_start,
3516 	.next   = unix_seq_next,
3517 	.stop   = unix_seq_stop,
3518 	.show   = unix_seq_show,
3519 };
3520 
3521 #ifdef CONFIG_BPF_SYSCALL
3522 struct bpf_unix_iter_state {
3523 	struct seq_net_private p;
3524 	unsigned int cur_sk;
3525 	unsigned int end_sk;
3526 	unsigned int max_sk;
3527 	struct sock **batch;
3528 	bool st_bucket_done;
3529 };
3530 
3531 struct bpf_iter__unix {
3532 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3533 	__bpf_md_ptr(struct unix_sock *, unix_sk);
3534 	uid_t uid __aligned(8);
3535 };
3536 
3537 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3538 			      struct unix_sock *unix_sk, uid_t uid)
3539 {
3540 	struct bpf_iter__unix ctx;
3541 
3542 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3543 	ctx.meta = meta;
3544 	ctx.unix_sk = unix_sk;
3545 	ctx.uid = uid;
3546 	return bpf_iter_run_prog(prog, &ctx);
3547 }
3548 
3549 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3550 {
3552 	struct bpf_unix_iter_state *iter = seq->private;
3553 	unsigned int expected = 1;
3554 	struct sock *sk;
3555 
3556 	sock_hold(start_sk);
3557 	iter->batch[iter->end_sk++] = start_sk;
3558 
3559 	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3560 		if (iter->end_sk < iter->max_sk) {
3561 			sock_hold(sk);
3562 			iter->batch[iter->end_sk++] = sk;
3563 		}
3564 
3565 		expected++;
3566 	}
3567 
3568 	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3569 
3570 	return expected;
3571 }
3572 
3573 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3574 {
3575 	while (iter->cur_sk < iter->end_sk)
3576 		sock_put(iter->batch[iter->cur_sk++]);
3577 }
3578 
3579 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3580 				       unsigned int new_batch_sz)
3581 {
3582 	struct sock **new_batch;
3583 
3584 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3585 			     GFP_USER | __GFP_NOWARN);
3586 	if (!new_batch)
3587 		return -ENOMEM;
3588 
3589 	bpf_iter_unix_put_batch(iter);
3590 	kvfree(iter->batch);
3591 	iter->batch = new_batch;
3592 	iter->max_sk = new_batch_sz;
3593 
3594 	return 0;
3595 }
3596 
3597 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3598 					loff_t *pos)
3599 {
3600 	struct bpf_unix_iter_state *iter = seq->private;
3601 	unsigned int expected;
3602 	bool resized = false;
3603 	struct sock *sk;
3604 
3605 	if (iter->st_bucket_done)
3606 		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3607 
3608 again:
3609 	/* Get a new batch */
3610 	iter->cur_sk = 0;
3611 	iter->end_sk = 0;
3612 
3613 	sk = unix_get_first(seq, pos);
3614 	if (!sk)
3615 		return NULL; /* Done */
3616 
3617 	expected = bpf_iter_unix_hold_batch(seq, sk);
3618 
3619 	if (iter->end_sk == expected) {
3620 		iter->st_bucket_done = true;
3621 		return sk;
3622 	}
3623 
3624 	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3625 		resized = true;
3626 		goto again;
3627 	}
3628 
3629 	return sk;
3630 }
3631 
3632 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3633 {
3634 	if (!*pos)
3635 		return SEQ_START_TOKEN;
3636 
3637 	/* bpf iter does not support lseek, so it always
3638 	 * continues from where it was stop()-ped.
3639 	 */
3640 	return bpf_iter_unix_batch(seq, pos);
3641 }
3642 
3643 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3644 {
3645 	struct bpf_unix_iter_state *iter = seq->private;
3646 	struct sock *sk;
3647 
3648 	/* Whenever seq_next() is called, the iter->cur_sk is
3649 	 * done with seq_show(), so advance to the next sk in
3650 	 * the batch.
3651 	 */
3652 	if (iter->cur_sk < iter->end_sk)
3653 		sock_put(iter->batch[iter->cur_sk++]);
3654 
3655 	++*pos;
3656 
3657 	if (iter->cur_sk < iter->end_sk)
3658 		sk = iter->batch[iter->cur_sk];
3659 	else
3660 		sk = bpf_iter_unix_batch(seq, pos);
3661 
3662 	return sk;
3663 }
3664 
3665 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3666 {
3667 	struct bpf_iter_meta meta;
3668 	struct bpf_prog *prog;
3669 	struct sock *sk = v;
3670 	uid_t uid;
3671 	bool slow;
3672 	int ret;
3673 
3674 	if (v == SEQ_START_TOKEN)
3675 		return 0;
3676 
3677 	slow = lock_sock_fast(sk);
3678 
3679 	if (unlikely(sk_unhashed(sk))) {
3680 		ret = SEQ_SKIP;
3681 		goto unlock;
3682 	}
3683 
3684 	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3685 	meta.seq = seq;
3686 	prog = bpf_iter_get_info(&meta, false);
3687 	ret = unix_prog_seq_show(prog, &meta, v, uid);
3688 unlock:
3689 	unlock_sock_fast(sk, slow);
3690 	return ret;
3691 }
3692 
3693 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3694 {
3695 	struct bpf_unix_iter_state *iter = seq->private;
3696 	struct bpf_iter_meta meta;
3697 	struct bpf_prog *prog;
3698 
3699 	if (!v) {
3700 		meta.seq = seq;
3701 		prog = bpf_iter_get_info(&meta, true);
3702 		if (prog)
3703 			(void)unix_prog_seq_show(prog, &meta, v, 0);
3704 	}
3705 
3706 	if (iter->cur_sk < iter->end_sk)
3707 		bpf_iter_unix_put_batch(iter);
3708 }
3709 
3710 static const struct seq_operations bpf_iter_unix_seq_ops = {
3711 	.start	= bpf_iter_unix_seq_start,
3712 	.next	= bpf_iter_unix_seq_next,
3713 	.stop	= bpf_iter_unix_seq_stop,
3714 	.show	= bpf_iter_unix_seq_show,
3715 };
3716 #endif
3717 #endif
3718 
3719 static const struct net_proto_family unix_family_ops = {
3720 	.family = PF_UNIX,
3721 	.create = unix_create,
3722 	.owner	= THIS_MODULE,
3723 };
3724 
3726 static int __net_init unix_net_init(struct net *net)
3727 {
3728 	int i;
3729 
3730 	net->unx.sysctl_max_dgram_qlen = 10;
3731 	if (unix_sysctl_register(net))
3732 		goto out;
3733 
3734 #ifdef CONFIG_PROC_FS
3735 	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3736 			     sizeof(struct seq_net_private)))
3737 		goto err_sysctl;
3738 #endif
3739 
3740 	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3741 					      sizeof(spinlock_t), GFP_KERNEL);
3742 	if (!net->unx.table.locks)
3743 		goto err_proc;
3744 
3745 	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3746 						sizeof(struct hlist_head),
3747 						GFP_KERNEL);
3748 	if (!net->unx.table.buckets)
3749 		goto free_locks;
3750 
3751 	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3752 		spin_lock_init(&net->unx.table.locks[i]);
3753 		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
3754 		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3755 	}
3756 
3757 	return 0;
3758 
3759 free_locks:
3760 	kvfree(net->unx.table.locks);
3761 err_proc:
3762 #ifdef CONFIG_PROC_FS
3763 	remove_proc_entry("unix", net->proc_net);
3764 err_sysctl:
3765 #endif
3766 	unix_sysctl_unregister(net);
3767 out:
3768 	return -ENOMEM;
3769 }
3770 
3771 static void __net_exit unix_net_exit(struct net *net)
3772 {
3773 	kvfree(net->unx.table.buckets);
3774 	kvfree(net->unx.table.locks);
3775 	unix_sysctl_unregister(net);
3776 	remove_proc_entry("unix", net->proc_net);
3777 }
3778 
3779 static struct pernet_operations unix_net_ops = {
3780 	.init = unix_net_init,
3781 	.exit = unix_net_exit,
3782 };
3783 
3784 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3785 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3786 		     struct unix_sock *unix_sk, uid_t uid)
3787 
3788 #define INIT_BATCH_SZ 16
3789 
3790 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3791 {
3792 	struct bpf_unix_iter_state *iter = priv_data;
3793 	int err;
3794 
3795 	err = bpf_iter_init_seq_net(priv_data, aux);
3796 	if (err)
3797 		return err;
3798 
3799 	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3800 	if (err) {
3801 		bpf_iter_fini_seq_net(priv_data);
3802 		return err;
3803 	}
3804 
3805 	return 0;
3806 }
3807 
3808 static void bpf_iter_fini_unix(void *priv_data)
3809 {
3810 	struct bpf_unix_iter_state *iter = priv_data;
3811 
3812 	bpf_iter_fini_seq_net(priv_data);
3813 	kvfree(iter->batch);
3814 }
3815 
3816 static const struct bpf_iter_seq_info unix_seq_info = {
3817 	.seq_ops		= &bpf_iter_unix_seq_ops,
3818 	.init_seq_private	= bpf_iter_init_unix,
3819 	.fini_seq_private	= bpf_iter_fini_unix,
3820 	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3821 };
3822 
3823 static const struct bpf_func_proto *
3824 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3825 			     const struct bpf_prog *prog)
3826 {
3827 	switch (func_id) {
3828 	case BPF_FUNC_setsockopt:
3829 		return &bpf_sk_setsockopt_proto;
3830 	case BPF_FUNC_getsockopt:
3831 		return &bpf_sk_getsockopt_proto;
3832 	default:
3833 		return NULL;
3834 	}
3835 }
3836 
3837 static struct bpf_iter_reg unix_reg_info = {
3838 	.target			= "unix",
3839 	.ctx_arg_info_size	= 1,
3840 	.ctx_arg_info		= {
3841 		{ offsetof(struct bpf_iter__unix, unix_sk),
3842 		  PTR_TO_BTF_ID_OR_NULL },
3843 	},
3844 	.get_func_proto         = bpf_iter_unix_get_func_proto,
3845 	.seq_info		= &unix_seq_info,
3846 };
3847 
3848 static void __init bpf_iter_register(void)
3849 {
3850 	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3851 	if (bpf_iter_reg_target(&unix_reg_info))
3852 		pr_warn("Warning: could not register bpf iterator unix\n");
3853 }
3854 #endif
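
/* Illustrative BPF-side sketch (a separate BPF object, not this file) that
 * consumes the "unix" iterator registered above; field access relies on
 * BTF/CO-RE:
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *
 *		if (!unix_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "state=%u uid=%u\n",
 *			       unix_sk->sk.sk_state, ctx->uid);
 *		return 0;
 *	}
 */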
3855 
3856 static int __init af_unix_init(void)
3857 {
3858 	int i, rc = -1;
3859 
3860 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3861 
3862 	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3863 		spin_lock_init(&bsd_socket_locks[i]);
3864 		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3865 	}
3866 
3867 	rc = proto_register(&unix_dgram_proto, 1);
3868 	if (rc != 0) {
3869 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3870 		goto out;
3871 	}
3872 
3873 	rc = proto_register(&unix_stream_proto, 1);
3874 	if (rc != 0) {
3875 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3876 		proto_unregister(&unix_dgram_proto);
3877 		goto out;
3878 	}
3879 
3880 	sock_register(&unix_family_ops);
3881 	register_pernet_subsys(&unix_net_ops);
3882 	unix_bpf_build_proto();
3883 
3884 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3885 	bpf_iter_register();
3886 #endif
3887 
3888 out:
3889 	return rc;
3890 }
3891 
3892 /* Later than subsys_initcall() because we depend on stuff initialised there */
3893 fs_initcall(af_unix_init);
3894