xref: /linux/net/unix/af_unix.c (revision ff7e082ea40d70b7613e8db2cb11e3555ebcc546)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge
 *					number of sockets being hashed (this
 *					is for unix_gc() performance reasons).
 *					Security fix that limits the max
 *					number of sockets to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina   :	Hash function optimizations
 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie   :	Set peercred for socketpair
 *	     Michal Ostrowski   :       Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *	     				the core infrastructure is doing that
 *	     				for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and give the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with a 0 byte, so that this name space does not
 *		  intersect with BSD names.
 */
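
/* For illustration only (userspace view, not part of this file): an
 * abstract bind fills sun_path with a leading NUL byte and passes the
 * exact address length, e.g.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	memcpy(a.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 *
 * No filesystem node is created, and the name disappears with the
 * last socket referencing it.
 */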

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bpf-cgroup.h>
#include <linux/btf_ids.h>
#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/net.h>
#include <linux/pidfs.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/splice.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <net/af_unix.h>
#include <net/net_namespace.h>
#include <net/scm.h>
#include <net/tcp_states.h>
#include <uapi/linux/sockios.h>
#include <uapi/linux/termios.h>

#include "af_unix.h"

static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];

/* SMP locking strategy:
 *    hash table is protected with spinlock.
 *    each socket state is protected by separate spinlock.
 */
#ifdef CONFIG_PROVE_LOCKING
#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))

static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
				  const struct lockdep_map *b)
{
	return cmp_ptr(a, b);
}

static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
				  const struct lockdep_map *_b)
{
	const struct unix_sock *a, *b;

	a = container_of(_a, struct unix_sock, lock.dep_map);
	b = container_of(_b, struct unix_sock, lock.dep_map);

	if (a->sk.sk_state == TCP_LISTEN) {
		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
		 *
		 *   1. a is TCP_LISTEN.
		 *   2. b is not a.
		 *   3. concurrent connect(b -> a) must fail.
		 *
		 * Except for 2. & 3., the b's state can be any possible
		 * value due to concurrent connect() or listen().
		 *
		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
		 * be expressed as lock_cmp_fn.
		 */
		switch (b->sk.sk_state) {
		case TCP_CLOSE:
		case TCP_ESTABLISHED:
		case TCP_LISTEN:
			return -1;
		default:
			/* Invalid case. */
			return 0;
		}
	}

	/* Should never happen.  Just to be symmetric. */
	if (b->sk.sk_state == TCP_LISTEN) {
		switch (a->sk.sk_state) {
		case TCP_CLOSE:
		case TCP_ESTABLISHED:
			return 1;
		default:
			return 0;
		}
	}

	/* unix_state_double_lock(): ascending address order. */
	return cmp_ptr(a, b);
}

static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
				  const struct lockdep_map *_b)
{
	const struct sock *a, *b;

	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);

	/* unix_collect_skb(): listener -> embryo order. */
	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
		return -1;

	/* Should never happen.  Just to be symmetric. */
	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
		return 1;

	return 0;
}
#endif

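/* The hash table is split in two halves: unbound and filesystem-bound
 * sockets hash into [0, UNIX_HASH_MOD], abstract sockets into the
 * buckets above UNIX_HASH_MOD (see unix_abstract_hash() below), so the
 * namespaces never share a bucket.
 *
 * Unbound sockets are hashed by their kernel address; folding the
 * upper bits in twice mixes the pointer's entropy into the low byte
 * before the socket type is XORed in.
 */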
static unsigned int unix_unbound_hash(struct sock *sk)
{
	unsigned long hash = (unsigned long)sk;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash ^= sk->sk_type;

	return hash & UNIX_HASH_MOD;
}

static unsigned int unix_bsd_hash(struct inode *i)
{
	return i->i_ino & UNIX_HASH_MOD;
}

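/* Abstract names are not NUL-terminated, so the whole address (family,
 * leading NUL byte, and name bytes) is checksummed and folded into one
 * byte; the result is offset past UNIX_HASH_MOD to keep abstract
 * sockets in the upper half of the hash table.
 */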
static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	__wsum csum = csum_partial(sunaddr, addr_len, 0);
	unsigned int hash;

	hash = (__force unsigned int)csum_fold(csum);
	hash ^= hash >> 8;
	hash ^= type;

	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}

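/* Lock two hash buckets in ascending index order so that concurrent
 * rehashing (e.g. bind() moving a socket between buckets) cannot
 * ABBA-deadlock; this matches the ordering unix_table_lock_cmp_fn()
 * teaches lockdep above.
 */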
static void unix_table_double_lock(struct net *net,
				   unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_lock(&net->unx.table.locks[hash1]);
		return;
	}

	if (hash1 > hash2)
		swap(hash1, hash2);

	spin_lock(&net->unx.table.locks[hash1]);
	spin_lock(&net->unx.table.locks[hash2]);
}

static void unix_table_double_unlock(struct net *net,
				     unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_unlock(&net->unx.table.locks[hash1]);
		return;
	}

	spin_unlock(&net->unx.table.locks[hash1]);
	spin_unlock(&net->unx.table.locks[hash2]);
}

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

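/* A datagram may be sent to @osk only if @osk is unconnected or is
 * already connected back to @sk; otherwise a third socket could
 * inject packets into an established pair.
 */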
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return !unix_peer(osk) || unix_peer(osk) == sk;
}

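/* Lockless queue-length check; callers in the poll, connect, and
 * sendmsg paths tolerate a slightly stale answer.
 */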
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
					     int addr_len)
{
	struct unix_address *addr;

	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		return NULL;

	refcount_set(&addr->refcnt, 1);
	addr->len = addr_len;
	memcpy(addr->name, sunaddr, addr_len);

	return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it does not start with a NUL byte, it should be
 *		  NUL-terminated (a filesystem object)
 *		- if it starts with a NUL byte, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
	    addr_len > sizeof(*sunaddr))
		return -EINVAL;

	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;

	return 0;
}

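/* NUL-terminate the path in the caller's sockaddr_storage buffer and
 * recompute the address length from the string, so anything after an
 * embedded NUL byte is ignored.
 */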
static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
	short offset = offsetof(struct sockaddr_storage, __data);

	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));

	/* This may look like an off by one error but it is a bit more
	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
	 * sun_path[108] doesn't as such exist.  However in kernel space
	 * we are guaranteed that it is a valid memory location in our
	 * kernel address buffer because syscall functions always pass
	 * a pointer of struct sockaddr_storage which has a bigger buffer
	 * than 108.  Also, we must terminate sun_path for strlen() in
	 * getname_kernel().
	 */
	addr->__data[addr_len - offset] = 0;

	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
	 * know the actual buffer.
	 */
	return strlen(addr->__data) + offset + 1;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct net *net, struct sock *sk)
{
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct net *net, struct sock *sk,
				 struct unix_address *addr, unsigned int hash)
{
	__unix_remove_socket(sk);
	smp_store_release(&unix_sk(sk)->addr, addr);

	sk->sk_hash = hash;
	__unix_insert_socket(net, sk);
}

static void unix_remove_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_remove_socket(sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_insert_socket(net, sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_bsd_socket(struct sock *sk)
{
	spin_lock(&bsd_socket_locks[sk->sk_hash]);
	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}

static void unix_remove_bsd_socket(struct sock *sk)
{
	if (!hlist_unhashed(&sk->sk_bind_node)) {
		spin_lock(&bsd_socket_locks[sk->sk_hash]);
		__sk_del_bind_node(sk);
		spin_unlock(&bsd_socket_locks[sk->sk_hash]);

		sk_node_init(&sk->sk_bind_node);
	}
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &net->unx.table.buckets[hash]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, unsigned int hash)
{
	struct sock *s;

	spin_lock(&net->unx.table.locks[hash]);
	s = __unix_find_socket_byname(net, sunname, len, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&net->unx.table.locks[hash]);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	unsigned int hash = unix_bsd_hash(i);
	struct sock *s;

	spin_lock(&bsd_socket_locks[hash]);
	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			spin_unlock(&bsd_socket_locks[hash]);
			return s;
		}
	}
	spin_unlock(&bsd_socket_locks[hash]);
	return NULL;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (eg, /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition, and is broken when
 * the association to the server socket is dissolved or after a wake
 * up was relayed.
 */
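
/* Illustrative scenario: several clients connect(2) SOCK_DGRAM sockets
 * to one server socket that never connects back, in the style of a
 * syslog daemon's /dev/log.  When the server's receive queue is full,
 * each blocked writer parks its peer_wake entry on the server's
 * peer_wait queue; the first datagram the server consumes relays a
 * wake-up to those writers via unix_dgram_peer_wake_relay() below.
 */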

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

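/* Returns 1 if @sk's wake entry was newly enqueued on @other's
 * peer_wait queue, 0 if it was already there.
 */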
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT |
				   EPOLLWRNORM |
				   EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

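/* A socket is writable when it is not listening and its committed
 * write memory is at most a quarter of the send buffer
 * (wmem_alloc << 2 <= sndbuf).
 */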
static int unix_writable(const struct sock *sk, unsigned char state)
{
	return state != TCP_LISTEN &&
		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us
 * to do flow control based only on wmem_alloc; second, a sk connected to a
 * peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge_reason(&sk->sk_receive_queue,
				       SKB_DROP_REASON_UNIX_DISCONNECT);

		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal the error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			WRITE_ONCE(other->sk_err, ECONNRESET);
			sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);

	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

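/* Stream reads may consume an skb only partially; UNIXCB(skb).consumed
 * tracks how much has already been handed to userspace, so the
 * remaining payload is skb->len minus that.
 */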
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct sock *skpair;
	struct sk_buff *skb;
	struct path path;
	int state;

	unix_remove_socket(sock_net(sk), sk);
	unix_remove_bsd_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	WRITE_ONCE(sk->sk_state, TCP_CLOSE);

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	u->oob_skb = NULL;
#endif

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
			if (skb && !unix_skb_len(skb))
				skb = skb_peek_next(skb, &sk->sk_receive_queue);
#endif
			unix_state_lock(skpair);
			/* No more writes */
			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
			if (skb || embrion)
				WRITE_ONCE(skpair->sk_err, ECONNRESET);
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);

		/* passed fds are erased in the kfree_skb hook */
		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	unix_schedule_gc(NULL);
}

struct unix_peercred {
	struct pid *peer_pid;
	const struct cred *peer_cred;
};

static inline int prepare_peercred(struct unix_peercred *peercred)
{
	struct pid *pid;
	int err;

	pid = task_tgid(current);
	err = pidfs_register_pid(pid);
	if (likely(!err)) {
		peercred->peer_pid = get_pid(pid);
		peercred->peer_cred = get_current_cred();
	}
	return err;
}

static void drop_peercred(struct unix_peercred *peercred)
{
	const struct cred *cred = NULL;
	struct pid *pid = NULL;

	might_sleep();

	swap(peercred->peer_pid, pid);
	swap(peercred->peer_cred, cred);

	put_pid(pid);
	put_cred(cred);
}

static inline void init_peercred(struct sock *sk,
				 const struct unix_peercred *peercred)
{
	sk->sk_peer_pid = peercred->peer_pid;
	sk->sk_peer_cred = peercred->peer_cred;
}

static void update_peercred(struct sock *sk, struct unix_peercred *peercred)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	init_peercred(sk, peercred);
	spin_unlock(&sk->sk_peer_lock);

	peercred->peer_pid = old_pid;
	peercred->peer_cred = old_cred;
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	lockdep_assert_held(&unix_sk(peersk)->lock);

	spin_lock(&sk->sk_peer_lock);
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
	spin_unlock(&sk->sk_peer_lock);
}

static bool unix_may_passcred(const struct sock *sk)
{
	return sk->sk_scm_credentials || sk->sk_scm_pidfd;
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct unix_peercred peercred = {};

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!READ_ONCE(u->addr))
		goto out;	/* No listens on an unbound socket */
	err = prepare_peercred(&peercred);
	if (err)
		goto out;
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	WRITE_ONCE(sk->sk_state, TCP_LISTEN);

	/* set credentials so connect can copy them */
	update_peercred(sk, &peercred);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	drop_peercred(&peercred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr_unsized *, int);
static int unix_stream_connect(struct socket *, struct sockaddr_unsized *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr_unsized *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
	struct sk_buff *skb;
	struct unix_sock *u;
	int nr_fds = 0;

	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	while (skb) {
		u = unix_sk(skb->sk);
		nr_fds += atomic_read(&u->scm_stat.nr_fds);
		skb = skb_peek_next(skb, &sk->sk_receive_queue);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return nr_fds;
}

static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
	struct sock *sk = sock->sk;
	unsigned char s_state;
	struct unix_sock *u;
	int nr_fds = 0;

	if (sk) {
		s_state = READ_ONCE(sk->sk_state);
		u = unix_sk(sk);

		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
		 * SOCK_DGRAM is ordinary. So, no lock is needed.
		 */
		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
			nr_fds = atomic_read(&u->scm_stat.nr_fds);
		else if (s_state == TCP_LISTEN)
			nr_fds = unix_count_nr_fds(sk);

		seq_printf(m, "scm_fds: %u\n", nr_fds);
	}
}
#else
#define unix_show_fdinfo NULL
#endif

static bool unix_custom_sockopt(int optname)
{
	switch (optname) {
	case SO_INQ:
		return true;
	default:
		return false;
	}
}

static int unix_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen)
{
	struct unix_sock *u = unix_sk(sock->sk);
	struct sock *sk = sock->sk;
	int val;

	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	if (!unix_custom_sockopt(optname))
		return sock_setsockopt(sock, level, optname, optval, optlen);

	if (optlen != sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	switch (optname) {
	case SO_INQ:
		if (sk->sk_type != SOCK_STREAM)
			return -EINVAL;

		if (val > 1 || val < 0)
			return -EINVAL;

		WRITE_ONCE(u->recvmsg_inq, val);
		break;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	unix_setsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.read_skb =	unix_stream_read_skb,
	.mmap =		sock_no_mmap,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	sk_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_dgram_sendmsg,
	.read_skb =	unix_read_skb,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.set_peek_off =	sk_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.set_peek_off =	sk_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static void unix_close(struct sock *sk, long timeout)
{
	/* Nothing to do here, unix socket does not need a ->close().
	 * This is merely for sockmap.
	 */
}

static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
	if (level == SOL_SOCKET) {
		switch (optname) {
		case SO_PEERPIDFD:
			return true;
		default:
			return false;
		}
	}

	return false;
}

struct proto unix_dgram_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
	.name			= "UNIX-STREAM",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
#endif
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
	struct unix_sock *u;
	struct sock *sk;
	int err;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
		err = -ENFILE;
		goto err;
	}

	if (type == SOCK_STREAM)
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
	else /* dgram and seqpacket */
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

	if (!sk) {
		err = -ENOMEM;
		goto err;
	}

	sock_init_data(sock, sk);

	sk->sk_scm_rights	= 1;
	sk->sk_hash		= unix_unbound_hash(sk);
	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
	sk->sk_destruct		= unix_sock_destructor;
	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);

	u = unix_sk(sk);
	u->listener = NULL;
	u->vertex = NULL;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_unbound_socket(net, sk);

	sock_prot_inuse_add(net, sk->sk_prot, 1);

	return sk;

err:
	atomic_long_dec(&unix_nr_socks);
	return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		set_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		fallthrough;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = unix_create1(net, sock, kern, sock->type);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	return 0;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
				  int type, int flags)
{
	struct inode *inode;
	struct path path;
	struct sock *sk;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);

	if (flags & SOCK_COREDUMP) {
		struct path root;

		task_lock(&init_task);
		get_fs_root(init_task.fs, &root);
		task_unlock(&init_task);

		scoped_with_kernel_creds()
			err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path,
					      LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS |
					      LOOKUP_NO_MAGICLINKS, &path);
		path_put(&root);
		if (err)
			goto fail;
	} else {
		err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;

		err = path_permission(&path, MAY_WRITE);
		if (err)
			goto path_put;
	}

	err = -ECONNREFUSED;
	inode = d_backing_inode(path.dentry);
	if (!S_ISSOCK(inode->i_mode))
		goto path_put;

	sk = unix_find_socket_byinode(inode);
	if (!sk)
		goto path_put;

	err = -EPROTOTYPE;
	if (sk->sk_type == type)
		touch_atime(&path);
	else
		goto sock_put;

	path_put(&path);

	return sk;

sock_put:
	sock_put(sk);
path_put:
	path_put(&path);
fail:
	return ERR_PTR(err);
}

static struct sock *unix_find_abstract(struct net *net,
				       struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
	struct dentry *dentry;
	struct sock *sk;

	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
	if (!sk)
		return ERR_PTR(-ECONNREFUSED);

	dentry = unix_sk(sk)->path.dentry;
	if (dentry)
		touch_atime(&unix_sk(sk)->path);

	return sk;
}

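/* Resolve a connect()/sendmsg() destination address: a non-NUL first
 * byte in sun_path means a filesystem path, a NUL first byte selects
 * the abstract namespace.
 */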
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunaddr,
				    int addr_len, int type, int flags)
{
	struct sock *sk;

	if (sunaddr->sun_path[0])
		sk = unix_find_bsd(sunaddr, addr_len, type, flags);
	else
		sk = unix_find_abstract(net, sunaddr, addr_len, type);

	return sk;
}

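/* Bind an unbound socket to an autogenerated abstract name: a NUL byte
 * followed by five hex digits ("%05x" of a 20-bit counter), retrying
 * until a free name is found or all 2^20 candidates have been tried
 * (-ENOSPC).
 */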
static int unix_autobind(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	u32 lastnum, ordernum;
	int err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) +
		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

	old_hash = sk->sk_hash;
	ordernum = get_random_u32();
	lastnum = ordernum & 0xFFFFF;
retry:
	ordernum = (ordernum + 1) & 0xFFFFF;
	sprintf(addr->name->sun_path + 1, "%05x", ordernum);

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
		unix_table_double_unlock(net, old_hash, new_hash);

		/* __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();

		if (ordernum == lastnum) {
			/* Give up if all names seem to be in use. */
			err = -ENOSPC;
			unix_release_addr(addr);
			goto out;
		}

		goto retry;
	}

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}

static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
			 int addr_len)
{
	umode_t mode = S_IFSOCK |
	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct mnt_idmap *idmap;
	struct unix_address *addr;
	struct dentry *dentry;
	struct path parent;
	int err;

	addr_len = unix_mkname_bsd(sunaddr, addr_len);
	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = start_creating_path(AT_FDCWD, addr->name->sun_path, &parent, 0);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	/*
	 * All right, let's create it.
	 */
	idmap = mnt_idmap(parent.mnt);
	err = security_path_mknod(&parent, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0, NULL);
	if (err)
		goto out_path;
	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_unlink;
	if (u->addr)
		goto out_unlock;

	old_hash = sk->sk_hash;
	new_hash = unix_bsd_hash(d_backing_inode(dentry));
	unix_table_double_lock(net, old_hash, new_hash);
	u->path.mnt = mntget(parent.mnt);
	u->path.dentry = dget(dentry);
	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	unix_insert_bsd_socket(sk);
	mutex_unlock(&u->bindlock);
	end_creating_path(&parent, dentry);
	return 0;

out_unlock:
	mutex_unlock(&u->bindlock);
	err = -EINVAL;
out_unlink:
	/* failed after successful mknod?  unlink what we'd created... */
	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
	end_creating_path(&parent, dentry);
out:
	unix_release_addr(addr);
	return err == -EEXIST ? -EADDRINUSE : err;
}

static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
			      int addr_len)
{
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	int err;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out;

	if (u->addr) {
		err = -EINVAL;
		goto out_mutex;
	}

	old_hash = sk->sk_hash;
	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
		goto out_spin;

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	return 0;

out_spin:
	unix_table_double_unlock(net, old_hash, new_hash);
	err = -EADDRINUSE;
out_mutex:
	mutex_unlock(&u->bindlock);
out:
	unix_release_addr(addr);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	int err;

	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
	    sunaddr->sun_family == AF_UNIX)
		return unix_autobind(sk);

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		return err;

	if (sunaddr->sun_path[0])
		err = unix_bind_bsd(sk, sunaddr, addr_len);
	else
		err = unix_bind_abstract(sk, sunaddr, addr_len);

	return err;
}

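/* Lock two sockets' state locks in ascending address order (matching
 * unix_state_lock_cmp_fn()); @sk2 may be NULL for the AF_UNSPEC
 * disconnect path in unix_dgram_connect().
 */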
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (sk1 > sk2)
		swap(sk1, sk2);

	unix_state_lock(sk1);
	unix_state_lock(sk2);
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr_unsized *addr,
			      int alen, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *sk = sock->sk;
	struct sock *other;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_validate_addr(sunaddr, alen);
		if (err)
			goto out;

		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
		if (err)
			goto out;

		if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) {
			err = unix_autobind(sk);
			if (err)
				goto out;
		}

restart:
		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			goto out;
		}

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		if (!other)
			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer) {
			unix_dgram_disconnected(sk, old_peer);

			unix_state_lock(old_peer);
			if (!unix_peer(old_peer))
				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
			unix_state_unlock(old_peer);
		}

		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}

	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

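/* Sleep on @other's peer_wait queue until there may be room in its
 * receive queue.  Called with @other's state lock held; drops it
 * before sleeping.
 */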
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full_lockless(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct unix_peercred peercred = {};
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	unsigned char state;
	long timeo;
	int err;

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		goto out;

	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
	if (err)
		goto out;

	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we do it after the state is locked,
	 * we will have to recheck all again in any case.
	 */

	/* create new sock for complete connection */
	newsk = unix_create1(net, NULL, 0, sock->type);
	if (IS_ERR(newsk)) {
		err = PTR_ERR(newsk);
		goto out;
	}

	err = prepare_peercred(&peercred);
	if (err)
		goto out_free_sk;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto out_free_sk;
	}

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags);
	if (IS_ERR(other)) {
		err = PTR_ERR(other);
		goto out_free_skb;
	}

	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	if (other->sk_state != TCP_LISTEN ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -ECONNREFUSED;
		goto out_unlock;
	}

	if (unix_recvq_full_lockless(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);
		sock_put(other);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free_skb;

		goto restart;
	}

	/* self connect and simultaneous connect are eliminated
	 * by rejecting TCP_LISTEN socket to avoid deadlock.
	 */
	state = READ_ONCE(sk->sk_state);
	if (unlikely(state != TCP_CLOSE)) {
		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
		goto out_unlock;
	}

	unix_state_lock(sk);

	if (unlikely(sk->sk_state != TCP_CLOSE)) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk) = sk;
	newsk->sk_state = TCP_ESTABLISHED;
	newsk->sk_type = sk->sk_type;
	newsk->sk_scm_recv_flags = other->sk_scm_recv_flags;
	init_peercred(newsk, &peercred);

	newu = unix_sk(newsk);
	newu->listener = other;
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under its lock.  Insertion into the
	 * hash chain we'd found it in had been done in an
	 * earlier critical area protected by the chain's lock,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path visible to anyone who gets newu->addr
	 * by smp_load_acquire().  IOW, the same warranties
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}
	refcount_inc(&otheru->addr->refcnt);
	smp_store_release(&newu->addr, otheru->addr);

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	unix_state_unlock(other);
	sock_put(other);
out_free_skb:
	consume_skb(skb);
out_free_sk:
	unix_release_sock(newsk, 0);
out:
	drop_peercred(&peercred);
	return err;
}

1808 
1809 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1810 {
1811 	struct unix_peercred ska_peercred = {}, skb_peercred = {};
1812 	struct sock *ska = socka->sk, *skb = sockb->sk;
1813 	int err;
1814 
1815 	err = prepare_peercred(&ska_peercred);
1816 	if (err)
1817 		return err;
1818 
1819 	err = prepare_peercred(&skb_peercred);
1820 	if (err) {
1821 		drop_peercred(&ska_peercred);
1822 		return err;
1823 	}
1824 
1825 	/* Join our sockets back to back */
1826 	sock_hold(ska);
1827 	sock_hold(skb);
1828 	unix_peer(ska) = skb;
1829 	unix_peer(skb) = ska;
1830 	init_peercred(ska, &ska_peercred);
1831 	init_peercred(skb, &skb_peercred);
1832 
1833 	ska->sk_state = TCP_ESTABLISHED;
1834 	skb->sk_state = TCP_ESTABLISHED;
1835 	socka->state  = SS_CONNECTED;
1836 	sockb->state  = SS_CONNECTED;
1837 	return 0;
1838 }
1839 
1840 static int unix_accept(struct socket *sock, struct socket *newsock,
1841 		       struct proto_accept_arg *arg)
1842 {
1843 	struct sock *sk = sock->sk;
1844 	struct sk_buff *skb;
1845 	struct sock *tsk;
1846 
1847 	arg->err = -EOPNOTSUPP;
1848 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1849 		goto out;
1850 
1851 	arg->err = -EINVAL;
1852 	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
1853 		goto out;
1854 
1855 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1856 	 * so that no locks are necessary.
1857 	 */
1858 
1859 	skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1860 				&arg->err);
1861 	if (!skb) {
1862 		/* This means receive shutdown. */
1863 		if (arg->err == 0)
1864 			arg->err = -EINVAL;
1865 		goto out;
1866 	}
1867 
1868 	tsk = skb->sk;
1869 	skb_free_datagram(sk, skb);
1870 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1871 
1872 	if (tsk->sk_type == SOCK_STREAM)
1873 		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
1874 
1875 	/* attach accepted sock to socket */
1876 	unix_state_lock(tsk);
1877 	unix_update_edges(unix_sk(tsk));
1878 	newsock->state = SS_CONNECTED;
1879 	sock_graft(tsk, newsock);
1880 	unix_state_unlock(tsk);
1881 	return 0;
1882 
1883 out:
1884 	return arg->err;
1885 }
1886 
1887 
1888 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1889 {
1890 	struct sock *sk = sock->sk;
1891 	struct unix_address *addr;
1892 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1893 	int err = 0;
1894 
1895 	if (peer) {
1896 		sk = unix_peer_get(sk);
1897 
1898 		err = -ENOTCONN;
1899 		if (!sk)
1900 			goto out;
1901 		err = 0;
1902 	} else {
1903 		sock_hold(sk);
1904 	}
1905 
1906 	addr = smp_load_acquire(&unix_sk(sk)->addr);
1907 	if (!addr) {
1908 		sunaddr->sun_family = AF_UNIX;
1909 		sunaddr->sun_path[0] = 0;
1910 		err = offsetof(struct sockaddr_un, sun_path);
1911 	} else {
1912 		err = addr->len;
1913 		memcpy(sunaddr, addr->name, addr->len);
1914 
1915 		if (peer)
1916 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1917 					       CGROUP_UNIX_GETPEERNAME);
1918 		else
1919 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1920 					       CGROUP_UNIX_GETSOCKNAME);
1921 	}
1922 	sock_put(sk);
1923 out:
1924 	return err;
1925 }
1926 
1927 /* The "user->unix_inflight" variable is protected by the garbage
1928  * collection lock, and we just read it locklessly here. If you go
1929  * over the limit, there might be a tiny race in actually noticing
1930  * it across threads. Tough.
1931  */
1932 static inline bool too_many_unix_fds(struct task_struct *p)
1933 {
1934 	struct user_struct *user = current_user();
1935 
1936 	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1937 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1938 	return false;
1939 }
1940 
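/* Move the fd list from the sender's scm onto the skb; unix_prepare_fpl()
 * registers the passed files with the garbage collector so in-flight fds
 * stay accounted for until the receiver consumes or drops them.
 */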
1941 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1942 {
1943 	if (too_many_unix_fds(current))
1944 		return -ETOOMANYREFS;
1945 
1946 	UNIXCB(skb).fp = scm->fp;
1947 	scm->fp = NULL;
1948 
1949 	if (unix_prepare_fpl(UNIXCB(skb).fp))
1950 		return -ENOMEM;
1951 
1952 	return 0;
1953 }
1954 
1955 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1956 {
1957 	scm->fp = UNIXCB(skb).fp;
1958 	UNIXCB(skb).fp = NULL;
1959 
1960 	unix_destroy_fpl(scm->fp);
1961 }
1962 
1963 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
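/* On MSG_PEEK the fd list is cloned: the peeker gets fresh file
 * references while the originals stay attached to the queued skb for
 * the eventual real read.
 */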
1964 {
1965 	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1966 }
1967 
1968 static void unix_destruct_scm(struct sk_buff *skb)
1969 {
1970 	struct scm_cookie scm;
1971 
1972 	memset(&scm, 0, sizeof(scm));
1973 	scm.pid = UNIXCB(skb).pid;
1974 	if (UNIXCB(skb).fp)
1975 		unix_detach_fds(&scm, skb);
1976 
1977 	/* Alas, it calls VFS */
1978 	/* So fscking what? fput() has been SMP-safe since last summer */
1979 	scm_destroy(&scm);
1980 	sock_wfree(skb);
1981 }
1982 
1983 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1984 {
1985 	int err = 0;
1986 
1987 	UNIXCB(skb).pid = get_pid(scm->pid);
1988 	UNIXCB(skb).uid = scm->creds.uid;
1989 	UNIXCB(skb).gid = scm->creds.gid;
1990 	UNIXCB(skb).fp = NULL;
1991 	unix_get_secdata(scm, skb);
1992 	if (scm->fp && send_fds)
1993 		err = unix_attach_fds(scm, skb);
1994 
1995 	skb->destructor = unix_destruct_scm;
1996 	return err;
1997 }
1998 
1999 static void unix_skb_to_scm(struct sk_buff *skb, struct scm_cookie *scm)
2000 {
2001 	scm_set_cred(scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2002 	unix_set_secdata(scm, skb);
2003 }
2004 
2005 /**
2006  * unix_maybe_add_creds() - Add the current task's uid/gid and struct pid to the skb if needed.
2007  * @skb: skb to attach creds to.
2008  * @sk: Sender sock.
2009  * @other: Receiver sock.
2010  *
2011  * Some apps rely on write() giving SCM_CREDENTIALS, so we include
2012  * credentials if the source or destination socket asserted
2013  * SOCK_PASSCRED (or the receiver socket is still embryonic).
2014  *
2015  * Context: May sleep.
2016  * Return: On success zero, on error a negative error code is returned.
2017  */
2018 static int unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
2019 				const struct sock *other)
2020 {
2021 	if (UNIXCB(skb).pid)
2022 		return 0;
2023 
2024 	if (unix_may_passcred(sk) || unix_may_passcred(other) ||
2025 	    !other->sk_socket) {
2026 		struct pid *pid;
2027 		int err;
2028 
2029 		pid = task_tgid(current);
2030 		err = pidfs_register_pid(pid);
2031 		if (unlikely(err))
2032 			return err;
2033 
2034 		UNIXCB(skb).pid = get_pid(pid);
2035 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
2036 	}
2037 
2038 	return 0;
2039 }
2040 
2041 static bool unix_skb_scm_eq(struct sk_buff *skb,
2042 			    struct scm_cookie *scm)
2043 {
2044 	return UNIXCB(skb).pid == scm->pid &&
2045 	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
2046 	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
2047 	       unix_secdata_eq(scm, skb);
2048 }
2049 
2050 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
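/* scm_stat_add()/scm_stat_del() keep a per-receiver count of queued fds
 * (shown as "scm_fds" in procfs fdinfo) and maintain the garbage
 * collector's graph edges so cycles of in-flight fds remain reclaimable.
 */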
2051 {
2052 	struct scm_fp_list *fp = UNIXCB(skb).fp;
2053 	struct unix_sock *u = unix_sk(sk);
2054 
2055 	if (unlikely(fp && fp->count)) {
2056 		atomic_add(fp->count, &u->scm_stat.nr_fds);
2057 		unix_add_edges(fp, u);
2058 	}
2059 }
2060 
2061 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
2062 {
2063 	struct scm_fp_list *fp = UNIXCB(skb).fp;
2064 	struct unix_sock *u = unix_sk(sk);
2065 
2066 	if (unlikely(fp && fp->count)) {
2067 		atomic_sub(fp->count, &u->scm_stat.nr_fds);
2068 		unix_del_edges(fp);
2069 	}
2070 }
2071 
2072 /*
2073  *	Send AF_UNIX data.
2074  */
2075 
2076 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
2077 			      size_t len)
2078 {
2079 	struct sock *sk = sock->sk, *other = NULL;
2080 	struct unix_sock *u = unix_sk(sk);
2081 	struct scm_cookie scm;
2082 	struct sk_buff *skb;
2083 	int data_len = 0;
2084 	int sk_locked;
2085 	long timeo;
2086 	int err;
2087 
2088 	err = scm_send(sock, msg, &scm, false);
2089 	if (err < 0)
2090 		return err;
2091 
2092 	if (msg->msg_flags & MSG_OOB) {
2093 		err = -EOPNOTSUPP;
2094 		goto out;
2095 	}
2096 
2097 	if (msg->msg_namelen) {
2098 		err = unix_validate_addr(msg->msg_name, msg->msg_namelen);
2099 		if (err)
2100 			goto out;
2101 
2102 		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
2103 							    msg->msg_name,
2104 							    &msg->msg_namelen,
2105 							    NULL);
2106 		if (err)
2107 			goto out;
2108 	}
2109 
2110 	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
2111 		err = unix_autobind(sk);
2112 		if (err)
2113 			goto out;
2114 	}
2115 
2116 	if (len > READ_ONCE(sk->sk_sndbuf) - 32) {
2117 		err = -EMSGSIZE;
2118 		goto out;
2119 	}
2120 
2121 	if (len > SKB_MAX_ALLOC) {
2122 		data_len = min_t(size_t,
2123 				 len - SKB_MAX_ALLOC,
2124 				 MAX_SKB_FRAGS * PAGE_SIZE);
2125 		data_len = PAGE_ALIGN(data_len);
2126 
2127 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2128 	}
2129 
2130 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2131 				   msg->msg_flags & MSG_DONTWAIT, &err,
2132 				   PAGE_ALLOC_COSTLY_ORDER);
2133 	if (!skb)
2134 		goto out;
2135 
2136 	err = unix_scm_to_skb(&scm, skb, true);
2137 	if (err < 0)
2138 		goto out_free;
2139 
2140 	skb_put(skb, len - data_len);
2141 	skb->data_len = data_len;
2142 	skb->len = len;
2143 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2144 	if (err)
2145 		goto out_free;
2146 
2147 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2148 
2149 	if (msg->msg_namelen) {
2150 lookup:
2151 		other = unix_find_other(sock_net(sk), msg->msg_name,
2152 					msg->msg_namelen, sk->sk_type, 0);
2153 		if (IS_ERR(other)) {
2154 			err = PTR_ERR(other);
2155 			goto out_free;
2156 		}
2157 	} else {
2158 		other = unix_peer_get(sk);
2159 		if (!other) {
2160 			err = -ENOTCONN;
2161 			goto out_free;
2162 		}
2163 	}
2164 
2165 	if (sk_filter(other, skb) < 0) {
2166 		/* Toss the packet but do not return any error to the sender */
2167 		err = len;
2168 		goto out_sock_put;
2169 	}
2170 
2171 	err = unix_maybe_add_creds(skb, sk, other);
2172 	if (err)
2173 		goto out_sock_put;
2174 
2175 restart:
2176 	sk_locked = 0;
2177 	unix_state_lock(other);
2178 restart_locked:
2179 
2180 	if (!unix_may_send(sk, other)) {
2181 		err = -EPERM;
2182 		goto out_unlock;
2183 	}
2184 
2185 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
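	/* The peer died between lookup and lock. A connected socket is
	 * disconnected here (ECONNREFUSED), while a sendto() by name
	 * retries the lookup in case the address has been rebound.
	 */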
2186 		/* Check with 1003.1g - what should a dead-peer datagram send return? */
2187 
2188 		unix_state_unlock(other);
2189 
2190 		if (sk->sk_type == SOCK_SEQPACKET) {
2191 			/* We get here only when racing with unix_release_sock()
2192 			 * as it clears @other. Never change the state to
2193 			 * TCP_CLOSE, unlike the SOCK_DGRAM path below.
2194 			 */
2195 			err = -EPIPE;
2196 			goto out_sock_put;
2197 		}
2198 
2199 		if (!sk_locked)
2200 			unix_state_lock(sk);
2201 
2202 		if (unix_peer(sk) == other) {
2203 			unix_peer(sk) = NULL;
2204 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2205 
2206 			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2207 			unix_state_unlock(sk);
2208 
2209 			unix_dgram_disconnected(sk, other);
2210 			sock_put(other);
2211 			err = -ECONNREFUSED;
2212 			goto out_sock_put;
2213 		}
2214 
2215 		unix_state_unlock(sk);
2216 
2217 		if (!msg->msg_namelen) {
2218 			err = -ECONNRESET;
2219 			goto out_sock_put;
2220 		}
2221 
2222 		sock_put(other);
2223 		goto lookup;
2224 	}
2225 
2226 	if (other->sk_shutdown & RCV_SHUTDOWN) {
2227 		err = -EPIPE;
2228 		goto out_unlock;
2229 	}
2230 
2231 	if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2232 		err = -EPERM;
2233 		goto out_unlock;
2234 	}
2235 
2236 	if (sk->sk_type != SOCK_SEQPACKET) {
2237 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2238 		if (err)
2239 			goto out_unlock;
2240 	}
2241 
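	/* Datagram flow control: when the receiver's queue is full, a
	 * blocking sender sleeps in unix_wait_for_peer(), while a
	 * non-blocking one hooks itself onto the peer's wait queue via
	 * unix_dgram_peer_wake_me() before failing with EAGAIN, so a
	 * later poll() learns when space frees up.
	 */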
2242 	/* other == sk && unix_peer(other) != sk if
2243 	 * - unix_peer(sk) == NULL, destination address bound to sk
2244 	 * - unix_peer(sk) == sk by time of get but disconnected before lock
2245 	 */
2246 	if (other != sk &&
2247 	    unlikely(unix_peer(other) != sk &&
2248 	    unix_recvq_full_lockless(other))) {
2249 		if (timeo) {
2250 			timeo = unix_wait_for_peer(other, timeo);
2251 
2252 			err = sock_intr_errno(timeo);
2253 			if (signal_pending(current))
2254 				goto out_sock_put;
2255 
2256 			goto restart;
2257 		}
2258 
2259 		if (!sk_locked) {
2260 			unix_state_unlock(other);
2261 			unix_state_double_lock(sk, other);
2262 		}
2263 
2264 		if (unix_peer(sk) != other ||
2265 		    unix_dgram_peer_wake_me(sk, other)) {
2266 			err = -EAGAIN;
2267 			sk_locked = 1;
2268 			goto out_unlock;
2269 		}
2270 
2271 		if (!sk_locked) {
2272 			sk_locked = 1;
2273 			goto restart_locked;
2274 		}
2275 	}
2276 
2277 	if (unlikely(sk_locked))
2278 		unix_state_unlock(sk);
2279 
2280 	if (sock_flag(other, SOCK_RCVTSTAMP))
2281 		__net_timestamp(skb);
2282 
2283 	scm_stat_add(other, skb);
2284 	skb_queue_tail(&other->sk_receive_queue, skb);
2285 	unix_state_unlock(other);
2286 	other->sk_data_ready(other);
2287 	sock_put(other);
2288 	scm_destroy(&scm);
2289 	return len;
2290 
2291 out_unlock:
2292 	if (sk_locked)
2293 		unix_state_unlock(sk);
2294 	unix_state_unlock(other);
2295 out_sock_put:
2296 	sock_put(other);
2297 out_free:
2298 	consume_skb(skb);
2299 out:
2300 	scm_destroy(&scm);
2301 	return err;
2302 }
2303 
2304 /* We use paged skbs for stream sockets, limiting occupancy to 32768
2305  * bytes with a minimum of a full page.
2306  */
2307 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
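/* With 4KB pages this evaluates to 32KB (get_order(32768) == 3); with
 * 64KB pages it rounds up to a single full page.
 */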
2308 
2309 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
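/* Send a single out-of-band byte, loosely mirroring TCP urgent data:
 * the receiver's oob_skb pointer marks it, SIGURG is raised, and only
 * the most recent OOB byte is retrievable via recv(MSG_OOB).
 */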
2310 static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
2311 		     struct scm_cookie *scm, bool fds_sent)
2312 {
2313 	struct unix_sock *ousk = unix_sk(other);
2314 	struct sk_buff *skb;
2315 	int err;
2316 
2317 	skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2318 
2319 	if (!skb)
2320 		return err;
2321 
2322 	err = unix_scm_to_skb(scm, skb, !fds_sent);
2323 	if (err < 0)
2324 		goto out;
2325 
2326 	err = unix_maybe_add_creds(skb, sk, other);
2327 	if (err)
2328 		goto out;
2329 
2330 	skb_put(skb, 1);
2331 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2332 
2333 	if (err)
2334 		goto out;
2335 
2336 	unix_state_lock(other);
2337 
2338 	if (sock_flag(other, SOCK_DEAD) ||
2339 	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2340 		err = -EPIPE;
2341 		goto out_unlock;
2342 	}
2343 
2344 	if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2345 		err = -EPERM;
2346 		goto out_unlock;
2347 	}
2348 
2349 	scm_stat_add(other, skb);
2350 
2351 	spin_lock(&other->sk_receive_queue.lock);
2352 	WRITE_ONCE(ousk->oob_skb, skb);
2353 	WRITE_ONCE(ousk->inq_len, ousk->inq_len + 1);
2354 	__skb_queue_tail(&other->sk_receive_queue, skb);
2355 	spin_unlock(&other->sk_receive_queue.lock);
2356 
2357 	sk_send_sigurg(other);
2358 	unix_state_unlock(other);
2359 	other->sk_data_ready(other);
2360 
2361 	return 0;
2362 out_unlock:
2363 	unix_state_unlock(other);
2364 out:
2365 	consume_skb(skb);
2366 	return err;
2367 }
2368 #endif
2369 
2370 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2371 			       size_t len)
2372 {
2373 	struct sock *sk = sock->sk;
2374 	struct sk_buff *skb = NULL;
2375 	struct sock *other = NULL;
2376 	struct unix_sock *otheru;
2377 	struct scm_cookie scm;
2378 	bool fds_sent = false;
2379 	int err, sent = 0;
2380 
2381 	err = scm_send(sock, msg, &scm, false);
2382 	if (err < 0)
2383 		return err;
2384 
2385 	if (msg->msg_flags & MSG_OOB) {
2386 		err = -EOPNOTSUPP;
2387 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2388 		if (len)
2389 			len--;
2390 		else
2391 #endif
2392 			goto out_err;
2393 	}
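	/* For MSG_OOB the final byte was carved off len above; it is sent
	 * separately by queue_oob() once the in-band data below has been
	 * queued.
	 */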
2394 
2395 	if (msg->msg_namelen) {
2396 		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2397 		goto out_err;
2398 	}
2399 
2400 	other = unix_peer(sk);
2401 	if (!other) {
2402 		err = -ENOTCONN;
2403 		goto out_err;
2404 	}
2405 
2406 	otheru = unix_sk(other);
2407 
2408 	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2409 		goto out_pipe;
2410 
2411 	while (sent < len) {
2412 		int size = len - sent;
2413 		int data_len;
2414 
2415 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2416 			skb = sock_alloc_send_pskb(sk, 0, 0,
2417 						   msg->msg_flags & MSG_DONTWAIT,
2418 						   &err, 0);
2419 		} else {
2420 			/* Keep two messages in the pipe so it schedules better */
2421 			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2422 
2423 			/* allow fallback to order-0 allocations */
2424 			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2425 
2426 			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2427 
2428 			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2429 
2430 			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2431 						   msg->msg_flags & MSG_DONTWAIT, &err,
2432 						   get_order(UNIX_SKB_FRAGS_SZ));
2433 		}
2434 		if (!skb)
2435 			goto out_err;
2436 
2437 		/* Only send the fds in the first buffer */
2438 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2439 		if (err < 0)
2440 			goto out_free;
2441 
2442 		fds_sent = true;
2443 
2444 		err = unix_maybe_add_creds(skb, sk, other);
2445 		if (err)
2446 			goto out_free;
2447 
2448 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2449 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2450 			err = skb_splice_from_iter(skb, &msg->msg_iter, size);
2451 			if (err < 0)
2452 				goto out_free;
2453 
2454 			size = err;
2455 			refcount_add(size, &sk->sk_wmem_alloc);
2456 		} else {
2457 			skb_put(skb, size - data_len);
2458 			skb->data_len = data_len;
2459 			skb->len = size;
2460 			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2461 			if (err)
2462 				goto out_free;
2463 		}
2464 
2465 		unix_state_lock(other);
2466 
2467 		if (sock_flag(other, SOCK_DEAD) ||
2468 		    (other->sk_shutdown & RCV_SHUTDOWN))
2469 			goto out_pipe_unlock;
2470 
2471 		if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2472 			unix_state_unlock(other);
2473 			err = -EPERM;
2474 			goto out_free;
2475 		}
2476 
2477 		scm_stat_add(other, skb);
2478 
2479 		spin_lock(&other->sk_receive_queue.lock);
2480 		WRITE_ONCE(otheru->inq_len, otheru->inq_len + skb->len);
2481 		__skb_queue_tail(&other->sk_receive_queue, skb);
2482 		spin_unlock(&other->sk_receive_queue.lock);
2483 
2484 		unix_state_unlock(other);
2485 		other->sk_data_ready(other);
2486 		sent += size;
2487 	}
2488 
2489 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2490 	if (msg->msg_flags & MSG_OOB) {
2491 		err = queue_oob(sk, msg, other, &scm, fds_sent);
2492 		if (err)
2493 			goto out_err;
2494 		sent++;
2495 	}
2496 #endif
2497 
2498 	scm_destroy(&scm);
2499 
2500 	return sent;
2501 
2502 out_pipe_unlock:
2503 	unix_state_unlock(other);
2504 out_pipe:
2505 	if (!sent && !(msg->msg_flags & MSG_NOSIGNAL))
2506 		send_sig(SIGPIPE, current, 0);
2507 	err = -EPIPE;
2508 out_free:
2509 	consume_skb(skb);
2510 out_err:
2511 	scm_destroy(&scm);
2512 	return sent ? : err;
2513 }
2514 
2515 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2516 				  size_t len)
2517 {
2518 	int err;
2519 	struct sock *sk = sock->sk;
2520 
2521 	err = sock_error(sk);
2522 	if (err)
2523 		return err;
2524 
2525 	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2526 		return -ENOTCONN;
2527 
2528 	if (msg->msg_namelen)
2529 		msg->msg_namelen = 0;
2530 
2531 	return unix_dgram_sendmsg(sock, msg, len);
2532 }
2533 
2534 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2535 				  size_t size, int flags)
2536 {
2537 	struct sock *sk = sock->sk;
2538 
2539 	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2540 		return -ENOTCONN;
2541 
2542 	return unix_dgram_recvmsg(sock, msg, size, flags);
2543 }
2544 
2545 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2546 {
2547 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2548 
2549 	if (addr) {
2550 		msg->msg_namelen = addr->len;
2551 		memcpy(msg->msg_name, addr->name, addr->len);
2552 	}
2553 }
2554 
2555 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2556 			 int flags)
2557 {
2558 	struct scm_cookie scm;
2559 	struct socket *sock = sk->sk_socket;
2560 	struct unix_sock *u = unix_sk(sk);
2561 	struct sk_buff *skb, *last;
2562 	long timeo;
2563 	int skip;
2564 	int err;
2565 
2566 	err = -EOPNOTSUPP;
2567 	if (flags & MSG_OOB)
2568 		goto out;
2569 
2570 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2571 
2572 	do {
2573 		mutex_lock(&u->iolock);
2574 
2575 		skip = sk_peek_offset(sk, flags);
2576 		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2577 					      &skip, &err, &last);
2578 		if (skb) {
2579 			if (!(flags & MSG_PEEK))
2580 				scm_stat_del(sk, skb);
2581 			break;
2582 		}
2583 
2584 		mutex_unlock(&u->iolock);
2585 
2586 		if (err != -EAGAIN)
2587 			break;
2588 	} while (timeo &&
2589 		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2590 					      &err, &timeo, last));
2591 
2592 	if (!skb) { /* implies iolock unlocked */
2593 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2594 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2595 		    (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN))
2596 			err = 0;
2597 		goto out;
2598 	}
2599 
2600 	if (wq_has_sleeper(&u->peer_wait))
2601 		wake_up_interruptible_sync_poll(&u->peer_wait,
2602 						EPOLLOUT | EPOLLWRNORM |
2603 						EPOLLWRBAND);
2604 
2605 	if (msg->msg_name) {
2606 		unix_copy_addr(msg, skb->sk);
2607 
2608 		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2609 						      msg->msg_name,
2610 						      &msg->msg_namelen);
2611 	}
2612 
2613 	if (size > skb->len - skip)
2614 		size = skb->len - skip;
2615 	else if (size < skb->len - skip)
2616 		msg->msg_flags |= MSG_TRUNC;
2617 
2618 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2619 	if (err)
2620 		goto out_free;
2621 
2622 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2623 		__sock_recv_timestamp(msg, sk, skb);
2624 
2625 	memset(&scm, 0, sizeof(scm));
2626 
2627 	unix_skb_to_scm(skb, &scm);
2628 
2629 	if (!(flags & MSG_PEEK)) {
2630 		if (UNIXCB(skb).fp)
2631 			unix_detach_fds(&scm, skb);
2632 
2633 		sk_peek_offset_bwd(sk, skb->len);
2634 	} else {
2635 		/* It is questionable: on PEEK we could:
2636 		 *  - not return fds at all - good, but too simple 8)
2637 		 *  - return fds, and not return them again on a later
2638 		 *    read (the old strategy, apparently wrong)
2639 		 *  - clone fds (chosen here for now, as it is the most
2640 		 *    universal solution)
2641 		 *
2642 		 * POSIX 1003.1g does not actually define this clearly
2643 		 * at all. POSIX 1003.1g doesn't define a lot of things
2644 		 * clearly, however!
2645 		 */
2647 
2648 		sk_peek_offset_fwd(sk, size);
2649 
2650 		if (UNIXCB(skb).fp)
2651 			unix_peek_fds(&scm, skb);
2652 	}
2653 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2654 
2655 	scm_recv_unix(sock, msg, &scm, flags);
2656 
2657 out_free:
2658 	skb_free_datagram(sk, skb);
2659 	mutex_unlock(&u->iolock);
2660 out:
2661 	return err;
2662 }
2663 
2664 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2665 			      int flags)
2666 {
2667 	struct sock *sk = sock->sk;
2668 
2669 #ifdef CONFIG_BPF_SYSCALL
2670 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2671 
2672 	if (prot != &unix_dgram_proto)
2673 		return prot->recvmsg(sk, msg, size, flags, NULL);
2674 #endif
2675 	return __unix_dgram_recvmsg(sk, msg, size, flags);
2676 }
2677 
2678 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2679 {
2680 	struct unix_sock *u = unix_sk(sk);
2681 	struct sk_buff *skb;
2682 	int err;
2683 
2684 	mutex_lock(&u->iolock);
2685 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2686 	mutex_unlock(&u->iolock);
2687 	if (!skb)
2688 		return err;
2689 
2690 	return recv_actor(sk, skb);
2691 }
2692 
2693 /*
2694  *	Sleep until more data has arrived. But check for races.
2695  */
2696 static long unix_stream_data_wait(struct sock *sk, long timeo,
2697 				  struct sk_buff *last, unsigned int last_len,
2698 				  bool freezable)
2699 {
2700 	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2701 	struct sk_buff *tail;
2702 	DEFINE_WAIT(wait);
2703 
2704 	unix_state_lock(sk);
2705 
2706 	for (;;) {
2707 		prepare_to_wait(sk_sleep(sk), &wait, state);
2708 
2709 		tail = skb_peek_tail(&sk->sk_receive_queue);
2710 		if (tail != last ||
2711 		    (tail && tail->len != last_len) ||
2712 		    sk->sk_err ||
2713 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2714 		    signal_pending(current) ||
2715 		    !timeo)
2716 			break;
2717 
2718 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2719 		unix_state_unlock(sk);
2720 		timeo = schedule_timeout(timeo);
2721 		unix_state_lock(sk);
2722 
2723 		if (sock_flag(sk, SOCK_DEAD))
2724 			break;
2725 
2726 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2727 	}
2728 
2729 	finish_wait(sk_sleep(sk), &wait);
2730 	unix_state_unlock(sk);
2731 	return timeo;
2732 }
2733 
2734 struct unix_stream_read_state {
2735 	int (*recv_actor)(struct sk_buff *, int, int,
2736 			  struct unix_stream_read_state *);
2737 	struct socket *socket;
2738 	struct msghdr *msg;
2739 	struct pipe_inode_info *pipe;
2740 	size_t size;
2741 	int flags;
2742 	unsigned int splice_flags;
2743 };
2744 
2745 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2746 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2747 {
2748 	struct sk_buff *oob_skb, *read_skb = NULL;
2749 	struct socket *sock = state->socket;
2750 	struct sock *sk = sock->sk;
2751 	struct unix_sock *u = unix_sk(sk);
2752 	int chunk = 1;
2753 
2754 	mutex_lock(&u->iolock);
2755 	unix_state_lock(sk);
2756 	spin_lock(&sk->sk_receive_queue.lock);
2757 
2758 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2759 		spin_unlock(&sk->sk_receive_queue.lock);
2760 		unix_state_unlock(sk);
2761 		mutex_unlock(&u->iolock);
2762 		return -EINVAL;
2763 	}
2764 
2765 	oob_skb = u->oob_skb;
2766 
2767 	if (!(state->flags & MSG_PEEK)) {
2768 		WRITE_ONCE(u->oob_skb, NULL);
2769 		WRITE_ONCE(u->inq_len, u->inq_len - 1);
2770 
2771 		if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
2772 		    !unix_skb_len(oob_skb->prev)) {
2773 			read_skb = oob_skb->prev;
2774 			__skb_unlink(read_skb, &sk->sk_receive_queue);
2775 		}
2776 	}
2777 
2778 	spin_unlock(&sk->sk_receive_queue.lock);
2779 	unix_state_unlock(sk);
2780 
2781 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2782 
2783 	if (!(state->flags & MSG_PEEK))
2784 		UNIXCB(oob_skb).consumed += 1;
2785 
2786 	mutex_unlock(&u->iolock);
2787 
2788 	consume_skb(read_skb);
2789 
2790 	if (chunk < 0)
2791 		return -EFAULT;
2792 
2793 	state->msg->msg_flags |= MSG_OOB;
2794 	return 1;
2795 }
2796 
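/* Helper for a normal (non-MSG_OOB) stream read that has hit the OOB
 * mark: already-consumed skbs left in the queue as anchors are reaped,
 * a read that has copied data stops at the mark (returns NULL), and the
 * OOB byte itself is skipped - and dropped - unless SOCK_URGINLINE
 * delivers it inline.
 */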
2797 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2798 				  int flags, int copied)
2799 {
2800 	struct sk_buff *read_skb = NULL, *unread_skb = NULL;
2801 	struct unix_sock *u = unix_sk(sk);
2802 
2803 	if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
2804 		return skb;
2805 
2806 	spin_lock(&sk->sk_receive_queue.lock);
2807 
2808 	if (!unix_skb_len(skb)) {
2809 		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
2810 			skb = NULL;
2811 		} else if (flags & MSG_PEEK) {
2812 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2813 		} else {
2814 			read_skb = skb;
2815 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2816 			__skb_unlink(read_skb, &sk->sk_receive_queue);
2817 		}
2818 
2819 		if (!skb)
2820 			goto unlock;
2821 	}
2822 
2823 	if (skb != u->oob_skb)
2824 		goto unlock;
2825 
2826 	if (copied) {
2827 		skb = NULL;
2828 	} else if (!(flags & MSG_PEEK)) {
2829 		WRITE_ONCE(u->oob_skb, NULL);
2830 
2831 		if (!sock_flag(sk, SOCK_URGINLINE)) {
2832 			__skb_unlink(skb, &sk->sk_receive_queue);
2833 			unread_skb = skb;
2834 			skb = skb_peek(&sk->sk_receive_queue);
2835 		}
2836 	} else if (!sock_flag(sk, SOCK_URGINLINE)) {
2837 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
2838 	}
2839 
2840 unlock:
2841 	spin_unlock(&sk->sk_receive_queue.lock);
2842 
2843 	consume_skb(read_skb);
2844 	kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2845 
2846 	return skb;
2847 }
2848 #endif
2849 
2850 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2851 {
2852 	struct sk_buff_head *queue = &sk->sk_receive_queue;
2853 	struct unix_sock *u = unix_sk(sk);
2854 	struct sk_buff *skb;
2855 	int err;
2856 
2857 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2858 		return -ENOTCONN;
2859 
2860 	err = sock_error(sk);
2861 	if (err)
2862 		return err;
2863 
2864 	mutex_lock(&u->iolock);
2865 	spin_lock(&queue->lock);
2866 
2867 	skb = __skb_dequeue(queue);
2868 	if (!skb) {
2869 		spin_unlock(&queue->lock);
2870 		mutex_unlock(&u->iolock);
2871 		return -EAGAIN;
2872 	}
2873 
2874 	WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
2875 
2876 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
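	/* This path feeds skbs to a BPF verdict program (sockmap); an OOB
	 * byte has no meaning there, so drop it instead of delivering it.
	 */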
2877 	if (skb == u->oob_skb) {
2878 		WRITE_ONCE(u->oob_skb, NULL);
2879 		spin_unlock(&queue->lock);
2880 		mutex_unlock(&u->iolock);
2881 
2882 		kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2883 		return -EAGAIN;
2884 	}
2885 #endif
2886 
2887 	spin_unlock(&queue->lock);
2888 	mutex_unlock(&u->iolock);
2889 
2890 	return recv_actor(sk, skb);
2891 }
2892 
2893 static int unix_stream_read_generic(struct unix_stream_read_state *state,
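/* Common stream receive loop shared by recvmsg() and splice_read(): the
 * per-caller copy step hides behind state->recv_actor so the same OOB,
 * credential and fd-passing handling serves both paths.
 */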
2894 				    bool freezable)
2895 {
2896 	int noblock = state->flags & MSG_DONTWAIT;
2897 	struct socket *sock = state->socket;
2898 	struct msghdr *msg = state->msg;
2899 	struct sock *sk = sock->sk;
2900 	size_t size = state->size;
2901 	int flags = state->flags;
2902 	bool check_creds = false;
2903 	struct scm_cookie scm;
2904 	unsigned int last_len;
2905 	struct unix_sock *u;
2906 	int copied = 0;
2907 	int err = 0;
2908 	long timeo;
2909 	int target;
2910 	int skip;
2911 
2912 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2913 		err = -EINVAL;
2914 		goto out;
2915 	}
2916 
2917 	if (unlikely(flags & MSG_OOB)) {
2918 		err = -EOPNOTSUPP;
2919 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2920 		err = unix_stream_recv_urg(state);
2921 #endif
2922 		goto out;
2923 	}
2924 
2925 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2926 	timeo = sock_rcvtimeo(sk, noblock);
2927 
2928 	memset(&scm, 0, sizeof(scm));
2929 
2930 	u = unix_sk(sk);
2931 
2932 redo:
2933 	/* Lock the socket to prevent queue disordering
2934 	 * while we sleep copying data out to the msg.
2935 	 */
2936 	mutex_lock(&u->iolock);
2937 
2938 	skip = max(sk_peek_offset(sk, flags), 0);
2939 
2940 	do {
2941 		struct sk_buff *skb, *last;
2942 		int chunk;
2943 
2944 		unix_state_lock(sk);
2945 		if (sock_flag(sk, SOCK_DEAD)) {
2946 			err = -ECONNRESET;
2947 			goto unlock;
2948 		}
2949 		last = skb = skb_peek(&sk->sk_receive_queue);
2950 		last_len = last ? last->len : 0;
2951 
2952 again:
2953 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2954 		if (skb) {
2955 			skb = manage_oob(skb, sk, flags, copied);
2956 			if (!skb && copied) {
2957 				unix_state_unlock(sk);
2958 				break;
2959 			}
2960 		}
2961 #endif
2962 		if (!skb) {
2963 			if (copied >= target)
2964 				goto unlock;
2965 
2966 			/* POSIX 1003.1g mandates this order. */
2969 
2970 			err = sock_error(sk);
2971 			if (err)
2972 				goto unlock;
2973 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2974 				goto unlock;
2975 
2976 			unix_state_unlock(sk);
2977 			if (!timeo) {
2978 				err = -EAGAIN;
2979 				break;
2980 			}
2981 
2982 			mutex_unlock(&u->iolock);
2983 
2984 			timeo = unix_stream_data_wait(sk, timeo, last,
2985 						      last_len, freezable);
2986 
2987 			if (signal_pending(current)) {
2988 				err = sock_intr_errno(timeo);
2989 				scm_destroy(&scm);
2990 				goto out;
2991 			}
2992 
2993 			goto redo;
2994 unlock:
2995 			unix_state_unlock(sk);
2996 			break;
2997 		}
2998 
2999 		while (skip >= unix_skb_len(skb)) {
3000 			skip -= unix_skb_len(skb);
3001 			last = skb;
3002 			last_len = skb->len;
3003 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
3004 			if (!skb)
3005 				goto again;
3006 		}
3007 
3008 		unix_state_unlock(sk);
3009 
3010 		if (check_creds) {
3011 			/* Never glue messages from different writers */
3012 			if (!unix_skb_scm_eq(skb, &scm))
3013 				break;
3014 		} else if (unix_may_passcred(sk)) {
3015 			/* Copy credentials */
3016 			unix_skb_to_scm(skb, &scm);
3017 			check_creds = true;
3018 		}
3019 
3020 		/* Copy address just once */
3021 		if (msg && msg->msg_name) {
3022 			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
3023 
3024 			unix_copy_addr(msg, skb->sk);
3025 			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, msg->msg_name,
3026 							      &msg->msg_namelen);
3027 
3028 			sunaddr = NULL;
3029 		}
3030 
3031 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
3032 		chunk = state->recv_actor(skb, skip, chunk, state);
3033 		if (chunk < 0) {
3034 			if (copied == 0)
3035 				copied = -EFAULT;
3036 			break;
3037 		}
3038 		copied += chunk;
3039 		size -= chunk;
3040 
3041 		/* Mark read part of skb as used */
3042 		if (!(flags & MSG_PEEK)) {
3043 			UNIXCB(skb).consumed += chunk;
3044 
3045 			sk_peek_offset_bwd(sk, chunk);
3046 
3047 			if (UNIXCB(skb).fp) {
3048 				scm_stat_del(sk, skb);
3049 				unix_detach_fds(&scm, skb);
3050 			}
3051 
3052 			if (unix_skb_len(skb))
3053 				break;
3054 
3055 			spin_lock(&sk->sk_receive_queue.lock);
3056 			WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
3057 			__skb_unlink(skb, &sk->sk_receive_queue);
3058 			spin_unlock(&sk->sk_receive_queue.lock);
3059 
3060 			consume_skb(skb);
3061 
3062 			if (scm.fp)
3063 				break;
3064 		} else {
3065 			/* It is questionable, see the note in unix_dgram_recvmsg(). */
3067 			if (UNIXCB(skb).fp)
3068 				unix_peek_fds(&scm, skb);
3069 
3070 			sk_peek_offset_fwd(sk, chunk);
3071 
3072 			if (UNIXCB(skb).fp)
3073 				break;
3074 
3075 			skip = 0;
3076 			last = skb;
3077 			last_len = skb->len;
3078 			unix_state_lock(sk);
3079 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
3080 			if (skb)
3081 				goto again;
3082 			unix_state_unlock(sk);
3083 			break;
3084 		}
3085 	} while (size);
3086 
3087 	mutex_unlock(&u->iolock);
3088 	if (msg) {
3089 		scm_recv_unix(sock, msg, &scm, flags);
3090 
3091 		if (READ_ONCE(u->recvmsg_inq) || msg->msg_get_inq) {
3092 			msg->msg_inq = READ_ONCE(u->inq_len);
3093 			put_cmsg(msg, SOL_SOCKET, SCM_INQ,
3094 				 sizeof(msg->msg_inq), &msg->msg_inq);
3095 		}
3096 	} else {
3097 		scm_destroy(&scm);
3098 	}
3099 out:
3100 	return copied ? : err;
3101 }
3102 
3103 static int unix_stream_read_actor(struct sk_buff *skb,
3104 				  int skip, int chunk,
3105 				  struct unix_stream_read_state *state)
3106 {
3107 	int ret;
3108 
3109 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
3110 				    state->msg, chunk);
3111 	return ret ?: chunk;
3112 }
3113 
3114 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
3115 			  size_t size, int flags)
3116 {
3117 	struct unix_stream_read_state state = {
3118 		.recv_actor = unix_stream_read_actor,
3119 		.socket = sk->sk_socket,
3120 		.msg = msg,
3121 		.size = size,
3122 		.flags = flags
3123 	};
3124 
3125 	return unix_stream_read_generic(&state, true);
3126 }
3127 
3128 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
3129 			       size_t size, int flags)
3130 {
3131 	struct unix_stream_read_state state = {
3132 		.recv_actor = unix_stream_read_actor,
3133 		.socket = sock,
3134 		.msg = msg,
3135 		.size = size,
3136 		.flags = flags
3137 	};
3138 
3139 #ifdef CONFIG_BPF_SYSCALL
3140 	struct sock *sk = sock->sk;
3141 	const struct proto *prot = READ_ONCE(sk->sk_prot);
3142 
3143 	if (prot != &unix_stream_proto)
3144 		return prot->recvmsg(sk, msg, size, flags, NULL);
3145 #endif
3146 	return unix_stream_read_generic(&state, true);
3147 }
3148 
3149 static int unix_stream_splice_actor(struct sk_buff *skb,
3150 				    int skip, int chunk,
3151 				    struct unix_stream_read_state *state)
3152 {
3153 	return skb_splice_bits(skb, state->socket->sk,
3154 			       UNIXCB(skb).consumed + skip,
3155 			       state->pipe, chunk, state->splice_flags);
3156 }
3157 
3158 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
3159 				       struct pipe_inode_info *pipe,
3160 				       size_t size, unsigned int flags)
3161 {
3162 	struct unix_stream_read_state state = {
3163 		.recv_actor = unix_stream_splice_actor,
3164 		.socket = sock,
3165 		.pipe = pipe,
3166 		.size = size,
3167 		.splice_flags = flags,
3168 	};
3169 
3170 	if (unlikely(*ppos))
3171 		return -ESPIPE;
3172 
3173 	if (sock->file->f_flags & O_NONBLOCK ||
3174 	    flags & SPLICE_F_NONBLOCK)
3175 		state.flags = MSG_DONTWAIT;
3176 
3177 	return unix_stream_read_generic(&state, false);
3178 }
3179 
3180 static int unix_shutdown(struct socket *sock, int mode)
3181 {
3182 	struct sock *sk = sock->sk;
3183 	struct sock *other;
3184 
3185 	if (mode < SHUT_RD || mode > SHUT_RDWR)
3186 		return -EINVAL;
3187 	/* This maps:
3188 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3189 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3190 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3191 	 */
3192 	++mode;
3193 
3194 	unix_state_lock(sk);
3195 	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3196 	other = unix_peer(sk);
3197 	if (other)
3198 		sock_hold(other);
3199 	unix_state_unlock(sk);
3200 	sk->sk_state_change(sk);
3201 
3202 	if (other &&
3203 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3204 
3205 		int peer_mode = 0;
3206 		const struct proto *prot = READ_ONCE(other->sk_prot);
3207 
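		/* If the peer has been swapped into a BPF sockmap, unhash
		 * drops it from the map so no further redirects target a
		 * shut-down socket.
		 */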
3208 		if (prot->unhash)
3209 			prot->unhash(other);
3210 		if (mode & RCV_SHUTDOWN)
3211 			peer_mode |= SEND_SHUTDOWN;
3212 		if (mode & SEND_SHUTDOWN)
3213 			peer_mode |= RCV_SHUTDOWN;
3214 		unix_state_lock(other);
3215 		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3216 		unix_state_unlock(other);
3217 		other->sk_state_change(other);
3218 		if (peer_mode == SHUTDOWN_MASK)
3219 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3220 		else if (peer_mode & RCV_SHUTDOWN)
3221 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3222 	}
3223 	if (other)
3224 		sock_put(other);
3225 
3226 	return 0;
3227 }
3228 
3229 long unix_inq_len(struct sock *sk)
3230 {
3231 	struct sk_buff *skb;
3232 	long amount = 0;
3233 
3234 	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
3235 		return -EINVAL;
3236 
3237 	if (sk->sk_type == SOCK_STREAM)
3238 		return READ_ONCE(unix_sk(sk)->inq_len);
3239 
3240 	spin_lock(&sk->sk_receive_queue.lock);
3241 	if (sk->sk_type == SOCK_SEQPACKET) {
3242 		skb_queue_walk(&sk->sk_receive_queue, skb)
3243 			amount += unix_skb_len(skb);
3244 	} else {
3245 		skb = skb_peek(&sk->sk_receive_queue);
3246 		if (skb)
3247 			amount = skb->len;
3248 	}
3249 	spin_unlock(&sk->sk_receive_queue.lock);
3250 
3251 	return amount;
3252 }
3253 EXPORT_SYMBOL_GPL(unix_inq_len);
3254 
3255 long unix_outq_len(struct sock *sk)
3256 {
3257 	return sk_wmem_alloc_get(sk);
3258 }
3259 EXPORT_SYMBOL_GPL(unix_outq_len);
3260 
3261 static int unix_open_file(struct sock *sk)
3262 {
3263 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3264 		return -EPERM;
3265 
3266 	if (!smp_load_acquire(&unix_sk(sk)->addr))
3267 		return -ENOENT;
3268 
3269 	if (!unix_sk(sk)->path.dentry)
3270 		return -ENOENT;
3271 
3272 	return FD_ADD(O_CLOEXEC, dentry_open(&unix_sk(sk)->path, O_PATH, current_cred()));
3273 }
3274 
3275 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3276 {
3277 	struct sock *sk = sock->sk;
3278 	long amount = 0;
3279 	int err;
3280 
3281 	switch (cmd) {
3282 	case SIOCOUTQ:
3283 		amount = unix_outq_len(sk);
3284 		err = put_user(amount, (int __user *)arg);
3285 		break;
3286 	case SIOCINQ:
3287 		amount = unix_inq_len(sk);
3288 		if (amount < 0)
3289 			err = amount;
3290 		else
3291 			err = put_user(amount, (int __user *)arg);
3292 		break;
3293 	case SIOCUNIXFILE:
3294 		err = unix_open_file(sk);
3295 		break;
3296 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3297 	case SIOCATMARK:
3298 		{
3299 			struct unix_sock *u = unix_sk(sk);
3300 			struct sk_buff *skb;
3301 			int answ = 0;
3302 
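			/* At the mark when the head skb is the pending OOB
			 * byte itself, or the head is fully drained and the
			 * OOB byte was just consumed or sits next in the
			 * queue.
			 */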
3303 			mutex_lock(&u->iolock);
3304 
3305 			skb = skb_peek(&sk->sk_receive_queue);
3306 			if (skb) {
3307 				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
3308 				struct sk_buff *next_skb;
3309 
3310 				next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
3311 
3312 				if (skb == oob_skb ||
3313 				    (!unix_skb_len(skb) &&
3314 				     (!oob_skb || next_skb == oob_skb)))
3315 					answ = 1;
3316 			}
3317 
3318 			mutex_unlock(&u->iolock);
3319 
3320 			err = put_user(answ, (int __user *)arg);
3321 		}
3322 		break;
3323 #endif
3324 	default:
3325 		err = -ENOIOCTLCMD;
3326 		break;
3327 	}
3328 	return err;
3329 }
3330 
3331 #ifdef CONFIG_COMPAT
3332 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3333 {
3334 	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3335 }
3336 #endif
3337 
3338 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3339 {
3340 	struct sock *sk = sock->sk;
3341 	unsigned char state;
3342 	__poll_t mask;
3343 	u8 shutdown;
3344 
3345 	sock_poll_wait(file, sock, wait);
3346 	mask = 0;
3347 	shutdown = READ_ONCE(sk->sk_shutdown);
3348 	state = READ_ONCE(sk->sk_state);
3349 
3350 	/* exceptional events? */
3351 	if (READ_ONCE(sk->sk_err))
3352 		mask |= EPOLLERR;
3353 	if (shutdown == SHUTDOWN_MASK)
3354 		mask |= EPOLLHUP;
3355 	if (shutdown & RCV_SHUTDOWN)
3356 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3357 
3358 	/* readable? */
3359 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3360 		mask |= EPOLLIN | EPOLLRDNORM;
3361 	if (sk_is_readable(sk))
3362 		mask |= EPOLLIN | EPOLLRDNORM;
3363 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3364 	if (READ_ONCE(unix_sk(sk)->oob_skb))
3365 		mask |= EPOLLPRI;
3366 #endif
3367 
3368 	/* Connection-based sockets need to check for termination and startup */
3369 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3370 	    state == TCP_CLOSE)
3371 		mask |= EPOLLHUP;
3372 
3373 	/* We also report writable when the other side has shut down the
3374 	 * connection. This prevents stuck sockets.
3375 	 */
3377 	if (unix_writable(sk, state))
3378 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3379 
3380 	return mask;
3381 }
3382 
3383 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3384 				    poll_table *wait)
3385 {
3386 	struct sock *sk = sock->sk, *other;
3387 	unsigned int writable;
3388 	unsigned char state;
3389 	__poll_t mask;
3390 	u8 shutdown;
3391 
3392 	sock_poll_wait(file, sock, wait);
3393 	mask = 0;
3394 	shutdown = READ_ONCE(sk->sk_shutdown);
3395 	state = READ_ONCE(sk->sk_state);
3396 
3397 	/* exceptional events? */
3398 	if (READ_ONCE(sk->sk_err) ||
3399 	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3400 		mask |= EPOLLERR |
3401 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3402 
3403 	if (shutdown & RCV_SHUTDOWN)
3404 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3405 	if (shutdown == SHUTDOWN_MASK)
3406 		mask |= EPOLLHUP;
3407 
3408 	/* readable? */
3409 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3410 		mask |= EPOLLIN | EPOLLRDNORM;
3411 	if (sk_is_readable(sk))
3412 		mask |= EPOLLIN | EPOLLRDNORM;
3413 
3414 	/* Connection-based sockets need to check for termination and startup */
3415 	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3416 		mask |= EPOLLHUP;
3417 
3418 	/* No write status requested, avoid expensive OUT tests. */
3419 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3420 		return mask;
3421 
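	/* If our peer is not connected back to us, writability tracks its
	 * receive queue: unix_dgram_peer_wake_me() registers us on the
	 * peer's wait queue so we are woken once it drains.
	 */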
3422 	writable = unix_writable(sk, state);
3423 	if (writable) {
3424 		unix_state_lock(sk);
3425 
3426 		other = unix_peer(sk);
3427 		if (other && unix_peer(other) != sk &&
3428 		    unix_recvq_full_lockless(other) &&
3429 		    unix_dgram_peer_wake_me(sk, other))
3430 			writable = 0;
3431 
3432 		unix_state_unlock(sk);
3433 	}
3434 
3435 	if (writable)
3436 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3437 	else
3438 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3439 
3440 	return mask;
3441 }
3442 
3443 #ifdef CONFIG_PROC_FS
3444 
3445 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3446 
3447 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3448 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3449 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
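/* The seq_file position packs a hash bucket index into the upper bits
 * of *pos and a 1-based offset within that bucket into the lower
 * BUCKET_SPACE bits; offset 0 is never used, so *pos == 0 can stand for
 * SEQ_START_TOKEN.
 */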
3450 
3451 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3452 {
3453 	unsigned long offset = get_offset(*pos);
3454 	unsigned long bucket = get_bucket(*pos);
3455 	unsigned long count = 0;
3456 	struct sock *sk;
3457 
3458 	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3459 	     sk; sk = sk_next(sk)) {
3460 		if (++count == offset)
3461 			break;
3462 	}
3463 
3464 	return sk;
3465 }
3466 
3467 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3468 {
3469 	unsigned long bucket = get_bucket(*pos);
3470 	struct net *net = seq_file_net(seq);
3471 	struct sock *sk;
3472 
3473 	while (bucket < UNIX_HASH_SIZE) {
3474 		spin_lock(&net->unx.table.locks[bucket]);
3475 
3476 		sk = unix_from_bucket(seq, pos);
3477 		if (sk)
3478 			return sk;
3479 
3480 		spin_unlock(&net->unx.table.locks[bucket]);
3481 
3482 		*pos = set_bucket_offset(++bucket, 1);
3483 	}
3484 
3485 	return NULL;
3486 }
3487 
3488 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3489 				  loff_t *pos)
3490 {
3491 	unsigned long bucket = get_bucket(*pos);
3492 
3493 	sk = sk_next(sk);
3494 	if (sk)
3495 		return sk;
3496 
3498 	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3499 
3500 	*pos = set_bucket_offset(++bucket, 1);
3501 
3502 	return unix_get_first(seq, pos);
3503 }
3504 
3505 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3506 {
3507 	if (!*pos)
3508 		return SEQ_START_TOKEN;
3509 
3510 	return unix_get_first(seq, pos);
3511 }
3512 
3513 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3514 {
3515 	++*pos;
3516 
3517 	if (v == SEQ_START_TOKEN)
3518 		return unix_get_first(seq, pos);
3519 
3520 	return unix_get_next(seq, v, pos);
3521 }
3522 
3523 static void unix_seq_stop(struct seq_file *seq, void *v)
3524 {
3525 	struct sock *sk = v;
3526 
3527 	if (sk)
3528 		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3529 }
3530 
3531 static int unix_seq_show(struct seq_file *seq, void *v)
3532 {
3533 {
3534 	if (v == SEQ_START_TOKEN) {
3535 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3536 			 "Inode Path\n");
3537 	} else {
3538 		struct sock *s = v;
3539 		struct unix_sock *u = unix_sk(s);
3540 
3541 		unix_state_lock(s);
3542 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3543 			s,
3544 			refcount_read(&s->sk_refcnt),
3545 			0,
3546 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3547 			s->sk_type,
3548 			s->sk_socket ?
3549 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3550 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3551 			sock_i_ino(s));
3552 
3553 		if (u->addr) {	/* under a hash table lock here */
3554 			int i, len;
3555 
3556 			seq_putc(seq, ' ');
3557 			i = 0;
3558 			len = u->addr->len -
3559 				offsetof(struct sockaddr_un, sun_path);
3560 			if (u->addr->name->sun_path[0]) {
3561 				len--;
3562 			} else {
3563 				seq_putc(seq, '@');
3564 				i++;
3565 			}
3566 			for ( ; i < len; i++)
3567 				seq_putc(seq, u->addr->name->sun_path[i] ?:
3568 					 '@');
3569 		}
3570 		unix_state_unlock(s);
3571 		seq_putc(seq, '\n');
3572 	}
3573 
3574 	return 0;
3575 }
3576 
3577 static const struct seq_operations unix_seq_ops = {
3578 	.start  = unix_seq_start,
3579 	.next   = unix_seq_next,
3580 	.stop   = unix_seq_stop,
3581 	.show   = unix_seq_show,
3582 };
3583 
3584 #ifdef CONFIG_BPF_SYSCALL
3585 struct bpf_unix_iter_state {
3586 	struct seq_net_private p;
3587 	unsigned int cur_sk;
3588 	unsigned int end_sk;
3589 	unsigned int max_sk;
3590 	struct sock **batch;
3591 	bool st_bucket_done;
3592 };
3593 
3594 struct bpf_iter__unix {
3595 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3596 	__bpf_md_ptr(struct unix_sock *, unix_sk);
3597 	uid_t uid __aligned(8);
3598 };
3599 
3600 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3601 			      struct unix_sock *unix_sk, uid_t uid)
3602 {
3603 	struct bpf_iter__unix ctx;
3604 
3605 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3606 	ctx.meta = meta;
3607 	ctx.unix_sk = unix_sk;
3608 	ctx.uid = uid;
3609 	return bpf_iter_run_prog(prog, &ctx);
3610 }
3611 
3612 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3613 {
3615 	struct bpf_unix_iter_state *iter = seq->private;
3616 	unsigned int expected = 1;
3617 	struct sock *sk;
3618 
3619 	sock_hold(start_sk);
3620 	iter->batch[iter->end_sk++] = start_sk;
3621 
3622 	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3623 		if (iter->end_sk < iter->max_sk) {
3624 			sock_hold(sk);
3625 			iter->batch[iter->end_sk++] = sk;
3626 		}
3627 
3628 		expected++;
3629 	}
3630 
3631 	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3632 
3633 	return expected;
3634 }
3635 
3636 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3637 {
3638 	while (iter->cur_sk < iter->end_sk)
3639 		sock_put(iter->batch[iter->cur_sk++]);
3640 }
3641 
3642 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3643 				       unsigned int new_batch_sz)
3644 {
3645 	struct sock **new_batch;
3646 
3647 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3648 			     GFP_USER | __GFP_NOWARN);
3649 	if (!new_batch)
3650 		return -ENOMEM;
3651 
3652 	bpf_iter_unix_put_batch(iter);
3653 	kvfree(iter->batch);
3654 	iter->batch = new_batch;
3655 	iter->max_sk = new_batch_sz;
3656 
3657 	return 0;
3658 }
3659 
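/* Grab a reference to every socket in the current bucket while the
 * bucket lock is held, then run the BPF program against the held
 * sockets with the lock dropped. If the batch array was too small for
 * the bucket, resize it once and retry.
 */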
3660 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3661 					loff_t *pos)
3662 {
3663 	struct bpf_unix_iter_state *iter = seq->private;
3664 	unsigned int expected;
3665 	bool resized = false;
3666 	struct sock *sk;
3667 
3668 	if (iter->st_bucket_done)
3669 		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3670 
3671 again:
3672 	/* Get a new batch */
3673 	iter->cur_sk = 0;
3674 	iter->end_sk = 0;
3675 
3676 	sk = unix_get_first(seq, pos);
3677 	if (!sk)
3678 		return NULL; /* Done */
3679 
3680 	expected = bpf_iter_unix_hold_batch(seq, sk);
3681 
3682 	if (iter->end_sk == expected) {
3683 		iter->st_bucket_done = true;
3684 		return sk;
3685 	}
3686 
3687 	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3688 		resized = true;
3689 		goto again;
3690 	}
3691 
3692 	return sk;
3693 }
3694 
3695 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3696 {
3697 	if (!*pos)
3698 		return SEQ_START_TOKEN;
3699 
3700 	/* bpf iter does not support lseek, so it always
3701 	 * continues from where it was stop()-ped.
3702 	 */
3703 	return bpf_iter_unix_batch(seq, pos);
3704 }
3705 
3706 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3707 {
3708 	struct bpf_unix_iter_state *iter = seq->private;
3709 	struct sock *sk;
3710 
3711 	/* Whenever seq_next() is called, the iter->cur_sk is
3712 	 * done with seq_show(), so advance to the next sk in
3713 	 * the batch.
3714 	 */
3715 	if (iter->cur_sk < iter->end_sk)
3716 		sock_put(iter->batch[iter->cur_sk++]);
3717 
3718 	++*pos;
3719 
3720 	if (iter->cur_sk < iter->end_sk)
3721 		sk = iter->batch[iter->cur_sk];
3722 	else
3723 		sk = bpf_iter_unix_batch(seq, pos);
3724 
3725 	return sk;
3726 }
3727 
3728 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3729 {
3730 	struct bpf_iter_meta meta;
3731 	struct bpf_prog *prog;
3732 	struct sock *sk = v;
3733 	uid_t uid;
3734 	bool slow;
3735 	int ret;
3736 
3737 	if (v == SEQ_START_TOKEN)
3738 		return 0;
3739 
3740 	slow = lock_sock_fast(sk);
3741 
3742 	if (unlikely(sk_unhashed(sk))) {
3743 		ret = SEQ_SKIP;
3744 		goto unlock;
3745 	}
3746 
3747 	uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
3748 	meta.seq = seq;
3749 	prog = bpf_iter_get_info(&meta, false);
3750 	ret = unix_prog_seq_show(prog, &meta, v, uid);
3751 unlock:
3752 	unlock_sock_fast(sk, slow);
3753 	return ret;
3754 }
3755 
3756 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3757 {
3758 	struct bpf_unix_iter_state *iter = seq->private;
3759 	struct bpf_iter_meta meta;
3760 	struct bpf_prog *prog;
3761 
3762 	if (!v) {
3763 		meta.seq = seq;
3764 		prog = bpf_iter_get_info(&meta, true);
3765 		if (prog)
3766 			(void)unix_prog_seq_show(prog, &meta, v, 0);
3767 	}
3768 
3769 	if (iter->cur_sk < iter->end_sk)
3770 		bpf_iter_unix_put_batch(iter);
3771 }
3772 
3773 static const struct seq_operations bpf_iter_unix_seq_ops = {
3774 	.start	= bpf_iter_unix_seq_start,
3775 	.next	= bpf_iter_unix_seq_next,
3776 	.stop	= bpf_iter_unix_seq_stop,
3777 	.show	= bpf_iter_unix_seq_show,
3778 };
3779 #endif
3780 #endif
3781 
3782 static const struct net_proto_family unix_family_ops = {
3783 	.family = PF_UNIX,
3784 	.create = unix_create,
3785 	.owner	= THIS_MODULE,
3786 };
3787 
3789 static int __net_init unix_net_init(struct net *net)
3790 {
3791 	int i;
3792 
3793 	net->unx.sysctl_max_dgram_qlen = 10;
3794 	if (unix_sysctl_register(net))
3795 		goto out;
3796 
3797 #ifdef CONFIG_PROC_FS
3798 	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3799 			     sizeof(struct seq_net_private)))
3800 		goto err_sysctl;
3801 #endif
3802 
3803 	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3804 					      sizeof(spinlock_t), GFP_KERNEL);
3805 	if (!net->unx.table.locks)
3806 		goto err_proc;
3807 
3808 	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3809 						sizeof(struct hlist_head),
3810 						GFP_KERNEL);
3811 	if (!net->unx.table.buckets)
3812 		goto free_locks;
3813 
3814 	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3815 		spin_lock_init(&net->unx.table.locks[i]);
3816 		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
3817 		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3818 	}
3819 
3820 	return 0;
3821 
3822 free_locks:
3823 	kvfree(net->unx.table.locks);
3824 err_proc:
3825 #ifdef CONFIG_PROC_FS
3826 	remove_proc_entry("unix", net->proc_net);
3827 err_sysctl:
3828 #endif
3829 	unix_sysctl_unregister(net);
3830 out:
3831 	return -ENOMEM;
3832 }
3833 
3834 static void __net_exit unix_net_exit(struct net *net)
3835 {
3836 	kvfree(net->unx.table.buckets);
3837 	kvfree(net->unx.table.locks);
3838 	unix_sysctl_unregister(net);
3839 	remove_proc_entry("unix", net->proc_net);
3840 }
3841 
3842 static struct pernet_operations unix_net_ops = {
3843 	.init = unix_net_init,
3844 	.exit = unix_net_exit,
3845 };
3846 
3847 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3848 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3849 		     struct unix_sock *unix_sk, uid_t uid)
3850 
3851 #define INIT_BATCH_SZ 16
3852 
3853 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3854 {
3855 	struct bpf_unix_iter_state *iter = priv_data;
3856 	int err;
3857 
3858 	err = bpf_iter_init_seq_net(priv_data, aux);
3859 	if (err)
3860 		return err;
3861 
3862 	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3863 	if (err) {
3864 		bpf_iter_fini_seq_net(priv_data);
3865 		return err;
3866 	}
3867 
3868 	return 0;
3869 }
3870 
3871 static void bpf_iter_fini_unix(void *priv_data)
3872 {
3873 	struct bpf_unix_iter_state *iter = priv_data;
3874 
3875 	bpf_iter_fini_seq_net(priv_data);
3876 	kvfree(iter->batch);
3877 }
3878 
3879 static const struct bpf_iter_seq_info unix_seq_info = {
3880 	.seq_ops		= &bpf_iter_unix_seq_ops,
3881 	.init_seq_private	= bpf_iter_init_unix,
3882 	.fini_seq_private	= bpf_iter_fini_unix,
3883 	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3884 };
3885 
3886 static const struct bpf_func_proto *
3887 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3888 			     const struct bpf_prog *prog)
3889 {
3890 	switch (func_id) {
3891 	case BPF_FUNC_setsockopt:
3892 		return &bpf_sk_setsockopt_proto;
3893 	case BPF_FUNC_getsockopt:
3894 		return &bpf_sk_getsockopt_proto;
3895 	default:
3896 		return NULL;
3897 	}
3898 }
3899 
3900 static struct bpf_iter_reg unix_reg_info = {
3901 	.target			= "unix",
3902 	.ctx_arg_info_size	= 1,
3903 	.ctx_arg_info		= {
3904 		{ offsetof(struct bpf_iter__unix, unix_sk),
3905 		  PTR_TO_BTF_ID_OR_NULL },
3906 	},
3907 	.get_func_proto         = bpf_iter_unix_get_func_proto,
3908 	.seq_info		= &unix_seq_info,
3909 };
3910 
3911 static void __init bpf_iter_register(void)
3912 {
3913 	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3914 	if (bpf_iter_reg_target(&unix_reg_info))
3915 		pr_warn("Warning: could not register bpf iterator unix\n");
3916 }
3917 #endif
3918 
3919 static int __init af_unix_init(void)
3920 {
3921 	int i, rc = -1;
3922 
3923 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3924 
3925 	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3926 		spin_lock_init(&bsd_socket_locks[i]);
3927 		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3928 	}
3929 
3930 	rc = proto_register(&unix_dgram_proto, 1);
3931 	if (rc != 0) {
3932 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3933 		goto out;
3934 	}
3935 
3936 	rc = proto_register(&unix_stream_proto, 1);
3937 	if (rc != 0) {
3938 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3939 		proto_unregister(&unix_dgram_proto);
3940 		goto out;
3941 	}
3942 
3943 	sock_register(&unix_family_ops);
3944 	register_pernet_subsys(&unix_net_ops);
3945 	unix_bpf_build_proto();
3946 
3947 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3948 	bpf_iter_register();
3949 #endif
3950 
3951 out:
3952 	return rc;
3953 }
3954 
3955 /* Later than subsys_initcall() because we depend on stuff initialised there */
3956 fs_initcall(af_unix_init);
3957