xref: /linux/net/unix/af_unix.c (revision 6884028cd7f275f8bcb854a347265cb1fb0e4bea)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NET4:	Implementation of BSD Unix domain sockets.
4  *
5  * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *
7  * Fixes:
8  *		Linus Torvalds	:	Assorted bug cures.
9  *		Niibe Yutaka	:	async I/O support.
10  *		Carsten Paeth	:	PF_UNIX check, address fixes.
11  *		Alan Cox	:	Limit size of allocated blocks.
12  *		Alan Cox	:	Fixed the stupid socketpair bug.
13  *		Alan Cox	:	BSD compatibility fine tuning.
14  *		Alan Cox	:	Fixed a bug in connect when interrupted.
15  *		Alan Cox	:	Sorted out a proper draft version of
16  *					file descriptor passing hacked up from
17  *					Mike Shaver's work.
18  *		Marty Leisner	:	Fixes to fd passing
19  *		Nick Nevin	:	recvmsg bugfix.
20  *		Alan Cox	:	Started proper garbage collector
21  *		Heiko EiBfeldt	:	Missing verify_area check
22  *		Alan Cox	:	Started POSIXisms
23  *		Andreas Schwab	:	Replace inode by dentry for proper
24  *					reference counting
25  *		Kirk Petersen	:	Made this a module
26  *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
27  *					Lots of bug fixes.
28  *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
29  *					by the above two patches.
30  *	     Andrea Arcangeli	:	If possible we block in connect(2)
31  *					if the max backlog of the listen socket
32  *					has been reached. This won't break
33  *					old apps and it avoids a huge number
34  *					of sockets being hashed (for unix_gc()
35  *					performance reasons).
36  *					Security fix that limits the max
37  *					number of socks to 2*max_files and
38  *					the number of skb queueable in the
39  *					dgram receiver.
40  *		Artur Skawina   :	Hash function optimizations
41  *	     Alexey Kuznetsov   :	Full scale SMP. Lot of bugs are introduced 8)
42  *	      Malcolm Beattie   :	Set peercred for socketpair
43  *	     Michal Ostrowski   :       Module initialization cleanup.
44  *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
45  *	     				the core infrastructure is doing that
46  *	     				for all net proto families now (2.5.69+)
47  *
48  * Known differences from reference BSD that was tested:
49  *
50  *	[TO FIX]
51  *	ECONNREFUSED is not returned from one end of a connected() socket to the
52  *		other the moment one end closes.
53  *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
54  *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
55  *	[NOT TO FIX]
56  *	accept() returns a path name even if the connecting socket has closed
57  *		in the meantime (BSD loses the path and gives up).
58  *	accept() returns 0 length path for an unbound connector. BSD returns 16
59  *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60  *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
61  *	BSD af_unix apparently has connect forgetting to block properly.
62  *		(need to check this with the POSIX spec in detail)
63  *
64  * Differences from 2.0.0-11-... (ANK)
65  *	Bug fixes and improvements.
66  *		- client shutdown killed server socket.
67  *		- removed all useless cli/sti pairs.
68  *
69  *	Semantic changes/extensions.
70  *		- generic control message passing.
71  *		- SCM_CREDENTIALS control message.
72  *		- "Abstract" (not FS based) socket bindings.
73  *		  Abstract names are sequences of bytes (not zero terminated)
74  *		  starting with a 0 byte, so that this name space does not intersect
75  *		  with BSD names.
76  */
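/* Illustrative userspace sketch (not part of this file): binding an
 * "abstract" socket whose name starts with a 0 byte, so it can never
 * collide with filesystem (BSD-style) names:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	sun.sun_path[0] = '\0';				 // abstract namespace
 *	memcpy(sun.sun_path + 1, "example", 7);		 // name bytes, no NUL needed
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */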
77 
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
79 
80 #include <linux/bpf-cgroup.h>
81 #include <linux/btf_ids.h>
82 #include <linux/dcache.h>
83 #include <linux/errno.h>
84 #include <linux/fcntl.h>
85 #include <linux/file.h>
86 #include <linux/filter.h>
87 #include <linux/fs.h>
88 #include <linux/fs_struct.h>
89 #include <linux/init.h>
90 #include <linux/kernel.h>
91 #include <linux/mount.h>
92 #include <linux/namei.h>
93 #include <linux/net.h>
94 #include <linux/pidfs.h>
95 #include <linux/poll.h>
96 #include <linux/proc_fs.h>
97 #include <linux/sched/signal.h>
98 #include <linux/security.h>
99 #include <linux/seq_file.h>
100 #include <linux/skbuff.h>
101 #include <linux/slab.h>
102 #include <linux/socket.h>
103 #include <linux/splice.h>
104 #include <linux/string.h>
105 #include <linux/uaccess.h>
106 #include <net/af_unix.h>
107 #include <net/net_namespace.h>
108 #include <net/scm.h>
109 #include <net/tcp_states.h>
110 #include <uapi/linux/sockios.h>
111 #include <uapi/linux/termios.h>
112 
113 #include "af_unix.h"
114 
115 static atomic_long_t unix_nr_socks;
116 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
117 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
118 
119 /* SMP locking strategy:
120  *    hash table is protected with spinlock.
121  *    each socket state is protected by separate spinlock.
122  */
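/* Lock ordering, as encoded by the lockdep comparison helpers below:
 *  - two hash table buckets are taken in ascending hash order
 *    (unix_table_double_lock()),
 *  - a listener's unix_state_lock is taken before the lock of any other,
 *    non-listening socket (e.g. in unix_stream_connect()),
 *  - otherwise two unix_state_locks are taken in ascending address order
 *    (unix_state_double_lock()),
 *  - a listener's sk_receive_queue.lock is taken before an embryo's.
 */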
123 #ifdef CONFIG_PROVE_LOCKING
124 #define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))
125 
126 static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
127 				  const struct lockdep_map *b)
128 {
129 	return cmp_ptr(a, b);
130 }
131 
132 static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
133 				  const struct lockdep_map *_b)
134 {
135 	const struct unix_sock *a, *b;
136 
137 	a = container_of(_a, struct unix_sock, lock.dep_map);
138 	b = container_of(_b, struct unix_sock, lock.dep_map);
139 
140 	if (a->sk.sk_state == TCP_LISTEN) {
141 		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
142 		 *
143 		 *   1. a is TCP_LISTEN.
144 		 *   2. b is not a.
145 		 *   3. concurrent connect(b -> a) must fail.
146 		 *
147 		 * Except for 2. & 3., the b's state can be any possible
148 		 * value due to concurrent connect() or listen().
149 		 *
150 		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
151 		 * be expressed as lock_cmp_fn.
152 		 */
153 		switch (b->sk.sk_state) {
154 		case TCP_CLOSE:
155 		case TCP_ESTABLISHED:
156 		case TCP_LISTEN:
157 			return -1;
158 		default:
159 			/* Invalid case. */
160 			return 0;
161 		}
162 	}
163 
164 	/* Should never happen.  Just to be symmetric. */
165 	if (b->sk.sk_state == TCP_LISTEN) {
166 		switch (a->sk.sk_state) {
167 		case TCP_CLOSE:
168 		case TCP_ESTABLISHED:
169 			return 1;
170 		default:
171 			return 0;
172 		}
173 	}
174 
175 	/* unix_state_double_lock(): ascending address order. */
176 	return cmp_ptr(a, b);
177 }
178 
179 static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
180 				  const struct lockdep_map *_b)
181 {
182 	const struct sock *a, *b;
183 
184 	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
185 	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);
186 
187 	/* unix_collect_skb(): listener -> embryo order. */
188 	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
189 		return -1;
190 
191 	/* Should never happen.  Just to be symmetric. */
192 	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
193 		return 1;
194 
195 	return 0;
196 }
197 #endif
198 
199 static unsigned int unix_unbound_hash(struct sock *sk)
200 {
201 	unsigned long hash = (unsigned long)sk;
202 
203 	hash ^= hash >> 16;
204 	hash ^= hash >> 8;
205 	hash ^= sk->sk_type;
206 
207 	return hash & UNIX_HASH_MOD;
208 }
209 
210 static unsigned int unix_bsd_hash(struct inode *i)
211 {
212 	return i->i_ino & UNIX_HASH_MOD;
213 }
214 
215 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
216 				       int addr_len, int type)
217 {
218 	__wsum csum = csum_partial(sunaddr, addr_len, 0);
219 	unsigned int hash;
220 
221 	hash = (__force unsigned int)csum_fold(csum);
222 	hash ^= hash >> 8;
223 	hash ^= type;
224 
225 	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
226 }
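/* The three helpers above partition the per-netns hash table: unbound and
 * pathname (BSD) sockets hash into [0, UNIX_HASH_MOD], while abstract
 * sockets hash into the upper half starting at UNIX_HASH_MOD + 1, so the
 * two namespaces never share a bucket (sizes come from af_unix.h).
 * Pathname sockets are additionally tracked in bsd_socket_buckets, keyed
 * by inode number, for unix_find_socket_byinode().
 */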
227 
228 static void unix_table_double_lock(struct net *net,
229 				   unsigned int hash1, unsigned int hash2)
230 {
231 	if (hash1 == hash2) {
232 		spin_lock(&net->unx.table.locks[hash1]);
233 		return;
234 	}
235 
236 	if (hash1 > hash2)
237 		swap(hash1, hash2);
238 
239 	spin_lock(&net->unx.table.locks[hash1]);
240 	spin_lock(&net->unx.table.locks[hash2]);
241 }
242 
243 static void unix_table_double_unlock(struct net *net,
244 				     unsigned int hash1, unsigned int hash2)
245 {
246 	if (hash1 == hash2) {
247 		spin_unlock(&net->unx.table.locks[hash1]);
248 		return;
249 	}
250 
251 	spin_unlock(&net->unx.table.locks[hash1]);
252 	spin_unlock(&net->unx.table.locks[hash2]);
253 }
254 
255 #ifdef CONFIG_SECURITY_NETWORK
256 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
257 {
258 	UNIXCB(skb).secid = scm->secid;
259 }
260 
261 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
262 {
263 	scm->secid = UNIXCB(skb).secid;
264 }
265 
266 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
267 {
268 	return (scm->secid == UNIXCB(skb).secid);
269 }
270 #else
271 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
272 { }
273 
274 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
275 { }
276 
277 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
278 {
279 	return true;
280 }
281 #endif /* CONFIG_SECURITY_NETWORK */
282 
283 static inline int unix_may_send(struct sock *sk, struct sock *osk)
284 {
285 	return !unix_peer(osk) || unix_peer(osk) == sk;
286 }
287 
288 static inline int unix_recvq_full_lockless(const struct sock *sk)
289 {
290 	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
291 }
292 
293 struct sock *unix_peer_get(struct sock *s)
294 {
295 	struct sock *peer;
296 
297 	unix_state_lock(s);
298 	peer = unix_peer(s);
299 	if (peer)
300 		sock_hold(peer);
301 	unix_state_unlock(s);
302 	return peer;
303 }
304 EXPORT_SYMBOL_GPL(unix_peer_get);
305 
306 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
307 					     int addr_len)
308 {
309 	struct unix_address *addr;
310 
311 	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
312 	if (!addr)
313 		return NULL;
314 
315 	refcount_set(&addr->refcnt, 1);
316 	addr->len = addr_len;
317 	memcpy(addr->name, sunaddr, addr_len);
318 
319 	return addr;
320 }
321 
322 static inline void unix_release_addr(struct unix_address *addr)
323 {
324 	if (refcount_dec_and_test(&addr->refcnt))
325 		kfree(addr);
326 }
327 
328 /*
329  *	Check unix socket name:
330  *		- should not be zero length.
331  *		- if it does not start with a zero byte, it should be NUL terminated (FS object)
332  *		- if it starts with a zero byte, it is an abstract name.
333  */
334 
335 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
336 {
337 	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
338 	    addr_len > sizeof(*sunaddr))
339 		return -EINVAL;
340 
341 	if (sunaddr->sun_family != AF_UNIX)
342 		return -EINVAL;
343 
344 	return 0;
345 }
346 
347 static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
348 {
349 	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
350 	short offset = offsetof(struct sockaddr_storage, __data);
351 
352 	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
353 
354 	/* This may look like an off by one error but it is a bit more
355 	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
356 	 * sun_path[108] doesn't as such exist.  However in kernel space
357 	 * we are guaranteed that it is a valid memory location in our
358 	 * kernel address buffer because syscall functions always pass
359 	 * a pointer of struct sockaddr_storage which has a bigger buffer
360 	 * than 108.  Also, we must terminate sun_path for strlen() in
361 	 * getname_kernel().
362 	 */
363 	addr->__data[addr_len - offset] = 0;
364 
365 	/* Don't pass sunaddr->sun_path to strlen().  Otherwise, 108 will
366 	 * cause panic if CONFIG_FORTIFY_SOURCE=y.  Let __fortify_strlen()
367 	 * know the actual buffer.
368 	 */
369 	return strlen(addr->__data) + offset + 1;
370 }
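/* Worked example for the helper above: a caller passing sun_path "/tmp/sock"
 * without a trailing NUL has addr_len == offset + 9; we then write the
 * terminating NUL at __data[9] and return 9 + offset + 1, i.e. the address
 * length including the terminator.
 */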
371 
372 static void __unix_remove_socket(struct sock *sk)
373 {
374 	sk_del_node_init(sk);
375 }
376 
377 static void __unix_insert_socket(struct net *net, struct sock *sk)
378 {
379 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
380 	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
381 }
382 
383 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
384 				 struct unix_address *addr, unsigned int hash)
385 {
386 	__unix_remove_socket(sk);
387 	smp_store_release(&unix_sk(sk)->addr, addr);
388 
389 	sk->sk_hash = hash;
390 	__unix_insert_socket(net, sk);
391 }
392 
393 static void unix_remove_socket(struct net *net, struct sock *sk)
394 {
395 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
396 	__unix_remove_socket(sk);
397 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
398 }
399 
400 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
401 {
402 	spin_lock(&net->unx.table.locks[sk->sk_hash]);
403 	__unix_insert_socket(net, sk);
404 	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
405 }
406 
407 static void unix_insert_bsd_socket(struct sock *sk)
408 {
409 	spin_lock(&bsd_socket_locks[sk->sk_hash]);
410 	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
411 	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
412 }
413 
414 static void unix_remove_bsd_socket(struct sock *sk)
415 {
416 	if (!hlist_unhashed(&sk->sk_bind_node)) {
417 		spin_lock(&bsd_socket_locks[sk->sk_hash]);
418 		__sk_del_bind_node(sk);
419 		spin_unlock(&bsd_socket_locks[sk->sk_hash]);
420 
421 		sk_node_init(&sk->sk_bind_node);
422 	}
423 }
424 
425 static struct sock *__unix_find_socket_byname(struct net *net,
426 					      struct sockaddr_un *sunname,
427 					      int len, unsigned int hash)
428 {
429 	struct sock *s;
430 
431 	sk_for_each(s, &net->unx.table.buckets[hash]) {
432 		struct unix_sock *u = unix_sk(s);
433 
434 		if (u->addr->len == len &&
435 		    !memcmp(u->addr->name, sunname, len))
436 			return s;
437 	}
438 	return NULL;
439 }
440 
441 static inline struct sock *unix_find_socket_byname(struct net *net,
442 						   struct sockaddr_un *sunname,
443 						   int len, unsigned int hash)
444 {
445 	struct sock *s;
446 
447 	spin_lock(&net->unx.table.locks[hash]);
448 	s = __unix_find_socket_byname(net, sunname, len, hash);
449 	if (s)
450 		sock_hold(s);
451 	spin_unlock(&net->unx.table.locks[hash]);
452 	return s;
453 }
454 
455 static struct sock *unix_find_socket_byinode(struct inode *i)
456 {
457 	unsigned int hash = unix_bsd_hash(i);
458 	struct sock *s;
459 
460 	spin_lock(&bsd_socket_locks[hash]);
461 	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
462 		struct dentry *dentry = unix_sk(s)->path.dentry;
463 
464 		if (dentry && d_backing_inode(dentry) == i) {
465 			sock_hold(s);
466 			spin_unlock(&bsd_socket_locks[hash]);
467 			return s;
468 		}
469 	}
470 	spin_unlock(&bsd_socket_locks[hash]);
471 	return NULL;
472 }
473 
474 /* Support code for asymmetrically connected dgram sockets
475  *
476  * If a datagram socket is connected to a socket not itself connected
477  * to the first socket (eg, /dev/log), clients may only enqueue more
478  * messages if the present receive queue of the server socket is not
479  * "too large". This means there's a second writeability condition
480  * poll and sendmsg need to test. The dgram recv code will do a wake
481  * up on the peer_wait wait queue of a socket upon reception of a
482  * datagram which needs to be propagated to sleeping would-be writers
483  * since these might not have sent anything so far. This can't be
484  * accomplished via poll_wait because the lifetime of the server
485  * socket might be less than that of its clients if these break their
486  * association with it or if the server socket is closed while clients
487  * are still connected to it and there's no way to inform "a polling
488  * implementation" that it should let go of a certain wait queue
489  *
490  * In order to propagate a wake up, a wait_queue_entry_t of the client
491  * socket is enqueued on the peer_wait queue of the server socket
492  * whose wake function does a wake_up on the ordinary client socket
493  * wait queue. This connection is established whenever a write (or
494  * poll for write) hit the flow control condition and broken when the
495  * association to the server socket is dissolved or after a wake up
496  * was relayed.
497  */
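/* In short, the flow described above is:
 *
 *   sender's poll()/sendmsg() sees the receiver's queue full
 *     -> unix_dgram_peer_wake_me()
 *        -> unix_dgram_peer_wake_connect() enqueues the sender's peer_wake
 *           entry on the receiver's peer_wait queue
 *   receiver dequeues a datagram and wakes peer_wait
 *     -> unix_dgram_peer_wake_relay() detaches the entry and wakes the
 *        sender's own wait queue, so the sender re-evaluates writability.
 */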
498 
499 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
500 				      void *key)
501 {
502 	struct unix_sock *u;
503 	wait_queue_head_t *u_sleep;
504 
505 	u = container_of(q, struct unix_sock, peer_wake);
506 
507 	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
508 			    q);
509 	u->peer_wake.private = NULL;
510 
511 	/* relaying can only happen while the wq still exists */
512 	u_sleep = sk_sleep(&u->sk);
513 	if (u_sleep)
514 		wake_up_interruptible_poll(u_sleep, key_to_poll(key));
515 
516 	return 0;
517 }
518 
519 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
520 {
521 	struct unix_sock *u, *u_other;
522 	int rc;
523 
524 	u = unix_sk(sk);
525 	u_other = unix_sk(other);
526 	rc = 0;
527 	spin_lock(&u_other->peer_wait.lock);
528 
529 	if (!u->peer_wake.private) {
530 		u->peer_wake.private = other;
531 		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
532 
533 		rc = 1;
534 	}
535 
536 	spin_unlock(&u_other->peer_wait.lock);
537 	return rc;
538 }
539 
540 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
541 					    struct sock *other)
542 {
543 	struct unix_sock *u, *u_other;
544 
545 	u = unix_sk(sk);
546 	u_other = unix_sk(other);
547 	spin_lock(&u_other->peer_wait.lock);
548 
549 	if (u->peer_wake.private == other) {
550 		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
551 		u->peer_wake.private = NULL;
552 	}
553 
554 	spin_unlock(&u_other->peer_wait.lock);
555 }
556 
557 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
558 						   struct sock *other)
559 {
560 	unix_dgram_peer_wake_disconnect(sk, other);
561 	wake_up_interruptible_poll(sk_sleep(sk),
562 				   EPOLLOUT |
563 				   EPOLLWRNORM |
564 				   EPOLLWRBAND);
565 }
566 
567 /* preconditions:
568  *	- unix_peer(sk) == other
569  *	- association is stable
570  */
571 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
572 {
573 	int connected;
574 
575 	connected = unix_dgram_peer_wake_connect(sk, other);
576 
577 	/* If other is SOCK_DEAD, we want to make sure we signal
578 	 * POLLOUT, such that a subsequent write() can get a
579 	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
580 	 * to other and it's full, we will hang waiting for POLLOUT.
581 	 */
582 	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
583 		return 1;
584 
585 	if (connected)
586 		unix_dgram_peer_wake_disconnect(sk, other);
587 
588 	return 0;
589 }
590 
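/* A socket is writable while it is not listening and at most a quarter of
 * sk_sndbuf is consumed by queued skbs (wmem_alloc << 2 <= sndbuf).
 */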
591 static int unix_writable(const struct sock *sk, unsigned char state)
592 {
593 	return state != TCP_LISTEN &&
594 		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
595 }
596 
597 static void unix_write_space(struct sock *sk)
598 {
599 	struct socket_wq *wq;
600 
601 	rcu_read_lock();
602 	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
603 		wq = rcu_dereference(sk->sk_wq);
604 		if (skwq_has_sleeper(wq))
605 			wake_up_interruptible_sync_poll(&wq->wait,
606 				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
607 		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
608 	}
609 	rcu_read_unlock();
610 }
611 
612 /* When a dgram socket disconnects (or changes its peer), we clear its receive
613  * queue of packets that arrived from the previous peer. First, this allows
614  * flow control based only on wmem_alloc; second, a sk connected to a peer
615  * may receive messages only from that peer. */
616 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
617 {
618 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
619 		skb_queue_purge_reason(&sk->sk_receive_queue,
620 				       SKB_DROP_REASON_UNIX_DISCONNECT);
621 
622 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
623 
624 		/* If one link of a bidirectional dgram pipe is disconnected,
625 		 * we signal an error. Messages are lost. Do not do this
626 		 * when the peer was not connected to us.
627 		 */
628 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
629 			WRITE_ONCE(other->sk_err, ECONNRESET);
630 			sk_error_report(other);
631 		}
632 	}
633 }
634 
635 static void unix_sock_destructor(struct sock *sk)
636 {
637 	struct unix_sock *u = unix_sk(sk);
638 
639 	skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);
640 
641 	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
642 	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
643 	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
644 	if (!sock_flag(sk, SOCK_DEAD)) {
645 		pr_info("Attempt to release alive unix socket: %p\n", sk);
646 		return;
647 	}
648 
649 	if (u->addr)
650 		unix_release_addr(u->addr);
651 
652 	atomic_long_dec(&unix_nr_socks);
653 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
654 #ifdef UNIX_REFCNT_DEBUG
655 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
656 		atomic_long_read(&unix_nr_socks));
657 #endif
658 }
659 
660 static unsigned int unix_skb_len(const struct sk_buff *skb)
661 {
662 	return skb->len - UNIXCB(skb).consumed;
663 }
664 
665 static void unix_release_sock(struct sock *sk, int embrion)
666 {
667 	struct unix_sock *u = unix_sk(sk);
668 	struct sock *skpair;
669 	struct sk_buff *skb;
670 	struct path path;
671 	int state;
672 
673 	unix_remove_socket(sock_net(sk), sk);
674 	unix_remove_bsd_socket(sk);
675 
676 	/* Clear state */
677 	unix_state_lock(sk);
678 	sock_orphan(sk);
679 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
680 	path	     = u->path;
681 	u->path.dentry = NULL;
682 	u->path.mnt = NULL;
683 	state = sk->sk_state;
684 	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
685 
686 	skpair = unix_peer(sk);
687 	unix_peer(sk) = NULL;
688 
689 	unix_state_unlock(sk);
690 
691 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
692 	u->oob_skb = NULL;
693 #endif
694 
695 	wake_up_interruptible_all(&u->peer_wait);
696 
697 	if (skpair != NULL) {
698 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
699 			struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
700 
701 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
702 			if (skb && !unix_skb_len(skb))
703 				skb = skb_peek_next(skb, &sk->sk_receive_queue);
704 #endif
705 			unix_state_lock(skpair);
706 			/* No more writes */
707 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
708 			if (skb || embrion)
709 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
710 			unix_state_unlock(skpair);
711 			skpair->sk_state_change(skpair);
712 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
713 		}
714 
715 		unix_dgram_peer_wake_disconnect(sk, skpair);
716 		sock_put(skpair); /* It may now die */
717 	}
718 
719 	/* Try to flush out this socket. Throw out buffers at least */
720 
721 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
722 		if (state == TCP_LISTEN)
723 			unix_release_sock(skb->sk, 1);
724 
725 		/* passed fds are erased in the kfree_skb hook */
726 		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
727 	}
728 
729 	if (path.dentry)
730 		path_put(&path);
731 
732 	sock_put(sk);
733 
734 	/* ---- Socket is dead now and most probably destroyed ---- */
735 
736 	unix_schedule_gc(NULL);
737 }
738 
739 struct unix_peercred {
740 	struct pid *peer_pid;
741 	const struct cred *peer_cred;
742 };
743 
744 static inline int prepare_peercred(struct unix_peercred *peercred)
745 {
746 	struct pid *pid;
747 	int err;
748 
749 	pid = task_tgid(current);
750 	err = pidfs_register_pid(pid);
751 	if (likely(!err)) {
752 		peercred->peer_pid = get_pid(pid);
753 		peercred->peer_cred = get_current_cred();
754 	}
755 	return err;
756 }
757 
758 static void drop_peercred(struct unix_peercred *peercred)
759 {
760 	const struct cred *cred = NULL;
761 	struct pid *pid = NULL;
762 
763 	might_sleep();
764 
765 	swap(peercred->peer_pid, pid);
766 	swap(peercred->peer_cred, cred);
767 
768 	put_pid(pid);
769 	put_cred(cred);
770 }
771 
772 static inline void init_peercred(struct sock *sk,
773 				 const struct unix_peercred *peercred)
774 {
775 	sk->sk_peer_pid = peercred->peer_pid;
776 	sk->sk_peer_cred = peercred->peer_cred;
777 }
778 
779 static void update_peercred(struct sock *sk, struct unix_peercred *peercred)
780 {
781 	const struct cred *old_cred;
782 	struct pid *old_pid;
783 
784 	spin_lock(&sk->sk_peer_lock);
785 	old_pid = sk->sk_peer_pid;
786 	old_cred = sk->sk_peer_cred;
787 	init_peercred(sk, peercred);
788 	spin_unlock(&sk->sk_peer_lock);
789 
790 	peercred->peer_pid = old_pid;
791 	peercred->peer_cred = old_cred;
792 }
793 
794 static void copy_peercred(struct sock *sk, struct sock *peersk)
795 {
796 	lockdep_assert_held(&unix_sk(peersk)->lock);
797 
798 	spin_lock(&sk->sk_peer_lock);
799 	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
800 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
801 	spin_unlock(&sk->sk_peer_lock);
802 }
803 
804 static bool unix_may_passcred(const struct sock *sk)
805 {
806 	return sk->sk_scm_credentials || sk->sk_scm_pidfd;
807 }
808 
809 static int unix_listen(struct socket *sock, int backlog)
810 {
811 	int err;
812 	struct sock *sk = sock->sk;
813 	struct unix_sock *u = unix_sk(sk);
814 	struct unix_peercred peercred = {};
815 
816 	err = -EOPNOTSUPP;
817 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
818 		goto out;	/* Only stream/seqpacket sockets accept */
819 	err = -EINVAL;
820 	if (!READ_ONCE(u->addr))
821 		goto out;	/* No listens on an unbound socket */
822 	err = prepare_peercred(&peercred);
823 	if (err)
824 		goto out;
825 	unix_state_lock(sk);
826 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
827 		goto out_unlock;
828 	if (backlog > sk->sk_max_ack_backlog)
829 		wake_up_interruptible_all(&u->peer_wait);
830 	sk->sk_max_ack_backlog	= backlog;
831 	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
832 
833 	/* set credentials so connect can copy them */
834 	update_peercred(sk, &peercred);
835 	err = 0;
836 
837 out_unlock:
838 	unix_state_unlock(sk);
839 	drop_peercred(&peercred);
840 out:
841 	return err;
842 }
843 
844 static int unix_release(struct socket *);
845 static int unix_bind(struct socket *, struct sockaddr_unsized *, int);
846 static int unix_stream_connect(struct socket *, struct sockaddr_unsized *,
847 			       int addr_len, int flags);
848 static int unix_socketpair(struct socket *, struct socket *);
849 static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
850 static int unix_getname(struct socket *, struct sockaddr *, int);
851 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
852 static __poll_t unix_dgram_poll(struct file *, struct socket *,
853 				    poll_table *);
854 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
855 #ifdef CONFIG_COMPAT
856 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
857 #endif
858 static int unix_shutdown(struct socket *, int);
859 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
860 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
861 static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
862 				       struct pipe_inode_info *, size_t size,
863 				       unsigned int flags);
864 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
865 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
866 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
867 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
868 static int unix_dgram_connect(struct socket *, struct sockaddr_unsized *,
869 			      int, int);
870 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
871 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
872 				  int);
873 
874 #ifdef CONFIG_PROC_FS
875 static int unix_count_nr_fds(struct sock *sk)
876 {
877 	struct sk_buff *skb;
878 	struct unix_sock *u;
879 	int nr_fds = 0;
880 
881 	spin_lock(&sk->sk_receive_queue.lock);
882 	skb = skb_peek(&sk->sk_receive_queue);
883 	while (skb) {
884 		u = unix_sk(skb->sk);
885 		nr_fds += atomic_read(&u->scm_stat.nr_fds);
886 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
887 	}
888 	spin_unlock(&sk->sk_receive_queue.lock);
889 
890 	return nr_fds;
891 }
892 
893 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
894 {
895 	struct sock *sk = sock->sk;
896 	unsigned char s_state;
897 	struct unix_sock *u;
898 	int nr_fds = 0;
899 
900 	if (sk) {
901 		s_state = READ_ONCE(sk->sk_state);
902 		u = unix_sk(sk);
903 
904 		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
905 		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
906 		 * SOCK_DGRAM is ordinary. So, no lock is needed.
907 		 */
908 		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
909 			nr_fds = atomic_read(&u->scm_stat.nr_fds);
910 		else if (s_state == TCP_LISTEN)
911 			nr_fds = unix_count_nr_fds(sk);
912 
913 		seq_printf(m, "scm_fds: %u\n", nr_fds);
914 	}
915 }
916 #else
917 #define unix_show_fdinfo NULL
918 #endif
919 
920 static bool unix_custom_sockopt(int optname)
921 {
922 	switch (optname) {
923 	case SO_INQ:
924 		return true;
925 	default:
926 		return false;
927 	}
928 }
929 
930 static int unix_setsockopt(struct socket *sock, int level, int optname,
931 			   sockptr_t optval, unsigned int optlen)
932 {
933 	struct unix_sock *u = unix_sk(sock->sk);
934 	struct sock *sk = sock->sk;
935 	int val;
936 
937 	if (level != SOL_SOCKET)
938 		return -EOPNOTSUPP;
939 
940 	if (!unix_custom_sockopt(optname))
941 		return sock_setsockopt(sock, level, optname, optval, optlen);
942 
943 	if (optlen != sizeof(int))
944 		return -EINVAL;
945 
946 	if (copy_from_sockptr(&val, optval, sizeof(val)))
947 		return -EFAULT;
948 
949 	switch (optname) {
950 	case SO_INQ:
951 		if (sk->sk_type != SOCK_STREAM)
952 			return -EINVAL;
953 
954 		if (val > 1 || val < 0)
955 			return -EINVAL;
956 
957 		WRITE_ONCE(u->recvmsg_inq, val);
958 		break;
959 	default:
960 		return -ENOPROTOOPT;
961 	}
962 
963 	return 0;
964 }
965 
966 static const struct proto_ops unix_stream_ops = {
967 	.family =	PF_UNIX,
968 	.owner =	THIS_MODULE,
969 	.release =	unix_release,
970 	.bind =		unix_bind,
971 	.connect =	unix_stream_connect,
972 	.socketpair =	unix_socketpair,
973 	.accept =	unix_accept,
974 	.getname =	unix_getname,
975 	.poll =		unix_poll,
976 	.ioctl =	unix_ioctl,
977 #ifdef CONFIG_COMPAT
978 	.compat_ioctl =	unix_compat_ioctl,
979 #endif
980 	.listen =	unix_listen,
981 	.shutdown =	unix_shutdown,
982 	.setsockopt =	unix_setsockopt,
983 	.sendmsg =	unix_stream_sendmsg,
984 	.recvmsg =	unix_stream_recvmsg,
985 	.read_skb =	unix_stream_read_skb,
986 	.mmap =		sock_no_mmap,
987 	.splice_read =	unix_stream_splice_read,
988 	.set_peek_off =	sk_set_peek_off,
989 	.show_fdinfo =	unix_show_fdinfo,
990 };
991 
992 static const struct proto_ops unix_dgram_ops = {
993 	.family =	PF_UNIX,
994 	.owner =	THIS_MODULE,
995 	.release =	unix_release,
996 	.bind =		unix_bind,
997 	.connect =	unix_dgram_connect,
998 	.socketpair =	unix_socketpair,
999 	.accept =	sock_no_accept,
1000 	.getname =	unix_getname,
1001 	.poll =		unix_dgram_poll,
1002 	.ioctl =	unix_ioctl,
1003 #ifdef CONFIG_COMPAT
1004 	.compat_ioctl =	unix_compat_ioctl,
1005 #endif
1006 	.listen =	sock_no_listen,
1007 	.shutdown =	unix_shutdown,
1008 	.sendmsg =	unix_dgram_sendmsg,
1009 	.read_skb =	unix_read_skb,
1010 	.recvmsg =	unix_dgram_recvmsg,
1011 	.mmap =		sock_no_mmap,
1012 	.set_peek_off =	sk_set_peek_off,
1013 	.show_fdinfo =	unix_show_fdinfo,
1014 };
1015 
1016 static const struct proto_ops unix_seqpacket_ops = {
1017 	.family =	PF_UNIX,
1018 	.owner =	THIS_MODULE,
1019 	.release =	unix_release,
1020 	.bind =		unix_bind,
1021 	.connect =	unix_stream_connect,
1022 	.socketpair =	unix_socketpair,
1023 	.accept =	unix_accept,
1024 	.getname =	unix_getname,
1025 	.poll =		unix_dgram_poll,
1026 	.ioctl =	unix_ioctl,
1027 #ifdef CONFIG_COMPAT
1028 	.compat_ioctl =	unix_compat_ioctl,
1029 #endif
1030 	.listen =	unix_listen,
1031 	.shutdown =	unix_shutdown,
1032 	.sendmsg =	unix_seqpacket_sendmsg,
1033 	.recvmsg =	unix_seqpacket_recvmsg,
1034 	.mmap =		sock_no_mmap,
1035 	.set_peek_off =	sk_set_peek_off,
1036 	.show_fdinfo =	unix_show_fdinfo,
1037 };
1038 
1039 static void unix_close(struct sock *sk, long timeout)
1040 {
1041 	/* Nothing to do here, unix socket does not need a ->close().
1042 	 * This is merely for sockmap.
1043 	 */
1044 }
1045 
1046 static bool unix_bpf_bypass_getsockopt(int level, int optname)
1047 {
1048 	if (level == SOL_SOCKET) {
1049 		switch (optname) {
1050 		case SO_PEERPIDFD:
1051 			return true;
1052 		default:
1053 			return false;
1054 		}
1055 	}
1056 
1057 	return false;
1058 }
1059 
1060 struct proto unix_dgram_proto = {
1061 	.name			= "UNIX",
1062 	.owner			= THIS_MODULE,
1063 	.obj_size		= sizeof(struct unix_sock),
1064 	.close			= unix_close,
1065 	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
1066 #ifdef CONFIG_BPF_SYSCALL
1067 	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
1068 #endif
1069 };
1070 
1071 struct proto unix_stream_proto = {
1072 	.name			= "UNIX-STREAM",
1073 	.owner			= THIS_MODULE,
1074 	.obj_size		= sizeof(struct unix_sock),
1075 	.close			= unix_close,
1076 	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
1077 #ifdef CONFIG_BPF_SYSCALL
1078 	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
1079 #endif
1080 };
1081 
1082 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
1083 {
1084 	struct unix_sock *u;
1085 	struct sock *sk;
1086 	int err;
1087 
1088 	atomic_long_inc(&unix_nr_socks);
1089 	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
1090 		err = -ENFILE;
1091 		goto err;
1092 	}
1093 
1094 	if (type == SOCK_STREAM)
1095 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
1096 	else /*dgram and  seqpacket */
1097 		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
1098 
1099 	if (!sk) {
1100 		err = -ENOMEM;
1101 		goto err;
1102 	}
1103 
1104 	sock_init_data(sock, sk);
1105 
1106 	sk->sk_scm_rights	= 1;
1107 	sk->sk_hash		= unix_unbound_hash(sk);
1108 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
1109 	sk->sk_write_space	= unix_write_space;
1110 	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
1111 	sk->sk_destruct		= unix_sock_destructor;
1112 	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);
1113 
1114 	u = unix_sk(sk);
1115 	u->listener = NULL;
1116 	u->vertex = NULL;
1117 	u->path.dentry = NULL;
1118 	u->path.mnt = NULL;
1119 	spin_lock_init(&u->lock);
1120 	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
1121 	mutex_init(&u->iolock); /* single task reading lock */
1122 	mutex_init(&u->bindlock); /* single task binding lock */
1123 	init_waitqueue_head(&u->peer_wait);
1124 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
1125 	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
1126 	unix_insert_unbound_socket(net, sk);
1127 
1128 	sock_prot_inuse_add(net, sk->sk_prot, 1);
1129 
1130 	return sk;
1131 
1132 err:
1133 	atomic_long_dec(&unix_nr_socks);
1134 	return ERR_PTR(err);
1135 }
1136 
1137 static int unix_create(struct net *net, struct socket *sock, int protocol,
1138 		       int kern)
1139 {
1140 	struct sock *sk;
1141 
1142 	if (protocol && protocol != PF_UNIX)
1143 		return -EPROTONOSUPPORT;
1144 
1145 	sock->state = SS_UNCONNECTED;
1146 
1147 	switch (sock->type) {
1148 	case SOCK_STREAM:
1149 		set_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
1150 		sock->ops = &unix_stream_ops;
1151 		break;
1152 		/*
1153 		 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
1154 		 *	nothing uses it.
1155 		 */
1156 	case SOCK_RAW:
1157 		sock->type = SOCK_DGRAM;
1158 		fallthrough;
1159 	case SOCK_DGRAM:
1160 		sock->ops = &unix_dgram_ops;
1161 		break;
1162 	case SOCK_SEQPACKET:
1163 		sock->ops = &unix_seqpacket_ops;
1164 		break;
1165 	default:
1166 		return -ESOCKTNOSUPPORT;
1167 	}
1168 
1169 	sk = unix_create1(net, sock, kern, sock->type);
1170 	if (IS_ERR(sk))
1171 		return PTR_ERR(sk);
1172 
1173 	return 0;
1174 }
1175 
1176 static int unix_release(struct socket *sock)
1177 {
1178 	struct sock *sk = sock->sk;
1179 
1180 	if (!sk)
1181 		return 0;
1182 
1183 	sk->sk_prot->close(sk, 0);
1184 	unix_release_sock(sk, 0);
1185 	sock->sk = NULL;
1186 
1187 	return 0;
1188 }
1189 
1190 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1191 				  int type, int flags)
1192 {
1193 	struct inode *inode;
1194 	struct path path;
1195 	struct sock *sk;
1196 	int err;
1197 
1198 	unix_mkname_bsd(sunaddr, addr_len);
1199 
1200 	if (flags & SOCK_COREDUMP) {
1201 		struct path root;
1202 
1203 		task_lock(&init_task);
1204 		get_fs_root(init_task.fs, &root);
1205 		task_unlock(&init_task);
1206 
1207 		scoped_with_kernel_creds()
1208 			err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path,
1209 					      LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS |
1210 					      LOOKUP_NO_MAGICLINKS, &path);
1211 		path_put(&root);
1212 		if (err)
1213 			goto fail;
1214 	} else {
1215 		err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1216 		if (err)
1217 			goto fail;
1218 
1219 		err = path_permission(&path, MAY_WRITE);
1220 		if (err)
1221 			goto path_put;
1222 	}
1223 
1224 	err = -ECONNREFUSED;
1225 	inode = d_backing_inode(path.dentry);
1226 	if (!S_ISSOCK(inode->i_mode))
1227 		goto path_put;
1228 
1229 	sk = unix_find_socket_byinode(inode);
1230 	if (!sk)
1231 		goto path_put;
1232 
1233 	err = -EPROTOTYPE;
1234 	if (sk->sk_type == type)
1235 		touch_atime(&path);
1236 	else
1237 		goto sock_put;
1238 
1239 	path_put(&path);
1240 
1241 	return sk;
1242 
1243 sock_put:
1244 	sock_put(sk);
1245 path_put:
1246 	path_put(&path);
1247 fail:
1248 	return ERR_PTR(err);
1249 }
1250 
1251 static struct sock *unix_find_abstract(struct net *net,
1252 				       struct sockaddr_un *sunaddr,
1253 				       int addr_len, int type)
1254 {
1255 	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1256 	struct dentry *dentry;
1257 	struct sock *sk;
1258 
1259 	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1260 	if (!sk)
1261 		return ERR_PTR(-ECONNREFUSED);
1262 
1263 	dentry = unix_sk(sk)->path.dentry;
1264 	if (dentry)
1265 		touch_atime(&unix_sk(sk)->path);
1266 
1267 	return sk;
1268 }
1269 
1270 static struct sock *unix_find_other(struct net *net,
1271 				    struct sockaddr_un *sunaddr,
1272 				    int addr_len, int type, int flags)
1273 {
1274 	struct sock *sk;
1275 
1276 	if (sunaddr->sun_path[0])
1277 		sk = unix_find_bsd(sunaddr, addr_len, type, flags);
1278 	else
1279 		sk = unix_find_abstract(net, sunaddr, addr_len, type);
1280 
1281 	return sk;
1282 }
1283 
1284 static int unix_autobind(struct sock *sk)
1285 {
1286 	struct unix_sock *u = unix_sk(sk);
1287 	unsigned int new_hash, old_hash;
1288 	struct net *net = sock_net(sk);
1289 	struct unix_address *addr;
1290 	u32 lastnum, ordernum;
1291 	int err;
1292 
1293 	err = mutex_lock_interruptible(&u->bindlock);
1294 	if (err)
1295 		return err;
1296 
1297 	if (u->addr)
1298 		goto out;
1299 
1300 	err = -ENOMEM;
1301 	addr = kzalloc(sizeof(*addr) +
1302 		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1303 	if (!addr)
1304 		goto out;
1305 
1306 	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1307 	addr->name->sun_family = AF_UNIX;
1308 	refcount_set(&addr->refcnt, 1);
1309 
1310 	old_hash = sk->sk_hash;
1311 	ordernum = get_random_u32();
1312 	lastnum = ordernum & 0xFFFFF;
1313 retry:
1314 	ordernum = (ordernum + 1) & 0xFFFFF;
1315 	sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1316 
1317 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1318 	unix_table_double_lock(net, old_hash, new_hash);
1319 
1320 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1321 		unix_table_double_unlock(net, old_hash, new_hash);
1322 
1323 		/* __unix_find_socket_byname() may take a long time if many names
1324 		 * are already in use.
1325 		 */
1326 		cond_resched();
1327 
1328 		if (ordernum == lastnum) {
1329 			/* Give up if all names seem to be in use. */
1330 			err = -ENOSPC;
1331 			unix_release_addr(addr);
1332 			goto out;
1333 		}
1334 
1335 		goto retry;
1336 	}
1337 
1338 	__unix_set_addr_hash(net, sk, addr, new_hash);
1339 	unix_table_double_unlock(net, old_hash, new_hash);
1340 	err = 0;
1341 
1342 out:	mutex_unlock(&u->bindlock);
1343 	return err;
1344 }
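/* Autobind example: the address generated above is an abstract name of the
 * form '\0' followed by five lowercase hex digits (e.g. '\0' "03af1"), so
 * addr->len is offsetof(struct sockaddr_un, sun_path) + 6.  Up to 0x100000
 * candidate names are tried before giving up with -ENOSPC.
 */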
1345 
1346 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1347 			 int addr_len)
1348 {
1349 	umode_t mode = S_IFSOCK |
1350 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1351 	struct unix_sock *u = unix_sk(sk);
1352 	unsigned int new_hash, old_hash;
1353 	struct net *net = sock_net(sk);
1354 	struct mnt_idmap *idmap;
1355 	struct unix_address *addr;
1356 	struct dentry *dentry;
1357 	struct path parent;
1358 	int err;
1359 
1360 	addr_len = unix_mkname_bsd(sunaddr, addr_len);
1361 	addr = unix_create_addr(sunaddr, addr_len);
1362 	if (!addr)
1363 		return -ENOMEM;
1364 
1365 	/*
1366 	 * Get the parent directory, calculate the hash for last
1367 	 * component.
1368 	 */
1369 	dentry = start_creating_path(AT_FDCWD, addr->name->sun_path, &parent, 0);
1370 	if (IS_ERR(dentry)) {
1371 		err = PTR_ERR(dentry);
1372 		goto out;
1373 	}
1374 
1375 	/*
1376 	 * All right, let's create it.
1377 	 */
1378 	idmap = mnt_idmap(parent.mnt);
1379 	err = security_path_mknod(&parent, dentry, mode, 0);
1380 	if (!err)
1381 		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0, NULL);
1382 	if (err)
1383 		goto out_path;
1384 	err = mutex_lock_interruptible(&u->bindlock);
1385 	if (err)
1386 		goto out_unlink;
1387 	if (u->addr)
1388 		goto out_unlock;
1389 
1390 	old_hash = sk->sk_hash;
1391 	new_hash = unix_bsd_hash(d_backing_inode(dentry));
1392 	unix_table_double_lock(net, old_hash, new_hash);
1393 	u->path.mnt = mntget(parent.mnt);
1394 	u->path.dentry = dget(dentry);
1395 	__unix_set_addr_hash(net, sk, addr, new_hash);
1396 	unix_table_double_unlock(net, old_hash, new_hash);
1397 	unix_insert_bsd_socket(sk);
1398 	mutex_unlock(&u->bindlock);
1399 	end_creating_path(&parent, dentry);
1400 	return 0;
1401 
1402 out_unlock:
1403 	mutex_unlock(&u->bindlock);
1404 	err = -EINVAL;
1405 out_unlink:
1406 	/* failed after successful mknod?  unlink what we'd created... */
1407 	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1408 out_path:
1409 	end_creating_path(&parent, dentry);
1410 out:
1411 	unix_release_addr(addr);
1412 	return err == -EEXIST ? -EADDRINUSE : err;
1413 }
1414 
1415 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1416 			      int addr_len)
1417 {
1418 	struct unix_sock *u = unix_sk(sk);
1419 	unsigned int new_hash, old_hash;
1420 	struct net *net = sock_net(sk);
1421 	struct unix_address *addr;
1422 	int err;
1423 
1424 	addr = unix_create_addr(sunaddr, addr_len);
1425 	if (!addr)
1426 		return -ENOMEM;
1427 
1428 	err = mutex_lock_interruptible(&u->bindlock);
1429 	if (err)
1430 		goto out;
1431 
1432 	if (u->addr) {
1433 		err = -EINVAL;
1434 		goto out_mutex;
1435 	}
1436 
1437 	old_hash = sk->sk_hash;
1438 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1439 	unix_table_double_lock(net, old_hash, new_hash);
1440 
1441 	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1442 		goto out_spin;
1443 
1444 	__unix_set_addr_hash(net, sk, addr, new_hash);
1445 	unix_table_double_unlock(net, old_hash, new_hash);
1446 	mutex_unlock(&u->bindlock);
1447 	return 0;
1448 
1449 out_spin:
1450 	unix_table_double_unlock(net, old_hash, new_hash);
1451 	err = -EADDRINUSE;
1452 out_mutex:
1453 	mutex_unlock(&u->bindlock);
1454 out:
1455 	unix_release_addr(addr);
1456 	return err;
1457 }
1458 
1459 static int unix_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
1460 {
1461 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1462 	struct sock *sk = sock->sk;
1463 	int err;
1464 
1465 	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1466 	    sunaddr->sun_family == AF_UNIX)
1467 		return unix_autobind(sk);
1468 
1469 	err = unix_validate_addr(sunaddr, addr_len);
1470 	if (err)
1471 		return err;
1472 
1473 	if (sunaddr->sun_path[0])
1474 		err = unix_bind_bsd(sk, sunaddr, addr_len);
1475 	else
1476 		err = unix_bind_abstract(sk, sunaddr, addr_len);
1477 
1478 	return err;
1479 }
1480 
1481 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1482 {
1483 	if (unlikely(sk1 == sk2) || !sk2) {
1484 		unix_state_lock(sk1);
1485 		return;
1486 	}
1487 
1488 	if (sk1 > sk2)
1489 		swap(sk1, sk2);
1490 
1491 	unix_state_lock(sk1);
1492 	unix_state_lock(sk2);
1493 }
1494 
1495 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1496 {
1497 	if (unlikely(sk1 == sk2) || !sk2) {
1498 		unix_state_unlock(sk1);
1499 		return;
1500 	}
1501 	unix_state_unlock(sk1);
1502 	unix_state_unlock(sk2);
1503 }
1504 
1505 static int unix_dgram_connect(struct socket *sock, struct sockaddr_unsized *addr,
1506 			      int alen, int flags)
1507 {
1508 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1509 	struct sock *sk = sock->sk;
1510 	struct sock *other;
1511 	int err;
1512 
1513 	err = -EINVAL;
1514 	if (alen < offsetofend(struct sockaddr, sa_family))
1515 		goto out;
1516 
1517 	if (addr->sa_family != AF_UNSPEC) {
1518 		err = unix_validate_addr(sunaddr, alen);
1519 		if (err)
1520 			goto out;
1521 
1522 		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
1523 		if (err)
1524 			goto out;
1525 
1526 		if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) {
1527 			err = unix_autobind(sk);
1528 			if (err)
1529 				goto out;
1530 		}
1531 
1532 restart:
1533 		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0);
1534 		if (IS_ERR(other)) {
1535 			err = PTR_ERR(other);
1536 			goto out;
1537 		}
1538 
1539 		unix_state_double_lock(sk, other);
1540 
1541 		/* Apparently VFS overslept socket death. Retry. */
1542 		if (sock_flag(other, SOCK_DEAD)) {
1543 			unix_state_double_unlock(sk, other);
1544 			sock_put(other);
1545 			goto restart;
1546 		}
1547 
1548 		err = -EPERM;
1549 		if (!unix_may_send(sk, other))
1550 			goto out_unlock;
1551 
1552 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1553 		if (err)
1554 			goto out_unlock;
1555 
1556 		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1557 		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
1558 	} else {
1559 		/*
1560 		 *	1003.1g breaking connected state with AF_UNSPEC
1561 		 */
1562 		other = NULL;
1563 		unix_state_double_lock(sk, other);
1564 	}
1565 
1566 	/*
1567 	 * If it was connected, reconnect.
1568 	 */
1569 	if (unix_peer(sk)) {
1570 		struct sock *old_peer = unix_peer(sk);
1571 
1572 		unix_peer(sk) = other;
1573 		if (!other)
1574 			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
1575 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1576 
1577 		unix_state_double_unlock(sk, other);
1578 
1579 		if (other != old_peer) {
1580 			unix_dgram_disconnected(sk, old_peer);
1581 
1582 			unix_state_lock(old_peer);
1583 			if (!unix_peer(old_peer))
1584 				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
1585 			unix_state_unlock(old_peer);
1586 		}
1587 
1588 		sock_put(old_peer);
1589 	} else {
1590 		unix_peer(sk) = other;
1591 		unix_state_double_unlock(sk, other);
1592 	}
1593 
1594 	return 0;
1595 
1596 out_unlock:
1597 	unix_state_double_unlock(sk, other);
1598 	sock_put(other);
1599 out:
1600 	return err;
1601 }
1602 
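/* Called with other's unix_state_lock held; the lock is dropped before
 * sleeping on other's peer_wait queue.  Returns the remaining timeout
 * (0 if it expired).
 */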
1603 static long unix_wait_for_peer(struct sock *other, long timeo)
1604 {
1605 	struct unix_sock *u = unix_sk(other);
1606 	int sched;
1607 	DEFINE_WAIT(wait);
1608 
1609 	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1610 
1611 	sched = !sock_flag(other, SOCK_DEAD) &&
1612 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
1613 		unix_recvq_full_lockless(other);
1614 
1615 	unix_state_unlock(other);
1616 
1617 	if (sched)
1618 		timeo = schedule_timeout(timeo);
1619 
1620 	finish_wait(&u->peer_wait, &wait);
1621 	return timeo;
1622 }
1623 
1624 static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
1625 			       int addr_len, int flags)
1626 {
1627 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1628 	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1629 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1630 	struct unix_peercred peercred = {};
1631 	struct net *net = sock_net(sk);
1632 	struct sk_buff *skb = NULL;
1633 	unsigned char state;
1634 	long timeo;
1635 	int err;
1636 
1637 	err = unix_validate_addr(sunaddr, addr_len);
1638 	if (err)
1639 		goto out;
1640 
1641 	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
1642 	if (err)
1643 		goto out;
1644 
1645 	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
1646 		err = unix_autobind(sk);
1647 		if (err)
1648 			goto out;
1649 	}
1650 
1651 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1652 
1653 	err = prepare_peercred(&peercred);
1654 	if (err)
1655 		goto out;
1656 
1657 	/* create new sock for complete connection */
1658 	newsk = unix_create1(net, NULL, 0, sock->type);
1659 	if (IS_ERR(newsk)) {
1660 		err = PTR_ERR(newsk);
1661 		goto out;
1662 	}
1663 
1664 	/* Allocate skb for sending to listening sock */
1665 	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1666 	if (!skb) {
1667 		err = -ENOMEM;
1668 		goto out_free_sk;
1669 	}
1670 
1671 restart:
1672 	/*  Find listening sock. */
1673 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags);
1674 	if (IS_ERR(other)) {
1675 		err = PTR_ERR(other);
1676 		goto out_free_skb;
1677 	}
1678 
1679 	unix_state_lock(other);
1680 
1681 	/* Apparently VFS overslept socket death. Retry. */
1682 	if (sock_flag(other, SOCK_DEAD)) {
1683 		unix_state_unlock(other);
1684 		sock_put(other);
1685 		goto restart;
1686 	}
1687 
1688 	if (other->sk_state != TCP_LISTEN ||
1689 	    other->sk_shutdown & RCV_SHUTDOWN) {
1690 		err = -ECONNREFUSED;
1691 		goto out_unlock;
1692 	}
1693 
1694 	if (unix_recvq_full_lockless(other)) {
1695 		if (!timeo) {
1696 			err = -EAGAIN;
1697 			goto out_unlock;
1698 		}
1699 
1700 		timeo = unix_wait_for_peer(other, timeo);
1701 		sock_put(other);
1702 
1703 		err = sock_intr_errno(timeo);
1704 		if (signal_pending(current))
1705 			goto out_free_skb;
1706 
1707 		goto restart;
1708 	}
1709 
1710 	/* self connect and simultaneous connect are eliminated
1711 	 * by rejecting TCP_LISTEN socket to avoid deadlock.
1712 	 */
1713 	state = READ_ONCE(sk->sk_state);
1714 	if (unlikely(state != TCP_CLOSE)) {
1715 		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1716 		goto out_unlock;
1717 	}
1718 
1719 	unix_state_lock(sk);
1720 
1721 	if (unlikely(sk->sk_state != TCP_CLOSE)) {
1722 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
1723 		unix_state_unlock(sk);
1724 		goto out_unlock;
1725 	}
1726 
1727 	err = security_unix_stream_connect(sk, other, newsk);
1728 	if (err) {
1729 		unix_state_unlock(sk);
1730 		goto out_unlock;
1731 	}
1732 
1733 	/* The way is open! Quickly set all the necessary fields... */
1734 
1735 	sock_hold(sk);
1736 	unix_peer(newsk) = sk;
1737 	newsk->sk_state = TCP_ESTABLISHED;
1738 	newsk->sk_type = sk->sk_type;
1739 	newsk->sk_scm_recv_flags = other->sk_scm_recv_flags;
1740 	init_peercred(newsk, &peercred);
1741 
1742 	newu = unix_sk(newsk);
1743 	newu->listener = other;
1744 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1745 	otheru = unix_sk(other);
1746 
1747 	/* copy address information from listening to new sock
1748 	 *
1749 	 * The contents of *(otheru->addr) and otheru->path
1750 	 * are seen fully set up here, since we have found
1751 	 * otheru in hash under its lock.  Insertion into the
1752 	 * hash chain we'd found it in had been done in an
1753 	 * earlier critical area protected by the chain's lock,
1754 	 * the same one where we'd set *(otheru->addr) contents,
1755 	 * as well as otheru->path and otheru->addr itself.
1756 	 *
1757 	 * Using smp_store_release() here to set newu->addr
1758 	 * is enough to make those stores, as well as stores
1759 	 * to newu->path visible to anyone who gets newu->addr
1760 	 * by smp_load_acquire().  IOW, the same warranties
1761 	 * as for unix_sock instances bound in unix_bind() or
1762 	 * in unix_autobind().
1763 	 */
1764 	if (otheru->path.dentry) {
1765 		path_get(&otheru->path);
1766 		newu->path = otheru->path;
1767 	}
1768 	refcount_inc(&otheru->addr->refcnt);
1769 	smp_store_release(&newu->addr, otheru->addr);
1770 
1771 	/* Set credentials */
1772 	copy_peercred(sk, other);
1773 
1774 	sock->state	= SS_CONNECTED;
1775 	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1776 	sock_hold(newsk);
1777 
1778 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
1779 	unix_peer(sk)	= newsk;
1780 
1781 	unix_state_unlock(sk);
1782 
1783 	/* take it and send info to the listening sock */
1784 	spin_lock(&other->sk_receive_queue.lock);
1785 	__skb_queue_tail(&other->sk_receive_queue, skb);
1786 	spin_unlock(&other->sk_receive_queue.lock);
1787 	unix_state_unlock(other);
1788 	other->sk_data_ready(other);
1789 	sock_put(other);
1790 	return 0;
1791 
1792 out_unlock:
1793 	unix_state_unlock(other);
1794 	sock_put(other);
1795 out_free_skb:
1796 	consume_skb(skb);
1797 out_free_sk:
1798 	unix_release_sock(newsk, 0);
1799 out:
1800 	drop_peercred(&peercred);
1801 	return err;
1802 }
1803 
1804 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1805 {
1806 	struct unix_peercred ska_peercred = {}, skb_peercred = {};
1807 	struct sock *ska = socka->sk, *skb = sockb->sk;
1808 	int err;
1809 
1810 	err = prepare_peercred(&ska_peercred);
1811 	if (err)
1812 		return err;
1813 
1814 	err = prepare_peercred(&skb_peercred);
1815 	if (err) {
1816 		drop_peercred(&ska_peercred);
1817 		return err;
1818 	}
1819 
1820 	/* Join our sockets back to back */
1821 	sock_hold(ska);
1822 	sock_hold(skb);
1823 	unix_peer(ska) = skb;
1824 	unix_peer(skb) = ska;
1825 	init_peercred(ska, &ska_peercred);
1826 	init_peercred(skb, &skb_peercred);
1827 
1828 	ska->sk_state = TCP_ESTABLISHED;
1829 	skb->sk_state = TCP_ESTABLISHED;
1830 	socka->state  = SS_CONNECTED;
1831 	sockb->state  = SS_CONNECTED;
1832 	return 0;
1833 }
1834 
1835 static int unix_accept(struct socket *sock, struct socket *newsock,
1836 		       struct proto_accept_arg *arg)
1837 {
1838 	struct sock *sk = sock->sk;
1839 	struct sk_buff *skb;
1840 	struct sock *tsk;
1841 
1842 	arg->err = -EOPNOTSUPP;
1843 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1844 		goto out;
1845 
1846 	arg->err = -EINVAL;
1847 	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
1848 		goto out;
1849 
1850 	/* If socket state is TCP_LISTEN it cannot change (for now...),
1851 	 * so that no locks are necessary.
1852 	 */
1853 
1854 	skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1855 				&arg->err);
1856 	if (!skb) {
1857 		/* This means receive shutdown. */
1858 		if (arg->err == 0)
1859 			arg->err = -EINVAL;
1860 		goto out;
1861 	}
1862 
1863 	tsk = skb->sk;
1864 	skb_free_datagram(sk, skb);
1865 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
1866 
1867 	if (tsk->sk_type == SOCK_STREAM)
1868 		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
1869 
1870 	/* attach accepted sock to socket */
1871 	unix_state_lock(tsk);
1872 	unix_update_edges(unix_sk(tsk));
1873 	newsock->state = SS_CONNECTED;
1874 	sock_graft(tsk, newsock);
1875 	unix_state_unlock(tsk);
1876 	return 0;
1877 
1878 out:
1879 	return arg->err;
1880 }
1881 
1882 
1883 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1884 {
1885 	struct sock *sk = sock->sk;
1886 	struct unix_address *addr;
1887 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1888 	int err = 0;
1889 
1890 	if (peer) {
1891 		sk = unix_peer_get(sk);
1892 
1893 		err = -ENOTCONN;
1894 		if (!sk)
1895 			goto out;
1896 		err = 0;
1897 	} else {
1898 		sock_hold(sk);
1899 	}
1900 
1901 	addr = smp_load_acquire(&unix_sk(sk)->addr);
1902 	if (!addr) {
1903 		sunaddr->sun_family = AF_UNIX;
1904 		sunaddr->sun_path[0] = 0;
1905 		err = offsetof(struct sockaddr_un, sun_path);
1906 	} else {
1907 		err = addr->len;
1908 		memcpy(sunaddr, addr->name, addr->len);
1909 
1910 		if (peer)
1911 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1912 					       CGROUP_UNIX_GETPEERNAME);
1913 		else
1914 			BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1915 					       CGROUP_UNIX_GETSOCKNAME);
1916 	}
1917 	sock_put(sk);
1918 out:
1919 	return err;
1920 }
1921 
1922 /* The "user->unix_inflight" variable is protected by the garbage
1923  * collection lock, and we just read it locklessly here. If you go
1924  * over the limit, there might be a tiny race in actually noticing
1925  * it across threads. Tough.
1926  */
1927 static inline bool too_many_unix_fds(struct task_struct *p)
1928 {
1929 	struct user_struct *user = current_user();
1930 
1931 	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1932 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1933 	return false;
1934 }
1935 
1936 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1937 {
1938 	if (too_many_unix_fds(current))
1939 		return -ETOOMANYREFS;
1940 
1941 	UNIXCB(skb).fp = scm->fp;
1942 	scm->fp = NULL;
1943 
1944 	if (unix_prepare_fpl(UNIXCB(skb).fp))
1945 		return -ENOMEM;
1946 
1947 	return 0;
1948 }
1949 
1950 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1951 {
1952 	scm->fp = UNIXCB(skb).fp;
1953 	UNIXCB(skb).fp = NULL;
1954 
1955 	unix_destroy_fpl(scm->fp);
1956 }
1957 
1958 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1959 {
1960 	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1961 }
1962 
1963 static void unix_destruct_scm(struct sk_buff *skb)
1964 {
1965 	struct scm_cookie scm;
1966 
1967 	memset(&scm, 0, sizeof(scm));
1968 	scm.pid = UNIXCB(skb).pid;
1969 	if (UNIXCB(skb).fp)
1970 		unix_detach_fds(&scm, skb);
1971 
1972 	/* Alas, it calls into the VFS. */
1973 	/* So fscking what? fput() has been SMP-safe since last summer. */
1974 	scm_destroy(&scm);
1975 	sock_wfree(skb);
1976 }
1977 
1978 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1979 {
1980 	int err = 0;
1981 
1982 	UNIXCB(skb).pid = get_pid(scm->pid);
1983 	UNIXCB(skb).uid = scm->creds.uid;
1984 	UNIXCB(skb).gid = scm->creds.gid;
1985 	UNIXCB(skb).fp = NULL;
1986 	unix_get_secdata(scm, skb);
1987 	if (scm->fp && send_fds)
1988 		err = unix_attach_fds(scm, skb);
1989 
1990 	skb->destructor = unix_destruct_scm;
1991 	return err;
1992 }
1993 
1994 static void unix_skb_to_scm(struct sk_buff *skb, struct scm_cookie *scm)
1995 {
1996 	scm_set_cred(scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
1997 	unix_set_secdata(scm, skb);
1998 }
1999 
2000 /**
2001  * unix_maybe_add_creds() - Adds current task uid/gid and struct pid to skb if needed.
2002  * @skb: skb to attach creds to.
2003  * @sk: Sender sock.
2004  * @other: Receiver sock.
2005  *
2006  * Some apps rely on write() giving SCM_CREDENTIALS.
2007  * We include credentials if the source or destination socket
2008  * asserted SOCK_PASSCRED.
2009  *
2010  * Context: May sleep.
2011  * Return: On success zero, on error a negative error code is returned.
2012  */
2013 static int unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
2014 				const struct sock *other)
2015 {
2016 	if (UNIXCB(skb).pid)
2017 		return 0;
2018 
2019 	if (unix_may_passcred(sk) || unix_may_passcred(other) ||
2020 	    !other->sk_socket) {
2021 		struct pid *pid;
2022 		int err;
2023 
2024 		pid = task_tgid(current);
2025 		err = pidfs_register_pid(pid);
2026 		if (unlikely(err))
2027 			return err;
2028 
2029 		UNIXCB(skb).pid = get_pid(pid);
2030 		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
2031 	}
2032 
2033 	return 0;
2034 }
2035 
2036 static bool unix_skb_scm_eq(struct sk_buff *skb,
2037 			    struct scm_cookie *scm)
2038 {
2039 	return UNIXCB(skb).pid == scm->pid &&
2040 	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
2041 	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
2042 	       unix_secdata_eq(scm, skb);
2043 }
2044 
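/* Track SCM_RIGHTS descriptors queued on a receiver: nr_fds feeds the
 * diag/fdinfo accounting, and the edges keep the garbage-collector graph
 * up to date.
 */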
2045 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
2046 {
2047 	struct scm_fp_list *fp = UNIXCB(skb).fp;
2048 	struct unix_sock *u = unix_sk(sk);
2049 
2050 	if (unlikely(fp && fp->count)) {
2051 		atomic_add(fp->count, &u->scm_stat.nr_fds);
2052 		unix_add_edges(fp, u);
2053 	}
2054 }
2055 
2056 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
2057 {
2058 	struct scm_fp_list *fp = UNIXCB(skb).fp;
2059 	struct unix_sock *u = unix_sk(sk);
2060 
2061 	if (unlikely(fp && fp->count)) {
2062 		atomic_sub(fp->count, &u->scm_stat.nr_fds);
2063 		unix_del_edges(fp);
2064 	}
2065 }
2066 
2067 /*
2068  *	Send AF_UNIX data.
2069  */
2070 
2071 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
2072 			      size_t len)
2073 {
2074 	struct sock *sk = sock->sk, *other = NULL;
2075 	struct unix_sock *u = unix_sk(sk);
2076 	struct scm_cookie scm;
2077 	struct sk_buff *skb;
2078 	int data_len = 0;
2079 	int sk_locked;
2080 	long timeo;
2081 	int err;
2082 
2083 	err = scm_send(sock, msg, &scm, false);
2084 	if (err < 0)
2085 		return err;
2086 
2087 	if (msg->msg_flags & MSG_OOB) {
2088 		err = -EOPNOTSUPP;
2089 		goto out;
2090 	}
2091 
2092 	if (msg->msg_namelen) {
2093 		err = unix_validate_addr(msg->msg_name, msg->msg_namelen);
2094 		if (err)
2095 			goto out;
2096 
2097 		err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
2098 							    msg->msg_name,
2099 							    &msg->msg_namelen,
2100 							    NULL);
2101 		if (err)
2102 			goto out;
2103 	}
2104 
2105 	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
2106 		err = unix_autobind(sk);
2107 		if (err)
2108 			goto out;
2109 	}
2110 
2111 	if (len > READ_ONCE(sk->sk_sndbuf) - 32) {
2112 		err = -EMSGSIZE;
2113 		goto out;
2114 	}
2115 
2116 	if (len > SKB_MAX_ALLOC) {
2117 		data_len = min_t(size_t,
2118 				 len - SKB_MAX_ALLOC,
2119 				 MAX_SKB_FRAGS * PAGE_SIZE);
2120 		data_len = PAGE_ALIGN(data_len);
2121 
2122 		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2123 	}
2124 
2125 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2126 				   msg->msg_flags & MSG_DONTWAIT, &err,
2127 				   PAGE_ALLOC_COSTLY_ORDER);
2128 	if (!skb)
2129 		goto out;
2130 
2131 	err = unix_scm_to_skb(&scm, skb, true);
2132 	if (err < 0)
2133 		goto out_free;
2134 
2135 	skb_put(skb, len - data_len);
2136 	skb->data_len = data_len;
2137 	skb->len = len;
2138 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2139 	if (err)
2140 		goto out_free;
2141 
2142 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2143 
2144 	if (msg->msg_namelen) {
2145 lookup:
2146 		other = unix_find_other(sock_net(sk), msg->msg_name,
2147 					msg->msg_namelen, sk->sk_type, 0);
2148 		if (IS_ERR(other)) {
2149 			err = PTR_ERR(other);
2150 			goto out_free;
2151 		}
2152 	} else {
2153 		other = unix_peer_get(sk);
2154 		if (!other) {
2155 			err = -ENOTCONN;
2156 			goto out_free;
2157 		}
2158 	}
2159 
2160 	if (sk_filter(other, skb) < 0) {
2161 		/* Toss the packet but do not return any error to the sender */
2162 		err = len;
2163 		goto out_sock_put;
2164 	}
2165 
2166 	err = unix_maybe_add_creds(skb, sk, other);
2167 	if (err)
2168 		goto out_sock_put;
2169 
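	/* restart: is taken with no locks held; restart_locked: is reached
	 * with other's state lock held, and sk_locked says whether we also
	 * hold our own state lock at that point.
	 */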
2170 restart:
2171 	sk_locked = 0;
2172 	unix_state_lock(other);
2173 restart_locked:
2174 
2175 	if (!unix_may_send(sk, other)) {
2176 		err = -EPERM;
2177 		goto out_unlock;
2178 	}
2179 
2180 	if (unlikely(sock_flag(other, SOCK_DEAD))) {
2181 		/* Check with 1003.1g - what should a datagram error return? */
2182 
2183 		unix_state_unlock(other);
2184 
2185 		if (sk->sk_type == SOCK_SEQPACKET) {
2186 			/* We get here only when racing with unix_release_sock(),
2187 			 * which is clearing @other. Never change the state to
2188 			 * TCP_CLOSE the way the SOCK_DGRAM path below does.
2189 			 */
2190 			err = -EPIPE;
2191 			goto out_sock_put;
2192 		}
2193 
2194 		if (!sk_locked)
2195 			unix_state_lock(sk);
2196 
2197 		if (unix_peer(sk) == other) {
2198 			unix_peer(sk) = NULL;
2199 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2200 
2201 			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2202 			unix_state_unlock(sk);
2203 
2204 			unix_dgram_disconnected(sk, other);
2205 			sock_put(other);
2206 			err = -ECONNREFUSED;
2207 			goto out_sock_put;
2208 		}
2209 
2210 		unix_state_unlock(sk);
2211 
2212 		if (!msg->msg_namelen) {
2213 			err = -ECONNRESET;
2214 			goto out_sock_put;
2215 		}
2216 
2217 		sock_put(other);
2218 		goto lookup;
2219 	}
2220 
2221 	if (other->sk_shutdown & RCV_SHUTDOWN) {
2222 		err = -EPIPE;
2223 		goto out_unlock;
2224 	}
2225 
2226 	if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2227 		err = -EPERM;
2228 		goto out_unlock;
2229 	}
2230 
2231 	if (sk->sk_type != SOCK_SEQPACKET) {
2232 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2233 		if (err)
2234 			goto out_unlock;
2235 	}
2236 
2237 	/* other == sk && unix_peer(other) != sk can happen if
2238 	 * - unix_peer(sk) == NULL and the destination address is bound to sk, or
2239 	 * - unix_peer(sk) == sk at lookup time but it got disconnected before the lock.
2240 	 */
2241 	if (other != sk &&
2242 	    unlikely(unix_peer(other) != sk &&
2243 	    unix_recvq_full_lockless(other))) {
2244 		if (timeo) {
2245 			timeo = unix_wait_for_peer(other, timeo);
2246 
2247 			err = sock_intr_errno(timeo);
2248 			if (signal_pending(current))
2249 				goto out_sock_put;
2250 
2251 			goto restart;
2252 		}
2253 
2254 		if (!sk_locked) {
2255 			unix_state_unlock(other);
2256 			unix_state_double_lock(sk, other);
2257 		}
2258 
2259 		if (unix_peer(sk) != other ||
2260 		    unix_dgram_peer_wake_me(sk, other)) {
2261 			err = -EAGAIN;
2262 			sk_locked = 1;
2263 			goto out_unlock;
2264 		}
2265 
2266 		if (!sk_locked) {
2267 			sk_locked = 1;
2268 			goto restart_locked;
2269 		}
2270 	}
2271 
2272 	if (unlikely(sk_locked))
2273 		unix_state_unlock(sk);
2274 
2275 	if (sock_flag(other, SOCK_RCVTSTAMP))
2276 		__net_timestamp(skb);
2277 
2278 	scm_stat_add(other, skb);
2279 	skb_queue_tail(&other->sk_receive_queue, skb);
2280 	unix_state_unlock(other);
2281 	other->sk_data_ready(other);
2282 	sock_put(other);
2283 	scm_destroy(&scm);
2284 	return len;
2285 
2286 out_unlock:
2287 	if (sk_locked)
2288 		unix_state_unlock(sk);
2289 	unix_state_unlock(other);
2290 out_sock_put:
2291 	sock_put(other);
2292 out_free:
2293 	consume_skb(skb);
2294 out:
2295 	scm_destroy(&scm);
2296 	return err;
2297 }
2298 
2299 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2300  * bytes, and a minimum of a full page.
2301  */
2302 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
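/* e.g. 32 KiB with 4 KiB pages; a single page when PAGE_SIZE exceeds 32 KiB. */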
2303 
2304 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2305 static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
2306 		     struct scm_cookie *scm, bool fds_sent)
2307 {
2308 	struct unix_sock *ousk = unix_sk(other);
2309 	struct sk_buff *skb;
2310 	int err;
2311 
2312 	skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2313 
2314 	if (!skb)
2315 		return err;
2316 
2317 	err = unix_scm_to_skb(scm, skb, !fds_sent);
2318 	if (err < 0)
2319 		goto out;
2320 
2321 	err = unix_maybe_add_creds(skb, sk, other);
2322 	if (err)
2323 		goto out;
2324 
2325 	skb_put(skb, 1);
2326 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2327 
2328 	if (err)
2329 		goto out;
2330 
2331 	unix_state_lock(other);
2332 
2333 	if (sock_flag(other, SOCK_DEAD) ||
2334 	    (other->sk_shutdown & RCV_SHUTDOWN)) {
2335 		err = -EPIPE;
2336 		goto out_unlock;
2337 	}
2338 
2339 	if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2340 		err = -EPERM;
2341 		goto out_unlock;
2342 	}
2343 
2344 	scm_stat_add(other, skb);
2345 
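	/* Remember which queued skb carries the OOB byte so the receive side
	 * can stop at (or pull out) the mark.
	 */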
2346 	spin_lock(&other->sk_receive_queue.lock);
2347 	WRITE_ONCE(ousk->oob_skb, skb);
2348 	WRITE_ONCE(ousk->inq_len, ousk->inq_len + 1);
2349 	__skb_queue_tail(&other->sk_receive_queue, skb);
2350 	spin_unlock(&other->sk_receive_queue.lock);
2351 
2352 	sk_send_sigurg(other);
2353 	unix_state_unlock(other);
2354 	other->sk_data_ready(other);
2355 
2356 	return 0;
2357 out_unlock:
2358 	unix_state_unlock(other);
2359 out:
2360 	consume_skb(skb);
2361 	return err;
2362 }
2363 #endif
2364 
2365 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2366 			       size_t len)
2367 {
2368 	struct sock *sk = sock->sk;
2369 	struct sk_buff *skb = NULL;
2370 	struct sock *other = NULL;
2371 	struct unix_sock *otheru;
2372 	struct scm_cookie scm;
2373 	bool fds_sent = false;
2374 	int err, sent = 0;
2375 
2376 	err = scm_send(sock, msg, &scm, false);
2377 	if (err < 0)
2378 		return err;
2379 
2380 	if (msg->msg_flags & MSG_OOB) {
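		/* The last byte is carved off here and sent out-of-band via
		 * queue_oob() once the in-band data has been queued.
		 */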
2381 		err = -EOPNOTSUPP;
2382 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2383 		if (len)
2384 			len--;
2385 		else
2386 #endif
2387 			goto out_err;
2388 	}
2389 
2390 	if (msg->msg_namelen) {
2391 		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2392 		goto out_err;
2393 	}
2394 
2395 	other = unix_peer(sk);
2396 	if (!other) {
2397 		err = -ENOTCONN;
2398 		goto out_err;
2399 	}
2400 
2401 	otheru = unix_sk(other);
2402 
2403 	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2404 		goto out_pipe;
2405 
2406 	while (sent < len) {
2407 		int size = len - sent;
2408 		int data_len;
2409 
2410 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2411 			skb = sock_alloc_send_pskb(sk, 0, 0,
2412 						   msg->msg_flags & MSG_DONTWAIT,
2413 						   &err, 0);
2414 		} else {
2415 			/* Keep two messages in the pipe so it schedules better */
2416 			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2417 
2418 			/* allow fallback to order-0 allocations */
2419 			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2420 
2421 			data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2422 
2423 			data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2424 
2425 			skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2426 						   msg->msg_flags & MSG_DONTWAIT, &err,
2427 						   get_order(UNIX_SKB_FRAGS_SZ));
2428 		}
2429 		if (!skb)
2430 			goto out_err;
2431 
2432 		/* Only send the fds in the first buffer */
2433 		err = unix_scm_to_skb(&scm, skb, !fds_sent);
2434 		if (err < 0)
2435 			goto out_free;
2436 
2437 		fds_sent = true;
2438 
2439 		err = unix_maybe_add_creds(skb, sk, other);
2440 		if (err)
2441 			goto out_free;
2442 
2443 		if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2444 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2445 			err = skb_splice_from_iter(skb, &msg->msg_iter, size);
2446 			if (err < 0)
2447 				goto out_free;
2448 
2449 			size = err;
2450 			refcount_add(size, &sk->sk_wmem_alloc);
2451 		} else {
2452 			skb_put(skb, size - data_len);
2453 			skb->data_len = data_len;
2454 			skb->len = size;
2455 			err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2456 			if (err)
2457 				goto out_free;
2458 		}
2459 
2460 		unix_state_lock(other);
2461 
2462 		if (sock_flag(other, SOCK_DEAD) ||
2463 		    (other->sk_shutdown & RCV_SHUTDOWN))
2464 			goto out_pipe_unlock;
2465 
2466 		if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2467 			unix_state_unlock(other);
2468 			err = -EPERM;
2469 			goto out_free;
2470 		}
2471 
2472 		scm_stat_add(other, skb);
2473 
2474 		spin_lock(&other->sk_receive_queue.lock);
2475 		WRITE_ONCE(otheru->inq_len, otheru->inq_len + skb->len);
2476 		__skb_queue_tail(&other->sk_receive_queue, skb);
2477 		spin_unlock(&other->sk_receive_queue.lock);
2478 
2479 		unix_state_unlock(other);
2480 		other->sk_data_ready(other);
2481 		sent += size;
2482 	}
2483 
2484 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2485 	if (msg->msg_flags & MSG_OOB) {
2486 		err = queue_oob(sk, msg, other, &scm, fds_sent);
2487 		if (err)
2488 			goto out_err;
2489 		sent++;
2490 	}
2491 #endif
2492 
2493 	scm_destroy(&scm);
2494 
2495 	return sent;
2496 
2497 out_pipe_unlock:
2498 	unix_state_unlock(other);
2499 out_pipe:
2500 	if (!sent && !(msg->msg_flags & MSG_NOSIGNAL))
2501 		send_sig(SIGPIPE, current, 0);
2502 	err = -EPIPE;
2503 out_free:
2504 	consume_skb(skb);
2505 out_err:
2506 	scm_destroy(&scm);
2507 	return sent ? : err;
2508 }
2509 
2510 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2511 				  size_t len)
2512 {
2513 	int err;
2514 	struct sock *sk = sock->sk;
2515 
2516 	err = sock_error(sk);
2517 	if (err)
2518 		return err;
2519 
2520 	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2521 		return -ENOTCONN;
2522 
2523 	if (msg->msg_namelen)
2524 		msg->msg_namelen = 0;
2525 
2526 	return unix_dgram_sendmsg(sock, msg, len);
2527 }
2528 
2529 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2530 				  size_t size, int flags)
2531 {
2532 	struct sock *sk = sock->sk;
2533 
2534 	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2535 		return -ENOTCONN;
2536 
2537 	return unix_dgram_recvmsg(sock, msg, size, flags);
2538 }
2539 
2540 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2541 {
2542 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2543 
2544 	if (addr) {
2545 		msg->msg_namelen = addr->len;
2546 		memcpy(msg->msg_name, addr->name, addr->len);
2547 	}
2548 }
2549 
2550 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2551 			 int flags)
2552 {
2553 	struct scm_cookie scm;
2554 	struct socket *sock = sk->sk_socket;
2555 	struct unix_sock *u = unix_sk(sk);
2556 	struct sk_buff *skb, *last;
2557 	long timeo;
2558 	int skip;
2559 	int err;
2560 
2561 	err = -EOPNOTSUPP;
2562 	if (flags & MSG_OOB)
2563 		goto out;
2564 
2565 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2566 
2567 	do {
2568 		mutex_lock(&u->iolock);
2569 
2570 		skip = sk_peek_offset(sk, flags);
2571 		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2572 					      &skip, &err, &last);
2573 		if (skb) {
2574 			if (!(flags & MSG_PEEK))
2575 				scm_stat_del(sk, skb);
2576 			break;
2577 		}
2578 
2579 		mutex_unlock(&u->iolock);
2580 
2581 		if (err != -EAGAIN)
2582 			break;
2583 	} while (timeo &&
2584 		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2585 					      &err, &timeo, last));
2586 
2587 	if (!skb) { /* implies iolock unlocked */
2588 		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2589 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2590 		    (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN))
2591 			err = 0;
2592 		goto out;
2593 	}
2594 
2595 	if (wq_has_sleeper(&u->peer_wait))
2596 		wake_up_interruptible_sync_poll(&u->peer_wait,
2597 						EPOLLOUT | EPOLLWRNORM |
2598 						EPOLLWRBAND);
2599 
2600 	if (msg->msg_name) {
2601 		unix_copy_addr(msg, skb->sk);
2602 
2603 		BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2604 						      msg->msg_name,
2605 						      &msg->msg_namelen);
2606 	}
2607 
2608 	if (size > skb->len - skip)
2609 		size = skb->len - skip;
2610 	else if (size < skb->len - skip)
2611 		msg->msg_flags |= MSG_TRUNC;
2612 
2613 	err = skb_copy_datagram_msg(skb, skip, msg, size);
2614 	if (err)
2615 		goto out_free;
2616 
2617 	if (sock_flag(sk, SOCK_RCVTSTAMP))
2618 		__sock_recv_timestamp(msg, sk, skb);
2619 
2620 	memset(&scm, 0, sizeof(scm));
2621 
2622 	unix_skb_to_scm(skb, &scm);
2623 
2624 	if (!(flags & MSG_PEEK)) {
2625 		if (UNIXCB(skb).fp)
2626 			unix_detach_fds(&scm, skb);
2627 
2628 		sk_peek_offset_bwd(sk, skb->len);
2629 	} else {
2630 		/* It is questionable: on PEEK we could:
2631 		/* It is questionable what to do on PEEK. We could:
2632 		 *  - not return fds - good, but too simple 8)
2633 		 *  - return fds but not return them on read (old strategy,
2634 		 *    apparently wrong)
2635 		 *  - clone fds (chosen for now; it is the most universal
2636 		 *    solution)
2637 		 *
2638 		 * POSIX 1003.1g does not actually define this clearly
2639 		 * at all.  POSIX 1003.1g doesn't define a lot of things
2640 		 * clearly, however!
2641 		 *
2642 		 */
2643 		sk_peek_offset_fwd(sk, size);
2644 
2645 		if (UNIXCB(skb).fp)
2646 			unix_peek_fds(&scm, skb);
2647 	}
2648 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2649 
2650 	scm_recv_unix(sock, msg, &scm, flags);
2651 
2652 out_free:
2653 	skb_free_datagram(sk, skb);
2654 	mutex_unlock(&u->iolock);
2655 out:
2656 	return err;
2657 }
2658 
2659 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2660 			      int flags)
2661 {
2662 	struct sock *sk = sock->sk;
2663 
2664 #ifdef CONFIG_BPF_SYSCALL
2665 	const struct proto *prot = READ_ONCE(sk->sk_prot);
2666 
2667 	if (prot != &unix_dgram_proto)
2668 		return prot->recvmsg(sk, msg, size, flags, NULL);
2669 #endif
2670 	return __unix_dgram_recvmsg(sk, msg, size, flags);
2671 }
2672 
2673 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2674 {
2675 	struct unix_sock *u = unix_sk(sk);
2676 	struct sk_buff *skb;
2677 	int err;
2678 
2679 	mutex_lock(&u->iolock);
2680 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2681 	mutex_unlock(&u->iolock);
2682 	if (!skb)
2683 		return err;
2684 
2685 	return recv_actor(sk, skb);
2686 }
2687 
2688 /*
2689  *	Sleep until more data has arrived. But check for races..
2690  */
2691 static long unix_stream_data_wait(struct sock *sk, long timeo,
2692 				  struct sk_buff *last, unsigned int last_len,
2693 				  bool freezable)
2694 {
2695 	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2696 	struct sk_buff *tail;
2697 	DEFINE_WAIT(wait);
2698 
2699 	unix_state_lock(sk);
2700 
2701 	for (;;) {
2702 		prepare_to_wait(sk_sleep(sk), &wait, state);
2703 
2704 		tail = skb_peek_tail(&sk->sk_receive_queue);
2705 		if (tail != last ||
2706 		    (tail && tail->len != last_len) ||
2707 		    sk->sk_err ||
2708 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2709 		    signal_pending(current) ||
2710 		    !timeo)
2711 			break;
2712 
2713 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2714 		unix_state_unlock(sk);
2715 		timeo = schedule_timeout(timeo);
2716 		unix_state_lock(sk);
2717 
2718 		if (sock_flag(sk, SOCK_DEAD))
2719 			break;
2720 
2721 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2722 	}
2723 
2724 	finish_wait(sk_sleep(sk), &wait);
2725 	unix_state_unlock(sk);
2726 	return timeo;
2727 }
2728 
2729 struct unix_stream_read_state {
2730 	int (*recv_actor)(struct sk_buff *, int, int,
2731 			  struct unix_stream_read_state *);
2732 	struct socket *socket;
2733 	struct msghdr *msg;
2734 	struct pipe_inode_info *pipe;
2735 	size_t size;
2736 	int flags;
2737 	unsigned int splice_flags;
2738 };
2739 
2740 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2741 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2742 {
2743 	struct sk_buff *oob_skb, *read_skb = NULL;
2744 	struct socket *sock = state->socket;
2745 	struct sock *sk = sock->sk;
2746 	struct unix_sock *u = unix_sk(sk);
2747 	int chunk = 1;
2748 
2749 	mutex_lock(&u->iolock);
2750 	unix_state_lock(sk);
2751 	spin_lock(&sk->sk_receive_queue.lock);
2752 
2753 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2754 		spin_unlock(&sk->sk_receive_queue.lock);
2755 		unix_state_unlock(sk);
2756 		mutex_unlock(&u->iolock);
2757 		return -EINVAL;
2758 	}
2759 
2760 	oob_skb = u->oob_skb;
2761 
2762 	if (!(state->flags & MSG_PEEK)) {
2763 		WRITE_ONCE(u->oob_skb, NULL);
2764 		WRITE_ONCE(u->inq_len, u->inq_len - 1);
2765 
2766 		if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
2767 		    !unix_skb_len(oob_skb->prev)) {
2768 			read_skb = oob_skb->prev;
2769 			__skb_unlink(read_skb, &sk->sk_receive_queue);
2770 		}
2771 	}
2772 
2773 	spin_unlock(&sk->sk_receive_queue.lock);
2774 	unix_state_unlock(sk);
2775 
2776 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
2777 
2778 	if (!(state->flags & MSG_PEEK))
2779 		UNIXCB(oob_skb).consumed += 1;
2780 
2781 	mutex_unlock(&u->iolock);
2782 
2783 	consume_skb(read_skb);
2784 
2785 	if (chunk < 0)
2786 		return -EFAULT;
2787 
2788 	state->msg->msg_flags |= MSG_OOB;
2789 	return 1;
2790 }
2791 
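/* Pick the skb a stream read should use next when an OOB byte may be queued:
 * a normal read stops short of the OOB mark once some data has been copied,
 * and the OOB skb itself is either consumed inline (SOCK_URGINLINE) or
 * skipped.
 */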
2792 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2793 				  int flags, int copied)
2794 {
2795 	struct sk_buff *read_skb = NULL, *unread_skb = NULL;
2796 	struct unix_sock *u = unix_sk(sk);
2797 
2798 	if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
2799 		return skb;
2800 
2801 	spin_lock(&sk->sk_receive_queue.lock);
2802 
2803 	if (!unix_skb_len(skb)) {
2804 		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
2805 			skb = NULL;
2806 		} else if (flags & MSG_PEEK) {
2807 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2808 		} else {
2809 			read_skb = skb;
2810 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2811 			__skb_unlink(read_skb, &sk->sk_receive_queue);
2812 		}
2813 
2814 		if (!skb)
2815 			goto unlock;
2816 	}
2817 
2818 	if (skb != u->oob_skb)
2819 		goto unlock;
2820 
2821 	if (copied) {
2822 		skb = NULL;
2823 	} else if (!(flags & MSG_PEEK)) {
2824 		WRITE_ONCE(u->oob_skb, NULL);
2825 
2826 		if (!sock_flag(sk, SOCK_URGINLINE)) {
2827 			__skb_unlink(skb, &sk->sk_receive_queue);
2828 			unread_skb = skb;
2829 			skb = skb_peek(&sk->sk_receive_queue);
2830 		}
2831 	} else if (!sock_flag(sk, SOCK_URGINLINE)) {
2832 		skb = skb_peek_next(skb, &sk->sk_receive_queue);
2833 	}
2834 
2835 unlock:
2836 	spin_unlock(&sk->sk_receive_queue.lock);
2837 
2838 	consume_skb(read_skb);
2839 	kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2840 
2841 	return skb;
2842 }
2843 #endif
2844 
2845 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2846 {
2847 	struct sk_buff_head *queue = &sk->sk_receive_queue;
2848 	struct unix_sock *u = unix_sk(sk);
2849 	struct sk_buff *skb;
2850 	int err;
2851 
2852 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2853 		return -ENOTCONN;
2854 
2855 	err = sock_error(sk);
2856 	if (err)
2857 		return err;
2858 
2859 	mutex_lock(&u->iolock);
2860 	spin_lock(&queue->lock);
2861 
2862 	skb = __skb_dequeue(queue);
2863 	if (!skb) {
2864 		spin_unlock(&queue->lock);
2865 		mutex_unlock(&u->iolock);
2866 		return -EAGAIN;
2867 	}
2868 
2869 	WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
2870 
2871 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2872 	if (skb == u->oob_skb) {
2873 		WRITE_ONCE(u->oob_skb, NULL);
2874 		spin_unlock(&queue->lock);
2875 		mutex_unlock(&u->iolock);
2876 
2877 		kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2878 		return -EAGAIN;
2879 	}
2880 #endif
2881 
2882 	spin_unlock(&queue->lock);
2883 	mutex_unlock(&u->iolock);
2884 
2885 	return recv_actor(sk, skb);
2886 }
2887 
2888 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2889 				    bool freezable)
2890 {
2891 	int noblock = state->flags & MSG_DONTWAIT;
2892 	struct socket *sock = state->socket;
2893 	struct msghdr *msg = state->msg;
2894 	struct sock *sk = sock->sk;
2895 	size_t size = state->size;
2896 	int flags = state->flags;
2897 	bool check_creds = false;
2898 	struct scm_cookie scm;
2899 	unsigned int last_len;
2900 	struct unix_sock *u;
2901 	int copied = 0;
2902 	int err = 0;
2903 	long timeo;
2904 	int target;
2905 	int skip;
2906 
2907 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2908 		err = -EINVAL;
2909 		goto out;
2910 	}
2911 
2912 	if (unlikely(flags & MSG_OOB)) {
2913 		err = -EOPNOTSUPP;
2914 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2915 		err = unix_stream_recv_urg(state);
2916 #endif
2917 		goto out;
2918 	}
2919 
2920 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2921 	timeo = sock_rcvtimeo(sk, noblock);
2922 
2923 	memset(&scm, 0, sizeof(scm));
2924 
2925 	u = unix_sk(sk);
2926 
2927 redo:
2928 	/* Lock the socket to prevent the queue from being reordered
2929 	 * while we sleep copying data out to the message.
2930 	 */
2931 	mutex_lock(&u->iolock);
2932 
2933 	skip = max(sk_peek_offset(sk, flags), 0);
2934 
2935 	do {
2936 		struct sk_buff *skb, *last;
2937 		int chunk;
2938 
2939 		unix_state_lock(sk);
2940 		if (sock_flag(sk, SOCK_DEAD)) {
2941 			err = -ECONNRESET;
2942 			goto unlock;
2943 		}
2944 		last = skb = skb_peek(&sk->sk_receive_queue);
2945 		last_len = last ? last->len : 0;
2946 
2947 again:
2948 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2949 		if (skb) {
2950 			skb = manage_oob(skb, sk, flags, copied);
2951 			if (!skb && copied) {
2952 				unix_state_unlock(sk);
2953 				break;
2954 			}
2955 		}
2956 #endif
2957 		if (skb == NULL) {
2958 			if (copied >= target)
2959 				goto unlock;
2960 
2961 			/*
2962 			 *	POSIX 1003.1g mandates this order.
2963 			 */
2964 
2965 			err = sock_error(sk);
2966 			if (err)
2967 				goto unlock;
2968 			if (sk->sk_shutdown & RCV_SHUTDOWN)
2969 				goto unlock;
2970 
2971 			unix_state_unlock(sk);
2972 			if (!timeo) {
2973 				err = -EAGAIN;
2974 				break;
2975 			}
2976 
2977 			mutex_unlock(&u->iolock);
2978 
2979 			timeo = unix_stream_data_wait(sk, timeo, last,
2980 						      last_len, freezable);
2981 
2982 			if (signal_pending(current)) {
2983 				err = sock_intr_errno(timeo);
2984 				scm_destroy(&scm);
2985 				goto out;
2986 			}
2987 
2988 			goto redo;
2989 unlock:
2990 			unix_state_unlock(sk);
2991 			break;
2992 		}
2993 
2994 		while (skip >= unix_skb_len(skb)) {
2995 			skip -= unix_skb_len(skb);
2996 			last = skb;
2997 			last_len = skb->len;
2998 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
2999 			if (!skb)
3000 				goto again;
3001 		}
3002 
3003 		unix_state_unlock(sk);
3004 
3005 		if (check_creds) {
3006 			/* Never glue messages from different writers */
3007 			if (!unix_skb_scm_eq(skb, &scm))
3008 				break;
3009 		} else if (unix_may_passcred(sk)) {
3010 			/* Copy credentials */
3011 			unix_skb_to_scm(skb, &scm);
3012 			check_creds = true;
3013 		}
3014 
3015 		/* Copy address just once */
3016 		if (msg && msg->msg_name) {
3017 			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
3018 
3019 			unix_copy_addr(msg, skb->sk);
3020 			BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, msg->msg_name,
3021 							      &msg->msg_namelen);
3022 
3023 			sunaddr = NULL;
3024 		}
3025 
3026 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
3027 		chunk = state->recv_actor(skb, skip, chunk, state);
3028 		if (chunk < 0) {
3029 			if (copied == 0)
3030 				copied = -EFAULT;
3031 			break;
3032 		}
3033 		copied += chunk;
3034 		size -= chunk;
3035 
3036 		/* Mark read part of skb as used */
3037 		if (!(flags & MSG_PEEK)) {
3038 			UNIXCB(skb).consumed += chunk;
3039 
3040 			sk_peek_offset_bwd(sk, chunk);
3041 
3042 			if (UNIXCB(skb).fp) {
3043 				scm_stat_del(sk, skb);
3044 				unix_detach_fds(&scm, skb);
3045 			}
3046 
3047 			if (unix_skb_len(skb))
3048 				break;
3049 
3050 			spin_lock(&sk->sk_receive_queue.lock);
3051 			WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
3052 			__skb_unlink(skb, &sk->sk_receive_queue);
3053 			spin_unlock(&sk->sk_receive_queue.lock);
3054 
3055 			consume_skb(skb);
3056 
3057 			if (scm.fp)
3058 				break;
3059 		} else {
3060 			/* It is questionable, see note in unix_dgram_recvmsg.
3061 			 */
3062 			if (UNIXCB(skb).fp)
3063 				unix_peek_fds(&scm, skb);
3064 
3065 			sk_peek_offset_fwd(sk, chunk);
3066 
3067 			if (UNIXCB(skb).fp)
3068 				break;
3069 
3070 			skip = 0;
3071 			last = skb;
3072 			last_len = skb->len;
3073 			unix_state_lock(sk);
3074 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
3075 			if (skb)
3076 				goto again;
3077 			unix_state_unlock(sk);
3078 			break;
3079 		}
3080 	} while (size);
3081 
3082 	mutex_unlock(&u->iolock);
3083 	if (msg) {
3084 		bool do_cmsg = READ_ONCE(u->recvmsg_inq);
3085 
3086 		scm_recv_unix(sock, msg, &scm, flags);
3087 
3088 		if ((do_cmsg | msg->msg_get_inq) && (copied ?: err) >= 0) {
3089 			msg->msg_inq = READ_ONCE(u->inq_len);
3090 			if (do_cmsg)
3091 				put_cmsg(msg, SOL_SOCKET, SCM_INQ,
3092 					 sizeof(msg->msg_inq), &msg->msg_inq);
3093 		}
3094 	} else {
3095 		scm_destroy(&scm);
3096 	}
3097 out:
3098 	return copied ? : err;
3099 }
3100 
3101 static int unix_stream_read_actor(struct sk_buff *skb,
3102 				  int skip, int chunk,
3103 				  struct unix_stream_read_state *state)
3104 {
3105 	int ret;
3106 
3107 	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
3108 				    state->msg, chunk);
3109 	return ret ?: chunk;
3110 }
3111 
3112 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
3113 			  size_t size, int flags)
3114 {
3115 	struct unix_stream_read_state state = {
3116 		.recv_actor = unix_stream_read_actor,
3117 		.socket = sk->sk_socket,
3118 		.msg = msg,
3119 		.size = size,
3120 		.flags = flags
3121 	};
3122 
3123 	return unix_stream_read_generic(&state, true);
3124 }
3125 
3126 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
3127 			       size_t size, int flags)
3128 {
3129 	struct unix_stream_read_state state = {
3130 		.recv_actor = unix_stream_read_actor,
3131 		.socket = sock,
3132 		.msg = msg,
3133 		.size = size,
3134 		.flags = flags
3135 	};
3136 
3137 #ifdef CONFIG_BPF_SYSCALL
3138 	struct sock *sk = sock->sk;
3139 	const struct proto *prot = READ_ONCE(sk->sk_prot);
3140 
3141 	if (prot != &unix_stream_proto)
3142 		return prot->recvmsg(sk, msg, size, flags, NULL);
3143 #endif
3144 	return unix_stream_read_generic(&state, true);
3145 }
3146 
3147 static int unix_stream_splice_actor(struct sk_buff *skb,
3148 				    int skip, int chunk,
3149 				    struct unix_stream_read_state *state)
3150 {
3151 	return skb_splice_bits(skb, state->socket->sk,
3152 			       UNIXCB(skb).consumed + skip,
3153 			       state->pipe, chunk, state->splice_flags);
3154 }
3155 
3156 static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
3157 				       struct pipe_inode_info *pipe,
3158 				       size_t size, unsigned int flags)
3159 {
3160 	struct unix_stream_read_state state = {
3161 		.recv_actor = unix_stream_splice_actor,
3162 		.socket = sock,
3163 		.pipe = pipe,
3164 		.size = size,
3165 		.splice_flags = flags,
3166 	};
3167 
3168 	if (unlikely(*ppos))
3169 		return -ESPIPE;
3170 
3171 	if (sock->file->f_flags & O_NONBLOCK ||
3172 	    flags & SPLICE_F_NONBLOCK)
3173 		state.flags = MSG_DONTWAIT;
3174 
3175 	return unix_stream_read_generic(&state, false);
3176 }
3177 
3178 static int unix_shutdown(struct socket *sock, int mode)
3179 {
3180 	struct sock *sk = sock->sk;
3181 	struct sock *other;
3182 
3183 	if (mode < SHUT_RD || mode > SHUT_RDWR)
3184 		return -EINVAL;
3185 	/* This maps:
3186 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
3187 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
3188 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3189 	 */
3190 	++mode;
3191 
3192 	unix_state_lock(sk);
3193 	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3194 	other = unix_peer(sk);
3195 	if (other)
3196 		sock_hold(other);
3197 	unix_state_unlock(sk);
3198 	sk->sk_state_change(sk);
3199 
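	/* For connection-oriented sockets, mirror the shutdown on the peer:
	 * our RCV_SHUTDOWN becomes the peer's SEND_SHUTDOWN and vice versa.
	 */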
3200 	if (other &&
3201 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3202 
3203 		int peer_mode = 0;
3204 		const struct proto *prot = READ_ONCE(other->sk_prot);
3205 
3206 		if (prot->unhash)
3207 			prot->unhash(other);
3208 		if (mode & RCV_SHUTDOWN)
3209 			peer_mode |= SEND_SHUTDOWN;
3210 		if (mode & SEND_SHUTDOWN)
3211 			peer_mode |= RCV_SHUTDOWN;
3212 		unix_state_lock(other);
3213 		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3214 		unix_state_unlock(other);
3215 		other->sk_state_change(other);
3216 		if (peer_mode == SHUTDOWN_MASK)
3217 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3218 		else if (peer_mode & RCV_SHUTDOWN)
3219 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3220 	}
3221 	if (other)
3222 		sock_put(other);
3223 
3224 	return 0;
3225 }
3226 
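/* Bytes available to read, as reported by SIOCINQ: streams use the cached
 * inq_len, SOCK_SEQPACKET sums every queued skb, and SOCK_DGRAM reports only
 * the size of the first datagram.
 */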
3227 long unix_inq_len(struct sock *sk)
3228 {
3229 	struct sk_buff *skb;
3230 	long amount = 0;
3231 
3232 	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
3233 		return -EINVAL;
3234 
3235 	if (sk->sk_type == SOCK_STREAM)
3236 		return READ_ONCE(unix_sk(sk)->inq_len);
3237 
3238 	spin_lock(&sk->sk_receive_queue.lock);
3239 	if (sk->sk_type == SOCK_SEQPACKET) {
3240 		skb_queue_walk(&sk->sk_receive_queue, skb)
3241 			amount += unix_skb_len(skb);
3242 	} else {
3243 		skb = skb_peek(&sk->sk_receive_queue);
3244 		if (skb)
3245 			amount = skb->len;
3246 	}
3247 	spin_unlock(&sk->sk_receive_queue.lock);
3248 
3249 	return amount;
3250 }
3251 EXPORT_SYMBOL_GPL(unix_inq_len);
3252 
3253 long unix_outq_len(struct sock *sk)
3254 {
3255 	return sk_wmem_alloc_get(sk);
3256 }
3257 EXPORT_SYMBOL_GPL(unix_outq_len);
3258 
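/* SIOCUNIXFILE: hand the caller an O_PATH file descriptor for the filesystem
 * object this socket is bound to.
 */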
3259 static int unix_open_file(struct sock *sk)
3260 {
3261 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3262 		return -EPERM;
3263 
3264 	if (!smp_load_acquire(&unix_sk(sk)->addr))
3265 		return -ENOENT;
3266 
3267 	if (!unix_sk(sk)->path.dentry)
3268 		return -ENOENT;
3269 
3270 	return FD_ADD(O_CLOEXEC, dentry_open(&unix_sk(sk)->path, O_PATH, current_cred()));
3271 }
3272 
3273 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3274 {
3275 	struct sock *sk = sock->sk;
3276 	long amount = 0;
3277 	int err;
3278 
3279 	switch (cmd) {
3280 	case SIOCOUTQ:
3281 		amount = unix_outq_len(sk);
3282 		err = put_user(amount, (int __user *)arg);
3283 		break;
3284 	case SIOCINQ:
3285 		amount = unix_inq_len(sk);
3286 		if (amount < 0)
3287 			err = amount;
3288 		else
3289 			err = put_user(amount, (int __user *)arg);
3290 		break;
3291 	case SIOCUNIXFILE:
3292 		err = unix_open_file(sk);
3293 		break;
3294 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
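	/* SIOCATMARK reports whether the next read would start at the OOB
	 * mark.  Userspace sketch (illustrative only):
	 *
	 *	int at_mark;
	 *	char oob;
	 *
	 *	if (!ioctl(fd, SIOCATMARK, &at_mark) && at_mark)
	 *		recv(fd, &oob, 1, MSG_OOB);
	 */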
3295 	case SIOCATMARK:
3296 		{
3297 			struct unix_sock *u = unix_sk(sk);
3298 			struct sk_buff *skb;
3299 			int answ = 0;
3300 
3301 			mutex_lock(&u->iolock);
3302 
3303 			skb = skb_peek(&sk->sk_receive_queue);
3304 			if (skb) {
3305 				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
3306 				struct sk_buff *next_skb;
3307 
3308 				next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
3309 
3310 				if (skb == oob_skb ||
3311 				    (!unix_skb_len(skb) &&
3312 				     (!oob_skb || next_skb == oob_skb)))
3313 					answ = 1;
3314 			}
3315 
3316 			mutex_unlock(&u->iolock);
3317 
3318 			err = put_user(answ, (int __user *)arg);
3319 		}
3320 		break;
3321 #endif
3322 	default:
3323 		err = -ENOIOCTLCMD;
3324 		break;
3325 	}
3326 	return err;
3327 }
3328 
3329 #ifdef CONFIG_COMPAT
3330 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3331 {
3332 	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3333 }
3334 #endif
3335 
3336 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3337 {
3338 	struct sock *sk = sock->sk;
3339 	unsigned char state;
3340 	__poll_t mask;
3341 	u8 shutdown;
3342 
3343 	sock_poll_wait(file, sock, wait);
3344 	mask = 0;
3345 	shutdown = READ_ONCE(sk->sk_shutdown);
3346 	state = READ_ONCE(sk->sk_state);
3347 
3348 	/* exceptional events? */
3349 	if (READ_ONCE(sk->sk_err))
3350 		mask |= EPOLLERR;
3351 	if (shutdown == SHUTDOWN_MASK)
3352 		mask |= EPOLLHUP;
3353 	if (shutdown & RCV_SHUTDOWN)
3354 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3355 
3356 	/* readable? */
3357 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3358 		mask |= EPOLLIN | EPOLLRDNORM;
3359 	if (sk_is_readable(sk))
3360 		mask |= EPOLLIN | EPOLLRDNORM;
3361 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3362 	if (READ_ONCE(unix_sk(sk)->oob_skb))
3363 		mask |= EPOLLPRI;
3364 #endif
3365 
3366 	/* Connection-based need to check for termination and startup */
3367 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3368 	    state == TCP_CLOSE)
3369 		mask |= EPOLLHUP;
3370 
3371 	/*
3372 	 * We also report the socket as writable when the other side has shut
3373 	 * down the connection. This prevents stuck sockets.
3374 	 */
3375 	if (unix_writable(sk, state))
3376 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3377 
3378 	return mask;
3379 }
3380 
3381 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3382 				    poll_table *wait)
3383 {
3384 	struct sock *sk = sock->sk, *other;
3385 	unsigned int writable;
3386 	unsigned char state;
3387 	__poll_t mask;
3388 	u8 shutdown;
3389 
3390 	sock_poll_wait(file, sock, wait);
3391 	mask = 0;
3392 	shutdown = READ_ONCE(sk->sk_shutdown);
3393 	state = READ_ONCE(sk->sk_state);
3394 
3395 	/* exceptional events? */
3396 	if (READ_ONCE(sk->sk_err) ||
3397 	    !skb_queue_empty_lockless(&sk->sk_error_queue))
3398 		mask |= EPOLLERR |
3399 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3400 
3401 	if (shutdown & RCV_SHUTDOWN)
3402 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3403 	if (shutdown == SHUTDOWN_MASK)
3404 		mask |= EPOLLHUP;
3405 
3406 	/* readable? */
3407 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3408 		mask |= EPOLLIN | EPOLLRDNORM;
3409 	if (sk_is_readable(sk))
3410 		mask |= EPOLLIN | EPOLLRDNORM;
3411 
3412 	/* Connection-based need to check for termination and startup */
3413 	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3414 		mask |= EPOLLHUP;
3415 
3416 	/* No write status requested, avoid expensive OUT tests. */
3417 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3418 		return mask;
3419 
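	/* A connected datagram socket only counts as writable if the peer's
	 * receive queue has room; otherwise hook into the peer's wait queue
	 * so we get woken up when space frees up.
	 */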
3420 	writable = unix_writable(sk, state);
3421 	if (writable) {
3422 		unix_state_lock(sk);
3423 
3424 		other = unix_peer(sk);
3425 		if (other && unix_peer(other) != sk &&
3426 		    unix_recvq_full_lockless(other) &&
3427 		    unix_dgram_peer_wake_me(sk, other))
3428 			writable = 0;
3429 
3430 		unix_state_unlock(sk);
3431 	}
3432 
3433 	if (writable)
3434 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3435 	else
3436 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3437 
3438 	return mask;
3439 }
3440 
3441 #ifdef CONFIG_PROC_FS
3442 
3443 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3444 
3445 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3446 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3447 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
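/* The seq_file position encodes both the hash bucket (upper bits) and the
 * 1-based offset of the socket within that bucket (lower BUCKET_SPACE bits).
 */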
3448 
3449 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3450 {
3451 	unsigned long offset = get_offset(*pos);
3452 	unsigned long bucket = get_bucket(*pos);
3453 	unsigned long count = 0;
3454 	struct sock *sk;
3455 
3456 	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3457 	     sk; sk = sk_next(sk)) {
3458 		if (++count == offset)
3459 			break;
3460 	}
3461 
3462 	return sk;
3463 }
3464 
3465 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3466 {
3467 	unsigned long bucket = get_bucket(*pos);
3468 	struct net *net = seq_file_net(seq);
3469 	struct sock *sk;
3470 
3471 	while (bucket < UNIX_HASH_SIZE) {
3472 		spin_lock(&net->unx.table.locks[bucket]);
3473 
3474 		sk = unix_from_bucket(seq, pos);
3475 		if (sk)
3476 			return sk;
3477 
3478 		spin_unlock(&net->unx.table.locks[bucket]);
3479 
3480 		*pos = set_bucket_offset(++bucket, 1);
3481 	}
3482 
3483 	return NULL;
3484 }
3485 
3486 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3487 				  loff_t *pos)
3488 {
3489 	unsigned long bucket = get_bucket(*pos);
3490 
3491 	sk = sk_next(sk);
3492 	if (sk)
3493 		return sk;
3494 
3495 
3496 	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3497 
3498 	*pos = set_bucket_offset(++bucket, 1);
3499 
3500 	return unix_get_first(seq, pos);
3501 }
3502 
3503 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3504 {
3505 	if (!*pos)
3506 		return SEQ_START_TOKEN;
3507 
3508 	return unix_get_first(seq, pos);
3509 }
3510 
3511 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3512 {
3513 	++*pos;
3514 
3515 	if (v == SEQ_START_TOKEN)
3516 		return unix_get_first(seq, pos);
3517 
3518 	return unix_get_next(seq, v, pos);
3519 }
3520 
3521 static void unix_seq_stop(struct seq_file *seq, void *v)
3522 {
3523 	struct sock *sk = v;
3524 
3525 	if (sk)
3526 		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3527 }
3528 
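/* One /proc/net/unix line per socket: kernel address, refcount, protocol
 * (always 0), flags (__SO_ACCEPTCON for listeners), type, state, inode and,
 * for bound sockets, the path ('@' marks abstract names).
 */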
3529 static int unix_seq_show(struct seq_file *seq, void *v)
3530 {
3532 	if (v == SEQ_START_TOKEN)
3533 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
3534 			 "Inode Path\n");
3535 	else {
3536 		struct sock *s = v;
3537 		struct unix_sock *u = unix_sk(s);
3538 		unix_state_lock(s);
3539 
3540 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3541 			s,
3542 			refcount_read(&s->sk_refcnt),
3543 			0,
3544 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3545 			s->sk_type,
3546 			s->sk_socket ?
3547 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3548 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3549 			sock_i_ino(s));
3550 
3551 		if (u->addr) {	/* under a hash table lock here */
3552 			int i, len;
3553 			seq_putc(seq, ' ');
3554 
3555 			i = 0;
3556 			len = u->addr->len -
3557 				offsetof(struct sockaddr_un, sun_path);
3558 			if (u->addr->name->sun_path[0]) {
3559 				len--;
3560 			} else {
3561 				seq_putc(seq, '@');
3562 				i++;
3563 			}
3564 			for ( ; i < len; i++)
3565 				seq_putc(seq, u->addr->name->sun_path[i] ?:
3566 					 '@');
3567 		}
3568 		unix_state_unlock(s);
3569 		seq_putc(seq, '\n');
3570 	}
3571 
3572 	return 0;
3573 }
3574 
3575 static const struct seq_operations unix_seq_ops = {
3576 	.start  = unix_seq_start,
3577 	.next   = unix_seq_next,
3578 	.stop   = unix_seq_stop,
3579 	.show   = unix_seq_show,
3580 };
3581 
3582 #ifdef CONFIG_BPF_SYSCALL
3583 struct bpf_unix_iter_state {
3584 	struct seq_net_private p;
3585 	unsigned int cur_sk;
3586 	unsigned int end_sk;
3587 	unsigned int max_sk;
3588 	struct sock **batch;
3589 	bool st_bucket_done;
3590 };
3591 
3592 struct bpf_iter__unix {
3593 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
3594 	__bpf_md_ptr(struct unix_sock *, unix_sk);
3595 	uid_t uid __aligned(8);
3596 };
3597 
3598 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3599 			      struct unix_sock *unix_sk, uid_t uid)
3600 {
3601 	struct bpf_iter__unix ctx;
3602 
3603 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
3604 	ctx.meta = meta;
3605 	ctx.unix_sk = unix_sk;
3606 	ctx.uid = uid;
3607 	return bpf_iter_run_prog(prog, &ctx);
3608 }
3609 
3610 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3612 {
3613 	struct bpf_unix_iter_state *iter = seq->private;
3614 	unsigned int expected = 1;
3615 	struct sock *sk;
3616 
3617 	sock_hold(start_sk);
3618 	iter->batch[iter->end_sk++] = start_sk;
3619 
3620 	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3621 		if (iter->end_sk < iter->max_sk) {
3622 			sock_hold(sk);
3623 			iter->batch[iter->end_sk++] = sk;
3624 		}
3625 
3626 		expected++;
3627 	}
3628 
3629 	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3630 
3631 	return expected;
3632 }
3633 
3634 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3635 {
3636 	while (iter->cur_sk < iter->end_sk)
3637 		sock_put(iter->batch[iter->cur_sk++]);
3638 }
3639 
3640 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3641 				       unsigned int new_batch_sz)
3642 {
3643 	struct sock **new_batch;
3644 
3645 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3646 			     GFP_USER | __GFP_NOWARN);
3647 	if (!new_batch)
3648 		return -ENOMEM;
3649 
3650 	bpf_iter_unix_put_batch(iter);
3651 	kvfree(iter->batch);
3652 	iter->batch = new_batch;
3653 	iter->max_sk = new_batch_sz;
3654 
3655 	return 0;
3656 }
3657 
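/* Grab references to all sockets in the current bucket up front so that
 * seq_show() can lock each socket without the bucket spinlock held; if the
 * batch array turns out to be too small it is grown and the bucket re-read.
 */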
3658 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3659 					loff_t *pos)
3660 {
3661 	struct bpf_unix_iter_state *iter = seq->private;
3662 	unsigned int expected;
3663 	bool resized = false;
3664 	struct sock *sk;
3665 
3666 	if (iter->st_bucket_done)
3667 		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3668 
3669 again:
3670 	/* Get a new batch */
3671 	iter->cur_sk = 0;
3672 	iter->end_sk = 0;
3673 
3674 	sk = unix_get_first(seq, pos);
3675 	if (!sk)
3676 		return NULL; /* Done */
3677 
3678 	expected = bpf_iter_unix_hold_batch(seq, sk);
3679 
3680 	if (iter->end_sk == expected) {
3681 		iter->st_bucket_done = true;
3682 		return sk;
3683 	}
3684 
3685 	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3686 		resized = true;
3687 		goto again;
3688 	}
3689 
3690 	return sk;
3691 }
3692 
3693 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3694 {
3695 	if (!*pos)
3696 		return SEQ_START_TOKEN;
3697 
3698 	/* bpf iter does not support lseek, so it always
3699 	 * continues from where it was stop()-ped.
3700 	 */
3701 	return bpf_iter_unix_batch(seq, pos);
3702 }
3703 
3704 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3705 {
3706 	struct bpf_unix_iter_state *iter = seq->private;
3707 	struct sock *sk;
3708 
3709 	/* Whenever seq_next() is called, iter->cur_sk has already been
3710 	 * through seq_show(), so advance to the next sk in
3711 	 * the batch.
3712 	 */
3713 	if (iter->cur_sk < iter->end_sk)
3714 		sock_put(iter->batch[iter->cur_sk++]);
3715 
3716 	++*pos;
3717 
3718 	if (iter->cur_sk < iter->end_sk)
3719 		sk = iter->batch[iter->cur_sk];
3720 	else
3721 		sk = bpf_iter_unix_batch(seq, pos);
3722 
3723 	return sk;
3724 }
3725 
3726 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3727 {
3728 	struct bpf_iter_meta meta;
3729 	struct bpf_prog *prog;
3730 	struct sock *sk = v;
3731 	uid_t uid;
3732 	bool slow;
3733 	int ret;
3734 
3735 	if (v == SEQ_START_TOKEN)
3736 		return 0;
3737 
3738 	slow = lock_sock_fast(sk);
3739 
3740 	if (unlikely(sk_unhashed(sk))) {
3741 		ret = SEQ_SKIP;
3742 		goto unlock;
3743 	}
3744 
3745 	uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
3746 	meta.seq = seq;
3747 	prog = bpf_iter_get_info(&meta, false);
3748 	ret = unix_prog_seq_show(prog, &meta, v, uid);
3749 unlock:
3750 	unlock_sock_fast(sk, slow);
3751 	return ret;
3752 }
3753 
3754 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3755 {
3756 	struct bpf_unix_iter_state *iter = seq->private;
3757 	struct bpf_iter_meta meta;
3758 	struct bpf_prog *prog;
3759 
3760 	if (!v) {
3761 		meta.seq = seq;
3762 		prog = bpf_iter_get_info(&meta, true);
3763 		if (prog)
3764 			(void)unix_prog_seq_show(prog, &meta, v, 0);
3765 	}
3766 
3767 	if (iter->cur_sk < iter->end_sk)
3768 		bpf_iter_unix_put_batch(iter);
3769 }
3770 
3771 static const struct seq_operations bpf_iter_unix_seq_ops = {
3772 	.start	= bpf_iter_unix_seq_start,
3773 	.next	= bpf_iter_unix_seq_next,
3774 	.stop	= bpf_iter_unix_seq_stop,
3775 	.show	= bpf_iter_unix_seq_show,
3776 };
3777 #endif
3778 #endif
3779 
3780 static const struct net_proto_family unix_family_ops = {
3781 	.family = PF_UNIX,
3782 	.create = unix_create,
3783 	.owner	= THIS_MODULE,
3784 };
3785 
3786 
3787 static int __net_init unix_net_init(struct net *net)
3788 {
3789 	int i;
3790 
3791 	net->unx.sysctl_max_dgram_qlen = 10;
3792 	if (unix_sysctl_register(net))
3793 		goto out;
3794 
3795 #ifdef CONFIG_PROC_FS
3796 	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3797 			     sizeof(struct seq_net_private)))
3798 		goto err_sysctl;
3799 #endif
3800 
3801 	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3802 					      sizeof(spinlock_t), GFP_KERNEL);
3803 	if (!net->unx.table.locks)
3804 		goto err_proc;
3805 
3806 	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3807 						sizeof(struct hlist_head),
3808 						GFP_KERNEL);
3809 	if (!net->unx.table.buckets)
3810 		goto free_locks;
3811 
3812 	for (i = 0; i < UNIX_HASH_SIZE; i++) {
3813 		spin_lock_init(&net->unx.table.locks[i]);
3814 		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
3815 		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3816 	}
3817 
3818 	return 0;
3819 
3820 free_locks:
3821 	kvfree(net->unx.table.locks);
3822 err_proc:
3823 #ifdef CONFIG_PROC_FS
3824 	remove_proc_entry("unix", net->proc_net);
3825 err_sysctl:
3826 #endif
3827 	unix_sysctl_unregister(net);
3828 out:
3829 	return -ENOMEM;
3830 }
3831 
3832 static void __net_exit unix_net_exit(struct net *net)
3833 {
3834 	kvfree(net->unx.table.buckets);
3835 	kvfree(net->unx.table.locks);
3836 	unix_sysctl_unregister(net);
3837 	remove_proc_entry("unix", net->proc_net);
3838 }
3839 
3840 static struct pernet_operations unix_net_ops = {
3841 	.init = unix_net_init,
3842 	.exit = unix_net_exit,
3843 };
3844 
3845 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3846 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3847 		     struct unix_sock *unix_sk, uid_t uid)
3848 
3849 #define INIT_BATCH_SZ 16
3850 
3851 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3852 {
3853 	struct bpf_unix_iter_state *iter = priv_data;
3854 	int err;
3855 
3856 	err = bpf_iter_init_seq_net(priv_data, aux);
3857 	if (err)
3858 		return err;
3859 
3860 	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3861 	if (err) {
3862 		bpf_iter_fini_seq_net(priv_data);
3863 		return err;
3864 	}
3865 
3866 	return 0;
3867 }
3868 
3869 static void bpf_iter_fini_unix(void *priv_data)
3870 {
3871 	struct bpf_unix_iter_state *iter = priv_data;
3872 
3873 	bpf_iter_fini_seq_net(priv_data);
3874 	kvfree(iter->batch);
3875 }
3876 
3877 static const struct bpf_iter_seq_info unix_seq_info = {
3878 	.seq_ops		= &bpf_iter_unix_seq_ops,
3879 	.init_seq_private	= bpf_iter_init_unix,
3880 	.fini_seq_private	= bpf_iter_fini_unix,
3881 	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
3882 };
3883 
3884 static const struct bpf_func_proto *
3885 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3886 			     const struct bpf_prog *prog)
3887 {
3888 	switch (func_id) {
3889 	case BPF_FUNC_setsockopt:
3890 		return &bpf_sk_setsockopt_proto;
3891 	case BPF_FUNC_getsockopt:
3892 		return &bpf_sk_getsockopt_proto;
3893 	default:
3894 		return NULL;
3895 	}
3896 }
3897 
3898 static struct bpf_iter_reg unix_reg_info = {
3899 	.target			= "unix",
3900 	.ctx_arg_info_size	= 1,
3901 	.ctx_arg_info		= {
3902 		{ offsetof(struct bpf_iter__unix, unix_sk),
3903 		  PTR_TO_BTF_ID_OR_NULL },
3904 	},
3905 	.get_func_proto         = bpf_iter_unix_get_func_proto,
3906 	.seq_info		= &unix_seq_info,
3907 };
3908 
3909 static void __init bpf_iter_register(void)
3910 {
3911 	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3912 	if (bpf_iter_reg_target(&unix_reg_info))
3913 		pr_warn("Warning: could not register bpf iterator unix\n");
3914 }
3915 #endif
3916 
3917 static int __init af_unix_init(void)
3918 {
3919 	int i, rc = -1;
3920 
3921 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3922 
3923 	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3924 		spin_lock_init(&bsd_socket_locks[i]);
3925 		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3926 	}
3927 
3928 	rc = proto_register(&unix_dgram_proto, 1);
3929 	if (rc != 0) {
3930 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3931 		goto out;
3932 	}
3933 
3934 	rc = proto_register(&unix_stream_proto, 1);
3935 	if (rc != 0) {
3936 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3937 		proto_unregister(&unix_dgram_proto);
3938 		goto out;
3939 	}
3940 
3941 	sock_register(&unix_family_ops);
3942 	register_pernet_subsys(&unix_net_ops);
3943 	unix_bpf_build_proto();
3944 
3945 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3946 	bpf_iter_register();
3947 #endif
3948 
3949 out:
3950 	return rc;
3951 }
3952 
3953 /* Later than subsys_initcall() because we depend on stuff initialised there */
3954 fs_initcall(af_unix_init);
3955