xref: /linux/net/unix/af_unix.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge amount
 *					of hashed socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina   :	Hash function optimizations
 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie   :	Set peercred for socketpair
 *	     Michal Ostrowski   :       Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *	     				the core infrastructure is doing that
 *	     				for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with a 0 byte, so that this name space does not
 *		  intersect with BSD names.
 */
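
/*
 * A minimal userspace sketch of binding to the abstract namespace
 * described above (illustrative only; the name "example" and its
 * length are arbitrary).  Note that sun_path is NOT NUL terminated
 * here and the address length counts exactly the bytes of the name:
 *
 *	struct sockaddr_un sun;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	sun.sun_path[0] = '\0';			   leading 0 => abstract
 *	memcpy(sun.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */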

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;


static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    the hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by a separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}
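
/*
 * Worked example of the fold above (an illustration, assuming
 * UNIX_HASH_SIZE == 256): for a checksum of 0x12345678,
 *
 *	hash = 0x12345678 ^ 0x00001234 = 0x1234444c
 *	hash = 0x1234444c ^ 0x00123444 = 0x12267008
 *	hash & 0xff                    = 0x08
 *
 * so every byte of the 32-bit checksum influences the final bucket.
 */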

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- it should not be of zero length.
 *		- if it does not start with a zero byte, it should be NUL
 *		  terminated (an FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off-by-one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
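
/*
 * Example of the two cases above (illustrative values): for the
 * filesystem name "/tmp/x" passed with addr_len 9 (a 2-byte sun_family
 * plus 7 path bytes including the NUL), the function returns
 * strlen("/tmp/x") + 1 + sizeof(short) == 9.  For an abstract name of
 * the form "\0foo" with addr_len 6, sun_path[0] is 0, so all 6 bytes
 * are hashed and 6 is returned unchanged.
 */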

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

/* A socket counts as writable while less than a quarter of its send
 * buffer is in flight (wmem_alloc << 2 is wmem_alloc * 4).
 */
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, this allows us to do
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least. */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook	      */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->readlock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);

	return 0;
}


static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not, BSD has AF_UNIX SOCK_RAW,
		 *	though nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through: SOCK_RAW is treated as SOCK_DGRAM */
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
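
/*
 * Illustrative result of the above: the first autobind after boot
 * produces the abstract address "\0" followed by "00001" (a leading
 * zero byte and five hex digits), with addr->len = 5 + 1 +
 * sizeof(short) = 8 bytes.
 */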

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory and calculate the hash for the last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		goto out;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		addr->hash = UNIX_HASH_SIZE;
		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}

/* Lock two unix_state locks without deadlocking: always take the lock
 * of the socket with the lower address first, so that concurrent
 * double-locks of the same pair agree on the order.
 */
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all, allocate resources.
	   If we did it after the state is locked, we would have to
	   recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. This is dangerous because a deadlock is
	   possible. The connect-to-self case and simultaneous
	   connect attempts are eliminated by checking the socket
	   state: other is TCP_LISTEN, and if sk were TCP_LISTEN we
	   would have caught that before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}

static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If the socket state is TCP_LISTEN it cannot change (for now...),
	 * so no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() has been SMP-safe since last summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection.  Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
	    !other->sk_socket ||
	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
		UNIXCB(skb).pid  = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}
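
/*
 * A minimal userspace sketch of the receive side implied above
 * (illustrative only): after setsockopt(fd, SOL_SOCKET, SO_PASSCRED,
 * &(int){1}, sizeof(int)), each recvmsg() on fd can carry an
 * SCM_CREDENTIALS control message holding the sender's pid/uid/gid,
 * read with CMSG_FIRSTHDR()/CMSG_DATA() as a struct ucred.
 */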

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned int hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;
	int data_len = 0;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC)
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should a
		 *	datagram error be here?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}

/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
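
/*
 * Worked example of the define above, assuming 4 KiB pages:
 * get_order(32768) == 3, so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768.
 * With 64 KiB pages get_order(32768) == 0 and the limit is a single
 * page, which is where the "minimum of a full page" wording comes
 * from.
 */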

static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;
	int data_len;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
						   sent, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}

static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}

static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
		   - return fds, and not return them on read (the old
		     strategy, apparently wrong)
		   - clone fds (I chose this for now, it is the most
		     universal solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly, however!

		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}

/*
 *	Sleep until more data has arrived. But check for races..
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (skb_peek_tail(&sk->sk_receive_queue) != last ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}

static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}
1946 
1947 	do {
1948 		int chunk;
1949 		struct sk_buff *skb, *last;
1950 
1951 		unix_state_lock(sk);
1952 		last = skb = skb_peek(&sk->sk_receive_queue);
1953 again:
1954 		if (skb == NULL) {
1955 			unix_sk(sk)->recursion_level = 0;
1956 			if (copied >= target)
1957 				goto unlock;
1958 
1959 			/*
1960 			 *	POSIX 1003.1g mandates this order.
1961 			 */
1962 
1963 			err = sock_error(sk);
1964 			if (err)
1965 				goto unlock;
1966 			if (sk->sk_shutdown & RCV_SHUTDOWN)
1967 				goto unlock;
1968 
1969 			unix_state_unlock(sk);
1970 			err = -EAGAIN;
1971 			if (!timeo)
1972 				break;
1973 			mutex_unlock(&u->readlock);
1974 
1975 			timeo = unix_stream_data_wait(sk, timeo, last);
1976 
1977 			if (signal_pending(current)
1978 			    ||  mutex_lock_interruptible(&u->readlock)) {
1979 				err = sock_intr_errno(timeo);
1980 				goto out;
1981 			}
1982 
1983 			continue;
1984  unlock:
1985 			unix_state_unlock(sk);
1986 			break;
1987 		}
1988 
1989 		skip = sk_peek_offset(sk, flags);
1990 		while (skip >= unix_skb_len(skb)) {
1991 			skip -= unix_skb_len(skb);
1992 			last = skb;
1993 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
1994 			if (!skb)
1995 				goto again;
1996 		}
1997 
1998 		unix_state_unlock(sk);
1999 
2000 		if (check_creds) {
2001 			/* Never glue messages from different writers */
2002 			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
2003 			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
2004 			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
2005 				break;
2006 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2007 			/* Copy credentials */
2008 			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2009 			check_creds = 1;
2010 		}
2011 
2012 		/* Copy address just once */
2013 		if (sunaddr) {
2014 			unix_copy_addr(msg, skb->sk);
2015 			sunaddr = NULL;
2016 		}
2017 
2018 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2019 		if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
2020 					    msg->msg_iov, chunk)) {
2021 			if (copied == 0)
2022 				copied = -EFAULT;
2023 			break;
2024 		}
2025 		copied += chunk;
2026 		size -= chunk;
2027 
2028 		/* Mark read part of skb as used */
2029 		if (!(flags & MSG_PEEK)) {
2030 			UNIXCB(skb).consumed += chunk;
2031 
2032 			sk_peek_offset_bwd(sk, chunk);
2033 
2034 			if (UNIXCB(skb).fp)
2035 				unix_detach_fds(siocb->scm, skb);
2036 
2037 			if (unix_skb_len(skb))
2038 				break;
2039 
2040 			skb_unlink(skb, &sk->sk_receive_queue);
2041 			consume_skb(skb);
2042 
2043 			if (siocb->scm->fp)
2044 				break;
2045 		} else {
2046 			/* Duplicating the fds on MSG_PEEK is questionable;
2047 			 * see the note in unix_dgram_recvmsg(). */
2048 			if (UNIXCB(skb).fp)
2049 				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2050 
2051 			sk_peek_offset_fwd(sk, chunk);
2052 
2053 			break;
2054 		}
2055 	} while (size);
2056 
2057 	mutex_unlock(&u->readlock);
2058 	scm_recv(sock, msg, siocb->scm, flags);
2059 out:
2060 	return copied ? : err;
2061 }
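
/* Hypothetical userspace sketch (not part of this file) of the
 * sk_peek_offset() handling above: once SO_PEEK_OFF is enabled,
 * successive MSG_PEEK reads walk forward through the queued data
 * (sk_peek_offset_fwd), while consuming reads move the offset back
 * (sk_peek_offset_bwd):
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 16..31
 *	recv(fd, buf, 16, 0);		// consumes bytes 0..15
 */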
2062 
2063 static int unix_shutdown(struct socket *sock, int mode)
2064 {
2065 	struct sock *sk = sock->sk;
2066 	struct sock *other;
2067 
2068 	if (mode < SHUT_RD || mode > SHUT_RDWR)
2069 		return -EINVAL;
2070 	/* This maps:
2071 	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
2072 	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
2073 	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2074 	 */
2075 	++mode;
2076 
2077 	unix_state_lock(sk);
2078 	sk->sk_shutdown |= mode;
2079 	other = unix_peer(sk);
2080 	if (other)
2081 		sock_hold(other);
2082 	unix_state_unlock(sk);
2083 	sk->sk_state_change(sk);
2084 
2085 	if (other &&
2086 	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2087 
2088 		int peer_mode = 0;
2089 
2090 		if (mode & RCV_SHUTDOWN)
2091 			peer_mode |= SEND_SHUTDOWN;
2092 		if (mode & SEND_SHUTDOWN)
2093 			peer_mode |= RCV_SHUTDOWN;
2094 		unix_state_lock(other);
2095 		other->sk_shutdown |= peer_mode;
2096 		unix_state_unlock(other);
2097 		other->sk_state_change(other);
2098 		if (peer_mode == SHUTDOWN_MASK)
2099 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2100 		else if (peer_mode & RCV_SHUTDOWN)
2101 			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2102 	}
2103 	if (other)
2104 		sock_put(other);
2105 
2106 	return 0;
2107 }
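
/* Effect of the cross-wiring above, e.g. shutdown(fd, SHUT_WR) on one
 * end: SEND_SHUTDOWN is set locally and RCV_SHUTDOWN on the peer, so
 * the peer's pending and future reads return EOF instead of blocking,
 * and its pollers are woken with POLL_IN.
 */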
2108 
2109 long unix_inq_len(struct sock *sk)
2110 {
2111 	struct sk_buff *skb;
2112 	long amount = 0;
2113 
2114 	if (sk->sk_state == TCP_LISTEN)
2115 		return -EINVAL;
2116 
2117 	spin_lock(&sk->sk_receive_queue.lock);
2118 	if (sk->sk_type == SOCK_STREAM ||
2119 	    sk->sk_type == SOCK_SEQPACKET) {
2120 		skb_queue_walk(&sk->sk_receive_queue, skb)
2121 			amount += unix_skb_len(skb);
2122 	} else {
2123 		skb = skb_peek(&sk->sk_receive_queue);
2124 		if (skb)
2125 			amount = skb->len;
2126 	}
2127 	spin_unlock(&sk->sk_receive_queue.lock);
2128 
2129 	return amount;
2130 }
2131 EXPORT_SYMBOL_GPL(unix_inq_len);
2132 
2133 long unix_outq_len(struct sock *sk)
2134 {
2135 	return sk_wmem_alloc_get(sk);
2136 }
2137 EXPORT_SYMBOL_GPL(unix_outq_len);
2138 
2139 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2140 {
2141 	struct sock *sk = sock->sk;
2142 	long amount = 0;
2143 	int err;
2144 
2145 	switch (cmd) {
2146 	case SIOCOUTQ:
2147 		amount = unix_outq_len(sk);
2148 		err = put_user(amount, (int __user *)arg);
2149 		break;
2150 	case SIOCINQ:
2151 		amount = unix_inq_len(sk);
2152 		if (amount < 0)
2153 			err = amount;
2154 		else
2155 			err = put_user(amount, (int __user *)arg);
2156 		break;
2157 	default:
2158 		err = -ENOIOCTLCMD;
2159 		break;
2160 	}
2161 	return err;
2162 }
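
/* Hypothetical userspace sketch (not part of this file); both ioctls
 * store into an int, matching the put_user() casts above:
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	// bytes readable right now
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes queued but not yet read by the peer
 */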
2163 
2164 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2165 {
2166 	struct sock *sk = sock->sk;
2167 	unsigned int mask;
2168 
2169 	sock_poll_wait(file, sk_sleep(sk), wait);
2170 	mask = 0;
2171 
2172 	/* exceptional events? */
2173 	if (sk->sk_err)
2174 		mask |= POLLERR;
2175 	if (sk->sk_shutdown == SHUTDOWN_MASK)
2176 		mask |= POLLHUP;
2177 	if (sk->sk_shutdown & RCV_SHUTDOWN)
2178 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2179 
2180 	/* readable? */
2181 	if (!skb_queue_empty(&sk->sk_receive_queue))
2182 		mask |= POLLIN | POLLRDNORM;
2183 
2184 	/* Connection-based sockets need to check for termination and startup */
2185 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2186 	    sk->sk_state == TCP_CLOSE)
2187 		mask |= POLLHUP;
2188 
2189 	/*
2190 	 * We report the socket as writable even when the other side has
2191 	 * shut down the connection; this prevents writers getting stuck.
2192 	 */
2193 	if (unix_writable(sk))
2194 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2195 
2196 	return mask;
2197 }
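
/* Poll semantics implemented above, in brief: POLLHUP is only reported
 * once both directions are shut down or the socket is in TCP_CLOSE;
 * a receive-side shutdown alone reports POLLRDHUP plus readability so
 * callers can observe EOF; and, per the comment above, writability is
 * still reported after the peer shuts down so writers don't get stuck.
 */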
2198 
2199 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2200 				    poll_table *wait)
2201 {
2202 	struct sock *sk = sock->sk, *other;
2203 	unsigned int mask, writable;
2204 
2205 	sock_poll_wait(file, sk_sleep(sk), wait);
2206 	mask = 0;
2207 
2208 	/* exceptional events? */
2209 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2210 		mask |= POLLERR |
2211 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2212 
2213 	if (sk->sk_shutdown & RCV_SHUTDOWN)
2214 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2215 	if (sk->sk_shutdown == SHUTDOWN_MASK)
2216 		mask |= POLLHUP;
2217 
2218 	/* readable? */
2219 	if (!skb_queue_empty(&sk->sk_receive_queue))
2220 		mask |= POLLIN | POLLRDNORM;
2221 
2222 	/* Connection-based sockets need to check for termination and startup */
2223 	if (sk->sk_type == SOCK_SEQPACKET) {
2224 		if (sk->sk_state == TCP_CLOSE)
2225 			mask |= POLLHUP;
2226 		/* connection hasn't started yet? */
2227 		if (sk->sk_state == TCP_SYN_SENT)
2228 			return mask;
2229 	}
2230 
2231 	/* No write status requested, avoid expensive OUT tests. */
2232 	if (!(poll_requested_events(wait) & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2233 		return mask;
2234 
2235 	writable = unix_writable(sk);
2236 	other = unix_peer_get(sk);
2237 	if (other) {
2238 		if (unix_peer(other) != sk) {
2239 			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2240 			if (unix_recvq_full(other))
2241 				writable = 0;
2242 		}
2243 		sock_put(other);
2244 	}
2245 
2246 	if (writable)
2247 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2248 	else
2249 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2250 
2251 	return mask;
2252 }
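
/* Design note: writability of a connected datagram socket depends on
 * the receiver's queue, so when write events are requested the peer's
 * peer_wait queue is polled as well (for asymmetric connections, where
 * unix_peer(other) != sk).  A reader draining that queue can then wake
 * writers that saw unix_recvq_full() and reported themselves
 * unwritable.
 */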
2253 
2254 #ifdef CONFIG_PROC_FS
2255 
2256 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2257 
2258 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2259 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2260 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
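
/* The seq_file position *pos packs a hash bucket into the high bits and
 * a 1-based offset within the bucket into the low BUCKET_SPACE bits;
 * offset 0 is reserved so that *pos == 0 can stand for SEQ_START_TOKEN.
 * Illustrative numbers only: with UNIX_HASH_BITS == 8 on a 64-bit
 * kernel, BUCKET_SPACE is 54, so set_bucket_offset(2, 1) yields
 * (2UL << 54) | 1, from which get_bucket() and get_offset() recover
 * 2 and 1 again.
 */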
2261 
2262 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2263 {
2264 	unsigned long offset = get_offset(*pos);
2265 	unsigned long bucket = get_bucket(*pos);
2266 	struct sock *sk;
2267 	unsigned long count = 0;
2268 
2269 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2270 		if (sock_net(sk) != seq_file_net(seq))
2271 			continue;
2272 		if (++count == offset)
2273 			break;
2274 	}
2275 
2276 	return sk;
2277 }
2278 
2279 static struct sock *unix_next_socket(struct seq_file *seq,
2280 				     struct sock *sk,
2281 				     loff_t *pos)
2282 {
2283 	unsigned long bucket;
2284 
2285 	while (sk > (struct sock *)SEQ_START_TOKEN) {
2286 		sk = sk_next(sk);
2287 		if (!sk)
2288 			goto next_bucket;
2289 		if (sock_net(sk) == seq_file_net(seq))
2290 			return sk;
2291 	}
2292 
2293 	do {
2294 		sk = unix_from_bucket(seq, pos);
2295 		if (sk)
2296 			return sk;
2297 
2298 next_bucket:
2299 		bucket = get_bucket(*pos) + 1;
2300 		*pos = set_bucket_offset(bucket, 1);
2301 	} while (bucket < ARRAY_SIZE(unix_socket_table));
2302 
2303 	return NULL;
2304 }
2305 
2306 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2307 	__acquires(unix_table_lock)
2308 {
2309 	spin_lock(&unix_table_lock);
2310 
2311 	if (!*pos)
2312 		return SEQ_START_TOKEN;
2313 
2314 	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2315 		return NULL;
2316 
2317 	return unix_next_socket(seq, NULL, pos);
2318 }
2319 
2320 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2321 {
2322 	++*pos;
2323 	return unix_next_socket(seq, v, pos);
2324 }
2325 
2326 static void unix_seq_stop(struct seq_file *seq, void *v)
2327 	__releases(unix_table_lock)
2328 {
2329 	spin_unlock(&unix_table_lock);
2330 }
2331 
2332 static int unix_seq_show(struct seq_file *seq, void *v)
2333 {
2334 
2335 	if (v == SEQ_START_TOKEN)
2336 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2337 			 "Inode Path\n");
2338 	else {
2339 		struct sock *s = v;
2340 		struct unix_sock *u = unix_sk(s);
2341 		unix_state_lock(s);
2342 
2343 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2344 			s,
2345 			atomic_read(&s->sk_refcnt),
2346 			0,
2347 			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2348 			s->sk_type,
2349 			s->sk_socket ?
2350 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2351 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2352 			sock_i_ino(s));
2353 
2354 		if (u->addr) {
2355 			int i, len;
2356 			seq_putc(seq, ' ');
2357 
2358 			i = 0;
2359 			len = u->addr->len - sizeof(short);
2360 			if (!UNIX_ABSTRACT(s))
2361 				len--;
2362 			else {
2363 				seq_putc(seq, '@');
2364 				i++;
2365 			}
2366 			for ( ; i < len; i++)
2367 				seq_putc(seq, u->addr->name->sun_path[i]);
2368 		}
2369 		unix_state_unlock(s);
2370 		seq_putc(seq, '\n');
2371 	}
2372 
2373 	return 0;
2374 }
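
/* Illustrative /proc/net/unix line produced by the format above for a
 * listening stream socket (address, inode and path are made up; %pK may
 * print zeroes depending on kptr_restrict):
 *
 *	ffff88003695cc00: 00000002 00000000 00010000 0001 01 17740 /run/foo.sock
 */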
2375 
2376 static const struct seq_operations unix_seq_ops = {
2377 	.start  = unix_seq_start,
2378 	.next   = unix_seq_next,
2379 	.stop   = unix_seq_stop,
2380 	.show   = unix_seq_show,
2381 };
2382 
2383 static int unix_seq_open(struct inode *inode, struct file *file)
2384 {
2385 	return seq_open_net(inode, file, &unix_seq_ops,
2386 			    sizeof(struct seq_net_private));
2387 }
2388 
2389 static const struct file_operations unix_seq_fops = {
2390 	.owner		= THIS_MODULE,
2391 	.open		= unix_seq_open,
2392 	.read		= seq_read,
2393 	.llseek		= seq_lseek,
2394 	.release	= seq_release_net,
2395 };
2396 
2397 #endif
2398 
2399 static const struct net_proto_family unix_family_ops = {
2400 	.family = PF_UNIX,
2401 	.create = unix_create,
2402 	.owner	= THIS_MODULE,
2403 };
2404 
2405 
2406 static int __net_init unix_net_init(struct net *net)
2407 {
2408 	int error = -ENOMEM;
2409 
2410 	net->unx.sysctl_max_dgram_qlen = 10;
2411 	if (unix_sysctl_register(net))
2412 		goto out;
2413 
2414 #ifdef CONFIG_PROC_FS
2415 	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2416 		unix_sysctl_unregister(net);
2417 		goto out;
2418 	}
2419 #endif
2420 	error = 0;
2421 out:
2422 	return error;
2423 }
2424 
2425 static void __net_exit unix_net_exit(struct net *net)
2426 {
2427 	unix_sysctl_unregister(net);
2428 	remove_proc_entry("unix", net->proc_net);
2429 }
2430 
2431 static struct pernet_operations unix_net_ops = {
2432 	.init = unix_net_init,
2433 	.exit = unix_net_exit,
2434 };
2435 
2436 static int __init af_unix_init(void)
2437 {
2438 	int rc = -1;
2439 
2440 	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2441 
2442 	rc = proto_register(&unix_proto, 1);
2443 	if (rc != 0) {
2444 		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2445 		       __func__);
2446 		goto out;
2447 	}
2448 
2449 	sock_register(&unix_family_ops);
2450 	register_pernet_subsys(&unix_net_ops);
2451 out:
2452 	return rc;
2453 }
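
/* Note: sock_register() and register_pernet_subsys() can fail too, but
 * their return values are ignored here; only proto_register() failure
 * is propagated to the caller.
 */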
2454 
2455 static void __exit af_unix_exit(void)
2456 {
2457 	sock_unregister(PF_UNIX);
2458 	proto_unregister(&unix_proto);
2459 	unregister_pernet_subsys(&unix_net_ops);
2460 }
2461 
2462 /* Earlier than device_initcall() so that other drivers invoking
2463  * request_module() don't end up in a loop when modprobe tries
2464  * to use a UNIX socket. But later than subsys_initcall() because
2465  * we depend on infrastructure initialised there. */
2466 fs_initcall(af_unix_init);
2467 module_exit(af_unix_exit);
2468 
2469 MODULE_LICENSE("GPL");
2470 MODULE_ALIAS_NETPROTO(PF_UNIX);
2471