// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko EiBfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *		Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *		Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by above two patches.
 *		Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this is for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *		Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair
 *		Michal Ostrowski	:	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
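
/* For illustration only -- a minimal userspace sketch (not part of this
 * file) contrasting the two namespaces.  The first byte of sun_path picks
 * the namespace: a non-zero byte names a filesystem object, a zero byte
 * starts an abstract name, which is length-delimited rather than
 * NUL-terminated:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "demo", 4);	// abstract name "\0demo"
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */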

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bpf-cgroup.h>
#include <linux/btf_ids.h>
#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/net.h>
#include <linux/pidfs.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/splice.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <net/af_unix.h>
#include <net/net_namespace.h>
#include <net/scm.h>
#include <net/tcp_states.h>
#include <uapi/linux/sockios.h>
#include <uapi/linux/termios.h>

#include "af_unix.h"

static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];

/* SMP locking strategy:
 *    hash table is protected with spinlock.
 *    each socket state is protected by separate spinlock.
 */
#ifdef CONFIG_PROVE_LOCKING
#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))

static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
				  const struct lockdep_map *b)
{
	return cmp_ptr(a, b);
}

static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
				  const struct lockdep_map *_b)
{
	const struct unix_sock *a, *b;

	a = container_of(_a, struct unix_sock, lock.dep_map);
	b = container_of(_b, struct unix_sock, lock.dep_map);

	if (a->sk.sk_state == TCP_LISTEN) {
		/* unix_stream_connect(): Before the 2nd unix_state_lock(),
		 *
		 *   1. a is TCP_LISTEN.
		 *   2. b is not a.
		 *   3. concurrent connect(b -> a) must fail.
		 *
		 * Except for 2. & 3., the b's state can be any possible
		 * value due to concurrent connect() or listen().
		 *
		 * 2. is detected in debug_spin_lock_before(), and 3. cannot
		 * be expressed as lock_cmp_fn.
		 */
		switch (b->sk.sk_state) {
		case TCP_CLOSE:
		case TCP_ESTABLISHED:
		case TCP_LISTEN:
			return -1;
		default:
			/* Invalid case. */
			return 0;
		}
	}

	/* Should never happen. Just to be symmetric. */
	if (b->sk.sk_state == TCP_LISTEN) {
		switch (a->sk.sk_state) {
		case TCP_CLOSE:
		case TCP_ESTABLISHED:
			return 1;
		default:
			return 0;
		}
	}

	/* unix_state_double_lock(): ascending address order. */
	return cmp_ptr(a, b);
}

static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a,
				  const struct lockdep_map *_b)
{
	const struct sock *a, *b;

	a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map);
	b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map);

	/* unix_collect_skb(): listener -> embryo order. */
	if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a)
		return -1;

	/* Should never happen. Just to be symmetric. */
	if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b)
		return 1;

	return 0;
}
#endif

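/* Bucket layout, as implied by the three hash functions below: unbound
 * sockets hash into [0, UNIX_HASH_MOD] and abstract sockets into
 * [UNIX_HASH_MOD + 1, UNIX_HASH_MOD * 2 + 1], so the two namespaces never
 * share a bucket in the per-netns table.  Pathname (BSD) sockets are
 * additionally chained into the global bsd_socket_buckets table, keyed by
 * inode number, for the inode lookup in unix_find_socket_byinode().
 */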
static unsigned int unix_unbound_hash(struct sock *sk)
{
	unsigned long hash = (unsigned long)sk;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash ^= sk->sk_type;

	return hash & UNIX_HASH_MOD;
}

static unsigned int unix_bsd_hash(struct inode *i)
{
	return i->i_ino & UNIX_HASH_MOD;
}

static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	__wsum csum = csum_partial(sunaddr, addr_len, 0);
	unsigned int hash;

	hash = (__force unsigned int)csum_fold(csum);
	hash ^= hash >> 8;
	hash ^= type;

	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}

static void unix_table_double_lock(struct net *net,
				   unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_lock(&net->unx.table.locks[hash1]);
		return;
	}

	if (hash1 > hash2)
		swap(hash1, hash2);

	spin_lock(&net->unx.table.locks[hash1]);
	spin_lock(&net->unx.table.locks[hash2]);
}
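
/* Both bucket locks are always taken in ascending index order (note the
 * swap() above), so two concurrent double-locks cannot deadlock against
 * each other.
 */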

static void unix_table_double_unlock(struct net *net,
				     unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_unlock(&net->unx.table.locks[hash1]);
		return;
	}

	spin_unlock(&net->unx.table.locks[hash1]);
	spin_unlock(&net->unx.table.locks[hash2]);
}

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return !unix_peer(osk) || unix_peer(osk) == sk;
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
					     int addr_len)
{
	struct unix_address *addr;

	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		return NULL;

	refcount_set(&addr->refcnt, 1);
	addr->len = addr_len;
	memcpy(addr->name, sunaddr, addr_len);

	return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 * Check unix socket name:
 *	- should not be zero length.
 *	- if it starts with a non-zero byte, it should be NUL-terminated (FS object)
 *	- if it starts with a zero byte, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
	    addr_len > sizeof(*sunaddr))
		return -EINVAL;

	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;

	return 0;
}

static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
	struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
	short offset = offsetof(struct sockaddr_storage, __data);

	BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));

	/* This may look like an off by one error but it is a bit more
	 * subtle. 108 is the longest valid AF_UNIX path for a binding.
	 * sun_path[108] doesn't as such exist. However in kernel space
	 * we are guaranteed that it is a valid memory location in our
	 * kernel address buffer because syscall functions always pass
	 * a pointer of struct sockaddr_storage which has a bigger buffer
	 * than 108. Also, we must terminate sun_path for strlen() in
	 * getname_kernel().
	 */
	addr->__data[addr_len - offset] = 0;

	/* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
	 * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
	 * know the actual buffer.
	 */
	return strlen(addr->__data) + offset + 1;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct net *net, struct sock *sk)
{
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct net *net, struct sock *sk,
				 struct unix_address *addr, unsigned int hash)
{
	__unix_remove_socket(sk);
	smp_store_release(&unix_sk(sk)->addr, addr);

	sk->sk_hash = hash;
	__unix_insert_socket(net, sk);
}

static void unix_remove_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_remove_socket(sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_insert_socket(net, sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_bsd_socket(struct sock *sk)
{
	spin_lock(&bsd_socket_locks[sk->sk_hash]);
	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}

static void unix_remove_bsd_socket(struct sock *sk)
{
	if (!hlist_unhashed(&sk->sk_bind_node)) {
		spin_lock(&bsd_socket_locks[sk->sk_hash]);
		__sk_del_bind_node(sk);
		spin_unlock(&bsd_socket_locks[sk->sk_hash]);

		sk_node_init(&sk->sk_bind_node);
	}
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &net->unx.table.buckets[hash]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, unsigned int hash)
{
	struct sock *s;

	spin_lock(&net->unx.table.locks[hash]);
	s = __unix_find_socket_byname(net, sunname, len, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&net->unx.table.locks[hash]);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	unsigned int hash = unix_bsd_hash(i);
	struct sock *s;

	spin_lock(&bsd_socket_locks[hash]);
	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			spin_unlock(&bsd_socket_locks[hash]);
			return s;
		}
	}
	spin_unlock(&bsd_socket_locks[hash]);
	return NULL;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (e.g., /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and broken when the
 * association to the server socket is dissolved or after a wake up
 * was relayed.
 */

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT |
				   EPOLLWRNORM |
				   EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

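/* A socket counts as writable while its committed write memory stays below
 * one quarter of sk_sndbuf (hence the "<< 2" below) and it is not listening.
 */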
static int unix_writable(const struct sock *sk, unsigned char state)
{
	return state != TCP_LISTEN &&
	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
							EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, this allows us to
 * do flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer.
 */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge_reason(&sk->sk_receive_queue,
				       SKB_DROP_REASON_UNIX_DISCONNECT);

		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Don't do this when
		 * the peer wasn't connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			WRITE_ONCE(other->sk_err, ECONNRESET);
			sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge_reason(&sk->sk_receive_queue, SKB_DROP_REASON_SOCKET_CLOSE);

	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		 atomic_long_read(&unix_nr_socks));
#endif
}

static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct sock *skpair;
	struct sk_buff *skb;
	struct path path;
	int state;

	unix_remove_socket(sock_net(sk), sk);
	unix_remove_bsd_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	WRITE_ONCE(sk->sk_state, TCP_CLOSE);

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	u->oob_skb = NULL;
#endif

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
			if (skb && !unix_skb_len(skb))
				skb = skb_peek_next(skb, &sk->sk_receive_queue);
#endif
			unix_state_lock(skpair);
			/* No more writes */
			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
			if (skb || embrion)
				WRITE_ONCE(skpair->sk_err, ECONNRESET);
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);

		/* passed fds are erased in the kfree_skb hook */
		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	unix_schedule_gc(NULL);
}

struct unix_peercred {
	struct pid *peer_pid;
	const struct cred *peer_cred;
};

static inline int prepare_peercred(struct unix_peercred *peercred)
{
	struct pid *pid;
	int err;

	pid = task_tgid(current);
	err = pidfs_register_pid(pid);
	if (likely(!err)) {
		peercred->peer_pid = get_pid(pid);
		peercred->peer_cred = get_current_cred();
	}
	return err;
}

static void drop_peercred(struct unix_peercred *peercred)
{
	const struct cred *cred = NULL;
	struct pid *pid = NULL;

	might_sleep();

	swap(peercred->peer_pid, pid);
	swap(peercred->peer_cred, cred);

	put_pid(pid);
	put_cred(cred);
}

static inline void init_peercred(struct sock *sk,
				 const struct unix_peercred *peercred)
{
	sk->sk_peer_pid = peercred->peer_pid;
	sk->sk_peer_cred = peercred->peer_cred;
}

static void update_peercred(struct sock *sk, struct unix_peercred *peercred)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	init_peercred(sk, peercred);
	spin_unlock(&sk->sk_peer_lock);

	peercred->peer_pid = old_pid;
	peercred->peer_cred = old_cred;
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	lockdep_assert_held(&unix_sk(peersk)->lock);

	spin_lock(&sk->sk_peer_lock);
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
	spin_unlock(&sk->sk_peer_lock);
}

static bool unix_may_passcred(const struct sock *sk)
{
	return sk->sk_scm_credentials || sk->sk_scm_pidfd;
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct unix_peercred peercred = {};

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!READ_ONCE(u->addr))
		goto out;	/* No listens on an unbound socket */
	err = prepare_peercred(&peercred);
	if (err)
		goto out;
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog = backlog;
	WRITE_ONCE(sk->sk_state, TCP_LISTEN);

	/* set credentials so connect can copy them */
	update_peercred(sk, &peercred);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	drop_peercred(&peercred);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr_unsized *, int);
static int unix_stream_connect(struct socket *, struct sockaddr_unsized *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, struct proto_accept_arg *arg);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr_unsized *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
	struct sk_buff *skb;
	struct unix_sock *u;
	int nr_fds = 0;

	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	while (skb) {
		u = unix_sk(skb->sk);
		nr_fds += atomic_read(&u->scm_stat.nr_fds);
		skb = skb_peek_next(skb, &sk->sk_receive_queue);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return nr_fds;
}

static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
	struct sock *sk = sock->sk;
	unsigned char s_state;
	struct unix_sock *u;
	int nr_fds = 0;

	if (sk) {
		s_state = READ_ONCE(sk->sk_state);
		u = unix_sk(sk);

		/* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
		 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
		 * SOCK_DGRAM is ordinary. So, no lock is needed.
		 */
		if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
			nr_fds = atomic_read(&u->scm_stat.nr_fds);
		else if (s_state == TCP_LISTEN)
			nr_fds = unix_count_nr_fds(sk);

		seq_printf(m, "scm_fds: %u\n", nr_fds);
	}
}
#else
#define unix_show_fdinfo NULL
#endif

static bool unix_custom_sockopt(int optname)
{
	switch (optname) {
	case SO_INQ:
		return true;
	default:
		return false;
	}
}

static int unix_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen)
{
	struct unix_sock *u = unix_sk(sock->sk);
	struct sock *sk = sock->sk;
	int val;

	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	if (!unix_custom_sockopt(optname))
		return sock_setsockopt(sock, level, optname, optval, optlen);

	if (optlen != sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	switch (optname) {
	case SO_INQ:
		if (sk->sk_type != SOCK_STREAM)
			return -EINVAL;

		if (val > 1 || val < 0)
			return -EINVAL;

		WRITE_ONCE(u->recvmsg_inq, val);
		break;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}
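
/* For illustration only -- a userspace sketch (not part of this file) of
 * the SO_INQ handling above.  Assuming the SO_INQ definition exported by
 * the installed uapi headers, enabling it on a SOCK_STREAM unix socket
 * makes later recvmsg() calls report the bytes left in the receive queue
 * via a control message:
 *
 *	int one = 1;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_INQ, &one, sizeof(one)) < 0)
 *		perror("setsockopt");
 */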

static const struct proto_ops unix_stream_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= unix_release,
	.bind		= unix_bind,
	.connect	= unix_stream_connect,
	.socketpair	= unix_socketpair,
	.accept		= unix_accept,
	.getname	= unix_getname,
	.poll		= unix_poll,
	.ioctl		= unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unix_compat_ioctl,
#endif
	.listen		= unix_listen,
	.shutdown	= unix_shutdown,
	.setsockopt	= unix_setsockopt,
	.sendmsg	= unix_stream_sendmsg,
	.recvmsg	= unix_stream_recvmsg,
	.read_skb	= unix_stream_read_skb,
	.mmap		= sock_no_mmap,
	.splice_read	= unix_stream_splice_read,
	.set_peek_off	= sk_set_peek_off,
	.show_fdinfo	= unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= unix_release,
	.bind		= unix_bind,
	.connect	= unix_dgram_connect,
	.socketpair	= unix_socketpair,
	.accept		= sock_no_accept,
	.getname	= unix_getname,
	.poll		= unix_dgram_poll,
	.ioctl		= unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unix_compat_ioctl,
#endif
	.listen		= sock_no_listen,
	.shutdown	= unix_shutdown,
	.sendmsg	= unix_dgram_sendmsg,
	.read_skb	= unix_read_skb,
	.recvmsg	= unix_dgram_recvmsg,
	.mmap		= sock_no_mmap,
	.set_peek_off	= sk_set_peek_off,
	.show_fdinfo	= unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= unix_release,
	.bind		= unix_bind,
	.connect	= unix_stream_connect,
	.socketpair	= unix_socketpair,
	.accept		= unix_accept,
	.getname	= unix_getname,
	.poll		= unix_dgram_poll,
	.ioctl		= unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unix_compat_ioctl,
#endif
	.listen		= unix_listen,
	.shutdown	= unix_shutdown,
	.sendmsg	= unix_seqpacket_sendmsg,
	.recvmsg	= unix_seqpacket_recvmsg,
	.mmap		= sock_no_mmap,
	.set_peek_off	= sk_set_peek_off,
	.show_fdinfo	= unix_show_fdinfo,
};

static void unix_close(struct sock *sk, long timeout)
{
	/* Nothing to do here, unix socket does not need a ->close().
	 * This is merely for sockmap.
	 */
}

static bool unix_bpf_bypass_getsockopt(int level, int optname)
{
	if (level == SOL_SOCKET) {
		switch (optname) {
		case SO_PEERPIDFD:
			return true;
		default:
			return false;
		}
	}

	return false;
}

struct proto unix_dgram_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
	.name			= "UNIX-STREAM",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.bpf_bypass_getsockopt	= unix_bpf_bypass_getsockopt,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
#endif
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
	struct unix_sock *u;
	struct sock *sk;
	int err;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
		err = -ENFILE;
		goto err;
	}

	if (type == SOCK_STREAM)
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
	else /* dgram and seqpacket */
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

	if (!sk) {
		err = -ENOMEM;
		goto err;
	}

	sock_init_data(sock, sk);

	sk->sk_scm_rights	= 1;
	sk->sk_hash		= unix_unbound_hash(sk);
	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
	sk->sk_destruct		= unix_sock_destructor;
	lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL);

	u = unix_sk(sk);
	u->listener = NULL;
	u->vertex = NULL;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_unbound_socket(net, sk);

	sock_prot_inuse_add(net, sk->sk_prot, 1);

	return sk;

err:
	atomic_long_dec(&unix_nr_socks);
	return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		set_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
		sock->ops = &unix_stream_ops;
		break;
	/*
	 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
	 * nothing uses it.
	 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		fallthrough;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = unix_create1(net, sock, kern, sock->type);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	return 0;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
				  int type, int flags)
{
	struct inode *inode;
	struct path path;
	struct sock *sk;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);

	if (flags & SOCK_COREDUMP) {
		struct path root;

		task_lock(&init_task);
		get_fs_root(init_task.fs, &root);
		task_unlock(&init_task);

		scoped_with_kernel_creds()
			err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path,
					      LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS |
					      LOOKUP_NO_MAGICLINKS, &path);
		path_put(&root);
		if (err)
			goto fail;
	} else {
		err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;

		err = path_permission(&path, MAY_WRITE);
		if (err)
			goto path_put;
	}

	err = -ECONNREFUSED;
	inode = d_backing_inode(path.dentry);
	if (!S_ISSOCK(inode->i_mode))
		goto path_put;

	sk = unix_find_socket_byinode(inode);
	if (!sk)
		goto path_put;

	err = -EPROTOTYPE;
	if (sk->sk_type != type)
		goto sock_put;

	err = security_unix_find(&path, sk, flags);
	if (err)
		goto sock_put;

	touch_atime(&path);

	path_put(&path);

	return sk;

sock_put:
	sock_put(sk);
path_put:
	path_put(&path);
fail:
	return ERR_PTR(err);
}

static struct sock *unix_find_abstract(struct net *net,
				       struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
	struct dentry *dentry;
	struct sock *sk;

	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
	if (!sk)
		return ERR_PTR(-ECONNREFUSED);

	dentry = unix_sk(sk)->path.dentry;
	if (dentry)
		touch_atime(&unix_sk(sk)->path);

	return sk;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunaddr,
				    int addr_len, int type, int flags)
{
	struct sock *sk;

	if (sunaddr->sun_path[0])
		sk = unix_find_bsd(sunaddr, addr_len, type, flags);
	else
		sk = unix_find_abstract(net, sunaddr, addr_len, type);

	return sk;
}

static int unix_autobind(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	u32 lastnum, ordernum;
	int err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) +
		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

	old_hash = sk->sk_hash;
	ordernum = get_random_u32();
	lastnum = ordernum & 0xFFFFF;
retry:
	ordernum = (ordernum + 1) & 0xFFFFF;
	sprintf(addr->name->sun_path + 1, "%05x", ordernum);

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
		unix_table_double_unlock(net, old_hash, new_hash);

		/* __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();

		if (ordernum == lastnum) {
			/* Give up if all names seem to be in use. */
			err = -ENOSPC;
			unix_release_addr(addr);
			goto out;
		}

		goto retry;
	}

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}

static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
			 int addr_len)
{
	umode_t mode = S_IFSOCK |
	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct mnt_idmap *idmap;
	struct unix_address *addr;
	struct dentry *dentry;
	struct path parent;
	int err;

	addr_len = unix_mkname_bsd(sunaddr, addr_len);
	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = start_creating_path(AT_FDCWD, addr->name->sun_path, &parent, 0);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	/*
	 * All right, let's create it.
	 */
	idmap = mnt_idmap(parent.mnt);
	err = security_path_mknod(&parent, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0, NULL);
	if (err)
		goto out_path;
	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_unlink;
	if (u->addr)
		goto out_unlock;

	old_hash = sk->sk_hash;
	new_hash = unix_bsd_hash(d_backing_inode(dentry));
	unix_table_double_lock(net, old_hash, new_hash);
	u->path.mnt = mntget(parent.mnt);
	u->path.dentry = dget(dentry);
	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	unix_insert_bsd_socket(sk);
	mutex_unlock(&u->bindlock);
	end_creating_path(&parent, dentry);
	return 0;

out_unlock:
	mutex_unlock(&u->bindlock);
	err = -EINVAL;
out_unlink:
	/* failed after successful mknod? unlink what we'd created... */
	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
	end_creating_path(&parent, dentry);
out:
	unix_release_addr(addr);
	return err == -EEXIST ? -EADDRINUSE : err;
}

static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
			      int addr_len)
{
	struct unix_sock *u = unix_sk(sk);
	unsigned int new_hash, old_hash;
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	int err;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out;

	if (u->addr) {
		err = -EINVAL;
		goto out_mutex;
	}

	old_hash = sk->sk_hash;
	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
		goto out_spin;

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	return 0;

out_spin:
	unix_table_double_unlock(net, old_hash, new_hash);
	err = -EADDRINUSE;
out_mutex:
	mutex_unlock(&u->bindlock);
out:
	unix_release_addr(addr);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	int err;

	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
	    sunaddr->sun_family == AF_UNIX)
		return unix_autobind(sk);

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		return err;

	if (sunaddr->sun_path[0])
		err = unix_bind_bsd(sk, sunaddr, addr_len);
	else
		err = unix_bind_abstract(sk, sunaddr, addr_len);

	return err;
}
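
/* For illustration only -- a userspace sketch (not part of this file) of
 * the autobind path above: binding with addr_len covering only sun_family
 * makes unix_autobind() pick a unique five-hex-digit abstract name.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	bind(fd, (struct sockaddr *)&a, offsetof(struct sockaddr_un, sun_path));
 */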

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (sk1 > sk2)
		swap(sk1, sk2);

	unix_state_lock(sk1);
	unix_state_lock(sk2);
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr_unsized *addr,
			      int alen, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *sk = sock->sk;
	struct sock *other;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_validate_addr(sunaddr, alen);
		if (err)
			goto out;

		err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, addr, &alen);
		if (err)
			goto out;

		if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) {
			err = unix_autobind(sk);
			if (err)
				goto out;
		}

restart:
		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			goto out;
		}

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
	} else {
		/*
		 * 1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		if (!other)
			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer) {
			unix_dgram_disconnected(sk, old_peer);

			unix_state_lock(old_peer);
			if (!unix_peer(old_peer))
				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
			unix_state_unlock(old_peer);
		}

		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}

	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full_lockless(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct unix_peercred peercred = {};
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	unsigned char state;
	long timeo;
	int err;

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		goto out;

	err = BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, &addr_len);
	if (err)
		goto out;

	if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	err = prepare_peercred(&peercred);
	if (err)
		goto out;

	/* create new sock for complete connection */
	newsk = unix_create1(net, NULL, 0, sock->type);
	if (IS_ERR(newsk)) {
		err = PTR_ERR(newsk);
		goto out;
	}

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto out_free_sk;
	}

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags);
	if (IS_ERR(other)) {
		err = PTR_ERR(other);
		goto out_free_skb;
	}

	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	if (other->sk_state != TCP_LISTEN ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -ECONNREFUSED;
		goto out_unlock;
	}

	if (unix_recvq_full_lockless(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);
		sock_put(other);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free_skb;

		goto restart;
	}

	/* self connect and simultaneous connect are eliminated
	 * by rejecting TCP_LISTEN socket to avoid deadlock.
	 */
	state = READ_ONCE(sk->sk_state);
	if (unlikely(state != TCP_CLOSE)) {
		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
		goto out_unlock;
	}

	unix_state_lock(sk);

	if (unlikely(sk->sk_state != TCP_CLOSE)) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	newsk->sk_scm_recv_flags = other->sk_scm_recv_flags;
	init_peercred(newsk, &peercred);

	newu = unix_sk(newsk);
	newu->listener = other;
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under its lock. Insertion into the
	 * hash chain we'd found it in had been done in an
	 * earlier critical area protected by the chain's lock,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path visible to anyone who gets newu->addr
	 * by smp_load_acquire(). IOW, the same guarantees
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
1768 if (otheru->path.dentry) {
1769 path_get(&otheru->path);
1770 newu->path = otheru->path;
1771 }
1772 refcount_inc(&otheru->addr->refcnt);
1773 smp_store_release(&newu->addr, otheru->addr);
1774
1775 /* Set credentials */
1776 copy_peercred(sk, other);
1777
1778 sock->state = SS_CONNECTED;
1779 WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1780 sock_hold(newsk);
1781
1782 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1783 unix_peer(sk) = newsk;
1784
1785 unix_state_unlock(sk);
1786
1787 /* take ten and send info to listening sock */
1788 spin_lock(&other->sk_receive_queue.lock);
1789 __skb_queue_tail(&other->sk_receive_queue, skb);
1790 spin_unlock(&other->sk_receive_queue.lock);
1791 unix_state_unlock(other);
1792 READ_ONCE(other->sk_data_ready)(other);
1793 sock_put(other);
1794 return 0;
1795
1796 out_unlock:
1797 unix_state_unlock(other);
1798 sock_put(other);
1799 out_free_skb:
1800 consume_skb(skb);
1801 out_free_sk:
1802 unix_release_sock(newsk, 0);
1803 out:
1804 drop_peercred(&peercred);
1805 return err;
1806 }
1807
unix_socketpair(struct socket * socka,struct socket * sockb)1808 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1809 {
1810 struct unix_peercred ska_peercred = {}, skb_peercred = {};
1811 struct sock *ska = socka->sk, *skb = sockb->sk;
1812 int err;
1813
1814 err = prepare_peercred(&ska_peercred);
1815 if (err)
1816 return err;
1817
1818 err = prepare_peercred(&skb_peercred);
1819 if (err) {
1820 drop_peercred(&ska_peercred);
1821 return err;
1822 }
1823
1824 /* Join our sockets back to back */
1825 sock_hold(ska);
1826 sock_hold(skb);
1827 unix_peer(ska) = skb;
1828 unix_peer(skb) = ska;
1829 init_peercred(ska, &ska_peercred);
1830 init_peercred(skb, &skb_peercred);
1831
1832 ska->sk_state = TCP_ESTABLISHED;
1833 skb->sk_state = TCP_ESTABLISHED;
1834 socka->state = SS_CONNECTED;
1835 sockb->state = SS_CONNECTED;
1836 return 0;
1837 }
1838
unix_accept(struct socket * sock,struct socket * newsock,struct proto_accept_arg * arg)1839 static int unix_accept(struct socket *sock, struct socket *newsock,
1840 struct proto_accept_arg *arg)
1841 {
1842 struct sock *sk = sock->sk;
1843 struct sk_buff *skb;
1844 struct sock *tsk;
1845
1846 arg->err = -EOPNOTSUPP;
1847 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1848 goto out;
1849
1850 arg->err = -EINVAL;
1851 if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
1852 goto out;
1853
1854 /* If socket state is TCP_LISTEN it cannot change (for now...),
1855 * so no locks are necessary.
1856 */
1857
1858 skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1859 &arg->err);
1860 if (!skb) {
1861 /* This means receive shutdown. */
1862 if (arg->err == 0)
1863 arg->err = -EINVAL;
1864 goto out;
1865 }
1866
1867 tsk = skb->sk;
1868 skb_free_datagram(sk, skb);
1869 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1870
1871 if (tsk->sk_type == SOCK_STREAM)
1872 set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
1873
1874 /* attach accepted sock to socket */
1875 unix_state_lock(tsk);
1876 unix_update_edges(unix_sk(tsk));
1877 newsock->state = SS_CONNECTED;
1878 sock_graft(tsk, newsock);
1879 unix_state_unlock(tsk);
1880 return 0;
1881
1882 out:
1883 return arg->err;
1884 }
1885
1886
1887 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1888 {
1889 struct sock *sk = sock->sk;
1890 struct unix_address *addr;
1891 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1892 int err = 0;
1893
1894 if (peer) {
1895 sk = unix_peer_get(sk);
1896
1897 err = -ENOTCONN;
1898 if (!sk)
1899 goto out;
1900 err = 0;
1901 } else {
1902 sock_hold(sk);
1903 }
1904
1905 addr = smp_load_acquire(&unix_sk(sk)->addr);
1906 if (!addr) {
1907 sunaddr->sun_family = AF_UNIX;
1908 sunaddr->sun_path[0] = 0;
1909 err = offsetof(struct sockaddr_un, sun_path);
1910 } else {
1911 err = addr->len;
1912 memcpy(sunaddr, addr->name, addr->len);
1913
1914 if (peer)
1915 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1916 CGROUP_UNIX_GETPEERNAME);
1917 else
1918 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, &err,
1919 CGROUP_UNIX_GETSOCKNAME);
1920 }
1921 sock_put(sk);
1922 out:
1923 return err;
1924 }
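/* A minimal userspace sketch (illustrative): the unbound branch above
 * reports only the sun_family header, so callers must check the
 * returned length before reading sun_path; abstract names come back
 * with a leading NUL byte:
 *
 *	struct sockaddr_un sun;
 *	socklen_t len = sizeof(sun);
 *
 *	getsockname(fd, (struct sockaddr *)&sun, &len);
 *	if (len == offsetof(struct sockaddr_un, sun_path))
 *		;	// unbound: no path at all
 *	else if (sun.sun_path[0] == '\0')
 *		;	// abstract name, len - offsetof(...) - 1 bytes long
 */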
1925
1926 /* The "user->unix_inflight" variable is protected by the garbage
1927 * collection lock, and we just read it locklessly here. If you go
1928 * over the limit, there might be a tiny race in actually noticing
1929 * it across threads. Tough.
1930 */
1931 static inline bool too_many_unix_fds(struct task_struct *p)
1932 {
1933 struct user_struct *user = current_user();
1934
1935 if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
1936 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1937 return false;
1938 }
1939
1940 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1941 {
1942 if (too_many_unix_fds(current))
1943 return -ETOOMANYREFS;
1944
1945 UNIXCB(skb).fp = scm->fp;
1946 scm->fp = NULL;
1947
1948 if (unix_prepare_fpl(UNIXCB(skb).fp))
1949 return -ENOMEM;
1950
1951 return 0;
1952 }
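/* A minimal userspace sketch (illustrative; "sock" and "fd_to_pass"
 * are placeholders): the scm->fp list consumed above originates from
 * an SCM_RIGHTS control message built like this:
 *
 *	char data = 'x';
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	union {
 *		char buf[CMSG_SPACE(sizeof(int))];
 *		struct cmsghdr align;
 *	} u = {};
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock, &msg, 0);
 */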
1953
1954 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1955 {
1956 scm->fp = UNIXCB(skb).fp;
1957 UNIXCB(skb).fp = NULL;
1958
1959 unix_destroy_fpl(scm->fp);
1960 }
1961
1962 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1963 {
1964 scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1965
1966 unix_peek_fpl(scm->fp);
1967 }
1968
1969 static void unix_destruct_scm(struct sk_buff *skb)
1970 {
1971 struct scm_cookie scm = {};
1972
1973 swap(scm.pid, UNIXCB(skb).pid);
1974
1975 if (UNIXCB(skb).fp)
1976 unix_detach_fds(&scm, skb);
1977
1978 scm_destroy(&scm);
1979 }
1980
1981 static void unix_wfree(struct sk_buff *skb)
1982 {
1983 unix_destruct_scm(skb);
1984 sock_wfree(skb);
1985 }
1986
1987 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1988 {
1989 int err = 0;
1990
1991 UNIXCB(skb).pid = get_pid(scm->pid);
1992 UNIXCB(skb).uid = scm->creds.uid;
1993 UNIXCB(skb).gid = scm->creds.gid;
1994 UNIXCB(skb).fp = NULL;
1995 unix_get_secdata(scm, skb);
1996 if (scm->fp && send_fds)
1997 err = unix_attach_fds(scm, skb);
1998
1999 skb->destructor = unix_wfree;
2000 return err;
2001 }
2002
2003 static void unix_skb_to_scm(struct sk_buff *skb, struct scm_cookie *scm)
2004 {
2005 scm_set_cred(scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2006 unix_set_secdata(scm, skb);
2007 }
2008
2009 /**
2010 * unix_maybe_add_creds() - Adds current task uid/gid and struct pid to skb if needed.
2011 * @skb: skb to attach creds to.
2012 * @sk: Sender sock.
2013 * @other: Receiver sock.
2014 *
2015 * Some apps rely on write() giving SCM_CREDENTIALS.
2016 * We include credentials if the source or destination socket
2017 * asserted SOCK_PASSCRED.
2018 *
2019 * Context: May sleep.
2020 * Return: On success zero, on error a negative error code is returned.
2021 */
2022 static int unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
2023 const struct sock *other)
2024 {
2025 if (UNIXCB(skb).pid)
2026 return 0;
2027
2028 if (unix_may_passcred(sk) || unix_may_passcred(other) ||
2029 !other->sk_socket) {
2030 struct pid *pid;
2031 int err;
2032
2033 pid = task_tgid(current);
2034 err = pidfs_register_pid(pid);
2035 if (unlikely(err))
2036 return err;
2037
2038 UNIXCB(skb).pid = get_pid(pid);
2039 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
2040 }
2041
2042 return 0;
2043 }
2044
2045 static bool unix_skb_scm_eq(struct sk_buff *skb,
2046 struct scm_cookie *scm)
2047 {
2048 return UNIXCB(skb).pid == scm->pid &&
2049 uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
2050 gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
2051 unix_secdata_eq(scm, skb);
2052 }
2053
2054 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
2055 {
2056 struct scm_fp_list *fp = UNIXCB(skb).fp;
2057 struct unix_sock *u = unix_sk(sk);
2058
2059 if (unlikely(fp && fp->count)) {
2060 atomic_add(fp->count, &u->scm_stat.nr_fds);
2061 unix_add_edges(fp, u);
2062 }
2063 }
2064
2065 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
2066 {
2067 struct scm_fp_list *fp = UNIXCB(skb).fp;
2068 struct unix_sock *u = unix_sk(sk);
2069
2070 if (unlikely(fp && fp->count)) {
2071 atomic_sub(fp->count, &u->scm_stat.nr_fds);
2072 unix_del_edges(fp);
2073 }
2074 }
2075
2076 static void unix_orphan_scm(struct sock *sk, struct sk_buff *skb)
2077 {
2078 scm_stat_del(sk, skb);
2079 unix_destruct_scm(skb);
2080 skb->destructor = sock_wfree;
2081 }
2082
2083 /*
2084 * Send AF_UNIX data.
2085 */
2086
2087 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
2088 size_t len)
2089 {
2090 struct sock *sk = sock->sk, *other = NULL;
2091 struct unix_sock *u = unix_sk(sk);
2092 struct scm_cookie scm;
2093 struct sk_buff *skb;
2094 int data_len = 0;
2095 int sk_locked;
2096 long timeo;
2097 int err;
2098
2099 err = scm_send(sock, msg, &scm, false);
2100 if (err < 0)
2101 return err;
2102
2103 if (msg->msg_flags & MSG_OOB) {
2104 err = -EOPNOTSUPP;
2105 goto out;
2106 }
2107
2108 if (msg->msg_namelen) {
2109 err = unix_validate_addr(msg->msg_name, msg->msg_namelen);
2110 if (err)
2111 goto out;
2112
2113 err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk,
2114 msg->msg_name,
2115 &msg->msg_namelen,
2116 NULL);
2117 if (err)
2118 goto out;
2119 }
2120
2121 if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) {
2122 err = unix_autobind(sk);
2123 if (err)
2124 goto out;
2125 }
2126
2127 if (len > READ_ONCE(sk->sk_sndbuf) - 32) {
2128 err = -EMSGSIZE;
2129 goto out;
2130 }
2131
2132 if (len > SKB_MAX_ALLOC) {
2133 data_len = min_t(size_t,
2134 len - SKB_MAX_ALLOC,
2135 MAX_SKB_FRAGS * PAGE_SIZE);
2136 data_len = PAGE_ALIGN(data_len);
2137
2138 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
2139 }
2140
2141 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
2142 msg->msg_flags & MSG_DONTWAIT, &err,
2143 PAGE_ALLOC_COSTLY_ORDER);
2144 if (!skb)
2145 goto out;
2146
2147 err = unix_scm_to_skb(&scm, skb, true);
2148 if (err < 0)
2149 goto out_free;
2150
2151 skb_put(skb, len - data_len);
2152 skb->data_len = data_len;
2153 skb->len = len;
2154 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
2155 if (err)
2156 goto out_free;
2157
2158 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2159
2160 if (msg->msg_namelen) {
2161 lookup:
2162 other = unix_find_other(sock_net(sk), msg->msg_name,
2163 msg->msg_namelen, sk->sk_type, 0);
2164 if (IS_ERR(other)) {
2165 err = PTR_ERR(other);
2166 goto out_free;
2167 }
2168 } else {
2169 other = unix_peer_get(sk);
2170 if (!other) {
2171 err = -ENOTCONN;
2172 goto out_free;
2173 }
2174 }
2175
2176 if (sk_filter(other, skb) < 0) {
2177 /* Toss the packet but do not return any error to the sender */
2178 err = len;
2179 goto out_sock_put;
2180 }
2181
2182 err = unix_maybe_add_creds(skb, sk, other);
2183 if (err)
2184 goto out_sock_put;
2185
2186 restart:
2187 sk_locked = 0;
2188 unix_state_lock(other);
2189 restart_locked:
2190
2191 if (!unix_may_send(sk, other)) {
2192 err = -EPERM;
2193 goto out_unlock;
2194 }
2195
2196 if (unlikely(sock_flag(other, SOCK_DEAD))) {
2197 /* Check with 1003.1g - what should a datagram error return? */
2198
2199 unix_state_unlock(other);
2200
2201 if (sk->sk_type == SOCK_SEQPACKET) {
2202 /* We are here only when racing with unix_release_sock()
2203 * is clearing @other. Never change the state to TCP_CLOSE
2204 * here, unlike the SOCK_DGRAM path below.
2205 */
2206 err = -EPIPE;
2207 goto out_sock_put;
2208 }
2209
2210 if (!sk_locked)
2211 unix_state_lock(sk);
2212
2213 if (unix_peer(sk) == other) {
2214 unix_peer(sk) = NULL;
2215 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2216
2217 WRITE_ONCE(sk->sk_state, TCP_CLOSE);
2218 unix_state_unlock(sk);
2219
2220 unix_dgram_disconnected(sk, other);
2221 sock_put(other);
2222 err = -ECONNREFUSED;
2223 goto out_sock_put;
2224 }
2225
2226 unix_state_unlock(sk);
2227
2228 if (!msg->msg_namelen) {
2229 err = -ECONNRESET;
2230 goto out_sock_put;
2231 }
2232
2233 sock_put(other);
2234 goto lookup;
2235 }
2236
2237 if (other->sk_shutdown & RCV_SHUTDOWN) {
2238 err = -EPIPE;
2239 goto out_unlock;
2240 }
2241
2242 if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2243 err = -EPERM;
2244 goto out_unlock;
2245 }
2246
2247 if (sk->sk_type != SOCK_SEQPACKET) {
2248 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2249 if (err)
2250 goto out_unlock;
2251 }
2252
2253 /* other == sk && unix_peer(other) != sk if
2254 * - unix_peer(sk) == NULL, destination address bound to sk
2255 * - unix_peer(sk) == sk at the time of the get, but disconnected before the lock
2256 */
2257 if (other != sk &&
2258 unlikely(unix_peer(other) != sk &&
2259 unix_recvq_full_lockless(other))) {
2260 if (timeo) {
2261 timeo = unix_wait_for_peer(other, timeo);
2262
2263 err = sock_intr_errno(timeo);
2264 if (signal_pending(current))
2265 goto out_sock_put;
2266
2267 goto restart;
2268 }
2269
2270 if (!sk_locked) {
2271 unix_state_unlock(other);
2272 unix_state_double_lock(sk, other);
2273 }
2274
2275 if (unix_peer(sk) != other ||
2276 unix_dgram_peer_wake_me(sk, other)) {
2277 err = -EAGAIN;
2278 sk_locked = 1;
2279 goto out_unlock;
2280 }
2281
2282 if (!sk_locked) {
2283 sk_locked = 1;
2284 goto restart_locked;
2285 }
2286 }
2287
2288 if (unlikely(sk_locked))
2289 unix_state_unlock(sk);
2290
2291 if (sock_flag(other, SOCK_RCVTSTAMP))
2292 __net_timestamp(skb);
2293
2294 scm_stat_add(other, skb);
2295 skb_queue_tail(&other->sk_receive_queue, skb);
2296 unix_state_unlock(other);
2297 READ_ONCE(other->sk_data_ready)(other);
2298 sock_put(other);
2299 scm_destroy(&scm);
2300 return len;
2301
2302 out_unlock:
2303 if (sk_locked)
2304 unix_state_unlock(sk);
2305 unix_state_unlock(other);
2306 out_sock_put:
2307 sock_put(other);
2308 out_free:
2309 consume_skb(skb);
2310 out:
2311 scm_destroy(&scm);
2312 return err;
2313 }
2314
2315 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2316 * bytes, with a minimum of a full page.
2317 */
2318 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
2319
2320 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2321 static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
2322 struct scm_cookie *scm, bool fds_sent)
2323 {
2324 struct unix_sock *ousk = unix_sk(other);
2325 struct sk_buff *skb;
2326 int err;
2327
2328 skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2329
2330 if (!skb)
2331 return err;
2332
2333 err = unix_scm_to_skb(scm, skb, !fds_sent);
2334 if (err < 0)
2335 goto out;
2336
2337 err = unix_maybe_add_creds(skb, sk, other);
2338 if (err)
2339 goto out;
2340
2341 skb_put(skb, 1);
2342 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2343
2344 if (err)
2345 goto out;
2346
2347 unix_state_lock(other);
2348
2349 if (sock_flag(other, SOCK_DEAD) ||
2350 (other->sk_shutdown & RCV_SHUTDOWN)) {
2351 err = -EPIPE;
2352 goto out_unlock;
2353 }
2354
2355 if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2356 err = -EPERM;
2357 goto out_unlock;
2358 }
2359
2360 scm_stat_add(other, skb);
2361
2362 spin_lock(&other->sk_receive_queue.lock);
2363 WRITE_ONCE(ousk->oob_skb, skb);
2364 WRITE_ONCE(ousk->inq_len, ousk->inq_len + 1);
2365 __skb_queue_tail(&other->sk_receive_queue, skb);
2366 spin_unlock(&other->sk_receive_queue.lock);
2367
2368 sk_send_sigurg(other);
2369 unix_state_unlock(other);
2370 READ_ONCE(other->sk_data_ready)(other);
2371
2372 return 0;
2373 out_unlock:
2374 unix_state_unlock(other);
2375 out:
2376 consume_skb(skb);
2377 return err;
2378 }
2379 #endif
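/* A minimal userspace sketch (illustrative, assumes CONFIG_AF_UNIX_OOB):
 * like TCP urgent data, only the final byte of the send travels out of
 * band, and the receiver fetches it with MSG_OOB unless SO_OOBINLINE
 * is set:
 *
 *	send(a, "ab", 2, MSG_OOB);	// 'a' in band, 'b' is the OOB byte
 *
 *	char c;
 *	recv(b, &c, 1, MSG_OOB);	// c == 'b'
 */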
2380
2381 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2382 size_t len)
2383 {
2384 struct sock *sk = sock->sk;
2385 struct sk_buff *skb = NULL;
2386 struct sock *other = NULL;
2387 struct unix_sock *otheru;
2388 struct scm_cookie scm;
2389 bool fds_sent = false;
2390 int err, sent = 0;
2391
2392 err = scm_send(sock, msg, &scm, false);
2393 if (err < 0)
2394 return err;
2395
2396 if (msg->msg_flags & MSG_OOB) {
2397 err = -EOPNOTSUPP;
2398 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2399 if (len)
2400 len--;
2401 else
2402 #endif
2403 goto out_err;
2404 }
2405
2406 if (msg->msg_namelen) {
2407 err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2408 goto out_err;
2409 }
2410
2411 other = unix_peer(sk);
2412 if (!other) {
2413 err = -ENOTCONN;
2414 goto out_err;
2415 }
2416
2417 otheru = unix_sk(other);
2418
2419 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2420 goto out_pipe;
2421
2422 while (sent < len) {
2423 int size = len - sent;
2424 int data_len;
2425
2426 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2427 skb = sock_alloc_send_pskb(sk, 0, 0,
2428 msg->msg_flags & MSG_DONTWAIT,
2429 &err, 0);
2430 } else {
2431 /* Keep two messages in the pipe so it schedules better */
2432 size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
2433
2434 /* allow fallback to order-0 allocations */
2435 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2436
2437 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2438
2439 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2440
2441 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2442 msg->msg_flags & MSG_DONTWAIT, &err,
2443 get_order(UNIX_SKB_FRAGS_SZ));
2444 }
2445 if (!skb)
2446 goto out_err;
2447
2448 /* Only send the fds in the first buffer */
2449 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2450 if (err < 0)
2451 goto out_free;
2452
2453 fds_sent = true;
2454
2455 err = unix_maybe_add_creds(skb, sk, other);
2456 if (err)
2457 goto out_free;
2458
2459 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2460 skb->ip_summed = CHECKSUM_UNNECESSARY;
2461 err = skb_splice_from_iter(skb, &msg->msg_iter, size);
2462 if (err < 0)
2463 goto out_free;
2464
2465 size = err;
2466 refcount_add(size, &sk->sk_wmem_alloc);
2467 } else {
2468 skb_put(skb, size - data_len);
2469 skb->data_len = data_len;
2470 skb->len = size;
2471 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2472 if (err)
2473 goto out_free;
2474 }
2475
2476 unix_state_lock(other);
2477
2478 if (sock_flag(other, SOCK_DEAD) ||
2479 (other->sk_shutdown & RCV_SHUTDOWN))
2480 goto out_pipe_unlock;
2481
2482 if (UNIXCB(skb).fp && !other->sk_scm_rights) {
2483 unix_state_unlock(other);
2484 err = -EPERM;
2485 goto out_free;
2486 }
2487
2488 scm_stat_add(other, skb);
2489
2490 spin_lock(&other->sk_receive_queue.lock);
2491 WRITE_ONCE(otheru->inq_len, otheru->inq_len + skb->len);
2492 __skb_queue_tail(&other->sk_receive_queue, skb);
2493 spin_unlock(&other->sk_receive_queue.lock);
2494
2495 unix_state_unlock(other);
2496 READ_ONCE(other->sk_data_ready)(other);
2497 sent += size;
2498 }
2499
2500 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2501 if (msg->msg_flags & MSG_OOB) {
2502 err = queue_oob(sk, msg, other, &scm, fds_sent);
2503 if (err)
2504 goto out_err;
2505 sent++;
2506 }
2507 #endif
2508
2509 scm_destroy(&scm);
2510
2511 return sent;
2512
2513 out_pipe_unlock:
2514 unix_state_unlock(other);
2515 out_pipe:
2516 if (!sent && !(msg->msg_flags & MSG_NOSIGNAL))
2517 send_sig(SIGPIPE, current, 0);
2518 err = -EPIPE;
2519 out_free:
2520 consume_skb(skb);
2521 out_err:
2522 scm_destroy(&scm);
2523 return sent ? : err;
2524 }
2525
2526 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2527 size_t len)
2528 {
2529 int err;
2530 struct sock *sk = sock->sk;
2531
2532 err = sock_error(sk);
2533 if (err)
2534 return err;
2535
2536 if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2537 return -ENOTCONN;
2538
2539 if (msg->msg_namelen)
2540 msg->msg_namelen = 0;
2541
2542 return unix_dgram_sendmsg(sock, msg, len);
2543 }
2544
2545 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2546 size_t size, int flags)
2547 {
2548 struct sock *sk = sock->sk;
2549
2550 if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
2551 return -ENOTCONN;
2552
2553 return unix_dgram_recvmsg(sock, msg, size, flags);
2554 }
2555
2556 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2557 {
2558 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2559
2560 if (addr) {
2561 msg->msg_namelen = addr->len;
2562 memcpy(msg->msg_name, addr->name, addr->len);
2563 }
2564 }
2565
2566 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2567 int flags)
2568 {
2569 struct scm_cookie scm;
2570 struct socket *sock = sk->sk_socket;
2571 struct unix_sock *u = unix_sk(sk);
2572 struct sk_buff *skb, *last;
2573 long timeo;
2574 int skip;
2575 int err;
2576
2577 err = -EOPNOTSUPP;
2578 if (flags&MSG_OOB)
2579 goto out;
2580
2581 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2582
2583 do {
2584 mutex_lock(&u->iolock);
2585
2586 skip = sk_peek_offset(sk, flags);
2587 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2588 &skip, &err, &last);
2589 if (skb) {
2590 if (!(flags & MSG_PEEK))
2591 scm_stat_del(sk, skb);
2592 break;
2593 }
2594
2595 mutex_unlock(&u->iolock);
2596
2597 if (err != -EAGAIN)
2598 break;
2599 } while (timeo &&
2600 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2601 &err, &timeo, last));
2602
2603 if (!skb) { /* implies iolock unlocked */
2604 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2605 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2606 (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN))
2607 err = 0;
2608 goto out;
2609 }
2610
2611 if (wq_has_sleeper(&u->peer_wait))
2612 wake_up_interruptible_sync_poll(&u->peer_wait,
2613 EPOLLOUT | EPOLLWRNORM |
2614 EPOLLWRBAND);
2615
2616 if (msg->msg_name) {
2617 unix_copy_addr(msg, skb->sk);
2618
2619 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
2620 msg->msg_name,
2621 &msg->msg_namelen);
2622 }
2623
2624 if (size > skb->len - skip)
2625 size = skb->len - skip;
2626 else if (size < skb->len - skip)
2627 msg->msg_flags |= MSG_TRUNC;
2628
2629 err = skb_copy_datagram_msg(skb, skip, msg, size);
2630 if (err)
2631 goto out_free;
2632
2633 if (sock_flag(sk, SOCK_RCVTSTAMP))
2634 __sock_recv_timestamp(msg, sk, skb);
2635
2636 memset(&scm, 0, sizeof(scm));
2637
2638 unix_skb_to_scm(skb, &scm);
2639
2640 if (!(flags & MSG_PEEK)) {
2641 if (UNIXCB(skb).fp)
2642 unix_detach_fds(&scm, skb);
2643
2644 sk_peek_offset_bwd(sk, skb->len);
2645 } else {
2646 /* It is questionable: on PEEK we could:
2647 * - not return fds - good, but too simple 8)
2648 * - return fds, and not return them again on a later
2649 * read (old strategy, apparently wrong)
2650 * - clone fds (chosen here, as the most universal
2651 * solution)
2652 *
2653 * POSIX 1003.1g does not actually define this clearly
2654 * at all. POSIX 1003.1g doesn't define a lot of things
2655 * clearly, however!
2656 *
2657 */
2658
2659 sk_peek_offset_fwd(sk, size);
2660
2661 if (UNIXCB(skb).fp)
2662 unix_peek_fds(&scm, skb);
2663 }
2664 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2665
2666 scm_recv_unix(sock, msg, &scm, flags);
2667
2668 out_free:
2669 skb_free_datagram(sk, skb);
2670 mutex_unlock(&u->iolock);
2671 out:
2672 return err;
2673 }
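/* A minimal userspace sketch (illustrative): with the "clone fds"
 * strategy above, every MSG_PEEK of a message carrying SCM_RIGHTS
 * installs fresh descriptors that the caller must close:
 *
 *	recvmsg(fd, &msg, MSG_PEEK);	// dups any passed fds
 *	recvmsg(fd, &msg, 0);		// consumes the message, dups again
 */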
2674
2675 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2676 int flags)
2677 {
2678 struct sock *sk = sock->sk;
2679
2680 #ifdef CONFIG_BPF_SYSCALL
2681 const struct proto *prot = READ_ONCE(sk->sk_prot);
2682
2683 if (prot != &unix_dgram_proto)
2684 return prot->recvmsg(sk, msg, size, flags);
2685 #endif
2686 return __unix_dgram_recvmsg(sk, msg, size, flags);
2687 }
2688
2689 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2690 {
2691 struct unix_sock *u = unix_sk(sk);
2692 struct sk_buff *skb;
2693 int err;
2694
2695 mutex_lock(&u->iolock);
2696
2697 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2698 if (!skb) {
2699 mutex_unlock(&u->iolock);
2700 return err;
2701 }
2702
2703 unix_orphan_scm(sk, skb);
2704
2705 mutex_unlock(&u->iolock);
2706
2707 return recv_actor(sk, skb);
2708 }
2709
2710 /*
2711 * Sleep until more data has arrived, but check for races.
2712 */
2713 static long unix_stream_data_wait(struct sock *sk, long timeo,
2714 struct sk_buff *last, unsigned int last_len,
2715 bool freezable)
2716 {
2717 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2718 struct sk_buff *tail;
2719 DEFINE_WAIT(wait);
2720
2721 unix_state_lock(sk);
2722
2723 for (;;) {
2724 prepare_to_wait(sk_sleep(sk), &wait, state);
2725
2726 tail = skb_peek_tail(&sk->sk_receive_queue);
2727 if (tail != last ||
2728 (tail && tail->len != last_len) ||
2729 sk->sk_err ||
2730 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2731 signal_pending(current) ||
2732 !timeo)
2733 break;
2734
2735 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2736 unix_state_unlock(sk);
2737 timeo = schedule_timeout(timeo);
2738 unix_state_lock(sk);
2739
2740 if (sock_flag(sk, SOCK_DEAD))
2741 break;
2742
2743 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2744 }
2745
2746 finish_wait(sk_sleep(sk), &wait);
2747 unix_state_unlock(sk);
2748 return timeo;
2749 }
2750
2751 struct unix_stream_read_state {
2752 int (*recv_actor)(struct sk_buff *, int, int,
2753 struct unix_stream_read_state *);
2754 struct socket *socket;
2755 struct msghdr *msg;
2756 struct pipe_inode_info *pipe;
2757 size_t size;
2758 int flags;
2759 unsigned int splice_flags;
2760 };
2761
2762 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2763 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2764 {
2765 struct sk_buff *oob_skb, *read_skb = NULL;
2766 struct socket *sock = state->socket;
2767 struct sock *sk = sock->sk;
2768 struct unix_sock *u = unix_sk(sk);
2769 int chunk = 1;
2770
2771 mutex_lock(&u->iolock);
2772 unix_state_lock(sk);
2773 spin_lock(&sk->sk_receive_queue.lock);
2774
2775 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2776 spin_unlock(&sk->sk_receive_queue.lock);
2777 unix_state_unlock(sk);
2778 mutex_unlock(&u->iolock);
2779 return -EINVAL;
2780 }
2781
2782 oob_skb = u->oob_skb;
2783
2784 if (!(state->flags & MSG_PEEK)) {
2785 WRITE_ONCE(u->oob_skb, NULL);
2786 WRITE_ONCE(u->inq_len, u->inq_len - 1);
2787
2788 if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
2789 !unix_skb_len(oob_skb->prev)) {
2790 read_skb = oob_skb->prev;
2791 __skb_unlink(read_skb, &sk->sk_receive_queue);
2792 }
2793 }
2794
2795 spin_unlock(&sk->sk_receive_queue.lock);
2796 unix_state_unlock(sk);
2797
2798 chunk = state->recv_actor(oob_skb, 0, chunk, state);
2799
2800 if (!(state->flags & MSG_PEEK))
2801 UNIXCB(oob_skb).consumed += 1;
2802
2803 mutex_unlock(&u->iolock);
2804
2805 consume_skb(read_skb);
2806
2807 if (chunk < 0)
2808 return -EFAULT;
2809
2810 state->msg->msg_flags |= MSG_OOB;
2811 return 1;
2812 }
2813
2814 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2815 int flags, int copied)
2816 {
2817 struct sk_buff *read_skb = NULL, *unread_skb = NULL;
2818 struct unix_sock *u = unix_sk(sk);
2819
2820 if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
2821 return skb;
2822
2823 spin_lock(&sk->sk_receive_queue.lock);
2824
2825 if (!unix_skb_len(skb)) {
2826 if (copied && (!u->oob_skb || skb == u->oob_skb)) {
2827 skb = NULL;
2828 } else if (flags & MSG_PEEK) {
2829 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2830 } else {
2831 read_skb = skb;
2832 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2833 __skb_unlink(read_skb, &sk->sk_receive_queue);
2834 }
2835
2836 if (!skb)
2837 goto unlock;
2838 }
2839
2840 if (skb != u->oob_skb)
2841 goto unlock;
2842
2843 if (copied) {
2844 skb = NULL;
2845 } else if (!(flags & MSG_PEEK)) {
2846 WRITE_ONCE(u->oob_skb, NULL);
2847
2848 if (!sock_flag(sk, SOCK_URGINLINE)) {
2849 __skb_unlink(skb, &sk->sk_receive_queue);
2850 unread_skb = skb;
2851 skb = skb_peek(&sk->sk_receive_queue);
2852 }
2853 } else if (!sock_flag(sk, SOCK_URGINLINE)) {
2854 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2855 }
2856
2857 unlock:
2858 spin_unlock(&sk->sk_receive_queue.lock);
2859
2860 consume_skb(read_skb);
2861 kfree_skb_reason(unread_skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2862
2863 return skb;
2864 }
2865 #endif
2866
2867 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2868 {
2869 struct sk_buff_head *queue = &sk->sk_receive_queue;
2870 struct unix_sock *u = unix_sk(sk);
2871 struct sk_buff *skb;
2872 int err;
2873
2874 if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
2875 return -ENOTCONN;
2876
2877 err = sock_error(sk);
2878 if (err)
2879 return err;
2880
2881 mutex_lock(&u->iolock);
2882 spin_lock(&queue->lock);
2883
2884 skb = __skb_dequeue(queue);
2885 if (!skb) {
2886 spin_unlock(&queue->lock);
2887 mutex_unlock(&u->iolock);
2888 return -EAGAIN;
2889 }
2890
2891 WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
2892
2893 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2894 if (skb == u->oob_skb) {
2895 WRITE_ONCE(u->oob_skb, NULL);
2896 spin_unlock(&queue->lock);
2897 mutex_unlock(&u->iolock);
2898
2899 kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
2900 return -EAGAIN;
2901 }
2902 #endif
2903
2904 spin_unlock(&queue->lock);
2905
2906 unix_orphan_scm(sk, skb);
2907
2908 mutex_unlock(&u->iolock);
2909
2910 return recv_actor(sk, skb);
2911 }
2912
2913 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2914 bool freezable)
2915 {
2916 int noblock = state->flags & MSG_DONTWAIT;
2917 struct socket *sock = state->socket;
2918 struct msghdr *msg = state->msg;
2919 struct sock *sk = sock->sk;
2920 size_t size = state->size;
2921 int flags = state->flags;
2922 bool check_creds = false;
2923 struct scm_cookie scm;
2924 unsigned int last_len;
2925 struct unix_sock *u;
2926 int copied = 0;
2927 int err = 0;
2928 long timeo;
2929 int target;
2930 int skip;
2931
2932 if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
2933 err = -EINVAL;
2934 goto out;
2935 }
2936
2937 if (unlikely(flags & MSG_OOB)) {
2938 err = -EOPNOTSUPP;
2939 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2940 err = unix_stream_recv_urg(state);
2941 #endif
2942 goto out;
2943 }
2944
2945 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2946 timeo = sock_rcvtimeo(sk, noblock);
2947
2948 memset(&scm, 0, sizeof(scm));
2949
2950 u = unix_sk(sk);
2951
2952 redo:
2953 /* Lock the socket to prevent queue disordering
2954 * while we sleep copying data out to the msg.
2955 */
2956 mutex_lock(&u->iolock);
2957
2958 skip = max(sk_peek_offset(sk, flags), 0);
2959
2960 do {
2961 struct sk_buff *skb, *last;
2962 int chunk;
2963
2964 unix_state_lock(sk);
2965 if (sock_flag(sk, SOCK_DEAD)) {
2966 err = -ECONNRESET;
2967 goto unlock;
2968 }
2969 last = skb = skb_peek(&sk->sk_receive_queue);
2970 last_len = last ? last->len : 0;
2971
2972 again:
2973 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2974 if (skb) {
2975 skb = manage_oob(skb, sk, flags, copied);
2976 if (!skb && copied) {
2977 unix_state_unlock(sk);
2978 break;
2979 }
2980 }
2981 #endif
2982 if (skb == NULL) {
2983 if (copied >= target)
2984 goto unlock;
2985
2986 /*
2987 * POSIX 1003.1g mandates this order.
2988 */
2989
2990 err = sock_error(sk);
2991 if (err)
2992 goto unlock;
2993 if (sk->sk_shutdown & RCV_SHUTDOWN)
2994 goto unlock;
2995
2996 unix_state_unlock(sk);
2997 if (!timeo) {
2998 err = -EAGAIN;
2999 break;
3000 }
3001
3002 mutex_unlock(&u->iolock);
3003
3004 timeo = unix_stream_data_wait(sk, timeo, last,
3005 last_len, freezable);
3006
3007 if (signal_pending(current)) {
3008 err = sock_intr_errno(timeo);
3009 scm_destroy(&scm);
3010 goto out;
3011 }
3012
3013 goto redo;
3014 unlock:
3015 unix_state_unlock(sk);
3016 break;
3017 }
3018
3019 while (skip >= unix_skb_len(skb)) {
3020 skip -= unix_skb_len(skb);
3021 last = skb;
3022 last_len = skb->len;
3023 skb = skb_peek_next(skb, &sk->sk_receive_queue);
3024 if (!skb)
3025 goto again;
3026 }
3027
3028 unix_state_unlock(sk);
3029
3030 if (check_creds) {
3031 /* Never glue messages from different writers */
3032 if (!unix_skb_scm_eq(skb, &scm))
3033 break;
3034 } else if (unix_may_passcred(sk)) {
3035 /* Copy credentials */
3036 unix_skb_to_scm(skb, &scm);
3037 check_creds = true;
3038 }
3039
3040 /* Copy address just once */
3041 if (msg && msg->msg_name) {
3042 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
3043
3044 unix_copy_addr(msg, skb->sk);
3045 BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, msg->msg_name,
3046 &msg->msg_namelen);
3047
3048 sunaddr = NULL;
3049 }
3050
3051 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
3052 chunk = state->recv_actor(skb, skip, chunk, state);
3053 if (chunk < 0) {
3054 if (copied == 0)
3055 copied = -EFAULT;
3056 break;
3057 }
3058 copied += chunk;
3059 size -= chunk;
3060
3061 /* Mark read part of skb as used */
3062 if (!(flags & MSG_PEEK)) {
3063 UNIXCB(skb).consumed += chunk;
3064
3065 sk_peek_offset_bwd(sk, chunk);
3066
3067 if (UNIXCB(skb).fp) {
3068 scm_stat_del(sk, skb);
3069 unix_detach_fds(&scm, skb);
3070 }
3071
3072 if (unix_skb_len(skb))
3073 break;
3074
3075 spin_lock(&sk->sk_receive_queue.lock);
3076 WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
3077 __skb_unlink(skb, &sk->sk_receive_queue);
3078 spin_unlock(&sk->sk_receive_queue.lock);
3079
3080 consume_skb(skb);
3081
3082 if (scm.fp)
3083 break;
3084 } else {
3085 /* It is questionable, see note in unix_dgram_recvmsg.
3086 */
3087 if (UNIXCB(skb).fp)
3088 unix_peek_fds(&scm, skb);
3089
3090 sk_peek_offset_fwd(sk, chunk);
3091
3092 if (UNIXCB(skb).fp)
3093 break;
3094
3095 skip = 0;
3096 last = skb;
3097 last_len = skb->len;
3098 unix_state_lock(sk);
3099 skb = skb_peek_next(skb, &sk->sk_receive_queue);
3100 if (skb)
3101 goto again;
3102 unix_state_unlock(sk);
3103 break;
3104 }
3105 } while (size);
3106
3107 mutex_unlock(&u->iolock);
3108 if (msg) {
3109 bool do_cmsg = READ_ONCE(u->recvmsg_inq);
3110
3111 scm_recv_unix(sock, msg, &scm, flags);
3112
3113 if ((do_cmsg | msg->msg_get_inq) && (copied ?: err) >= 0) {
3114 msg->msg_inq = READ_ONCE(u->inq_len);
3115 if (do_cmsg)
3116 put_cmsg(msg, SOL_SOCKET, SCM_INQ,
3117 sizeof(msg->msg_inq), &msg->msg_inq);
3118 }
3119 } else {
3120 scm_destroy(&scm);
3121 }
3122 out:
3123 return copied ? : err;
3124 }
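/* A minimal userspace sketch (illustrative): the sk_peek_offset()
 * bookkeeping above backs SO_PEEK_OFF, letting successive MSG_PEEK
 * reads walk forward through queued data instead of re-reading it:
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 16..31
 */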
3125
3126 static int unix_stream_read_actor(struct sk_buff *skb,
3127 int skip, int chunk,
3128 struct unix_stream_read_state *state)
3129 {
3130 int ret;
3131
3132 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
3133 state->msg, chunk);
3134 return ret ?: chunk;
3135 }
3136
3137 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
3138 size_t size, int flags)
3139 {
3140 struct unix_stream_read_state state = {
3141 .recv_actor = unix_stream_read_actor,
3142 .socket = sk->sk_socket,
3143 .msg = msg,
3144 .size = size,
3145 .flags = flags
3146 };
3147
3148 return unix_stream_read_generic(&state, true);
3149 }
3150
3151 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
3152 size_t size, int flags)
3153 {
3154 struct unix_stream_read_state state = {
3155 .recv_actor = unix_stream_read_actor,
3156 .socket = sock,
3157 .msg = msg,
3158 .size = size,
3159 .flags = flags
3160 };
3161
3162 #ifdef CONFIG_BPF_SYSCALL
3163 struct sock *sk = sock->sk;
3164 const struct proto *prot = READ_ONCE(sk->sk_prot);
3165
3166 if (prot != &unix_stream_proto)
3167 return prot->recvmsg(sk, msg, size, flags);
3168 #endif
3169 return unix_stream_read_generic(&state, true);
3170 }
3171
3172 static int unix_stream_splice_actor(struct sk_buff *skb,
3173 int skip, int chunk,
3174 struct unix_stream_read_state *state)
3175 {
3176 return skb_splice_bits(skb, state->socket->sk,
3177 UNIXCB(skb).consumed + skip,
3178 state->pipe, chunk, state->splice_flags);
3179 }
3180
3181 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
3182 struct pipe_inode_info *pipe,
3183 size_t size, unsigned int flags)
3184 {
3185 struct unix_stream_read_state state = {
3186 .recv_actor = unix_stream_splice_actor,
3187 .socket = sock,
3188 .pipe = pipe,
3189 .size = size,
3190 .splice_flags = flags,
3191 };
3192
3193 if (unlikely(*ppos))
3194 return -ESPIPE;
3195
3196 if (sock->file->f_flags & O_NONBLOCK ||
3197 flags & SPLICE_F_NONBLOCK)
3198 state.flags = MSG_DONTWAIT;
3199
3200 return unix_stream_read_generic(&state, false);
3201 }
3202
3203 static int unix_shutdown(struct socket *sock, int mode)
3204 {
3205 struct sock *sk = sock->sk;
3206 struct sock *other;
3207
3208 if (mode < SHUT_RD || mode > SHUT_RDWR)
3209 return -EINVAL;
3210 /* This maps:
3211 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
3212 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
3213 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
3214 */
3215 ++mode;
3216
3217 unix_state_lock(sk);
3218 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
3219 other = unix_peer(sk);
3220 if (other)
3221 sock_hold(other);
3222 unix_state_unlock(sk);
3223 sk->sk_state_change(sk);
3224
3225 if (other &&
3226 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
3227
3228 int peer_mode = 0;
3229 const struct proto *prot = READ_ONCE(other->sk_prot);
3230
3231 if (prot->unhash)
3232 prot->unhash(other);
3233 if (mode&RCV_SHUTDOWN)
3234 peer_mode |= SEND_SHUTDOWN;
3235 if (mode&SEND_SHUTDOWN)
3236 peer_mode |= RCV_SHUTDOWN;
3237 unix_state_lock(other);
3238 WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
3239 unix_state_unlock(other);
3240 other->sk_state_change(other);
3241 if (peer_mode == SHUTDOWN_MASK)
3242 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
3243 else if (peer_mode & RCV_SHUTDOWN)
3244 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
3245 }
3246 if (other)
3247 sock_put(other);
3248
3249 return 0;
3250 }
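/* A minimal userspace sketch (illustrative): since SHUT_WR on one end
 * maps to RCV_SHUTDOWN on the peer, the peer sees EOF while its own
 * writes keep working:
 *
 *	shutdown(a, SHUT_WR);
 *	read(b, buf, sizeof(buf));	// returns 0 (EOF)
 *	write(b, "x", 1);		// still succeeds
 */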
3251
3252 long unix_inq_len(struct sock *sk)
3253 {
3254 struct sk_buff *skb;
3255 long amount = 0;
3256
3257 if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
3258 return -EINVAL;
3259
3260 if (sk->sk_type == SOCK_STREAM)
3261 return READ_ONCE(unix_sk(sk)->inq_len);
3262
3263 spin_lock(&sk->sk_receive_queue.lock);
3264 if (sk->sk_type == SOCK_SEQPACKET) {
3265 skb_queue_walk(&sk->sk_receive_queue, skb)
3266 amount += unix_skb_len(skb);
3267 } else {
3268 skb = skb_peek(&sk->sk_receive_queue);
3269 if (skb)
3270 amount = skb->len;
3271 }
3272 spin_unlock(&sk->sk_receive_queue.lock);
3273
3274 return amount;
3275 }
3276 EXPORT_SYMBOL_GPL(unix_inq_len);
3277
3278 long unix_outq_len(struct sock *sk)
3279 {
3280 return sk_wmem_alloc_get(sk);
3281 }
3282 EXPORT_SYMBOL_GPL(unix_outq_len);
3283
3284 static int unix_open_file(struct sock *sk)
3285 {
3286 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
3287 return -EPERM;
3288
3289 if (!smp_load_acquire(&unix_sk(sk)->addr))
3290 return -ENOENT;
3291
3292 if (!unix_sk(sk)->path.dentry)
3293 return -ENOENT;
3294
3295 return FD_ADD(O_CLOEXEC, dentry_open(&unix_sk(sk)->path, O_PATH, current_cred()));
3296 }
3297
3298 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3299 {
3300 struct sock *sk = sock->sk;
3301 long amount = 0;
3302 int err;
3303
3304 switch (cmd) {
3305 case SIOCOUTQ:
3306 amount = unix_outq_len(sk);
3307 err = put_user(amount, (int __user *)arg);
3308 break;
3309 case SIOCINQ:
3310 amount = unix_inq_len(sk);
3311 if (amount < 0)
3312 err = amount;
3313 else
3314 err = put_user(amount, (int __user *)arg);
3315 break;
3316 case SIOCUNIXFILE:
3317 err = unix_open_file(sk);
3318 break;
3319 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3320 case SIOCATMARK:
3321 {
3322 struct unix_sock *u = unix_sk(sk);
3323 struct sk_buff *skb;
3324 int answ = 0;
3325
3326 mutex_lock(&u->iolock);
3327
3328 skb = skb_peek(&sk->sk_receive_queue);
3329 if (skb) {
3330 struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
3331 struct sk_buff *next_skb;
3332
3333 next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
3334
3335 if (skb == oob_skb ||
3336 (!unix_skb_len(skb) &&
3337 (!oob_skb || next_skb == oob_skb)))
3338 answ = 1;
3339 }
3340
3341 mutex_unlock(&u->iolock);
3342
3343 err = put_user(answ, (int __user *)arg);
3344 }
3345 break;
3346 #endif
3347 default:
3348 err = -ENOIOCTLCMD;
3349 break;
3350 }
3351 return err;
3352 }
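/* A minimal userspace sketch (illustrative): SIOCINQ reports unread
 * payload bytes, and (with CONFIG_AF_UNIX_OOB) SIOCATMARK reports
 * whether the next read starts at the OOB mark:
 *
 *	int n, at_mark;
 *
 *	ioctl(fd, SIOCINQ, &n);
 *	ioctl(fd, SIOCATMARK, &at_mark);	// 1 if at the OOB byte
 */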
3353
3354 #ifdef CONFIG_COMPAT
3355 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3356 {
3357 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
3358 }
3359 #endif
3360
3361 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3362 {
3363 struct sock *sk = sock->sk;
3364 unsigned char state;
3365 __poll_t mask;
3366 u8 shutdown;
3367
3368 sock_poll_wait(file, sock, wait);
3369 mask = 0;
3370 shutdown = READ_ONCE(sk->sk_shutdown);
3371 state = READ_ONCE(sk->sk_state);
3372
3373 /* exceptional events? */
3374 if (READ_ONCE(sk->sk_err))
3375 mask |= EPOLLERR;
3376 if (shutdown == SHUTDOWN_MASK)
3377 mask |= EPOLLHUP;
3378 if (shutdown & RCV_SHUTDOWN)
3379 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3380
3381 /* readable? */
3382 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3383 mask |= EPOLLIN | EPOLLRDNORM;
3384 if (sk_is_readable(sk))
3385 mask |= EPOLLIN | EPOLLRDNORM;
3386 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3387 if (READ_ONCE(unix_sk(sk)->oob_skb))
3388 mask |= EPOLLPRI;
3389 #endif
3390
3391 /* Connection-based sockets need to check for termination and startup */
3392 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3393 state == TCP_CLOSE)
3394 mask |= EPOLLHUP;
3395
3396 /*
3397 * We also report the socket as writable when the other side has
3398 * shut down the connection. This prevents stuck sockets.
3399 */
3400 if (unix_writable(sk, state))
3401 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3402
3403 return mask;
3404 }
3405
3406 static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
3407 poll_table *wait)
3408 {
3409 struct sock *sk = sock->sk, *other;
3410 unsigned int writable;
3411 unsigned char state;
3412 __poll_t mask;
3413 u8 shutdown;
3414
3415 sock_poll_wait(file, sock, wait);
3416 mask = 0;
3417 shutdown = READ_ONCE(sk->sk_shutdown);
3418 state = READ_ONCE(sk->sk_state);
3419
3420 /* exceptional events? */
3421 if (READ_ONCE(sk->sk_err) ||
3422 !skb_queue_empty_lockless(&sk->sk_error_queue))
3423 mask |= EPOLLERR |
3424 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
3425
3426 if (shutdown & RCV_SHUTDOWN)
3427 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3428 if (shutdown == SHUTDOWN_MASK)
3429 mask |= EPOLLHUP;
3430
3431 /* readable? */
3432 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3433 mask |= EPOLLIN | EPOLLRDNORM;
3434 if (sk_is_readable(sk))
3435 mask |= EPOLLIN | EPOLLRDNORM;
3436
3437 /* Connection-based sockets need to check for termination and startup */
3438 if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3439 mask |= EPOLLHUP;
3440
3441 /* No write status requested, avoid expensive OUT tests. */
3442 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3443 return mask;
3444
3445 writable = unix_writable(sk, state);
3446 if (writable) {
3447 unix_state_lock(sk);
3448
3449 other = unix_peer(sk);
3450 if (other && unix_peer(other) != sk &&
3451 unix_recvq_full_lockless(other) &&
3452 unix_dgram_peer_wake_me(sk, other))
3453 writable = 0;
3454
3455 unix_state_unlock(sk);
3456 }
3457
3458 if (writable)
3459 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3460 else
3461 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3462
3463 return mask;
3464 }
3465
3466 #ifdef CONFIG_PROC_FS
3467
3468 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3469
3470 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3471 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3472 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
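/* Worked example (assuming a 64-bit kernel and UNIX_HASH_BITS == 8):
 * BUCKET_SPACE is 64 - 9 - 1 = 54 bits, so a seq position packs as
 * pos = (bucket << 54) | offset. Offsets count from 1, matching the
 * "++count == offset" walk in unix_from_bucket() below.
 */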
3473
3474 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3475 {
3476 unsigned long offset = get_offset(*pos);
3477 unsigned long bucket = get_bucket(*pos);
3478 unsigned long count = 0;
3479 struct sock *sk;
3480
3481 for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3482 sk; sk = sk_next(sk)) {
3483 if (++count == offset)
3484 break;
3485 }
3486
3487 return sk;
3488 }
3489
3490 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3491 {
3492 unsigned long bucket = get_bucket(*pos);
3493 struct net *net = seq_file_net(seq);
3494 struct sock *sk;
3495
3496 while (bucket < UNIX_HASH_SIZE) {
3497 spin_lock(&net->unx.table.locks[bucket]);
3498
3499 sk = unix_from_bucket(seq, pos);
3500 if (sk)
3501 return sk;
3502
3503 spin_unlock(&net->unx.table.locks[bucket]);
3504
3505 *pos = set_bucket_offset(++bucket, 1);
3506 }
3507
3508 return NULL;
3509 }
3510
3511 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3512 loff_t *pos)
3513 {
3514 unsigned long bucket = get_bucket(*pos);
3515
3516 sk = sk_next(sk);
3517 if (sk)
3518 return sk;
3519
3520
3521 spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3522
3523 *pos = set_bucket_offset(++bucket, 1);
3524
3525 return unix_get_first(seq, pos);
3526 }
3527
3528 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3529 {
3530 if (!*pos)
3531 return SEQ_START_TOKEN;
3532
3533 return unix_get_first(seq, pos);
3534 }
3535
3536 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3537 {
3538 ++*pos;
3539
3540 if (v == SEQ_START_TOKEN)
3541 return unix_get_first(seq, pos);
3542
3543 return unix_get_next(seq, v, pos);
3544 }
3545
3546 static void unix_seq_stop(struct seq_file *seq, void *v)
3547 {
3548 struct sock *sk = v;
3549
3550 if (sk)
3551 spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3552 }
3553
3554 static int unix_seq_show(struct seq_file *seq, void *v)
3555 {
3556
3557 if (v == SEQ_START_TOKEN)
3558 seq_puts(seq, "Num RefCount Protocol Flags Type St "
3559 "Inode Path\n");
3560 else {
3561 struct sock *s = v;
3562 struct unix_sock *u = unix_sk(s);
3563 unix_state_lock(s);
3564
3565 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5llu",
3566 s,
3567 refcount_read(&s->sk_refcnt),
3568 0,
3569 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3570 s->sk_type,
3571 s->sk_socket ?
3572 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3573 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
3574 sock_i_ino(s));
3575
3576 if (u->addr) { // under a hash table lock here
3577 int i, len;
3578 seq_putc(seq, ' ');
3579
3580 i = 0;
3581 len = u->addr->len -
3582 offsetof(struct sockaddr_un, sun_path);
3583 if (u->addr->name->sun_path[0]) {
3584 len--;
3585 } else {
3586 seq_putc(seq, '@');
3587 i++;
3588 }
3589 for ( ; i < len; i++)
3590 seq_putc(seq, u->addr->name->sun_path[i] ?:
3591 '@');
3592 }
3593 unix_state_unlock(s);
3594 seq_putc(seq, '\n');
3595 }
3596
3597 return 0;
3598 }
3599
3600 static const struct seq_operations unix_seq_ops = {
3601 .start = unix_seq_start,
3602 .next = unix_seq_next,
3603 .stop = unix_seq_stop,
3604 .show = unix_seq_show,
3605 };
3606
3607 #ifdef CONFIG_BPF_SYSCALL
3608 struct bpf_unix_iter_state {
3609 struct seq_net_private p;
3610 unsigned int cur_sk;
3611 unsigned int end_sk;
3612 unsigned int max_sk;
3613 struct sock **batch;
3614 bool st_bucket_done;
3615 };
3616
3617 struct bpf_iter__unix {
3618 __bpf_md_ptr(struct bpf_iter_meta *, meta);
3619 __bpf_md_ptr(struct unix_sock *, unix_sk);
3620 uid_t uid __aligned(8);
3621 };
3622
3623 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3624 struct unix_sock *unix_sk, uid_t uid)
3625 {
3626 struct bpf_iter__unix ctx;
3627
3628 meta->seq_num--; /* skip SEQ_START_TOKEN */
3629 ctx.meta = meta;
3630 ctx.unix_sk = unix_sk;
3631 ctx.uid = uid;
3632 return bpf_iter_run_prog(prog, &ctx);
3633 }
3634
3635 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3636
3637 {
3638 struct bpf_unix_iter_state *iter = seq->private;
3639 unsigned int expected = 1;
3640 struct sock *sk;
3641
3642 sock_hold(start_sk);
3643 iter->batch[iter->end_sk++] = start_sk;
3644
3645 for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3646 if (iter->end_sk < iter->max_sk) {
3647 sock_hold(sk);
3648 iter->batch[iter->end_sk++] = sk;
3649 }
3650
3651 expected++;
3652 }
3653
3654 spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3655
3656 return expected;
3657 }
3658
3659 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3660 {
3661 while (iter->cur_sk < iter->end_sk)
3662 sock_put(iter->batch[iter->cur_sk++]);
3663 }
3664
3665 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3666 unsigned int new_batch_sz)
3667 {
3668 struct sock **new_batch;
3669
3670 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3671 GFP_USER | __GFP_NOWARN);
3672 if (!new_batch)
3673 return -ENOMEM;
3674
3675 bpf_iter_unix_put_batch(iter);
3676 kvfree(iter->batch);
3677 iter->batch = new_batch;
3678 iter->max_sk = new_batch_sz;
3679
3680 return 0;
3681 }
3682
3683 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3684 loff_t *pos)
3685 {
3686 struct bpf_unix_iter_state *iter = seq->private;
3687 unsigned int expected;
3688 bool resized = false;
3689 struct sock *sk;
3690
3691 if (iter->st_bucket_done)
3692 *pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3693
3694 again:
3695 /* Get a new batch */
3696 iter->cur_sk = 0;
3697 iter->end_sk = 0;
3698
3699 sk = unix_get_first(seq, pos);
3700 if (!sk)
3701 return NULL; /* Done */
3702
3703 expected = bpf_iter_unix_hold_batch(seq, sk);
3704
3705 if (iter->end_sk == expected) {
3706 iter->st_bucket_done = true;
3707 return sk;
3708 }
3709
3710 if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3711 resized = true;
3712 goto again;
3713 }
3714
3715 return sk;
3716 }
3717
3718 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3719 {
3720 if (!*pos)
3721 return SEQ_START_TOKEN;
3722
3723 /* bpf iter does not support lseek, so it always
3724 * continues from where it was stop()-ped.
3725 */
3726 return bpf_iter_unix_batch(seq, pos);
3727 }
3728
3729 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3730 {
3731 struct bpf_unix_iter_state *iter = seq->private;
3732 struct sock *sk;
3733
3734 /* Whenever seq_next() is called, the iter->cur_sk is
3735 * done with seq_show(), so advance to the next sk in
3736 * the batch.
3737 */
3738 if (iter->cur_sk < iter->end_sk)
3739 sock_put(iter->batch[iter->cur_sk++]);
3740
3741 ++*pos;
3742
3743 if (iter->cur_sk < iter->end_sk)
3744 sk = iter->batch[iter->cur_sk];
3745 else
3746 sk = bpf_iter_unix_batch(seq, pos);
3747
3748 return sk;
3749 }
3750
3751 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3752 {
3753 struct bpf_iter_meta meta;
3754 struct bpf_prog *prog;
3755 struct sock *sk = v;
3756 uid_t uid;
3757 int ret;
3758
3759 if (v == SEQ_START_TOKEN)
3760 return 0;
3761
3762 lock_sock(sk);
3763 unix_state_lock(sk);
3764
3765 if (unlikely(sock_flag(sk, SOCK_DEAD))) {
3766 ret = SEQ_SKIP;
3767 goto unlock;
3768 }
3769
3770 uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
3771 meta.seq = seq;
3772 prog = bpf_iter_get_info(&meta, false);
3773 ret = unix_prog_seq_show(prog, &meta, v, uid);
3774 unlock:
3775 unix_state_unlock(sk);
3776 release_sock(sk);
3777 return ret;
3778 }
3779
3780 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3781 {
3782 struct bpf_unix_iter_state *iter = seq->private;
3783 struct bpf_iter_meta meta;
3784 struct bpf_prog *prog;
3785
3786 if (!v) {
3787 meta.seq = seq;
3788 prog = bpf_iter_get_info(&meta, true);
3789 if (prog)
3790 (void)unix_prog_seq_show(prog, &meta, v, 0);
3791 }
3792
3793 if (iter->cur_sk < iter->end_sk)
3794 bpf_iter_unix_put_batch(iter);
3795 }
3796
3797 static const struct seq_operations bpf_iter_unix_seq_ops = {
3798 .start = bpf_iter_unix_seq_start,
3799 .next = bpf_iter_unix_seq_next,
3800 .stop = bpf_iter_unix_seq_stop,
3801 .show = bpf_iter_unix_seq_show,
3802 };
3803 #endif
3804 #endif
3805
3806 static const struct net_proto_family unix_family_ops = {
3807 .family = PF_UNIX,
3808 .create = unix_create,
3809 .owner = THIS_MODULE,
3810 };
3811
3812
3813 static int __net_init unix_net_init(struct net *net)
3814 {
3815 int i;
3816
3817 net->unx.sysctl_max_dgram_qlen = 10;
3818 if (unix_sysctl_register(net))
3819 goto out;
3820
3821 #ifdef CONFIG_PROC_FS
3822 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3823 sizeof(struct seq_net_private)))
3824 goto err_sysctl;
3825 #endif
3826
3827 net->unx.table.locks = kvmalloc_objs(spinlock_t, UNIX_HASH_SIZE);
3828 if (!net->unx.table.locks)
3829 goto err_proc;
3830
3831 net->unx.table.buckets = kvmalloc_objs(struct hlist_head,
3832 UNIX_HASH_SIZE);
3833 if (!net->unx.table.buckets)
3834 goto free_locks;
3835
3836 for (i = 0; i < UNIX_HASH_SIZE; i++) {
3837 spin_lock_init(&net->unx.table.locks[i]);
3838 lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
3839 INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3840 }
3841
3842 return 0;
3843
3844 free_locks:
3845 kvfree(net->unx.table.locks);
3846 err_proc:
3847 #ifdef CONFIG_PROC_FS
3848 remove_proc_entry("unix", net->proc_net);
3849 err_sysctl:
3850 #endif
3851 unix_sysctl_unregister(net);
3852 out:
3853 return -ENOMEM;
3854 }
3855
3856 static void __net_exit unix_net_exit(struct net *net)
3857 {
3858 kvfree(net->unx.table.buckets);
3859 kvfree(net->unx.table.locks);
3860 unix_sysctl_unregister(net);
3861 remove_proc_entry("unix", net->proc_net);
3862 }
3863
3864 static struct pernet_operations unix_net_ops = {
3865 .init = unix_net_init,
3866 .exit = unix_net_exit,
3867 };
3868
3869 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3870 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3871 struct unix_sock *unix_sk, uid_t uid)
3872
3873 #define INIT_BATCH_SZ 16
3874
3875 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3876 {
3877 struct bpf_unix_iter_state *iter = priv_data;
3878 int err;
3879
3880 err = bpf_iter_init_seq_net(priv_data, aux);
3881 if (err)
3882 return err;
3883
3884 err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3885 if (err) {
3886 bpf_iter_fini_seq_net(priv_data);
3887 return err;
3888 }
3889
3890 return 0;
3891 }
3892
3893 static void bpf_iter_fini_unix(void *priv_data)
3894 {
3895 struct bpf_unix_iter_state *iter = priv_data;
3896
3897 bpf_iter_fini_seq_net(priv_data);
3898 kvfree(iter->batch);
3899 }
3900
3901 static const struct bpf_iter_seq_info unix_seq_info = {
3902 .seq_ops = &bpf_iter_unix_seq_ops,
3903 .init_seq_private = bpf_iter_init_unix,
3904 .fini_seq_private = bpf_iter_fini_unix,
3905 .seq_priv_size = sizeof(struct bpf_unix_iter_state),
3906 };
3907
3908 static const struct bpf_func_proto *
3909 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3910 const struct bpf_prog *prog)
3911 {
3912 switch (func_id) {
3913 case BPF_FUNC_setsockopt:
3914 return &bpf_sk_setsockopt_proto;
3915 case BPF_FUNC_getsockopt:
3916 return &bpf_sk_getsockopt_proto;
3917 default:
3918 return NULL;
3919 }
3920 }
3921
3922 static struct bpf_iter_reg unix_reg_info = {
3923 .target = "unix",
3924 .ctx_arg_info_size = 1,
3925 .ctx_arg_info = {
3926 { offsetof(struct bpf_iter__unix, unix_sk),
3927 PTR_TO_BTF_ID_OR_NULL },
3928 },
3929 .get_func_proto = bpf_iter_unix_get_func_proto,
3930 .seq_info = &unix_seq_info,
3931 };
3932
3933 static void __init bpf_iter_register(void)
3934 {
3935 unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3936 if (bpf_iter_reg_target(&unix_reg_info))
3937 pr_warn("Warning: could not register bpf iterator unix\n");
3938 }
3939 #endif
3940
3941 static int __init af_unix_init(void)
3942 {
3943 int i, rc = -1;
3944
3945 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3946
3947 for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3948 spin_lock_init(&bsd_socket_locks[i]);
3949 INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3950 }
3951
3952 rc = proto_register(&unix_dgram_proto, 1);
3953 if (rc != 0) {
3954 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3955 goto out;
3956 }
3957
3958 rc = proto_register(&unix_stream_proto, 1);
3959 if (rc != 0) {
3960 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3961 proto_unregister(&unix_dgram_proto);
3962 goto out;
3963 }
3964
3965 sock_register(&unix_family_ops);
3966 register_pernet_subsys(&unix_net_ops);
3967 unix_bpf_build_proto();
3968
3969 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3970 bpf_iter_register();
3971 #endif
3972
3973 out:
3974 return rc;
3975 }
3976
3977 /* Later than subsys_initcall() because we depend on stuff initialised there */
3978 fs_initcall(af_unix_init);
3979