xref: /linux/net/tipc/socket.c (revision 86919f9dd2dbb88bbbe4e381dc8e6b9e9a1b9eb5)
1 /*
2  * net/tipc/socket.c: TIPC socket API
3  *
4  * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
39 
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49 
50 #define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
51 #define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
52 #define TIPC_FWD_MSG		1
53 #define TIPC_MAX_PORT		0xffffffff
54 #define TIPC_MIN_PORT		1
55 #define TIPC_ACK_RATE		4       /* ACK at 1/4 of of rcv window size */
56 
57 enum {
58 	TIPC_LISTEN = TCP_LISTEN,
59 	TIPC_ESTABLISHED = TCP_ESTABLISHED,
60 	TIPC_OPEN = TCP_CLOSE,
61 	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
62 	TIPC_CONNECTING = TCP_SYN_SENT,
63 };
64 
65 struct sockaddr_pair {
66 	struct sockaddr_tipc sock;
67 	struct sockaddr_tipc member;
68 };
69 
70 /**
71  * struct tipc_sock - TIPC socket structure
72  * @sk: socket - interacts with 'port' and with user via the socket API
73  * @conn_type: TIPC type used when connection was established
74  * @conn_instance: TIPC instance used when connection was established
75  * @published: non-zero if port has one or more associated names
76  * @max_pkt: maximum packet size "hint" used when building messages sent by port
77  * @portid: unique port identity in TIPC socket hash table
78  * @phdr: preformatted message header used when sending messages
79  * #cong_links: list of congested links
80  * @publications: list of publications for port
81  * @blocking_link: address of the congested link we are currently sleeping on
82  * @pub_count: total # of publications port has made during its lifetime
83  * @conn_timeout: the time we can wait for an unresponded setup request
84  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
85  * @cong_link_cnt: number of congested links
86  * @snt_unacked: # messages sent by socket, and not yet acked by peer
87  * @rcv_unacked: # messages read by user, but not yet acked back to peer
88  * @peer: 'connected' peer for dgram/rdm
89  * @node: hash table node
90  * @mc_method: cookie for use between socket and broadcast layer
91  * @rcu: rcu struct for tipc_sock
92  */
93 struct tipc_sock {
94 	struct sock sk;
95 	u32 conn_type;
96 	u32 conn_instance;
97 	int published;
98 	u32 max_pkt;
99 	u32 portid;
100 	struct tipc_msg phdr;
101 	struct list_head cong_links;
102 	struct list_head publications;
103 	u32 pub_count;
104 	atomic_t dupl_rcvcnt;
105 	u16 conn_timeout;
106 	bool probe_unacked;
107 	u16 cong_link_cnt;
108 	u16 snt_unacked;
109 	u16 snd_win;
110 	u16 peer_caps;
111 	u16 rcv_unacked;
112 	u16 rcv_win;
113 	struct sockaddr_tipc peer;
114 	struct rhash_head node;
115 	struct tipc_mc_method mc_method;
116 	struct rcu_head rcu;
117 	struct tipc_group *group;
118 	bool group_is_open;
119 };
120 
121 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
122 static void tipc_data_ready(struct sock *sk);
123 static void tipc_write_space(struct sock *sk);
124 static void tipc_sock_destruct(struct sock *sk);
125 static int tipc_release(struct socket *sock);
126 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
127 		       bool kern);
128 static void tipc_sk_timeout(struct timer_list *t);
129 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
130 			   struct tipc_name_seq const *seq);
131 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
132 			    struct tipc_name_seq const *seq);
133 static int tipc_sk_leave(struct tipc_sock *tsk);
134 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
135 static int tipc_sk_insert(struct tipc_sock *tsk);
136 static void tipc_sk_remove(struct tipc_sock *tsk);
137 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
138 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
139 
140 static const struct proto_ops packet_ops;
141 static const struct proto_ops stream_ops;
142 static const struct proto_ops msg_ops;
143 static struct proto tipc_proto;
144 static const struct rhashtable_params tsk_rht_params;
145 
146 static u32 tsk_own_node(struct tipc_sock *tsk)
147 {
148 	return msg_prevnode(&tsk->phdr);
149 }
150 
151 static u32 tsk_peer_node(struct tipc_sock *tsk)
152 {
153 	return msg_destnode(&tsk->phdr);
154 }
155 
156 static u32 tsk_peer_port(struct tipc_sock *tsk)
157 {
158 	return msg_destport(&tsk->phdr);
159 }
160 
161 static  bool tsk_unreliable(struct tipc_sock *tsk)
162 {
163 	return msg_src_droppable(&tsk->phdr) != 0;
164 }
165 
166 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
167 {
168 	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
169 }
170 
171 static bool tsk_unreturnable(struct tipc_sock *tsk)
172 {
173 	return msg_dest_droppable(&tsk->phdr) != 0;
174 }
175 
176 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
177 {
178 	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
179 }
180 
181 static int tsk_importance(struct tipc_sock *tsk)
182 {
183 	return msg_importance(&tsk->phdr);
184 }
185 
186 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
187 {
188 	if (imp > TIPC_CRITICAL_IMPORTANCE)
189 		return -EINVAL;
190 	msg_set_importance(&tsk->phdr, (u32)imp);
191 	return 0;
192 }
193 
194 static struct tipc_sock *tipc_sk(const struct sock *sk)
195 {
196 	return container_of(sk, struct tipc_sock, sk);
197 }
198 
199 static bool tsk_conn_cong(struct tipc_sock *tsk)
200 {
201 	return tsk->snt_unacked > tsk->snd_win;
202 }
203 
204 static u16 tsk_blocks(int len)
205 {
206 	return ((len / FLOWCTL_BLK_SZ) + 1);
207 }
208 
209 /* tsk_blocks(): translate a buffer size in bytes to number of
210  * advertisable blocks, taking into account the ratio truesize(len)/len
211  * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
212  */
213 static u16 tsk_adv_blocks(int len)
214 {
215 	return len / FLOWCTL_BLK_SZ / 4;
216 }
217 
218 /* tsk_inc(): increment counter for sent or received data
219  * - If block based flow control is not supported by peer we
220  *   fall back to message based ditto, incrementing the counter
221  */
222 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
223 {
224 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
225 		return ((msglen / FLOWCTL_BLK_SZ) + 1);
226 	return 1;
227 }
228 
229 /**
230  * tsk_advance_rx_queue - discard first buffer in socket receive queue
231  *
232  * Caller must hold socket lock
233  */
234 static void tsk_advance_rx_queue(struct sock *sk)
235 {
236 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
237 }
238 
239 /* tipc_sk_respond() : send response message back to sender
240  */
241 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
242 {
243 	u32 selector;
244 	u32 dnode;
245 	u32 onode = tipc_own_addr(sock_net(sk));
246 
247 	if (!tipc_msg_reverse(onode, &skb, err))
248 		return;
249 
250 	dnode = msg_destnode(buf_msg(skb));
251 	selector = msg_origport(buf_msg(skb));
252 	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
253 }
254 
255 /**
256  * tsk_rej_rx_queue - reject all buffers in socket receive queue
257  *
258  * Caller must hold socket lock
259  */
260 static void tsk_rej_rx_queue(struct sock *sk)
261 {
262 	struct sk_buff *skb;
263 
264 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
265 		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
266 }
267 
268 static bool tipc_sk_connected(struct sock *sk)
269 {
270 	return sk->sk_state == TIPC_ESTABLISHED;
271 }
272 
273 /* tipc_sk_type_connectionless - check if the socket is datagram socket
274  * @sk: socket
275  *
276  * Returns true if connection less, false otherwise
277  */
278 static bool tipc_sk_type_connectionless(struct sock *sk)
279 {
280 	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
281 }
282 
283 /* tsk_peer_msg - verify if message was sent by connected port's peer
284  *
285  * Handles cases where the node's network address has changed from
286  * the default of <0.0.0> to its configured setting.
287  */
288 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
289 {
290 	struct sock *sk = &tsk->sk;
291 	u32 self = tipc_own_addr(sock_net(sk));
292 	u32 peer_port = tsk_peer_port(tsk);
293 	u32 orig_node, peer_node;
294 
295 	if (unlikely(!tipc_sk_connected(sk)))
296 		return false;
297 
298 	if (unlikely(msg_origport(msg) != peer_port))
299 		return false;
300 
301 	orig_node = msg_orignode(msg);
302 	peer_node = tsk_peer_node(tsk);
303 
304 	if (likely(orig_node == peer_node))
305 		return true;
306 
307 	if (!orig_node && peer_node == self)
308 		return true;
309 
310 	if (!peer_node && orig_node == self)
311 		return true;
312 
313 	return false;
314 }
315 
316 /* tipc_set_sk_state - set the sk_state of the socket
317  * @sk: socket
318  *
319  * Caller must hold socket lock
320  *
321  * Returns 0 on success, errno otherwise
322  */
323 static int tipc_set_sk_state(struct sock *sk, int state)
324 {
325 	int oldsk_state = sk->sk_state;
326 	int res = -EINVAL;
327 
328 	switch (state) {
329 	case TIPC_OPEN:
330 		res = 0;
331 		break;
332 	case TIPC_LISTEN:
333 	case TIPC_CONNECTING:
334 		if (oldsk_state == TIPC_OPEN)
335 			res = 0;
336 		break;
337 	case TIPC_ESTABLISHED:
338 		if (oldsk_state == TIPC_CONNECTING ||
339 		    oldsk_state == TIPC_OPEN)
340 			res = 0;
341 		break;
342 	case TIPC_DISCONNECTING:
343 		if (oldsk_state == TIPC_CONNECTING ||
344 		    oldsk_state == TIPC_ESTABLISHED)
345 			res = 0;
346 		break;
347 	}
348 
349 	if (!res)
350 		sk->sk_state = state;
351 
352 	return res;
353 }
354 
355 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
356 {
357 	struct sock *sk = sock->sk;
358 	int err = sock_error(sk);
359 	int typ = sock->type;
360 
361 	if (err)
362 		return err;
363 	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
364 		if (sk->sk_state == TIPC_DISCONNECTING)
365 			return -EPIPE;
366 		else if (!tipc_sk_connected(sk))
367 			return -ENOTCONN;
368 	}
369 	if (!*timeout)
370 		return -EAGAIN;
371 	if (signal_pending(current))
372 		return sock_intr_errno(*timeout);
373 
374 	return 0;
375 }
376 
377 #define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
378 ({                                                                             \
379 	struct sock *sk_;						       \
380 	int rc_;							       \
381 									       \
382 	while ((rc_ = !(condition_))) {					       \
383 		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
384 		sk_ = (sock_)->sk;					       \
385 		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
386 		if (rc_)						       \
387 			break;						       \
388 		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
389 		release_sock(sk_);					       \
390 		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
391 		sched_annotate_sleep();				               \
392 		lock_sock(sk_);						       \
393 		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
394 	}								       \
395 	rc_;								       \
396 })
397 
398 /**
399  * tipc_sk_create - create a TIPC socket
400  * @net: network namespace (must be default network)
401  * @sock: pre-allocated socket structure
402  * @protocol: protocol indicator (must be 0)
403  * @kern: caused by kernel or by userspace?
404  *
405  * This routine creates additional data structures used by the TIPC socket,
406  * initializes them, and links them together.
407  *
408  * Returns 0 on success, errno otherwise
409  */
410 static int tipc_sk_create(struct net *net, struct socket *sock,
411 			  int protocol, int kern)
412 {
413 	const struct proto_ops *ops;
414 	struct sock *sk;
415 	struct tipc_sock *tsk;
416 	struct tipc_msg *msg;
417 
418 	/* Validate arguments */
419 	if (unlikely(protocol != 0))
420 		return -EPROTONOSUPPORT;
421 
422 	switch (sock->type) {
423 	case SOCK_STREAM:
424 		ops = &stream_ops;
425 		break;
426 	case SOCK_SEQPACKET:
427 		ops = &packet_ops;
428 		break;
429 	case SOCK_DGRAM:
430 	case SOCK_RDM:
431 		ops = &msg_ops;
432 		break;
433 	default:
434 		return -EPROTOTYPE;
435 	}
436 
437 	/* Allocate socket's protocol area */
438 	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
439 	if (sk == NULL)
440 		return -ENOMEM;
441 
442 	tsk = tipc_sk(sk);
443 	tsk->max_pkt = MAX_PKT_DEFAULT;
444 	INIT_LIST_HEAD(&tsk->publications);
445 	INIT_LIST_HEAD(&tsk->cong_links);
446 	msg = &tsk->phdr;
447 
448 	/* Finish initializing socket data structures */
449 	sock->ops = ops;
450 	sock_init_data(sock, sk);
451 	tipc_set_sk_state(sk, TIPC_OPEN);
452 	if (tipc_sk_insert(tsk)) {
453 		pr_warn("Socket create failed; port number exhausted\n");
454 		return -EINVAL;
455 	}
456 
457 	/* Ensure tsk is visible before we read own_addr. */
458 	smp_mb();
459 
460 	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
461 		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
462 
463 	msg_set_origport(msg, tsk->portid);
464 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
465 	sk->sk_shutdown = 0;
466 	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
467 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
468 	sk->sk_data_ready = tipc_data_ready;
469 	sk->sk_write_space = tipc_write_space;
470 	sk->sk_destruct = tipc_sock_destruct;
471 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
472 	tsk->group_is_open = true;
473 	atomic_set(&tsk->dupl_rcvcnt, 0);
474 
475 	/* Start out with safe limits until we receive an advertised window */
476 	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
477 	tsk->rcv_win = tsk->snd_win;
478 
479 	if (tipc_sk_type_connectionless(sk)) {
480 		tsk_set_unreturnable(tsk, true);
481 		if (sock->type == SOCK_DGRAM)
482 			tsk_set_unreliable(tsk, true);
483 	}
484 
485 	return 0;
486 }
487 
488 static void tipc_sk_callback(struct rcu_head *head)
489 {
490 	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
491 
492 	sock_put(&tsk->sk);
493 }
494 
495 /* Caller should hold socket lock for the socket. */
496 static void __tipc_shutdown(struct socket *sock, int error)
497 {
498 	struct sock *sk = sock->sk;
499 	struct tipc_sock *tsk = tipc_sk(sk);
500 	struct net *net = sock_net(sk);
501 	long timeout = CONN_TIMEOUT_DEFAULT;
502 	u32 dnode = tsk_peer_node(tsk);
503 	struct sk_buff *skb;
504 
505 	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
506 	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
507 					    !tsk_conn_cong(tsk)));
508 
509 	/* Remove any pending SYN message */
510 	__skb_queue_purge(&sk->sk_write_queue);
511 
512 	/* Reject all unreceived messages, except on an active connection
513 	 * (which disconnects locally & sends a 'FIN+' to peer).
514 	 */
515 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
516 		if (TIPC_SKB_CB(skb)->bytes_read) {
517 			kfree_skb(skb);
518 			continue;
519 		}
520 		if (!tipc_sk_type_connectionless(sk) &&
521 		    sk->sk_state != TIPC_DISCONNECTING) {
522 			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
523 			tipc_node_remove_conn(net, dnode, tsk->portid);
524 		}
525 		tipc_sk_respond(sk, skb, error);
526 	}
527 
528 	if (tipc_sk_type_connectionless(sk))
529 		return;
530 
531 	if (sk->sk_state != TIPC_DISCONNECTING) {
532 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
533 				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
534 				      tsk_own_node(tsk), tsk_peer_port(tsk),
535 				      tsk->portid, error);
536 		if (skb)
537 			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
538 		tipc_node_remove_conn(net, dnode, tsk->portid);
539 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
540 	}
541 }
542 
543 /**
544  * tipc_release - destroy a TIPC socket
545  * @sock: socket to destroy
546  *
547  * This routine cleans up any messages that are still queued on the socket.
548  * For DGRAM and RDM socket types, all queued messages are rejected.
549  * For SEQPACKET and STREAM socket types, the first message is rejected
550  * and any others are discarded.  (If the first message on a STREAM socket
551  * is partially-read, it is discarded and the next one is rejected instead.)
552  *
553  * NOTE: Rejected messages are not necessarily returned to the sender!  They
554  * are returned or discarded according to the "destination droppable" setting
555  * specified for the message by the sender.
556  *
557  * Returns 0 on success, errno otherwise
558  */
559 static int tipc_release(struct socket *sock)
560 {
561 	struct sock *sk = sock->sk;
562 	struct tipc_sock *tsk;
563 
564 	/*
565 	 * Exit if socket isn't fully initialized (occurs when a failed accept()
566 	 * releases a pre-allocated child socket that was never used)
567 	 */
568 	if (sk == NULL)
569 		return 0;
570 
571 	tsk = tipc_sk(sk);
572 	lock_sock(sk);
573 
574 	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
575 	sk->sk_shutdown = SHUTDOWN_MASK;
576 	tipc_sk_leave(tsk);
577 	tipc_sk_withdraw(tsk, 0, NULL);
578 	sk_stop_timer(sk, &sk->sk_timer);
579 	tipc_sk_remove(tsk);
580 
581 	sock_orphan(sk);
582 	/* Reject any messages that accumulated in backlog queue */
583 	release_sock(sk);
584 	tipc_dest_list_purge(&tsk->cong_links);
585 	tsk->cong_link_cnt = 0;
586 	call_rcu(&tsk->rcu, tipc_sk_callback);
587 	sock->sk = NULL;
588 
589 	return 0;
590 }
591 
592 /**
593  * tipc_bind - associate or disassocate TIPC name(s) with a socket
594  * @sock: socket structure
595  * @uaddr: socket address describing name(s) and desired operation
596  * @uaddr_len: size of socket address data structure
597  *
598  * Name and name sequence binding is indicated using a positive scope value;
599  * a negative scope value unbinds the specified name.  Specifying no name
600  * (i.e. a socket address length of 0) unbinds all names from the socket.
601  *
602  * Returns 0 on success, errno otherwise
603  *
604  * NOTE: This routine doesn't need to take the socket lock since it doesn't
605  *       access any non-constant socket information.
606  */
607 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
608 		     int uaddr_len)
609 {
610 	struct sock *sk = sock->sk;
611 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
612 	struct tipc_sock *tsk = tipc_sk(sk);
613 	int res = -EINVAL;
614 
615 	lock_sock(sk);
616 	if (unlikely(!uaddr_len)) {
617 		res = tipc_sk_withdraw(tsk, 0, NULL);
618 		goto exit;
619 	}
620 	if (tsk->group) {
621 		res = -EACCES;
622 		goto exit;
623 	}
624 	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
625 		res = -EINVAL;
626 		goto exit;
627 	}
628 	if (addr->family != AF_TIPC) {
629 		res = -EAFNOSUPPORT;
630 		goto exit;
631 	}
632 
633 	if (addr->addrtype == TIPC_ADDR_NAME)
634 		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
635 	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
636 		res = -EAFNOSUPPORT;
637 		goto exit;
638 	}
639 
640 	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
641 	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
642 	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
643 		res = -EACCES;
644 		goto exit;
645 	}
646 
647 	res = (addr->scope >= 0) ?
648 		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
649 		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
650 exit:
651 	release_sock(sk);
652 	return res;
653 }
654 
655 /**
656  * tipc_getname - get port ID of socket or peer socket
657  * @sock: socket structure
658  * @uaddr: area for returned socket address
659  * @uaddr_len: area for returned length of socket address
660  * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
661  *
662  * Returns 0 on success, errno otherwise
663  *
664  * NOTE: This routine doesn't need to take the socket lock since it only
665  *       accesses socket information that is unchanging (or which changes in
666  *       a completely predictable manner).
667  */
668 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
669 			int peer)
670 {
671 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
672 	struct sock *sk = sock->sk;
673 	struct tipc_sock *tsk = tipc_sk(sk);
674 
675 	memset(addr, 0, sizeof(*addr));
676 	if (peer) {
677 		if ((!tipc_sk_connected(sk)) &&
678 		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
679 			return -ENOTCONN;
680 		addr->addr.id.ref = tsk_peer_port(tsk);
681 		addr->addr.id.node = tsk_peer_node(tsk);
682 	} else {
683 		addr->addr.id.ref = tsk->portid;
684 		addr->addr.id.node = tipc_own_addr(sock_net(sk));
685 	}
686 
687 	addr->addrtype = TIPC_ADDR_ID;
688 	addr->family = AF_TIPC;
689 	addr->scope = 0;
690 	addr->addr.name.domain = 0;
691 
692 	return sizeof(*addr);
693 }
694 
695 /**
696  * tipc_poll - read and possibly block on pollmask
697  * @file: file structure associated with the socket
698  * @sock: socket for which to calculate the poll bits
699  * @wait: ???
700  *
701  * Returns pollmask value
702  *
703  * COMMENTARY:
704  * It appears that the usual socket locking mechanisms are not useful here
705  * since the pollmask info is potentially out-of-date the moment this routine
706  * exits.  TCP and other protocols seem to rely on higher level poll routines
707  * to handle any preventable race conditions, so TIPC will do the same ...
708  *
709  * IMPORTANT: The fact that a read or write operation is indicated does NOT
710  * imply that the operation will succeed, merely that it should be performed
711  * and will not block.
712  */
713 static __poll_t tipc_poll(struct file *file, struct socket *sock,
714 			      poll_table *wait)
715 {
716 	struct sock *sk = sock->sk;
717 	struct tipc_sock *tsk = tipc_sk(sk);
718 	__poll_t revents = 0;
719 
720 	sock_poll_wait(file, sock, wait);
721 
722 	if (sk->sk_shutdown & RCV_SHUTDOWN)
723 		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
724 	if (sk->sk_shutdown == SHUTDOWN_MASK)
725 		revents |= EPOLLHUP;
726 
727 	switch (sk->sk_state) {
728 	case TIPC_ESTABLISHED:
729 	case TIPC_CONNECTING:
730 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
731 			revents |= EPOLLOUT;
732 		/* fall thru' */
733 	case TIPC_LISTEN:
734 		if (!skb_queue_empty(&sk->sk_receive_queue))
735 			revents |= EPOLLIN | EPOLLRDNORM;
736 		break;
737 	case TIPC_OPEN:
738 		if (tsk->group_is_open && !tsk->cong_link_cnt)
739 			revents |= EPOLLOUT;
740 		if (!tipc_sk_type_connectionless(sk))
741 			break;
742 		if (skb_queue_empty(&sk->sk_receive_queue))
743 			break;
744 		revents |= EPOLLIN | EPOLLRDNORM;
745 		break;
746 	case TIPC_DISCONNECTING:
747 		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
748 		break;
749 	}
750 	return revents;
751 }
752 
753 /**
754  * tipc_sendmcast - send multicast message
755  * @sock: socket structure
756  * @seq: destination address
757  * @msg: message to send
758  * @dlen: length of data to send
759  * @timeout: timeout to wait for wakeup
760  *
761  * Called from function tipc_sendmsg(), which has done all sanity checks
762  * Returns the number of bytes sent on success, or errno
763  */
764 static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
765 			  struct msghdr *msg, size_t dlen, long timeout)
766 {
767 	struct sock *sk = sock->sk;
768 	struct tipc_sock *tsk = tipc_sk(sk);
769 	struct tipc_msg *hdr = &tsk->phdr;
770 	struct net *net = sock_net(sk);
771 	int mtu = tipc_bcast_get_mtu(net);
772 	struct tipc_mc_method *method = &tsk->mc_method;
773 	struct sk_buff_head pkts;
774 	struct tipc_nlist dsts;
775 	int rc;
776 
777 	if (tsk->group)
778 		return -EACCES;
779 
780 	/* Block or return if any destination link is congested */
781 	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
782 	if (unlikely(rc))
783 		return rc;
784 
785 	/* Lookup destination nodes */
786 	tipc_nlist_init(&dsts, tipc_own_addr(net));
787 	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
788 				      seq->upper, &dsts);
789 	if (!dsts.local && !dsts.remote)
790 		return -EHOSTUNREACH;
791 
792 	/* Build message header */
793 	msg_set_type(hdr, TIPC_MCAST_MSG);
794 	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
795 	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
796 	msg_set_destport(hdr, 0);
797 	msg_set_destnode(hdr, 0);
798 	msg_set_nametype(hdr, seq->type);
799 	msg_set_namelower(hdr, seq->lower);
800 	msg_set_nameupper(hdr, seq->upper);
801 
802 	/* Build message as chain of buffers */
803 	skb_queue_head_init(&pkts);
804 	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
805 
806 	/* Send message if build was successful */
807 	if (unlikely(rc == dlen))
808 		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
809 				     &tsk->cong_link_cnt);
810 
811 	tipc_nlist_purge(&dsts);
812 
813 	return rc ? rc : dlen;
814 }
815 
816 /**
817  * tipc_send_group_msg - send a message to a member in the group
818  * @net: network namespace
819  * @m: message to send
820  * @mb: group member
821  * @dnode: destination node
822  * @dport: destination port
823  * @dlen: total length of message data
824  */
825 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
826 			       struct msghdr *m, struct tipc_member *mb,
827 			       u32 dnode, u32 dport, int dlen)
828 {
829 	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
830 	struct tipc_mc_method *method = &tsk->mc_method;
831 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
832 	struct tipc_msg *hdr = &tsk->phdr;
833 	struct sk_buff_head pkts;
834 	int mtu, rc;
835 
836 	/* Complete message header */
837 	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
838 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
839 	msg_set_destport(hdr, dport);
840 	msg_set_destnode(hdr, dnode);
841 	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
842 
843 	/* Build message as chain of buffers */
844 	skb_queue_head_init(&pkts);
845 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
846 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
847 	if (unlikely(rc != dlen))
848 		return rc;
849 
850 	/* Send message */
851 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
852 	if (unlikely(rc == -ELINKCONG)) {
853 		tipc_dest_push(&tsk->cong_links, dnode, 0);
854 		tsk->cong_link_cnt++;
855 	}
856 
857 	/* Update send window */
858 	tipc_group_update_member(mb, blks);
859 
860 	/* A broadcast sent within next EXPIRE period must follow same path */
861 	method->rcast = true;
862 	method->mandatory = true;
863 	return dlen;
864 }
865 
866 /**
867  * tipc_send_group_unicast - send message to a member in the group
868  * @sock: socket structure
869  * @m: message to send
870  * @dlen: total length of message data
871  * @timeout: timeout to wait for wakeup
872  *
873  * Called from function tipc_sendmsg(), which has done all sanity checks
874  * Returns the number of bytes sent on success, or errno
875  */
876 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
877 				   int dlen, long timeout)
878 {
879 	struct sock *sk = sock->sk;
880 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
881 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
882 	struct tipc_sock *tsk = tipc_sk(sk);
883 	struct net *net = sock_net(sk);
884 	struct tipc_member *mb = NULL;
885 	u32 node, port;
886 	int rc;
887 
888 	node = dest->addr.id.node;
889 	port = dest->addr.id.ref;
890 	if (!port && !node)
891 		return -EHOSTUNREACH;
892 
893 	/* Block or return if destination link or member is congested */
894 	rc = tipc_wait_for_cond(sock, &timeout,
895 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
896 				tsk->group &&
897 				!tipc_group_cong(tsk->group, node, port, blks,
898 						 &mb));
899 	if (unlikely(rc))
900 		return rc;
901 
902 	if (unlikely(!mb))
903 		return -EHOSTUNREACH;
904 
905 	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
906 
907 	return rc ? rc : dlen;
908 }
909 
910 /**
911  * tipc_send_group_anycast - send message to any member with given identity
912  * @sock: socket structure
913  * @m: message to send
914  * @dlen: total length of message data
915  * @timeout: timeout to wait for wakeup
916  *
917  * Called from function tipc_sendmsg(), which has done all sanity checks
918  * Returns the number of bytes sent on success, or errno
919  */
920 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
921 				   int dlen, long timeout)
922 {
923 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
924 	struct sock *sk = sock->sk;
925 	struct tipc_sock *tsk = tipc_sk(sk);
926 	struct list_head *cong_links = &tsk->cong_links;
927 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
928 	struct tipc_msg *hdr = &tsk->phdr;
929 	struct tipc_member *first = NULL;
930 	struct tipc_member *mbr = NULL;
931 	struct net *net = sock_net(sk);
932 	u32 node, port, exclude;
933 	struct list_head dsts;
934 	u32 type, inst, scope;
935 	int lookups = 0;
936 	int dstcnt, rc;
937 	bool cong;
938 
939 	INIT_LIST_HEAD(&dsts);
940 
941 	type = msg_nametype(hdr);
942 	inst = dest->addr.name.name.instance;
943 	scope = msg_lookup_scope(hdr);
944 
945 	while (++lookups < 4) {
946 		exclude = tipc_group_exclude(tsk->group);
947 
948 		first = NULL;
949 
950 		/* Look for a non-congested destination member, if any */
951 		while (1) {
952 			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
953 						 &dstcnt, exclude, false))
954 				return -EHOSTUNREACH;
955 			tipc_dest_pop(&dsts, &node, &port);
956 			cong = tipc_group_cong(tsk->group, node, port, blks,
957 					       &mbr);
958 			if (!cong)
959 				break;
960 			if (mbr == first)
961 				break;
962 			if (!first)
963 				first = mbr;
964 		}
965 
966 		/* Start over if destination was not in member list */
967 		if (unlikely(!mbr))
968 			continue;
969 
970 		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
971 			break;
972 
973 		/* Block or return if destination link or member is congested */
974 		rc = tipc_wait_for_cond(sock, &timeout,
975 					!tipc_dest_find(cong_links, node, 0) &&
976 					tsk->group &&
977 					!tipc_group_cong(tsk->group, node, port,
978 							 blks, &mbr));
979 		if (unlikely(rc))
980 			return rc;
981 
982 		/* Send, unless destination disappeared while waiting */
983 		if (likely(mbr))
984 			break;
985 	}
986 
987 	if (unlikely(lookups >= 4))
988 		return -EHOSTUNREACH;
989 
990 	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
991 
992 	return rc ? rc : dlen;
993 }
994 
995 /**
996  * tipc_send_group_bcast - send message to all members in communication group
997  * @sk: socket structure
998  * @m: message to send
999  * @dlen: total length of message data
1000  * @timeout: timeout to wait for wakeup
1001  *
1002  * Called from function tipc_sendmsg(), which has done all sanity checks
1003  * Returns the number of bytes sent on success, or errno
1004  */
1005 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1006 				 int dlen, long timeout)
1007 {
1008 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1009 	struct sock *sk = sock->sk;
1010 	struct net *net = sock_net(sk);
1011 	struct tipc_sock *tsk = tipc_sk(sk);
1012 	struct tipc_nlist *dsts;
1013 	struct tipc_mc_method *method = &tsk->mc_method;
1014 	bool ack = method->mandatory && method->rcast;
1015 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1016 	struct tipc_msg *hdr = &tsk->phdr;
1017 	int mtu = tipc_bcast_get_mtu(net);
1018 	struct sk_buff_head pkts;
1019 	int rc = -EHOSTUNREACH;
1020 
1021 	/* Block or return if any destination link or member is congested */
1022 	rc = tipc_wait_for_cond(sock, &timeout,
1023 				!tsk->cong_link_cnt && tsk->group &&
1024 				!tipc_group_bc_cong(tsk->group, blks));
1025 	if (unlikely(rc))
1026 		return rc;
1027 
1028 	dsts = tipc_group_dests(tsk->group);
1029 	if (!dsts->local && !dsts->remote)
1030 		return -EHOSTUNREACH;
1031 
1032 	/* Complete message header */
1033 	if (dest) {
1034 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1035 		msg_set_nameinst(hdr, dest->addr.name.name.instance);
1036 	} else {
1037 		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1038 		msg_set_nameinst(hdr, 0);
1039 	}
1040 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1041 	msg_set_destport(hdr, 0);
1042 	msg_set_destnode(hdr, 0);
1043 	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1044 
1045 	/* Avoid getting stuck with repeated forced replicasts */
1046 	msg_set_grp_bc_ack_req(hdr, ack);
1047 
1048 	/* Build message as chain of buffers */
1049 	skb_queue_head_init(&pkts);
1050 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1051 	if (unlikely(rc != dlen))
1052 		return rc;
1053 
1054 	/* Send message */
1055 	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1056 	if (unlikely(rc))
1057 		return rc;
1058 
1059 	/* Update broadcast sequence number and send windows */
1060 	tipc_group_update_bc_members(tsk->group, blks, ack);
1061 
1062 	/* Broadcast link is now free to choose method for next broadcast */
1063 	method->mandatory = false;
1064 	method->expires = jiffies;
1065 
1066 	return dlen;
1067 }
1068 
1069 /**
1070  * tipc_send_group_mcast - send message to all members with given identity
1071  * @sock: socket structure
1072  * @m: message to send
1073  * @dlen: total length of message data
1074  * @timeout: timeout to wait for wakeup
1075  *
1076  * Called from function tipc_sendmsg(), which has done all sanity checks
1077  * Returns the number of bytes sent on success, or errno
1078  */
1079 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1080 				 int dlen, long timeout)
1081 {
1082 	struct sock *sk = sock->sk;
1083 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1084 	struct tipc_sock *tsk = tipc_sk(sk);
1085 	struct tipc_group *grp = tsk->group;
1086 	struct tipc_msg *hdr = &tsk->phdr;
1087 	struct net *net = sock_net(sk);
1088 	u32 type, inst, scope, exclude;
1089 	struct list_head dsts;
1090 	u32 dstcnt;
1091 
1092 	INIT_LIST_HEAD(&dsts);
1093 
1094 	type = msg_nametype(hdr);
1095 	inst = dest->addr.name.name.instance;
1096 	scope = msg_lookup_scope(hdr);
1097 	exclude = tipc_group_exclude(grp);
1098 
1099 	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1100 				 &dstcnt, exclude, true))
1101 		return -EHOSTUNREACH;
1102 
1103 	if (dstcnt == 1) {
1104 		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1105 		return tipc_send_group_unicast(sock, m, dlen, timeout);
1106 	}
1107 
1108 	tipc_dest_list_purge(&dsts);
1109 	return tipc_send_group_bcast(sock, m, dlen, timeout);
1110 }
1111 
1112 /**
1113  * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1114  * @arrvq: queue with arriving messages, to be cloned after destination lookup
1115  * @inputq: queue with cloned messages, delivered to socket after dest lookup
1116  *
1117  * Multi-threaded: parallel calls with reference to same queues may occur
1118  */
1119 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1120 		       struct sk_buff_head *inputq)
1121 {
1122 	u32 self = tipc_own_addr(net);
1123 	u32 type, lower, upper, scope;
1124 	struct sk_buff *skb, *_skb;
1125 	u32 portid, onode;
1126 	struct sk_buff_head tmpq;
1127 	struct list_head dports;
1128 	struct tipc_msg *hdr;
1129 	int user, mtyp, hlen;
1130 	bool exact;
1131 
1132 	__skb_queue_head_init(&tmpq);
1133 	INIT_LIST_HEAD(&dports);
1134 
1135 	skb = tipc_skb_peek(arrvq, &inputq->lock);
1136 	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1137 		hdr = buf_msg(skb);
1138 		user = msg_user(hdr);
1139 		mtyp = msg_type(hdr);
1140 		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1141 		onode = msg_orignode(hdr);
1142 		type = msg_nametype(hdr);
1143 
1144 		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1145 			spin_lock_bh(&inputq->lock);
1146 			if (skb_peek(arrvq) == skb) {
1147 				__skb_dequeue(arrvq);
1148 				__skb_queue_tail(inputq, skb);
1149 			}
1150 			kfree_skb(skb);
1151 			spin_unlock_bh(&inputq->lock);
1152 			continue;
1153 		}
1154 
1155 		/* Group messages require exact scope match */
1156 		if (msg_in_group(hdr)) {
1157 			lower = 0;
1158 			upper = ~0;
1159 			scope = msg_lookup_scope(hdr);
1160 			exact = true;
1161 		} else {
1162 			/* TIPC_NODE_SCOPE means "any scope" in this context */
1163 			if (onode == self)
1164 				scope = TIPC_NODE_SCOPE;
1165 			else
1166 				scope = TIPC_CLUSTER_SCOPE;
1167 			exact = false;
1168 			lower = msg_namelower(hdr);
1169 			upper = msg_nameupper(hdr);
1170 		}
1171 
1172 		/* Create destination port list: */
1173 		tipc_nametbl_mc_lookup(net, type, lower, upper,
1174 				       scope, exact, &dports);
1175 
1176 		/* Clone message per destination */
1177 		while (tipc_dest_pop(&dports, NULL, &portid)) {
1178 			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1179 			if (_skb) {
1180 				msg_set_destport(buf_msg(_skb), portid);
1181 				__skb_queue_tail(&tmpq, _skb);
1182 				continue;
1183 			}
1184 			pr_warn("Failed to clone mcast rcv buffer\n");
1185 		}
1186 		/* Append to inputq if not already done by other thread */
1187 		spin_lock_bh(&inputq->lock);
1188 		if (skb_peek(arrvq) == skb) {
1189 			skb_queue_splice_tail_init(&tmpq, inputq);
1190 			kfree_skb(__skb_dequeue(arrvq));
1191 		}
1192 		spin_unlock_bh(&inputq->lock);
1193 		__skb_queue_purge(&tmpq);
1194 		kfree_skb(skb);
1195 	}
1196 	tipc_sk_rcv(net, inputq);
1197 }
1198 
1199 /**
1200  * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1201  * @tsk: receiving socket
1202  * @skb: pointer to message buffer.
1203  */
1204 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1205 				   struct sk_buff_head *inputq,
1206 				   struct sk_buff_head *xmitq)
1207 {
1208 	struct tipc_msg *hdr = buf_msg(skb);
1209 	u32 onode = tsk_own_node(tsk);
1210 	struct sock *sk = &tsk->sk;
1211 	int mtyp = msg_type(hdr);
1212 	bool conn_cong;
1213 
1214 	/* Ignore if connection cannot be validated: */
1215 	if (!tsk_peer_msg(tsk, hdr))
1216 		goto exit;
1217 
1218 	if (unlikely(msg_errcode(hdr))) {
1219 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1220 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1221 				      tsk_peer_port(tsk));
1222 		sk->sk_state_change(sk);
1223 
1224 		/* State change is ignored if socket already awake,
1225 		 * - convert msg to abort msg and add to inqueue
1226 		 */
1227 		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1228 		msg_set_type(hdr, TIPC_CONN_MSG);
1229 		msg_set_size(hdr, BASIC_H_SIZE);
1230 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1231 		__skb_queue_tail(inputq, skb);
1232 		return;
1233 	}
1234 
1235 	tsk->probe_unacked = false;
1236 
1237 	if (mtyp == CONN_PROBE) {
1238 		msg_set_type(hdr, CONN_PROBE_REPLY);
1239 		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1240 			__skb_queue_tail(xmitq, skb);
1241 		return;
1242 	} else if (mtyp == CONN_ACK) {
1243 		conn_cong = tsk_conn_cong(tsk);
1244 		tsk->snt_unacked -= msg_conn_ack(hdr);
1245 		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1246 			tsk->snd_win = msg_adv_win(hdr);
1247 		if (conn_cong)
1248 			sk->sk_write_space(sk);
1249 	} else if (mtyp != CONN_PROBE_REPLY) {
1250 		pr_warn("Received unknown CONN_PROTO msg\n");
1251 	}
1252 exit:
1253 	kfree_skb(skb);
1254 }
1255 
1256 /**
1257  * tipc_sendmsg - send message in connectionless manner
1258  * @sock: socket structure
1259  * @m: message to send
1260  * @dsz: amount of user data to be sent
1261  *
1262  * Message must have an destination specified explicitly.
1263  * Used for SOCK_RDM and SOCK_DGRAM messages,
1264  * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1265  * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1266  *
1267  * Returns the number of bytes sent on success, or errno otherwise
1268  */
1269 static int tipc_sendmsg(struct socket *sock,
1270 			struct msghdr *m, size_t dsz)
1271 {
1272 	struct sock *sk = sock->sk;
1273 	int ret;
1274 
1275 	lock_sock(sk);
1276 	ret = __tipc_sendmsg(sock, m, dsz);
1277 	release_sock(sk);
1278 
1279 	return ret;
1280 }
1281 
1282 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1283 {
1284 	struct sock *sk = sock->sk;
1285 	struct net *net = sock_net(sk);
1286 	struct tipc_sock *tsk = tipc_sk(sk);
1287 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1288 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1289 	struct list_head *clinks = &tsk->cong_links;
1290 	bool syn = !tipc_sk_type_connectionless(sk);
1291 	struct tipc_group *grp = tsk->group;
1292 	struct tipc_msg *hdr = &tsk->phdr;
1293 	struct tipc_name_seq *seq;
1294 	struct sk_buff_head pkts;
1295 	u32 dport, dnode = 0;
1296 	u32 type, inst;
1297 	int mtu, rc;
1298 
1299 	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1300 		return -EMSGSIZE;
1301 
1302 	if (likely(dest)) {
1303 		if (unlikely(m->msg_namelen < sizeof(*dest)))
1304 			return -EINVAL;
1305 		if (unlikely(dest->family != AF_TIPC))
1306 			return -EINVAL;
1307 	}
1308 
1309 	if (grp) {
1310 		if (!dest)
1311 			return tipc_send_group_bcast(sock, m, dlen, timeout);
1312 		if (dest->addrtype == TIPC_ADDR_NAME)
1313 			return tipc_send_group_anycast(sock, m, dlen, timeout);
1314 		if (dest->addrtype == TIPC_ADDR_ID)
1315 			return tipc_send_group_unicast(sock, m, dlen, timeout);
1316 		if (dest->addrtype == TIPC_ADDR_MCAST)
1317 			return tipc_send_group_mcast(sock, m, dlen, timeout);
1318 		return -EINVAL;
1319 	}
1320 
1321 	if (unlikely(!dest)) {
1322 		dest = &tsk->peer;
1323 		if (!syn || dest->family != AF_TIPC)
1324 			return -EDESTADDRREQ;
1325 	}
1326 
1327 	if (unlikely(syn)) {
1328 		if (sk->sk_state == TIPC_LISTEN)
1329 			return -EPIPE;
1330 		if (sk->sk_state != TIPC_OPEN)
1331 			return -EISCONN;
1332 		if (tsk->published)
1333 			return -EOPNOTSUPP;
1334 		if (dest->addrtype == TIPC_ADDR_NAME) {
1335 			tsk->conn_type = dest->addr.name.name.type;
1336 			tsk->conn_instance = dest->addr.name.name.instance;
1337 		}
1338 		msg_set_syn(hdr, 1);
1339 	}
1340 
1341 	seq = &dest->addr.nameseq;
1342 	if (dest->addrtype == TIPC_ADDR_MCAST)
1343 		return tipc_sendmcast(sock, seq, m, dlen, timeout);
1344 
1345 	if (dest->addrtype == TIPC_ADDR_NAME) {
1346 		type = dest->addr.name.name.type;
1347 		inst = dest->addr.name.name.instance;
1348 		dnode = dest->addr.name.domain;
1349 		msg_set_type(hdr, TIPC_NAMED_MSG);
1350 		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1351 		msg_set_nametype(hdr, type);
1352 		msg_set_nameinst(hdr, inst);
1353 		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1354 		dport = tipc_nametbl_translate(net, type, inst, &dnode);
1355 		msg_set_destnode(hdr, dnode);
1356 		msg_set_destport(hdr, dport);
1357 		if (unlikely(!dport && !dnode))
1358 			return -EHOSTUNREACH;
1359 	} else if (dest->addrtype == TIPC_ADDR_ID) {
1360 		dnode = dest->addr.id.node;
1361 		msg_set_type(hdr, TIPC_DIRECT_MSG);
1362 		msg_set_lookup_scope(hdr, 0);
1363 		msg_set_destnode(hdr, dnode);
1364 		msg_set_destport(hdr, dest->addr.id.ref);
1365 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1366 	} else {
1367 		return -EINVAL;
1368 	}
1369 
1370 	/* Block or return if destination link is congested */
1371 	rc = tipc_wait_for_cond(sock, &timeout,
1372 				!tipc_dest_find(clinks, dnode, 0));
1373 	if (unlikely(rc))
1374 		return rc;
1375 
1376 	skb_queue_head_init(&pkts);
1377 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1378 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1379 	if (unlikely(rc != dlen))
1380 		return rc;
1381 	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
1382 		return -ENOMEM;
1383 
1384 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1385 	if (unlikely(rc == -ELINKCONG)) {
1386 		tipc_dest_push(clinks, dnode, 0);
1387 		tsk->cong_link_cnt++;
1388 		rc = 0;
1389 	}
1390 
1391 	if (unlikely(syn && !rc))
1392 		tipc_set_sk_state(sk, TIPC_CONNECTING);
1393 
1394 	return rc ? rc : dlen;
1395 }
1396 
1397 /**
1398  * tipc_sendstream - send stream-oriented data
1399  * @sock: socket structure
1400  * @m: data to send
1401  * @dsz: total length of data to be transmitted
1402  *
1403  * Used for SOCK_STREAM data.
1404  *
1405  * Returns the number of bytes sent on success (or partial success),
1406  * or errno if no data sent
1407  */
1408 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1409 {
1410 	struct sock *sk = sock->sk;
1411 	int ret;
1412 
1413 	lock_sock(sk);
1414 	ret = __tipc_sendstream(sock, m, dsz);
1415 	release_sock(sk);
1416 
1417 	return ret;
1418 }
1419 
1420 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1421 {
1422 	struct sock *sk = sock->sk;
1423 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1424 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1425 	struct tipc_sock *tsk = tipc_sk(sk);
1426 	struct tipc_msg *hdr = &tsk->phdr;
1427 	struct net *net = sock_net(sk);
1428 	struct sk_buff_head pkts;
1429 	u32 dnode = tsk_peer_node(tsk);
1430 	int send, sent = 0;
1431 	int rc = 0;
1432 
1433 	skb_queue_head_init(&pkts);
1434 
1435 	if (unlikely(dlen > INT_MAX))
1436 		return -EMSGSIZE;
1437 
1438 	/* Handle implicit connection setup */
1439 	if (unlikely(dest)) {
1440 		rc = __tipc_sendmsg(sock, m, dlen);
1441 		if (dlen && dlen == rc) {
1442 			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1443 			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1444 		}
1445 		return rc;
1446 	}
1447 
1448 	do {
1449 		rc = tipc_wait_for_cond(sock, &timeout,
1450 					(!tsk->cong_link_cnt &&
1451 					 !tsk_conn_cong(tsk) &&
1452 					 tipc_sk_connected(sk)));
1453 		if (unlikely(rc))
1454 			break;
1455 
1456 		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1457 		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1458 		if (unlikely(rc != send))
1459 			break;
1460 
1461 		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1462 		if (unlikely(rc == -ELINKCONG)) {
1463 			tsk->cong_link_cnt = 1;
1464 			rc = 0;
1465 		}
1466 		if (likely(!rc)) {
1467 			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1468 			sent += send;
1469 		}
1470 	} while (sent < dlen && !rc);
1471 
1472 	return sent ? sent : rc;
1473 }
1474 
1475 /**
1476  * tipc_send_packet - send a connection-oriented message
1477  * @sock: socket structure
1478  * @m: message to send
1479  * @dsz: length of data to be transmitted
1480  *
1481  * Used for SOCK_SEQPACKET messages.
1482  *
1483  * Returns the number of bytes sent on success, or errno otherwise
1484  */
1485 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1486 {
1487 	if (dsz > TIPC_MAX_USER_MSG_SIZE)
1488 		return -EMSGSIZE;
1489 
1490 	return tipc_sendstream(sock, m, dsz);
1491 }
1492 
1493 /* tipc_sk_finish_conn - complete the setup of a connection
1494  */
1495 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1496 				u32 peer_node)
1497 {
1498 	struct sock *sk = &tsk->sk;
1499 	struct net *net = sock_net(sk);
1500 	struct tipc_msg *msg = &tsk->phdr;
1501 
1502 	msg_set_syn(msg, 0);
1503 	msg_set_destnode(msg, peer_node);
1504 	msg_set_destport(msg, peer_port);
1505 	msg_set_type(msg, TIPC_CONN_MSG);
1506 	msg_set_lookup_scope(msg, 0);
1507 	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1508 
1509 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1510 	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1511 	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1512 	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1513 	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1514 	__skb_queue_purge(&sk->sk_write_queue);
1515 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1516 		return;
1517 
1518 	/* Fall back to message based flow control */
1519 	tsk->rcv_win = FLOWCTL_MSG_WIN;
1520 	tsk->snd_win = FLOWCTL_MSG_WIN;
1521 }
1522 
1523 /**
1524  * tipc_sk_set_orig_addr - capture sender's address for received message
1525  * @m: descriptor for message info
1526  * @hdr: received message header
1527  *
1528  * Note: Address is not captured if not requested by receiver.
1529  */
1530 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1531 {
1532 	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1533 	struct tipc_msg *hdr = buf_msg(skb);
1534 
1535 	if (!srcaddr)
1536 		return;
1537 
1538 	srcaddr->sock.family = AF_TIPC;
1539 	srcaddr->sock.addrtype = TIPC_ADDR_ID;
1540 	srcaddr->sock.scope = 0;
1541 	srcaddr->sock.addr.id.ref = msg_origport(hdr);
1542 	srcaddr->sock.addr.id.node = msg_orignode(hdr);
1543 	srcaddr->sock.addr.name.domain = 0;
1544 	m->msg_namelen = sizeof(struct sockaddr_tipc);
1545 
1546 	if (!msg_in_group(hdr))
1547 		return;
1548 
1549 	/* Group message users may also want to know sending member's id */
1550 	srcaddr->member.family = AF_TIPC;
1551 	srcaddr->member.addrtype = TIPC_ADDR_NAME;
1552 	srcaddr->member.scope = 0;
1553 	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1554 	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1555 	srcaddr->member.addr.name.domain = 0;
1556 	m->msg_namelen = sizeof(*srcaddr);
1557 }
1558 
1559 /**
1560  * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1561  * @m: descriptor for message info
1562  * @skb: received message buffer
1563  * @tsk: TIPC port associated with message
1564  *
1565  * Note: Ancillary data is not captured if not requested by receiver.
1566  *
1567  * Returns 0 if successful, otherwise errno
1568  */
1569 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1570 				 struct tipc_sock *tsk)
1571 {
1572 	struct tipc_msg *msg;
1573 	u32 anc_data[3];
1574 	u32 err;
1575 	u32 dest_type;
1576 	int has_name;
1577 	int res;
1578 
1579 	if (likely(m->msg_controllen == 0))
1580 		return 0;
1581 	msg = buf_msg(skb);
1582 
1583 	/* Optionally capture errored message object(s) */
1584 	err = msg ? msg_errcode(msg) : 0;
1585 	if (unlikely(err)) {
1586 		anc_data[0] = err;
1587 		anc_data[1] = msg_data_sz(msg);
1588 		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1589 		if (res)
1590 			return res;
1591 		if (anc_data[1]) {
1592 			if (skb_linearize(skb))
1593 				return -ENOMEM;
1594 			msg = buf_msg(skb);
1595 			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1596 				       msg_data(msg));
1597 			if (res)
1598 				return res;
1599 		}
1600 	}
1601 
1602 	/* Optionally capture message destination object */
1603 	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1604 	switch (dest_type) {
1605 	case TIPC_NAMED_MSG:
1606 		has_name = 1;
1607 		anc_data[0] = msg_nametype(msg);
1608 		anc_data[1] = msg_namelower(msg);
1609 		anc_data[2] = msg_namelower(msg);
1610 		break;
1611 	case TIPC_MCAST_MSG:
1612 		has_name = 1;
1613 		anc_data[0] = msg_nametype(msg);
1614 		anc_data[1] = msg_namelower(msg);
1615 		anc_data[2] = msg_nameupper(msg);
1616 		break;
1617 	case TIPC_CONN_MSG:
1618 		has_name = (tsk->conn_type != 0);
1619 		anc_data[0] = tsk->conn_type;
1620 		anc_data[1] = tsk->conn_instance;
1621 		anc_data[2] = tsk->conn_instance;
1622 		break;
1623 	default:
1624 		has_name = 0;
1625 	}
1626 	if (has_name) {
1627 		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1628 		if (res)
1629 			return res;
1630 	}
1631 
1632 	return 0;
1633 }
1634 
1635 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1636 {
1637 	struct sock *sk = &tsk->sk;
1638 	struct net *net = sock_net(sk);
1639 	struct sk_buff *skb = NULL;
1640 	struct tipc_msg *msg;
1641 	u32 peer_port = tsk_peer_port(tsk);
1642 	u32 dnode = tsk_peer_node(tsk);
1643 
1644 	if (!tipc_sk_connected(sk))
1645 		return;
1646 	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1647 			      dnode, tsk_own_node(tsk), peer_port,
1648 			      tsk->portid, TIPC_OK);
1649 	if (!skb)
1650 		return;
1651 	msg = buf_msg(skb);
1652 	msg_set_conn_ack(msg, tsk->rcv_unacked);
1653 	tsk->rcv_unacked = 0;
1654 
1655 	/* Adjust to and advertize the correct window limit */
1656 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1657 		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1658 		msg_set_adv_win(msg, tsk->rcv_win);
1659 	}
1660 	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1661 }
1662 
1663 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1664 {
1665 	struct sock *sk = sock->sk;
1666 	DEFINE_WAIT(wait);
1667 	long timeo = *timeop;
1668 	int err = sock_error(sk);
1669 
1670 	if (err)
1671 		return err;
1672 
1673 	for (;;) {
1674 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1675 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1676 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
1677 				err = -ENOTCONN;
1678 				break;
1679 			}
1680 			release_sock(sk);
1681 			timeo = schedule_timeout(timeo);
1682 			lock_sock(sk);
1683 		}
1684 		err = 0;
1685 		if (!skb_queue_empty(&sk->sk_receive_queue))
1686 			break;
1687 		err = -EAGAIN;
1688 		if (!timeo)
1689 			break;
1690 		err = sock_intr_errno(timeo);
1691 		if (signal_pending(current))
1692 			break;
1693 
1694 		err = sock_error(sk);
1695 		if (err)
1696 			break;
1697 	}
1698 	finish_wait(sk_sleep(sk), &wait);
1699 	*timeop = timeo;
1700 	return err;
1701 }
1702 
1703 /**
1704  * tipc_recvmsg - receive packet-oriented message
1705  * @m: descriptor for message info
1706  * @buflen: length of user buffer area
1707  * @flags: receive flags
1708  *
1709  * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1710  * If the complete message doesn't fit in user area, truncate it.
1711  *
1712  * Returns size of returned message data, errno otherwise
1713  */
1714 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1715 			size_t buflen,	int flags)
1716 {
1717 	struct sock *sk = sock->sk;
1718 	bool connected = !tipc_sk_type_connectionless(sk);
1719 	struct tipc_sock *tsk = tipc_sk(sk);
1720 	int rc, err, hlen, dlen, copy;
1721 	struct sk_buff_head xmitq;
1722 	struct tipc_msg *hdr;
1723 	struct sk_buff *skb;
1724 	bool grp_evt;
1725 	long timeout;
1726 
1727 	/* Catch invalid receive requests */
1728 	if (unlikely(!buflen))
1729 		return -EINVAL;
1730 
1731 	lock_sock(sk);
1732 	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1733 		rc = -ENOTCONN;
1734 		goto exit;
1735 	}
1736 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1737 
1738 	/* Step rcv queue to first msg with data or error; wait if necessary */
1739 	do {
1740 		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1741 		if (unlikely(rc))
1742 			goto exit;
1743 		skb = skb_peek(&sk->sk_receive_queue);
1744 		hdr = buf_msg(skb);
1745 		dlen = msg_data_sz(hdr);
1746 		hlen = msg_hdr_sz(hdr);
1747 		err = msg_errcode(hdr);
1748 		grp_evt = msg_is_grp_evt(hdr);
1749 		if (likely(dlen || err))
1750 			break;
1751 		tsk_advance_rx_queue(sk);
1752 	} while (1);
1753 
1754 	/* Collect msg meta data, including error code and rejected data */
1755 	tipc_sk_set_orig_addr(m, skb);
1756 	rc = tipc_sk_anc_data_recv(m, skb, tsk);
1757 	if (unlikely(rc))
1758 		goto exit;
1759 	hdr = buf_msg(skb);
1760 
1761 	/* Capture data if non-error msg, otherwise just set return value */
1762 	if (likely(!err)) {
1763 		copy = min_t(int, dlen, buflen);
1764 		if (unlikely(copy != dlen))
1765 			m->msg_flags |= MSG_TRUNC;
1766 		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1767 	} else {
1768 		copy = 0;
1769 		rc = 0;
1770 		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1771 			rc = -ECONNRESET;
1772 	}
1773 	if (unlikely(rc))
1774 		goto exit;
1775 
1776 	/* Mark message as group event if applicable */
1777 	if (unlikely(grp_evt)) {
1778 		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1779 			m->msg_flags |= MSG_EOR;
1780 		m->msg_flags |= MSG_OOB;
1781 		copy = 0;
1782 	}
1783 
1784 	/* Caption of data or error code/rejected data was successful */
1785 	if (unlikely(flags & MSG_PEEK))
1786 		goto exit;
1787 
1788 	/* Send group flow control advertisement when applicable */
1789 	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1790 		skb_queue_head_init(&xmitq);
1791 		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1792 					  msg_orignode(hdr), msg_origport(hdr),
1793 					  &xmitq);
1794 		tipc_node_distr_xmit(sock_net(sk), &xmitq);
1795 	}
1796 
1797 	tsk_advance_rx_queue(sk);
1798 
1799 	if (likely(!connected))
1800 		goto exit;
1801 
1802 	/* Send connection flow control advertisement when applicable */
1803 	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1804 	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1805 		tipc_sk_send_ack(tsk);
1806 exit:
1807 	release_sock(sk);
1808 	return rc ? rc : copy;
1809 }
1810 
1811 /**
1812  * tipc_recvstream - receive stream-oriented data
1813  * @m: descriptor for message info
1814  * @buflen: total size of user buffer area
1815  * @flags: receive flags
1816  *
1817  * Used for SOCK_STREAM messages only.  If not enough data is available
1818  * will optionally wait for more; never truncates data.
1819  *
1820  * Returns size of returned message data, errno otherwise
1821  */
1822 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1823 			   size_t buflen, int flags)
1824 {
1825 	struct sock *sk = sock->sk;
1826 	struct tipc_sock *tsk = tipc_sk(sk);
1827 	struct sk_buff *skb;
1828 	struct tipc_msg *hdr;
1829 	struct tipc_skb_cb *skb_cb;
1830 	bool peek = flags & MSG_PEEK;
1831 	int offset, required, copy, copied = 0;
1832 	int hlen, dlen, err, rc;
1833 	long timeout;
1834 
1835 	/* Catch invalid receive attempts */
1836 	if (unlikely(!buflen))
1837 		return -EINVAL;
1838 
1839 	lock_sock(sk);
1840 
1841 	if (unlikely(sk->sk_state == TIPC_OPEN)) {
1842 		rc = -ENOTCONN;
1843 		goto exit;
1844 	}
1845 	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1846 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1847 
1848 	do {
1849 		/* Look at first msg in receive queue; wait if necessary */
1850 		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1851 		if (unlikely(rc))
1852 			break;
1853 		skb = skb_peek(&sk->sk_receive_queue);
1854 		skb_cb = TIPC_SKB_CB(skb);
1855 		hdr = buf_msg(skb);
1856 		dlen = msg_data_sz(hdr);
1857 		hlen = msg_hdr_sz(hdr);
1858 		err = msg_errcode(hdr);
1859 
1860 		/* Discard any empty non-errored (SYN-) message */
1861 		if (unlikely(!dlen && !err)) {
1862 			tsk_advance_rx_queue(sk);
1863 			continue;
1864 		}
1865 
1866 		/* Collect msg meta data, incl. error code and rejected data */
1867 		if (!copied) {
1868 			tipc_sk_set_orig_addr(m, skb);
1869 			rc = tipc_sk_anc_data_recv(m, skb, tsk);
1870 			if (rc)
1871 				break;
1872 			hdr = buf_msg(skb);
1873 		}
1874 
1875 		/* Copy data if msg ok, otherwise return error/partial data */
1876 		if (likely(!err)) {
1877 			offset = skb_cb->bytes_read;
1878 			copy = min_t(int, dlen - offset, buflen - copied);
1879 			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1880 			if (unlikely(rc))
1881 				break;
1882 			copied += copy;
1883 			offset += copy;
1884 			if (unlikely(offset < dlen)) {
1885 				if (!peek)
1886 					skb_cb->bytes_read = offset;
1887 				break;
1888 			}
1889 		} else {
1890 			rc = 0;
1891 			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1892 				rc = -ECONNRESET;
1893 			if (copied || rc)
1894 				break;
1895 		}
1896 
1897 		if (unlikely(peek))
1898 			break;
1899 
1900 		tsk_advance_rx_queue(sk);
1901 
1902 		/* Send connection flow control advertisement when applicable */
1903 		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1904 		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1905 			tipc_sk_send_ack(tsk);
1906 
1907 		/* Exit if all requested data or FIN/error received */
1908 		if (copied == buflen || err)
1909 			break;
1910 
1911 	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1912 exit:
1913 	release_sock(sk);
1914 	return copied ? copied : rc;
1915 }
1916 
1917 /**
1918  * tipc_write_space - wake up thread if port congestion is released
1919  * @sk: socket
1920  */
1921 static void tipc_write_space(struct sock *sk)
1922 {
1923 	struct socket_wq *wq;
1924 
1925 	rcu_read_lock();
1926 	wq = rcu_dereference(sk->sk_wq);
1927 	if (skwq_has_sleeper(wq))
1928 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1929 						EPOLLWRNORM | EPOLLWRBAND);
1930 	rcu_read_unlock();
1931 }
1932 
1933 /**
1934  * tipc_data_ready - wake up threads to indicate messages have been received
1935  * @sk: socket
1936  * @len: the length of messages
1937  */
1938 static void tipc_data_ready(struct sock *sk)
1939 {
1940 	struct socket_wq *wq;
1941 
1942 	rcu_read_lock();
1943 	wq = rcu_dereference(sk->sk_wq);
1944 	if (skwq_has_sleeper(wq))
1945 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1946 						EPOLLRDNORM | EPOLLRDBAND);
1947 	rcu_read_unlock();
1948 }
1949 
1950 static void tipc_sock_destruct(struct sock *sk)
1951 {
1952 	__skb_queue_purge(&sk->sk_receive_queue);
1953 }
1954 
1955 static void tipc_sk_proto_rcv(struct sock *sk,
1956 			      struct sk_buff_head *inputq,
1957 			      struct sk_buff_head *xmitq)
1958 {
1959 	struct sk_buff *skb = __skb_dequeue(inputq);
1960 	struct tipc_sock *tsk = tipc_sk(sk);
1961 	struct tipc_msg *hdr = buf_msg(skb);
1962 	struct tipc_group *grp = tsk->group;
1963 	bool wakeup = false;
1964 
1965 	switch (msg_user(hdr)) {
1966 	case CONN_MANAGER:
1967 		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1968 		return;
1969 	case SOCK_WAKEUP:
1970 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1971 		tsk->cong_link_cnt--;
1972 		wakeup = true;
1973 		break;
1974 	case GROUP_PROTOCOL:
1975 		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1976 		break;
1977 	case TOP_SRV:
1978 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1979 				      hdr, inputq, xmitq);
1980 		break;
1981 	default:
1982 		break;
1983 	}
1984 
1985 	if (wakeup)
1986 		sk->sk_write_space(sk);
1987 
1988 	kfree_skb(skb);
1989 }
1990 
1991 /**
1992  * tipc_sk_filter_connect - check incoming message for a connection-based socket
1993  * @tsk: TIPC socket
1994  * @skb: pointer to message buffer.
1995  * Returns true if message should be added to receive queue, false otherwise
1996  */
1997 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1998 {
1999 	struct sock *sk = &tsk->sk;
2000 	struct net *net = sock_net(sk);
2001 	struct tipc_msg *hdr = buf_msg(skb);
2002 	bool con_msg = msg_connected(hdr);
2003 	u32 pport = tsk_peer_port(tsk);
2004 	u32 pnode = tsk_peer_node(tsk);
2005 	u32 oport = msg_origport(hdr);
2006 	u32 onode = msg_orignode(hdr);
2007 	int err = msg_errcode(hdr);
2008 	unsigned long delay;
2009 
2010 	if (unlikely(msg_mcast(hdr)))
2011 		return false;
2012 
2013 	switch (sk->sk_state) {
2014 	case TIPC_CONNECTING:
2015 		/* Setup ACK */
2016 		if (likely(con_msg)) {
2017 			if (err)
2018 				break;
2019 			tipc_sk_finish_conn(tsk, oport, onode);
2020 			msg_set_importance(&tsk->phdr, msg_importance(hdr));
2021 			/* ACK+ message with data is added to receive queue */
2022 			if (msg_data_sz(hdr))
2023 				return true;
2024 			/* Empty ACK-, - wake up sleeping connect() and drop */
2025 			sk->sk_data_ready(sk);
2026 			msg_set_dest_droppable(hdr, 1);
2027 			return false;
2028 		}
2029 		/* Ignore connectionless message if not from listening socket */
2030 		if (oport != pport || onode != pnode)
2031 			return false;
2032 
2033 		/* Rejected SYN */
2034 		if (err != TIPC_ERR_OVERLOAD)
2035 			break;
2036 
2037 		/* Prepare for new setup attempt if we have a SYN clone */
2038 		if (skb_queue_empty(&sk->sk_write_queue))
2039 			break;
2040 		get_random_bytes(&delay, 2);
2041 		delay %= (tsk->conn_timeout / 4);
2042 		delay = msecs_to_jiffies(delay + 100);
2043 		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2044 		return false;
2045 	case TIPC_OPEN:
2046 	case TIPC_DISCONNECTING:
2047 		return false;
2048 	case TIPC_LISTEN:
2049 		/* Accept only SYN message */
2050 		if (!msg_is_syn(hdr) &&
2051 		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2052 			return false;
2053 		if (!con_msg && !err)
2054 			return true;
2055 		return false;
2056 	case TIPC_ESTABLISHED:
2057 		/* Accept only connection-based messages sent by peer */
2058 		if (likely(con_msg && !err && pport == oport && pnode == onode))
2059 			return true;
2060 		if (!tsk_peer_msg(tsk, hdr))
2061 			return false;
2062 		if (!err)
2063 			return true;
2064 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2065 		tipc_node_remove_conn(net, pnode, tsk->portid);
2066 		sk->sk_state_change(sk);
2067 		return true;
2068 	default:
2069 		pr_err("Unknown sk_state %u\n", sk->sk_state);
2070 	}
2071 	/* Abort connection setup attempt */
2072 	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2073 	sk->sk_err = ECONNREFUSED;
2074 	sk->sk_state_change(sk);
2075 	return true;
2076 }
2077 
2078 /**
2079  * rcvbuf_limit - get proper overload limit of socket receive queue
2080  * @sk: socket
2081  * @skb: message
2082  *
2083  * For connection oriented messages, irrespective of importance,
2084  * default queue limit is 2 MB.
2085  *
2086  * For connectionless messages, queue limits are based on message
2087  * importance as follows:
2088  *
2089  * TIPC_LOW_IMPORTANCE       (2 MB)
2090  * TIPC_MEDIUM_IMPORTANCE    (4 MB)
2091  * TIPC_HIGH_IMPORTANCE      (8 MB)
2092  * TIPC_CRITICAL_IMPORTANCE  (16 MB)
2093  *
2094  * Returns overload limit according to corresponding message importance
2095  */
2096 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2097 {
2098 	struct tipc_sock *tsk = tipc_sk(sk);
2099 	struct tipc_msg *hdr = buf_msg(skb);
2100 
2101 	if (unlikely(msg_in_group(hdr)))
2102 		return sk->sk_rcvbuf;
2103 
2104 	if (unlikely(!msg_connected(hdr)))
2105 		return sk->sk_rcvbuf << msg_importance(hdr);
2106 
2107 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2108 		return sk->sk_rcvbuf;
2109 
2110 	return FLOWCTL_MSG_LIM;
2111 }
2112 
2113 /**
2114  * tipc_sk_filter_rcv - validate incoming message
2115  * @sk: socket
2116  * @skb: pointer to message.
2117  *
2118  * Enqueues message on receive queue if acceptable; optionally handles
2119  * disconnect indication for a connected socket.
2120  *
2121  * Called with socket lock already taken
2122  *
2123  */
2124 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2125 			       struct sk_buff_head *xmitq)
2126 {
2127 	bool sk_conn = !tipc_sk_type_connectionless(sk);
2128 	struct tipc_sock *tsk = tipc_sk(sk);
2129 	struct tipc_group *grp = tsk->group;
2130 	struct tipc_msg *hdr = buf_msg(skb);
2131 	struct net *net = sock_net(sk);
2132 	struct sk_buff_head inputq;
2133 	int limit, err = TIPC_OK;
2134 
2135 	TIPC_SKB_CB(skb)->bytes_read = 0;
2136 	__skb_queue_head_init(&inputq);
2137 	__skb_queue_tail(&inputq, skb);
2138 
2139 	if (unlikely(!msg_isdata(hdr)))
2140 		tipc_sk_proto_rcv(sk, &inputq, xmitq);
2141 
2142 	if (unlikely(grp))
2143 		tipc_group_filter_msg(grp, &inputq, xmitq);
2144 
2145 	/* Validate and add to receive buffer if there is space */
2146 	while ((skb = __skb_dequeue(&inputq))) {
2147 		hdr = buf_msg(skb);
2148 		limit = rcvbuf_limit(sk, skb);
2149 		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2150 		    (!sk_conn && msg_connected(hdr)) ||
2151 		    (!grp && msg_in_group(hdr)))
2152 			err = TIPC_ERR_NO_PORT;
2153 		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2154 			atomic_inc(&sk->sk_drops);
2155 			err = TIPC_ERR_OVERLOAD;
2156 		}
2157 
2158 		if (unlikely(err)) {
2159 			tipc_skb_reject(net, err, skb, xmitq);
2160 			err = TIPC_OK;
2161 			continue;
2162 		}
2163 		__skb_queue_tail(&sk->sk_receive_queue, skb);
2164 		skb_set_owner_r(skb, sk);
2165 		sk->sk_data_ready(sk);
2166 	}
2167 }
2168 
2169 /**
2170  * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2171  * @sk: socket
2172  * @skb: message
2173  *
2174  * Caller must hold socket lock
2175  */
2176 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2177 {
2178 	unsigned int before = sk_rmem_alloc_get(sk);
2179 	struct sk_buff_head xmitq;
2180 	unsigned int added;
2181 
2182 	__skb_queue_head_init(&xmitq);
2183 
2184 	tipc_sk_filter_rcv(sk, skb, &xmitq);
2185 	added = sk_rmem_alloc_get(sk) - before;
2186 	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2187 
2188 	/* Send pending response/rejected messages, if any */
2189 	tipc_node_distr_xmit(sock_net(sk), &xmitq);
2190 	return 0;
2191 }
2192 
2193 /**
2194  * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2195  *                   inputq and try adding them to socket or backlog queue
2196  * @inputq: list of incoming buffers with potentially different destinations
2197  * @sk: socket where the buffers should be enqueued
2198  * @dport: port number for the socket
2199  *
2200  * Caller must hold socket lock
2201  */
2202 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2203 			    u32 dport, struct sk_buff_head *xmitq)
2204 {
2205 	unsigned long time_limit = jiffies + 2;
2206 	struct sk_buff *skb;
2207 	unsigned int lim;
2208 	atomic_t *dcnt;
2209 	u32 onode;
2210 
2211 	while (skb_queue_len(inputq)) {
2212 		if (unlikely(time_after_eq(jiffies, time_limit)))
2213 			return;
2214 
2215 		skb = tipc_skb_dequeue(inputq, dport);
2216 		if (unlikely(!skb))
2217 			return;
2218 
2219 		/* Add message directly to receive queue if possible */
2220 		if (!sock_owned_by_user(sk)) {
2221 			tipc_sk_filter_rcv(sk, skb, xmitq);
2222 			continue;
2223 		}
2224 
2225 		/* Try backlog, compensating for double-counted bytes */
2226 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2227 		if (!sk->sk_backlog.len)
2228 			atomic_set(dcnt, 0);
2229 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2230 		if (likely(!sk_add_backlog(sk, skb, lim)))
2231 			continue;
2232 
2233 		/* Overload => reject message back to sender */
2234 		onode = tipc_own_addr(sock_net(sk));
2235 		atomic_inc(&sk->sk_drops);
2236 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2237 			__skb_queue_tail(xmitq, skb);
2238 		break;
2239 	}
2240 }
2241 
2242 /**
2243  * tipc_sk_rcv - handle a chain of incoming buffers
2244  * @inputq: buffer list containing the buffers
2245  * Consumes all buffers in list until inputq is empty
2246  * Note: may be called in multiple threads referring to the same queue
2247  */
2248 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2249 {
2250 	struct sk_buff_head xmitq;
2251 	u32 dnode, dport = 0;
2252 	int err;
2253 	struct tipc_sock *tsk;
2254 	struct sock *sk;
2255 	struct sk_buff *skb;
2256 
2257 	__skb_queue_head_init(&xmitq);
2258 	while (skb_queue_len(inputq)) {
2259 		dport = tipc_skb_peek_port(inputq, dport);
2260 		tsk = tipc_sk_lookup(net, dport);
2261 
2262 		if (likely(tsk)) {
2263 			sk = &tsk->sk;
2264 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2265 				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2266 				spin_unlock_bh(&sk->sk_lock.slock);
2267 			}
2268 			/* Send pending response/rejected messages, if any */
2269 			tipc_node_distr_xmit(sock_net(sk), &xmitq);
2270 			sock_put(sk);
2271 			continue;
2272 		}
2273 		/* No destination socket => dequeue skb if still there */
2274 		skb = tipc_skb_dequeue(inputq, dport);
2275 		if (!skb)
2276 			return;
2277 
2278 		/* Try secondary lookup if unresolved named message */
2279 		err = TIPC_ERR_NO_PORT;
2280 		if (tipc_msg_lookup_dest(net, skb, &err))
2281 			goto xmit;
2282 
2283 		/* Prepare for message rejection */
2284 		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2285 			continue;
2286 xmit:
2287 		dnode = msg_destnode(buf_msg(skb));
2288 		tipc_node_xmit_skb(net, skb, dnode, dport);
2289 	}
2290 }
2291 
2292 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2293 {
2294 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2295 	struct sock *sk = sock->sk;
2296 	int done;
2297 
2298 	do {
2299 		int err = sock_error(sk);
2300 		if (err)
2301 			return err;
2302 		if (!*timeo_p)
2303 			return -ETIMEDOUT;
2304 		if (signal_pending(current))
2305 			return sock_intr_errno(*timeo_p);
2306 
2307 		add_wait_queue(sk_sleep(sk), &wait);
2308 		done = sk_wait_event(sk, timeo_p,
2309 				     sk->sk_state != TIPC_CONNECTING, &wait);
2310 		remove_wait_queue(sk_sleep(sk), &wait);
2311 	} while (!done);
2312 	return 0;
2313 }
2314 
2315 /**
2316  * tipc_connect - establish a connection to another TIPC port
2317  * @sock: socket structure
2318  * @dest: socket address for destination port
2319  * @destlen: size of socket address data structure
2320  * @flags: file-related flags associated with socket
2321  *
2322  * Returns 0 on success, errno otherwise
2323  */
2324 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2325 			int destlen, int flags)
2326 {
2327 	struct sock *sk = sock->sk;
2328 	struct tipc_sock *tsk = tipc_sk(sk);
2329 	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2330 	struct msghdr m = {NULL,};
2331 	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2332 	int previous;
2333 	int res = 0;
2334 
2335 	if (destlen != sizeof(struct sockaddr_tipc))
2336 		return -EINVAL;
2337 
2338 	lock_sock(sk);
2339 
2340 	if (tsk->group) {
2341 		res = -EINVAL;
2342 		goto exit;
2343 	}
2344 
2345 	if (dst->family == AF_UNSPEC) {
2346 		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2347 		if (!tipc_sk_type_connectionless(sk))
2348 			res = -EINVAL;
2349 		goto exit;
2350 	} else if (dst->family != AF_TIPC) {
2351 		res = -EINVAL;
2352 	}
2353 	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
2354 		res = -EINVAL;
2355 	if (res)
2356 		goto exit;
2357 
2358 	/* DGRAM/RDM connect(), just save the destaddr */
2359 	if (tipc_sk_type_connectionless(sk)) {
2360 		memcpy(&tsk->peer, dest, destlen);
2361 		goto exit;
2362 	}
2363 
2364 	previous = sk->sk_state;
2365 
2366 	switch (sk->sk_state) {
2367 	case TIPC_OPEN:
2368 		/* Send a 'SYN-' to destination */
2369 		m.msg_name = dest;
2370 		m.msg_namelen = destlen;
2371 
2372 		/* If connect is in non-blocking case, set MSG_DONTWAIT to
2373 		 * indicate send_msg() is never blocked.
2374 		 */
2375 		if (!timeout)
2376 			m.msg_flags = MSG_DONTWAIT;
2377 
2378 		res = __tipc_sendmsg(sock, &m, 0);
2379 		if ((res < 0) && (res != -EWOULDBLOCK))
2380 			goto exit;
2381 
2382 		/* Just entered TIPC_CONNECTING state; the only
2383 		 * difference is that return value in non-blocking
2384 		 * case is EINPROGRESS, rather than EALREADY.
2385 		 */
2386 		res = -EINPROGRESS;
2387 		/* fall thru' */
2388 	case TIPC_CONNECTING:
2389 		if (!timeout) {
2390 			if (previous == TIPC_CONNECTING)
2391 				res = -EALREADY;
2392 			goto exit;
2393 		}
2394 		timeout = msecs_to_jiffies(timeout);
2395 		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2396 		res = tipc_wait_for_connect(sock, &timeout);
2397 		break;
2398 	case TIPC_ESTABLISHED:
2399 		res = -EISCONN;
2400 		break;
2401 	default:
2402 		res = -EINVAL;
2403 	}
2404 
2405 exit:
2406 	release_sock(sk);
2407 	return res;
2408 }
2409 
2410 /**
2411  * tipc_listen - allow socket to listen for incoming connections
2412  * @sock: socket structure
2413  * @len: (unused)
2414  *
2415  * Returns 0 on success, errno otherwise
2416  */
2417 static int tipc_listen(struct socket *sock, int len)
2418 {
2419 	struct sock *sk = sock->sk;
2420 	int res;
2421 
2422 	lock_sock(sk);
2423 	res = tipc_set_sk_state(sk, TIPC_LISTEN);
2424 	release_sock(sk);
2425 
2426 	return res;
2427 }
2428 
2429 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2430 {
2431 	struct sock *sk = sock->sk;
2432 	DEFINE_WAIT(wait);
2433 	int err;
2434 
2435 	/* True wake-one mechanism for incoming connections: only
2436 	 * one process gets woken up, not the 'whole herd'.
2437 	 * Since we do not 'race & poll' for established sockets
2438 	 * anymore, the common case will execute the loop only once.
2439 	*/
2440 	for (;;) {
2441 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2442 					  TASK_INTERRUPTIBLE);
2443 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2444 			release_sock(sk);
2445 			timeo = schedule_timeout(timeo);
2446 			lock_sock(sk);
2447 		}
2448 		err = 0;
2449 		if (!skb_queue_empty(&sk->sk_receive_queue))
2450 			break;
2451 		err = -EAGAIN;
2452 		if (!timeo)
2453 			break;
2454 		err = sock_intr_errno(timeo);
2455 		if (signal_pending(current))
2456 			break;
2457 	}
2458 	finish_wait(sk_sleep(sk), &wait);
2459 	return err;
2460 }
2461 
2462 /**
2463  * tipc_accept - wait for connection request
2464  * @sock: listening socket
2465  * @newsock: new socket that is to be connected
2466  * @flags: file-related flags associated with socket
2467  *
2468  * Returns 0 on success, errno otherwise
2469  */
2470 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2471 		       bool kern)
2472 {
2473 	struct sock *new_sk, *sk = sock->sk;
2474 	struct sk_buff *buf;
2475 	struct tipc_sock *new_tsock;
2476 	struct tipc_msg *msg;
2477 	long timeo;
2478 	int res;
2479 
2480 	lock_sock(sk);
2481 
2482 	if (sk->sk_state != TIPC_LISTEN) {
2483 		res = -EINVAL;
2484 		goto exit;
2485 	}
2486 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2487 	res = tipc_wait_for_accept(sock, timeo);
2488 	if (res)
2489 		goto exit;
2490 
2491 	buf = skb_peek(&sk->sk_receive_queue);
2492 
2493 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2494 	if (res)
2495 		goto exit;
2496 	security_sk_clone(sock->sk, new_sock->sk);
2497 
2498 	new_sk = new_sock->sk;
2499 	new_tsock = tipc_sk(new_sk);
2500 	msg = buf_msg(buf);
2501 
2502 	/* we lock on new_sk; but lockdep sees the lock on sk */
2503 	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2504 
2505 	/*
2506 	 * Reject any stray messages received by new socket
2507 	 * before the socket lock was taken (very, very unlikely)
2508 	 */
2509 	tsk_rej_rx_queue(new_sk);
2510 
2511 	/* Connect new socket to it's peer */
2512 	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2513 
2514 	tsk_set_importance(new_tsock, msg_importance(msg));
2515 	if (msg_named(msg)) {
2516 		new_tsock->conn_type = msg_nametype(msg);
2517 		new_tsock->conn_instance = msg_nameinst(msg);
2518 	}
2519 
2520 	/*
2521 	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2522 	 * Respond to 'SYN+' by queuing it on new socket.
2523 	 */
2524 	if (!msg_data_sz(msg)) {
2525 		struct msghdr m = {NULL,};
2526 
2527 		tsk_advance_rx_queue(sk);
2528 		__tipc_sendstream(new_sock, &m, 0);
2529 	} else {
2530 		__skb_dequeue(&sk->sk_receive_queue);
2531 		__skb_queue_head(&new_sk->sk_receive_queue, buf);
2532 		skb_set_owner_r(buf, new_sk);
2533 	}
2534 	release_sock(new_sk);
2535 exit:
2536 	release_sock(sk);
2537 	return res;
2538 }
2539 
2540 /**
2541  * tipc_shutdown - shutdown socket connection
2542  * @sock: socket structure
2543  * @how: direction to close (must be SHUT_RDWR)
2544  *
2545  * Terminates connection (if necessary), then purges socket's receive queue.
2546  *
2547  * Returns 0 on success, errno otherwise
2548  */
2549 static int tipc_shutdown(struct socket *sock, int how)
2550 {
2551 	struct sock *sk = sock->sk;
2552 	int res;
2553 
2554 	if (how != SHUT_RDWR)
2555 		return -EINVAL;
2556 
2557 	lock_sock(sk);
2558 
2559 	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2560 	sk->sk_shutdown = SEND_SHUTDOWN;
2561 
2562 	if (sk->sk_state == TIPC_DISCONNECTING) {
2563 		/* Discard any unreceived messages */
2564 		__skb_queue_purge(&sk->sk_receive_queue);
2565 
2566 		/* Wake up anyone sleeping in poll */
2567 		sk->sk_state_change(sk);
2568 		res = 0;
2569 	} else {
2570 		res = -ENOTCONN;
2571 	}
2572 
2573 	release_sock(sk);
2574 	return res;
2575 }
2576 
2577 static void tipc_sk_check_probing_state(struct sock *sk,
2578 					struct sk_buff_head *list)
2579 {
2580 	struct tipc_sock *tsk = tipc_sk(sk);
2581 	u32 pnode = tsk_peer_node(tsk);
2582 	u32 pport = tsk_peer_port(tsk);
2583 	u32 self = tsk_own_node(tsk);
2584 	u32 oport = tsk->portid;
2585 	struct sk_buff *skb;
2586 
2587 	if (tsk->probe_unacked) {
2588 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2589 		sk->sk_err = ECONNABORTED;
2590 		tipc_node_remove_conn(sock_net(sk), pnode, pport);
2591 		sk->sk_state_change(sk);
2592 		return;
2593 	}
2594 	/* Prepare new probe */
2595 	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2596 			      pnode, self, pport, oport, TIPC_OK);
2597 	if (skb)
2598 		__skb_queue_tail(list, skb);
2599 	tsk->probe_unacked = true;
2600 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2601 }
2602 
2603 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2604 {
2605 	struct tipc_sock *tsk = tipc_sk(sk);
2606 
2607 	/* Try again later if dest link is congested */
2608 	if (tsk->cong_link_cnt) {
2609 		sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2610 		return;
2611 	}
2612 	/* Prepare SYN for retransmit */
2613 	tipc_msg_skb_clone(&sk->sk_write_queue, list);
2614 }
2615 
2616 static void tipc_sk_timeout(struct timer_list *t)
2617 {
2618 	struct sock *sk = from_timer(sk, t, sk_timer);
2619 	struct tipc_sock *tsk = tipc_sk(sk);
2620 	u32 pnode = tsk_peer_node(tsk);
2621 	struct sk_buff_head list;
2622 	int rc = 0;
2623 
2624 	skb_queue_head_init(&list);
2625 	bh_lock_sock(sk);
2626 
2627 	/* Try again later if socket is busy */
2628 	if (sock_owned_by_user(sk)) {
2629 		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2630 		bh_unlock_sock(sk);
2631 		return;
2632 	}
2633 
2634 	if (sk->sk_state == TIPC_ESTABLISHED)
2635 		tipc_sk_check_probing_state(sk, &list);
2636 	else if (sk->sk_state == TIPC_CONNECTING)
2637 		tipc_sk_retry_connect(sk, &list);
2638 
2639 	bh_unlock_sock(sk);
2640 
2641 	if (!skb_queue_empty(&list))
2642 		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2643 
2644 	/* SYN messages may cause link congestion */
2645 	if (rc == -ELINKCONG) {
2646 		tipc_dest_push(&tsk->cong_links, pnode, 0);
2647 		tsk->cong_link_cnt = 1;
2648 	}
2649 	sock_put(sk);
2650 }
2651 
2652 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2653 			   struct tipc_name_seq const *seq)
2654 {
2655 	struct sock *sk = &tsk->sk;
2656 	struct net *net = sock_net(sk);
2657 	struct publication *publ;
2658 	u32 key;
2659 
2660 	if (scope != TIPC_NODE_SCOPE)
2661 		scope = TIPC_CLUSTER_SCOPE;
2662 
2663 	if (tipc_sk_connected(sk))
2664 		return -EINVAL;
2665 	key = tsk->portid + tsk->pub_count + 1;
2666 	if (key == tsk->portid)
2667 		return -EADDRINUSE;
2668 
2669 	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2670 				    scope, tsk->portid, key);
2671 	if (unlikely(!publ))
2672 		return -EINVAL;
2673 
2674 	list_add(&publ->binding_sock, &tsk->publications);
2675 	tsk->pub_count++;
2676 	tsk->published = 1;
2677 	return 0;
2678 }
2679 
2680 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2681 			    struct tipc_name_seq const *seq)
2682 {
2683 	struct net *net = sock_net(&tsk->sk);
2684 	struct publication *publ;
2685 	struct publication *safe;
2686 	int rc = -EINVAL;
2687 
2688 	if (scope != TIPC_NODE_SCOPE)
2689 		scope = TIPC_CLUSTER_SCOPE;
2690 
2691 	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2692 		if (seq) {
2693 			if (publ->scope != scope)
2694 				continue;
2695 			if (publ->type != seq->type)
2696 				continue;
2697 			if (publ->lower != seq->lower)
2698 				continue;
2699 			if (publ->upper != seq->upper)
2700 				break;
2701 			tipc_nametbl_withdraw(net, publ->type, publ->lower,
2702 					      publ->upper, publ->key);
2703 			rc = 0;
2704 			break;
2705 		}
2706 		tipc_nametbl_withdraw(net, publ->type, publ->lower,
2707 				      publ->upper, publ->key);
2708 		rc = 0;
2709 	}
2710 	if (list_empty(&tsk->publications))
2711 		tsk->published = 0;
2712 	return rc;
2713 }
2714 
2715 /* tipc_sk_reinit: set non-zero address in all existing sockets
2716  *                 when we go from standalone to network mode.
2717  */
2718 void tipc_sk_reinit(struct net *net)
2719 {
2720 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2721 	struct rhashtable_iter iter;
2722 	struct tipc_sock *tsk;
2723 	struct tipc_msg *msg;
2724 
2725 	rhashtable_walk_enter(&tn->sk_rht, &iter);
2726 
2727 	do {
2728 		rhashtable_walk_start(&iter);
2729 
2730 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2731 			sock_hold(&tsk->sk);
2732 			rhashtable_walk_stop(&iter);
2733 			lock_sock(&tsk->sk);
2734 			msg = &tsk->phdr;
2735 			msg_set_prevnode(msg, tipc_own_addr(net));
2736 			msg_set_orignode(msg, tipc_own_addr(net));
2737 			release_sock(&tsk->sk);
2738 			rhashtable_walk_start(&iter);
2739 			sock_put(&tsk->sk);
2740 		}
2741 
2742 		rhashtable_walk_stop(&iter);
2743 	} while (tsk == ERR_PTR(-EAGAIN));
2744 
2745 	rhashtable_walk_exit(&iter);
2746 }
2747 
2748 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2749 {
2750 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2751 	struct tipc_sock *tsk;
2752 
2753 	rcu_read_lock();
2754 	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2755 	if (tsk)
2756 		sock_hold(&tsk->sk);
2757 	rcu_read_unlock();
2758 
2759 	return tsk;
2760 }
2761 
2762 static int tipc_sk_insert(struct tipc_sock *tsk)
2763 {
2764 	struct sock *sk = &tsk->sk;
2765 	struct net *net = sock_net(sk);
2766 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2767 	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2768 	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2769 
2770 	while (remaining--) {
2771 		portid++;
2772 		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2773 			portid = TIPC_MIN_PORT;
2774 		tsk->portid = portid;
2775 		sock_hold(&tsk->sk);
2776 		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2777 						   tsk_rht_params))
2778 			return 0;
2779 		sock_put(&tsk->sk);
2780 	}
2781 
2782 	return -1;
2783 }
2784 
2785 static void tipc_sk_remove(struct tipc_sock *tsk)
2786 {
2787 	struct sock *sk = &tsk->sk;
2788 	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2789 
2790 	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2791 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2792 		__sock_put(sk);
2793 	}
2794 }
2795 
2796 static const struct rhashtable_params tsk_rht_params = {
2797 	.nelem_hint = 192,
2798 	.head_offset = offsetof(struct tipc_sock, node),
2799 	.key_offset = offsetof(struct tipc_sock, portid),
2800 	.key_len = sizeof(u32), /* portid */
2801 	.max_size = 1048576,
2802 	.min_size = 256,
2803 	.automatic_shrinking = true,
2804 };
2805 
2806 int tipc_sk_rht_init(struct net *net)
2807 {
2808 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2809 
2810 	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2811 }
2812 
2813 void tipc_sk_rht_destroy(struct net *net)
2814 {
2815 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2816 
2817 	/* Wait for socket readers to complete */
2818 	synchronize_net();
2819 
2820 	rhashtable_destroy(&tn->sk_rht);
2821 }
2822 
2823 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2824 {
2825 	struct net *net = sock_net(&tsk->sk);
2826 	struct tipc_group *grp = tsk->group;
2827 	struct tipc_msg *hdr = &tsk->phdr;
2828 	struct tipc_name_seq seq;
2829 	int rc;
2830 
2831 	if (mreq->type < TIPC_RESERVED_TYPES)
2832 		return -EACCES;
2833 	if (mreq->scope > TIPC_NODE_SCOPE)
2834 		return -EINVAL;
2835 	if (grp)
2836 		return -EACCES;
2837 	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2838 	if (!grp)
2839 		return -ENOMEM;
2840 	tsk->group = grp;
2841 	msg_set_lookup_scope(hdr, mreq->scope);
2842 	msg_set_nametype(hdr, mreq->type);
2843 	msg_set_dest_droppable(hdr, true);
2844 	seq.type = mreq->type;
2845 	seq.lower = mreq->instance;
2846 	seq.upper = seq.lower;
2847 	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2848 	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2849 	if (rc) {
2850 		tipc_group_delete(net, grp);
2851 		tsk->group = NULL;
2852 		return rc;
2853 	}
2854 	/* Eliminate any risk that a broadcast overtakes sent JOINs */
2855 	tsk->mc_method.rcast = true;
2856 	tsk->mc_method.mandatory = true;
2857 	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2858 	return rc;
2859 }
2860 
2861 static int tipc_sk_leave(struct tipc_sock *tsk)
2862 {
2863 	struct net *net = sock_net(&tsk->sk);
2864 	struct tipc_group *grp = tsk->group;
2865 	struct tipc_name_seq seq;
2866 	int scope;
2867 
2868 	if (!grp)
2869 		return -EINVAL;
2870 	tipc_group_self(grp, &seq, &scope);
2871 	tipc_group_delete(net, grp);
2872 	tsk->group = NULL;
2873 	tipc_sk_withdraw(tsk, scope, &seq);
2874 	return 0;
2875 }
2876 
2877 /**
2878  * tipc_setsockopt - set socket option
2879  * @sock: socket structure
2880  * @lvl: option level
2881  * @opt: option identifier
2882  * @ov: pointer to new option value
2883  * @ol: length of option value
2884  *
2885  * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2886  * (to ease compatibility).
2887  *
2888  * Returns 0 on success, errno otherwise
2889  */
2890 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2891 			   char __user *ov, unsigned int ol)
2892 {
2893 	struct sock *sk = sock->sk;
2894 	struct tipc_sock *tsk = tipc_sk(sk);
2895 	struct tipc_group_req mreq;
2896 	u32 value = 0;
2897 	int res = 0;
2898 
2899 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2900 		return 0;
2901 	if (lvl != SOL_TIPC)
2902 		return -ENOPROTOOPT;
2903 
2904 	switch (opt) {
2905 	case TIPC_IMPORTANCE:
2906 	case TIPC_SRC_DROPPABLE:
2907 	case TIPC_DEST_DROPPABLE:
2908 	case TIPC_CONN_TIMEOUT:
2909 		if (ol < sizeof(value))
2910 			return -EINVAL;
2911 		if (get_user(value, (u32 __user *)ov))
2912 			return -EFAULT;
2913 		break;
2914 	case TIPC_GROUP_JOIN:
2915 		if (ol < sizeof(mreq))
2916 			return -EINVAL;
2917 		if (copy_from_user(&mreq, ov, sizeof(mreq)))
2918 			return -EFAULT;
2919 		break;
2920 	default:
2921 		if (ov || ol)
2922 			return -EINVAL;
2923 	}
2924 
2925 	lock_sock(sk);
2926 
2927 	switch (opt) {
2928 	case TIPC_IMPORTANCE:
2929 		res = tsk_set_importance(tsk, value);
2930 		break;
2931 	case TIPC_SRC_DROPPABLE:
2932 		if (sock->type != SOCK_STREAM)
2933 			tsk_set_unreliable(tsk, value);
2934 		else
2935 			res = -ENOPROTOOPT;
2936 		break;
2937 	case TIPC_DEST_DROPPABLE:
2938 		tsk_set_unreturnable(tsk, value);
2939 		break;
2940 	case TIPC_CONN_TIMEOUT:
2941 		tipc_sk(sk)->conn_timeout = value;
2942 		break;
2943 	case TIPC_MCAST_BROADCAST:
2944 		tsk->mc_method.rcast = false;
2945 		tsk->mc_method.mandatory = true;
2946 		break;
2947 	case TIPC_MCAST_REPLICAST:
2948 		tsk->mc_method.rcast = true;
2949 		tsk->mc_method.mandatory = true;
2950 		break;
2951 	case TIPC_GROUP_JOIN:
2952 		res = tipc_sk_join(tsk, &mreq);
2953 		break;
2954 	case TIPC_GROUP_LEAVE:
2955 		res = tipc_sk_leave(tsk);
2956 		break;
2957 	default:
2958 		res = -EINVAL;
2959 	}
2960 
2961 	release_sock(sk);
2962 
2963 	return res;
2964 }
2965 
2966 /**
2967  * tipc_getsockopt - get socket option
2968  * @sock: socket structure
2969  * @lvl: option level
2970  * @opt: option identifier
2971  * @ov: receptacle for option value
2972  * @ol: receptacle for length of option value
2973  *
2974  * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2975  * (to ease compatibility).
2976  *
2977  * Returns 0 on success, errno otherwise
2978  */
2979 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2980 			   char __user *ov, int __user *ol)
2981 {
2982 	struct sock *sk = sock->sk;
2983 	struct tipc_sock *tsk = tipc_sk(sk);
2984 	struct tipc_name_seq seq;
2985 	int len, scope;
2986 	u32 value;
2987 	int res;
2988 
2989 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2990 		return put_user(0, ol);
2991 	if (lvl != SOL_TIPC)
2992 		return -ENOPROTOOPT;
2993 	res = get_user(len, ol);
2994 	if (res)
2995 		return res;
2996 
2997 	lock_sock(sk);
2998 
2999 	switch (opt) {
3000 	case TIPC_IMPORTANCE:
3001 		value = tsk_importance(tsk);
3002 		break;
3003 	case TIPC_SRC_DROPPABLE:
3004 		value = tsk_unreliable(tsk);
3005 		break;
3006 	case TIPC_DEST_DROPPABLE:
3007 		value = tsk_unreturnable(tsk);
3008 		break;
3009 	case TIPC_CONN_TIMEOUT:
3010 		value = tsk->conn_timeout;
3011 		/* no need to set "res", since already 0 at this point */
3012 		break;
3013 	case TIPC_NODE_RECVQ_DEPTH:
3014 		value = 0; /* was tipc_queue_size, now obsolete */
3015 		break;
3016 	case TIPC_SOCK_RECVQ_DEPTH:
3017 		value = skb_queue_len(&sk->sk_receive_queue);
3018 		break;
3019 	case TIPC_GROUP_JOIN:
3020 		seq.type = 0;
3021 		if (tsk->group)
3022 			tipc_group_self(tsk->group, &seq, &scope);
3023 		value = seq.type;
3024 		break;
3025 	default:
3026 		res = -EINVAL;
3027 	}
3028 
3029 	release_sock(sk);
3030 
3031 	if (res)
3032 		return res;	/* "get" failed */
3033 
3034 	if (len < sizeof(value))
3035 		return -EINVAL;
3036 
3037 	if (copy_to_user(ov, &value, sizeof(value)))
3038 		return -EFAULT;
3039 
3040 	return put_user(sizeof(value), ol);
3041 }
3042 
3043 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3044 {
3045 	struct net *net = sock_net(sock->sk);
3046 	struct tipc_sioc_nodeid_req nr = {0};
3047 	struct tipc_sioc_ln_req lnr;
3048 	void __user *argp = (void __user *)arg;
3049 
3050 	switch (cmd) {
3051 	case SIOCGETLINKNAME:
3052 		if (copy_from_user(&lnr, argp, sizeof(lnr)))
3053 			return -EFAULT;
3054 		if (!tipc_node_get_linkname(net,
3055 					    lnr.bearer_id & 0xffff, lnr.peer,
3056 					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
3057 			if (copy_to_user(argp, &lnr, sizeof(lnr)))
3058 				return -EFAULT;
3059 			return 0;
3060 		}
3061 		return -EADDRNOTAVAIL;
3062 	case SIOCGETNODEID:
3063 		if (copy_from_user(&nr, argp, sizeof(nr)))
3064 			return -EFAULT;
3065 		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3066 			return -EADDRNOTAVAIL;
3067 		if (copy_to_user(argp, &nr, sizeof(nr)))
3068 			return -EFAULT;
3069 		return 0;
3070 	default:
3071 		return -ENOIOCTLCMD;
3072 	}
3073 }
3074 
3075 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3076 {
3077 	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3078 	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3079 	u32 onode = tipc_own_addr(sock_net(sock1->sk));
3080 
3081 	tsk1->peer.family = AF_TIPC;
3082 	tsk1->peer.addrtype = TIPC_ADDR_ID;
3083 	tsk1->peer.scope = TIPC_NODE_SCOPE;
3084 	tsk1->peer.addr.id.ref = tsk2->portid;
3085 	tsk1->peer.addr.id.node = onode;
3086 	tsk2->peer.family = AF_TIPC;
3087 	tsk2->peer.addrtype = TIPC_ADDR_ID;
3088 	tsk2->peer.scope = TIPC_NODE_SCOPE;
3089 	tsk2->peer.addr.id.ref = tsk1->portid;
3090 	tsk2->peer.addr.id.node = onode;
3091 
3092 	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3093 	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3094 	return 0;
3095 }
3096 
3097 /* Protocol switches for the various types of TIPC sockets */
3098 
3099 static const struct proto_ops msg_ops = {
3100 	.owner		= THIS_MODULE,
3101 	.family		= AF_TIPC,
3102 	.release	= tipc_release,
3103 	.bind		= tipc_bind,
3104 	.connect	= tipc_connect,
3105 	.socketpair	= tipc_socketpair,
3106 	.accept		= sock_no_accept,
3107 	.getname	= tipc_getname,
3108 	.poll		= tipc_poll,
3109 	.ioctl		= tipc_ioctl,
3110 	.listen		= sock_no_listen,
3111 	.shutdown	= tipc_shutdown,
3112 	.setsockopt	= tipc_setsockopt,
3113 	.getsockopt	= tipc_getsockopt,
3114 	.sendmsg	= tipc_sendmsg,
3115 	.recvmsg	= tipc_recvmsg,
3116 	.mmap		= sock_no_mmap,
3117 	.sendpage	= sock_no_sendpage
3118 };
3119 
3120 static const struct proto_ops packet_ops = {
3121 	.owner		= THIS_MODULE,
3122 	.family		= AF_TIPC,
3123 	.release	= tipc_release,
3124 	.bind		= tipc_bind,
3125 	.connect	= tipc_connect,
3126 	.socketpair	= tipc_socketpair,
3127 	.accept		= tipc_accept,
3128 	.getname	= tipc_getname,
3129 	.poll		= tipc_poll,
3130 	.ioctl		= tipc_ioctl,
3131 	.listen		= tipc_listen,
3132 	.shutdown	= tipc_shutdown,
3133 	.setsockopt	= tipc_setsockopt,
3134 	.getsockopt	= tipc_getsockopt,
3135 	.sendmsg	= tipc_send_packet,
3136 	.recvmsg	= tipc_recvmsg,
3137 	.mmap		= sock_no_mmap,
3138 	.sendpage	= sock_no_sendpage
3139 };
3140 
3141 static const struct proto_ops stream_ops = {
3142 	.owner		= THIS_MODULE,
3143 	.family		= AF_TIPC,
3144 	.release	= tipc_release,
3145 	.bind		= tipc_bind,
3146 	.connect	= tipc_connect,
3147 	.socketpair	= tipc_socketpair,
3148 	.accept		= tipc_accept,
3149 	.getname	= tipc_getname,
3150 	.poll		= tipc_poll,
3151 	.ioctl		= tipc_ioctl,
3152 	.listen		= tipc_listen,
3153 	.shutdown	= tipc_shutdown,
3154 	.setsockopt	= tipc_setsockopt,
3155 	.getsockopt	= tipc_getsockopt,
3156 	.sendmsg	= tipc_sendstream,
3157 	.recvmsg	= tipc_recvstream,
3158 	.mmap		= sock_no_mmap,
3159 	.sendpage	= sock_no_sendpage
3160 };
3161 
3162 static const struct net_proto_family tipc_family_ops = {
3163 	.owner		= THIS_MODULE,
3164 	.family		= AF_TIPC,
3165 	.create		= tipc_sk_create
3166 };
3167 
3168 static struct proto tipc_proto = {
3169 	.name		= "TIPC",
3170 	.owner		= THIS_MODULE,
3171 	.obj_size	= sizeof(struct tipc_sock),
3172 	.sysctl_rmem	= sysctl_tipc_rmem
3173 };
3174 
3175 /**
3176  * tipc_socket_init - initialize TIPC socket interface
3177  *
3178  * Returns 0 on success, errno otherwise
3179  */
3180 int tipc_socket_init(void)
3181 {
3182 	int res;
3183 
3184 	res = proto_register(&tipc_proto, 1);
3185 	if (res) {
3186 		pr_err("Failed to register TIPC protocol type\n");
3187 		goto out;
3188 	}
3189 
3190 	res = sock_register(&tipc_family_ops);
3191 	if (res) {
3192 		pr_err("Failed to register TIPC socket type\n");
3193 		proto_unregister(&tipc_proto);
3194 		goto out;
3195 	}
3196  out:
3197 	return res;
3198 }
3199 
3200 /**
3201  * tipc_socket_stop - stop TIPC socket interface
3202  */
3203 void tipc_socket_stop(void)
3204 {
3205 	sock_unregister(tipc_family_ops.family);
3206 	proto_unregister(&tipc_proto);
3207 }
3208 
3209 /* Caller should hold socket lock for the passed tipc socket. */
3210 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3211 {
3212 	u32 peer_node;
3213 	u32 peer_port;
3214 	struct nlattr *nest;
3215 
3216 	peer_node = tsk_peer_node(tsk);
3217 	peer_port = tsk_peer_port(tsk);
3218 
3219 	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3220 
3221 	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3222 		goto msg_full;
3223 	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3224 		goto msg_full;
3225 
3226 	if (tsk->conn_type != 0) {
3227 		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3228 			goto msg_full;
3229 		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3230 			goto msg_full;
3231 		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3232 			goto msg_full;
3233 	}
3234 	nla_nest_end(skb, nest);
3235 
3236 	return 0;
3237 
3238 msg_full:
3239 	nla_nest_cancel(skb, nest);
3240 
3241 	return -EMSGSIZE;
3242 }
3243 
3244 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3245 			  *tsk)
3246 {
3247 	struct net *net = sock_net(skb->sk);
3248 	struct sock *sk = &tsk->sk;
3249 
3250 	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3251 	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3252 		return -EMSGSIZE;
3253 
3254 	if (tipc_sk_connected(sk)) {
3255 		if (__tipc_nl_add_sk_con(skb, tsk))
3256 			return -EMSGSIZE;
3257 	} else if (!list_empty(&tsk->publications)) {
3258 		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3259 			return -EMSGSIZE;
3260 	}
3261 	return 0;
3262 }
3263 
3264 /* Caller should hold socket lock for the passed tipc socket. */
3265 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3266 			    struct tipc_sock *tsk)
3267 {
3268 	struct nlattr *attrs;
3269 	void *hdr;
3270 
3271 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3272 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3273 	if (!hdr)
3274 		goto msg_cancel;
3275 
3276 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3277 	if (!attrs)
3278 		goto genlmsg_cancel;
3279 
3280 	if (__tipc_nl_add_sk_info(skb, tsk))
3281 		goto attr_msg_cancel;
3282 
3283 	nla_nest_end(skb, attrs);
3284 	genlmsg_end(skb, hdr);
3285 
3286 	return 0;
3287 
3288 attr_msg_cancel:
3289 	nla_nest_cancel(skb, attrs);
3290 genlmsg_cancel:
3291 	genlmsg_cancel(skb, hdr);
3292 msg_cancel:
3293 	return -EMSGSIZE;
3294 }
3295 
3296 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3297 		    int (*skb_handler)(struct sk_buff *skb,
3298 				       struct netlink_callback *cb,
3299 				       struct tipc_sock *tsk))
3300 {
3301 	struct rhashtable_iter *iter = (void *)cb->args[4];
3302 	struct tipc_sock *tsk;
3303 	int err;
3304 
3305 	rhashtable_walk_start(iter);
3306 	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3307 		if (IS_ERR(tsk)) {
3308 			err = PTR_ERR(tsk);
3309 			if (err == -EAGAIN) {
3310 				err = 0;
3311 				continue;
3312 			}
3313 			break;
3314 		}
3315 
3316 		sock_hold(&tsk->sk);
3317 		rhashtable_walk_stop(iter);
3318 		lock_sock(&tsk->sk);
3319 		err = skb_handler(skb, cb, tsk);
3320 		if (err) {
3321 			release_sock(&tsk->sk);
3322 			sock_put(&tsk->sk);
3323 			goto out;
3324 		}
3325 		release_sock(&tsk->sk);
3326 		rhashtable_walk_start(iter);
3327 		sock_put(&tsk->sk);
3328 	}
3329 	rhashtable_walk_stop(iter);
3330 out:
3331 	return skb->len;
3332 }
3333 EXPORT_SYMBOL(tipc_nl_sk_walk);
3334 
3335 int tipc_dump_start(struct netlink_callback *cb)
3336 {
3337 	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3338 }
3339 EXPORT_SYMBOL(tipc_dump_start);
3340 
3341 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3342 {
3343 	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3344 	struct rhashtable_iter *iter = (void *)cb->args[4];
3345 	struct tipc_net *tn = tipc_net(net);
3346 
3347 	if (!iter) {
3348 		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3349 		if (!iter)
3350 			return -ENOMEM;
3351 
3352 		cb->args[4] = (long)iter;
3353 	}
3354 
3355 	rhashtable_walk_enter(&tn->sk_rht, iter);
3356 	return 0;
3357 }
3358 
3359 int tipc_dump_done(struct netlink_callback *cb)
3360 {
3361 	struct rhashtable_iter *hti = (void *)cb->args[4];
3362 
3363 	rhashtable_walk_exit(hti);
3364 	kfree(hti);
3365 	return 0;
3366 }
3367 EXPORT_SYMBOL(tipc_dump_done);
3368 
3369 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3370 			   struct tipc_sock *tsk, u32 sk_filter_state,
3371 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3372 {
3373 	struct sock *sk = &tsk->sk;
3374 	struct nlattr *attrs;
3375 	struct nlattr *stat;
3376 
3377 	/*filter response w.r.t sk_state*/
3378 	if (!(sk_filter_state & (1 << sk->sk_state)))
3379 		return 0;
3380 
3381 	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3382 	if (!attrs)
3383 		goto msg_cancel;
3384 
3385 	if (__tipc_nl_add_sk_info(skb, tsk))
3386 		goto attr_msg_cancel;
3387 
3388 	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3389 	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3390 	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3391 	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3392 			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3393 					 sock_i_uid(sk))) ||
3394 	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3395 			      tipc_diag_gen_cookie(sk),
3396 			      TIPC_NLA_SOCK_PAD))
3397 		goto attr_msg_cancel;
3398 
3399 	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
3400 	if (!stat)
3401 		goto attr_msg_cancel;
3402 
3403 	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3404 			skb_queue_len(&sk->sk_receive_queue)) ||
3405 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3406 			skb_queue_len(&sk->sk_write_queue)) ||
3407 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3408 			atomic_read(&sk->sk_drops)))
3409 		goto stat_msg_cancel;
3410 
3411 	if (tsk->cong_link_cnt &&
3412 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3413 		goto stat_msg_cancel;
3414 
3415 	if (tsk_conn_cong(tsk) &&
3416 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3417 		goto stat_msg_cancel;
3418 
3419 	nla_nest_end(skb, stat);
3420 
3421 	if (tsk->group)
3422 		if (tipc_group_fill_sock_diag(tsk->group, skb))
3423 			goto stat_msg_cancel;
3424 
3425 	nla_nest_end(skb, attrs);
3426 
3427 	return 0;
3428 
3429 stat_msg_cancel:
3430 	nla_nest_cancel(skb, stat);
3431 attr_msg_cancel:
3432 	nla_nest_cancel(skb, attrs);
3433 msg_cancel:
3434 	return -EMSGSIZE;
3435 }
3436 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3437 
3438 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3439 {
3440 	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3441 }
3442 
3443 /* Caller should hold socket lock for the passed tipc socket. */
3444 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3445 				 struct netlink_callback *cb,
3446 				 struct publication *publ)
3447 {
3448 	void *hdr;
3449 	struct nlattr *attrs;
3450 
3451 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3452 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3453 	if (!hdr)
3454 		goto msg_cancel;
3455 
3456 	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3457 	if (!attrs)
3458 		goto genlmsg_cancel;
3459 
3460 	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3461 		goto attr_msg_cancel;
3462 	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3463 		goto attr_msg_cancel;
3464 	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3465 		goto attr_msg_cancel;
3466 	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3467 		goto attr_msg_cancel;
3468 
3469 	nla_nest_end(skb, attrs);
3470 	genlmsg_end(skb, hdr);
3471 
3472 	return 0;
3473 
3474 attr_msg_cancel:
3475 	nla_nest_cancel(skb, attrs);
3476 genlmsg_cancel:
3477 	genlmsg_cancel(skb, hdr);
3478 msg_cancel:
3479 	return -EMSGSIZE;
3480 }
3481 
3482 /* Caller should hold socket lock for the passed tipc socket. */
3483 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3484 				  struct netlink_callback *cb,
3485 				  struct tipc_sock *tsk, u32 *last_publ)
3486 {
3487 	int err;
3488 	struct publication *p;
3489 
3490 	if (*last_publ) {
3491 		list_for_each_entry(p, &tsk->publications, binding_sock) {
3492 			if (p->key == *last_publ)
3493 				break;
3494 		}
3495 		if (p->key != *last_publ) {
3496 			/* We never set seq or call nl_dump_check_consistent()
3497 			 * this means that setting prev_seq here will cause the
3498 			 * consistence check to fail in the netlink callback
3499 			 * handler. Resulting in the last NLMSG_DONE message
3500 			 * having the NLM_F_DUMP_INTR flag set.
3501 			 */
3502 			cb->prev_seq = 1;
3503 			*last_publ = 0;
3504 			return -EPIPE;
3505 		}
3506 	} else {
3507 		p = list_first_entry(&tsk->publications, struct publication,
3508 				     binding_sock);
3509 	}
3510 
3511 	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3512 		err = __tipc_nl_add_sk_publ(skb, cb, p);
3513 		if (err) {
3514 			*last_publ = p->key;
3515 			return err;
3516 		}
3517 	}
3518 	*last_publ = 0;
3519 
3520 	return 0;
3521 }
3522 
3523 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3524 {
3525 	int err;
3526 	u32 tsk_portid = cb->args[0];
3527 	u32 last_publ = cb->args[1];
3528 	u32 done = cb->args[2];
3529 	struct net *net = sock_net(skb->sk);
3530 	struct tipc_sock *tsk;
3531 
3532 	if (!tsk_portid) {
3533 		struct nlattr **attrs;
3534 		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3535 
3536 		err = tipc_nlmsg_parse(cb->nlh, &attrs);
3537 		if (err)
3538 			return err;
3539 
3540 		if (!attrs[TIPC_NLA_SOCK])
3541 			return -EINVAL;
3542 
3543 		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3544 				       attrs[TIPC_NLA_SOCK],
3545 				       tipc_nl_sock_policy, NULL);
3546 		if (err)
3547 			return err;
3548 
3549 		if (!sock[TIPC_NLA_SOCK_REF])
3550 			return -EINVAL;
3551 
3552 		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3553 	}
3554 
3555 	if (done)
3556 		return 0;
3557 
3558 	tsk = tipc_sk_lookup(net, tsk_portid);
3559 	if (!tsk)
3560 		return -EINVAL;
3561 
3562 	lock_sock(&tsk->sk);
3563 	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3564 	if (!err)
3565 		done = 1;
3566 	release_sock(&tsk->sk);
3567 	sock_put(&tsk->sk);
3568 
3569 	cb->args[0] = tsk_portid;
3570 	cb->args[1] = last_publ;
3571 	cb->args[2] = done;
3572 
3573 	return skb->len;
3574 }
3575