1 /* 2 * net/tipc/socket.c: TIPC socket API 3 * 4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @probe_unacked: probe has not yet been acknowledged by the peer
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
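
/* Worked example (illustrative only, not part of the build), assuming
 * FLOWCTL_BLK_SZ is 1024 bytes as defined in socket.h: a socket with a
 * 64 KiB receive buffer advertises tsk_adv_blocks(65536) = 65536 / 1024 / 4
 * = 16 blocks; the division by 4 leaves headroom for the truesize(len)/len
 * overhead of each buffer.  A 5000 byte message then consumes
 * tsk_inc() = 5000 / 1024 + 1 = 5 blocks of that window when the peer
 * supports TIPC_BLOCK_FLOWCTL, and exactly 1 unit (one message) when it
 * only supports the legacy message-based flow control.
 */
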
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
259 * Caller must hold socket lock 260 */ 261 static void tsk_rej_rx_queue(struct sock *sk) 262 { 263 struct sk_buff *skb; 264 265 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 267 } 268 269 static bool tipc_sk_connected(struct sock *sk) 270 { 271 return sk->sk_state == TIPC_ESTABLISHED; 272 } 273 274 /* tipc_sk_type_connectionless - check if the socket is datagram socket 275 * @sk: socket 276 * 277 * Returns true if connection less, false otherwise 278 */ 279 static bool tipc_sk_type_connectionless(struct sock *sk) 280 { 281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM; 282 } 283 284 /* tsk_peer_msg - verify if message was sent by connected port's peer 285 * 286 * Handles cases where the node's network address has changed from 287 * the default of <0.0.0> to its configured setting. 288 */ 289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 290 { 291 struct sock *sk = &tsk->sk; 292 u32 self = tipc_own_addr(sock_net(sk)); 293 u32 peer_port = tsk_peer_port(tsk); 294 u32 orig_node, peer_node; 295 296 if (unlikely(!tipc_sk_connected(sk))) 297 return false; 298 299 if (unlikely(msg_origport(msg) != peer_port)) 300 return false; 301 302 orig_node = msg_orignode(msg); 303 peer_node = tsk_peer_node(tsk); 304 305 if (likely(orig_node == peer_node)) 306 return true; 307 308 if (!orig_node && peer_node == self) 309 return true; 310 311 if (!peer_node && orig_node == self) 312 return true; 313 314 return false; 315 } 316 317 /* tipc_set_sk_state - set the sk_state of the socket 318 * @sk: socket 319 * 320 * Caller must hold socket lock 321 * 322 * Returns 0 on success, errno otherwise 323 */ 324 static int tipc_set_sk_state(struct sock *sk, int state) 325 { 326 int oldsk_state = sk->sk_state; 327 int res = -EINVAL; 328 329 switch (state) { 330 case TIPC_OPEN: 331 res = 0; 332 break; 333 case TIPC_LISTEN: 334 case TIPC_CONNECTING: 335 if (oldsk_state == TIPC_OPEN) 336 res = 0; 337 break; 338 case TIPC_ESTABLISHED: 339 if (oldsk_state == TIPC_CONNECTING || 340 oldsk_state == TIPC_OPEN) 341 res = 0; 342 break; 343 case TIPC_DISCONNECTING: 344 if (oldsk_state == TIPC_CONNECTING || 345 oldsk_state == TIPC_ESTABLISHED) 346 res = 0; 347 break; 348 } 349 350 if (!res) 351 sk->sk_state = state; 352 353 return res; 354 } 355 356 static int tipc_sk_sock_err(struct socket *sock, long *timeout) 357 { 358 struct sock *sk = sock->sk; 359 int err = sock_error(sk); 360 int typ = sock->type; 361 362 if (err) 363 return err; 364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) { 365 if (sk->sk_state == TIPC_DISCONNECTING) 366 return -EPIPE; 367 else if (!tipc_sk_connected(sk)) 368 return -ENOTCONN; 369 } 370 if (!*timeout) 371 return -EAGAIN; 372 if (signal_pending(current)) 373 return sock_intr_errno(*timeout); 374 375 return 0; 376 } 377 378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \ 379 ({ \ 380 struct sock *sk_; \ 381 int rc_; \ 382 \ 383 while ((rc_ = !(condition_))) { \ 384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 385 sk_ = (sock_)->sk; \ 386 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 387 if (rc_) \ 388 break; \ 389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 390 release_sock(sk_); \ 391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 392 sched_annotate_sleep(); \ 393 lock_sock(sk_); \ 394 remove_wait_queue(sk_sleep(sk_), &wait_); \ 395 } \ 396 rc_; \ 397 }) 398 399 /** 400 * tipc_sk_create - create a TIPC socket 401 * @net: network namespace (must be default network) 
402 * @sock: pre-allocated socket structure 403 * @protocol: protocol indicator (must be 0) 404 * @kern: caused by kernel or by userspace? 405 * 406 * This routine creates additional data structures used by the TIPC socket, 407 * initializes them, and links them together. 408 * 409 * Returns 0 on success, errno otherwise 410 */ 411 static int tipc_sk_create(struct net *net, struct socket *sock, 412 int protocol, int kern) 413 { 414 const struct proto_ops *ops; 415 struct sock *sk; 416 struct tipc_sock *tsk; 417 struct tipc_msg *msg; 418 419 /* Validate arguments */ 420 if (unlikely(protocol != 0)) 421 return -EPROTONOSUPPORT; 422 423 switch (sock->type) { 424 case SOCK_STREAM: 425 ops = &stream_ops; 426 break; 427 case SOCK_SEQPACKET: 428 ops = &packet_ops; 429 break; 430 case SOCK_DGRAM: 431 case SOCK_RDM: 432 ops = &msg_ops; 433 break; 434 default: 435 return -EPROTOTYPE; 436 } 437 438 /* Allocate socket's protocol area */ 439 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern); 440 if (sk == NULL) 441 return -ENOMEM; 442 443 tsk = tipc_sk(sk); 444 tsk->max_pkt = MAX_PKT_DEFAULT; 445 INIT_LIST_HEAD(&tsk->publications); 446 INIT_LIST_HEAD(&tsk->cong_links); 447 msg = &tsk->phdr; 448 449 /* Finish initializing socket data structures */ 450 sock->ops = ops; 451 sock_init_data(sock, sk); 452 tipc_set_sk_state(sk, TIPC_OPEN); 453 if (tipc_sk_insert(tsk)) { 454 pr_warn("Socket create failed; port number exhausted\n"); 455 return -EINVAL; 456 } 457 458 /* Ensure tsk is visible before we read own_addr. */ 459 smp_mb(); 460 461 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE, 462 TIPC_NAMED_MSG, NAMED_H_SIZE, 0); 463 464 msg_set_origport(msg, tsk->portid); 465 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0); 466 sk->sk_shutdown = 0; 467 sk->sk_backlog_rcv = tipc_sk_backlog_rcv; 468 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 469 sk->sk_data_ready = tipc_data_ready; 470 sk->sk_write_space = tipc_write_space; 471 sk->sk_destruct = tipc_sock_destruct; 472 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 473 tsk->group_is_open = true; 474 atomic_set(&tsk->dupl_rcvcnt, 0); 475 476 /* Start out with safe limits until we receive an advertised window */ 477 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); 478 tsk->rcv_win = tsk->snd_win; 479 480 if (tipc_sk_type_connectionless(sk)) { 481 tsk_set_unreturnable(tsk, true); 482 if (sock->type == SOCK_DGRAM) 483 tsk_set_unreliable(tsk, true); 484 } 485 486 return 0; 487 } 488 489 static void tipc_sk_callback(struct rcu_head *head) 490 { 491 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); 492 493 sock_put(&tsk->sk); 494 } 495 496 /* Caller should hold socket lock for the socket. */ 497 static void __tipc_shutdown(struct socket *sock, int error) 498 { 499 struct sock *sk = sock->sk; 500 struct tipc_sock *tsk = tipc_sk(sk); 501 struct net *net = sock_net(sk); 502 long timeout = CONN_TIMEOUT_DEFAULT; 503 u32 dnode = tsk_peer_node(tsk); 504 struct sk_buff *skb; 505 506 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */ 507 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && 508 !tsk_conn_cong(tsk))); 509 510 /* Reject all unreceived messages, except on an active connection 511 * (which disconnects locally & sends a 'FIN+' to peer). 
 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine takes the socket lock, since it may add or remove
 * entries on the socket's publication list.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
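
/* Illustrative userspace sketch (not part of the kernel build) of the
 * bind()/unbind semantics described above; the service type 18888 and the
 * instance range are arbitrary application choices:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	// publish the name
 *	a.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	// withdraw it again
 */
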
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns the size of the returned address on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
710 */ 711 static __poll_t tipc_poll(struct file *file, struct socket *sock, 712 poll_table *wait) 713 { 714 struct sock *sk = sock->sk; 715 struct tipc_sock *tsk = tipc_sk(sk); 716 __poll_t revents = 0; 717 718 sock_poll_wait(file, wait); 719 720 if (sk->sk_shutdown & RCV_SHUTDOWN) 721 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 722 if (sk->sk_shutdown == SHUTDOWN_MASK) 723 revents |= EPOLLHUP; 724 725 switch (sk->sk_state) { 726 case TIPC_ESTABLISHED: 727 case TIPC_CONNECTING: 728 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 729 revents |= EPOLLOUT; 730 /* fall thru' */ 731 case TIPC_LISTEN: 732 if (!skb_queue_empty(&sk->sk_receive_queue)) 733 revents |= EPOLLIN | EPOLLRDNORM; 734 break; 735 case TIPC_OPEN: 736 if (tsk->group_is_open && !tsk->cong_link_cnt) 737 revents |= EPOLLOUT; 738 if (!tipc_sk_type_connectionless(sk)) 739 break; 740 if (skb_queue_empty(&sk->sk_receive_queue)) 741 break; 742 revents |= EPOLLIN | EPOLLRDNORM; 743 break; 744 case TIPC_DISCONNECTING: 745 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; 746 break; 747 } 748 return revents; 749 } 750 751 /** 752 * tipc_sendmcast - send multicast message 753 * @sock: socket structure 754 * @seq: destination address 755 * @msg: message to send 756 * @dlen: length of data to send 757 * @timeout: timeout to wait for wakeup 758 * 759 * Called from function tipc_sendmsg(), which has done all sanity checks 760 * Returns the number of bytes sent on success, or errno 761 */ 762 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 763 struct msghdr *msg, size_t dlen, long timeout) 764 { 765 struct sock *sk = sock->sk; 766 struct tipc_sock *tsk = tipc_sk(sk); 767 struct tipc_msg *hdr = &tsk->phdr; 768 struct net *net = sock_net(sk); 769 int mtu = tipc_bcast_get_mtu(net); 770 struct tipc_mc_method *method = &tsk->mc_method; 771 struct sk_buff_head pkts; 772 struct tipc_nlist dsts; 773 int rc; 774 775 if (tsk->group) 776 return -EACCES; 777 778 /* Block or return if any destination link is congested */ 779 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); 780 if (unlikely(rc)) 781 return rc; 782 783 /* Lookup destination nodes */ 784 tipc_nlist_init(&dsts, tipc_own_addr(net)); 785 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower, 786 seq->upper, &dsts); 787 if (!dsts.local && !dsts.remote) 788 return -EHOSTUNREACH; 789 790 /* Build message header */ 791 msg_set_type(hdr, TIPC_MCAST_MSG); 792 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 793 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 794 msg_set_destport(hdr, 0); 795 msg_set_destnode(hdr, 0); 796 msg_set_nametype(hdr, seq->type); 797 msg_set_namelower(hdr, seq->lower); 798 msg_set_nameupper(hdr, seq->upper); 799 800 /* Build message as chain of buffers */ 801 skb_queue_head_init(&pkts); 802 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 803 804 /* Send message if build was successful */ 805 if (unlikely(rc == dlen)) 806 rc = tipc_mcast_xmit(net, &pkts, method, &dsts, 807 &tsk->cong_link_cnt); 808 809 tipc_nlist_purge(&dsts); 810 811 return rc ? 
rc : dlen; 812 } 813 814 /** 815 * tipc_send_group_msg - send a message to a member in the group 816 * @net: network namespace 817 * @m: message to send 818 * @mb: group member 819 * @dnode: destination node 820 * @dport: destination port 821 * @dlen: total length of message data 822 */ 823 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk, 824 struct msghdr *m, struct tipc_member *mb, 825 u32 dnode, u32 dport, int dlen) 826 { 827 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group); 828 struct tipc_mc_method *method = &tsk->mc_method; 829 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 830 struct tipc_msg *hdr = &tsk->phdr; 831 struct sk_buff_head pkts; 832 int mtu, rc; 833 834 /* Complete message header */ 835 msg_set_type(hdr, TIPC_GRP_UCAST_MSG); 836 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 837 msg_set_destport(hdr, dport); 838 msg_set_destnode(hdr, dnode); 839 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 840 841 /* Build message as chain of buffers */ 842 skb_queue_head_init(&pkts); 843 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 844 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 845 if (unlikely(rc != dlen)) 846 return rc; 847 848 /* Send message */ 849 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 850 if (unlikely(rc == -ELINKCONG)) { 851 tipc_dest_push(&tsk->cong_links, dnode, 0); 852 tsk->cong_link_cnt++; 853 } 854 855 /* Update send window */ 856 tipc_group_update_member(mb, blks); 857 858 /* A broadcast sent within next EXPIRE period must follow same path */ 859 method->rcast = true; 860 method->mandatory = true; 861 return dlen; 862 } 863 864 /** 865 * tipc_send_group_unicast - send message to a member in the group 866 * @sock: socket structure 867 * @m: message to send 868 * @dlen: total length of message data 869 * @timeout: timeout to wait for wakeup 870 * 871 * Called from function tipc_sendmsg(), which has done all sanity checks 872 * Returns the number of bytes sent on success, or errno 873 */ 874 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, 875 int dlen, long timeout) 876 { 877 struct sock *sk = sock->sk; 878 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 879 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 880 struct tipc_sock *tsk = tipc_sk(sk); 881 struct tipc_group *grp = tsk->group; 882 struct net *net = sock_net(sk); 883 struct tipc_member *mb = NULL; 884 u32 node, port; 885 int rc; 886 887 node = dest->addr.id.node; 888 port = dest->addr.id.ref; 889 if (!port && !node) 890 return -EHOSTUNREACH; 891 892 /* Block or return if destination link or member is congested */ 893 rc = tipc_wait_for_cond(sock, &timeout, 894 !tipc_dest_find(&tsk->cong_links, node, 0) && 895 !tipc_group_cong(grp, node, port, blks, &mb)); 896 if (unlikely(rc)) 897 return rc; 898 899 if (unlikely(!mb)) 900 return -EHOSTUNREACH; 901 902 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen); 903 904 return rc ? 
rc : dlen; 905 } 906 907 /** 908 * tipc_send_group_anycast - send message to any member with given identity 909 * @sock: socket structure 910 * @m: message to send 911 * @dlen: total length of message data 912 * @timeout: timeout to wait for wakeup 913 * 914 * Called from function tipc_sendmsg(), which has done all sanity checks 915 * Returns the number of bytes sent on success, or errno 916 */ 917 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, 918 int dlen, long timeout) 919 { 920 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 921 struct sock *sk = sock->sk; 922 struct tipc_sock *tsk = tipc_sk(sk); 923 struct list_head *cong_links = &tsk->cong_links; 924 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 925 struct tipc_group *grp = tsk->group; 926 struct tipc_msg *hdr = &tsk->phdr; 927 struct tipc_member *first = NULL; 928 struct tipc_member *mbr = NULL; 929 struct net *net = sock_net(sk); 930 u32 node, port, exclude; 931 struct list_head dsts; 932 u32 type, inst, scope; 933 int lookups = 0; 934 int dstcnt, rc; 935 bool cong; 936 937 INIT_LIST_HEAD(&dsts); 938 939 type = msg_nametype(hdr); 940 inst = dest->addr.name.name.instance; 941 scope = msg_lookup_scope(hdr); 942 exclude = tipc_group_exclude(grp); 943 944 while (++lookups < 4) { 945 first = NULL; 946 947 /* Look for a non-congested destination member, if any */ 948 while (1) { 949 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 950 &dstcnt, exclude, false)) 951 return -EHOSTUNREACH; 952 tipc_dest_pop(&dsts, &node, &port); 953 cong = tipc_group_cong(grp, node, port, blks, &mbr); 954 if (!cong) 955 break; 956 if (mbr == first) 957 break; 958 if (!first) 959 first = mbr; 960 } 961 962 /* Start over if destination was not in member list */ 963 if (unlikely(!mbr)) 964 continue; 965 966 if (likely(!cong && !tipc_dest_find(cong_links, node, 0))) 967 break; 968 969 /* Block or return if destination link or member is congested */ 970 rc = tipc_wait_for_cond(sock, &timeout, 971 !tipc_dest_find(cong_links, node, 0) && 972 !tipc_group_cong(grp, node, port, 973 blks, &mbr)); 974 if (unlikely(rc)) 975 return rc; 976 977 /* Send, unless destination disappeared while waiting */ 978 if (likely(mbr)) 979 break; 980 } 981 982 if (unlikely(lookups >= 4)) 983 return -EHOSTUNREACH; 984 985 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen); 986 987 return rc ? 
rc : dlen; 988 } 989 990 /** 991 * tipc_send_group_bcast - send message to all members in communication group 992 * @sk: socket structure 993 * @m: message to send 994 * @dlen: total length of message data 995 * @timeout: timeout to wait for wakeup 996 * 997 * Called from function tipc_sendmsg(), which has done all sanity checks 998 * Returns the number of bytes sent on success, or errno 999 */ 1000 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, 1001 int dlen, long timeout) 1002 { 1003 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1004 struct sock *sk = sock->sk; 1005 struct net *net = sock_net(sk); 1006 struct tipc_sock *tsk = tipc_sk(sk); 1007 struct tipc_group *grp = tsk->group; 1008 struct tipc_nlist *dsts = tipc_group_dests(grp); 1009 struct tipc_mc_method *method = &tsk->mc_method; 1010 bool ack = method->mandatory && method->rcast; 1011 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1012 struct tipc_msg *hdr = &tsk->phdr; 1013 int mtu = tipc_bcast_get_mtu(net); 1014 struct sk_buff_head pkts; 1015 int rc = -EHOSTUNREACH; 1016 1017 if (!dsts->local && !dsts->remote) 1018 return -EHOSTUNREACH; 1019 1020 /* Block or return if any destination link or member is congested */ 1021 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1022 !tipc_group_bc_cong(grp, blks)); 1023 if (unlikely(rc)) 1024 return rc; 1025 1026 /* Complete message header */ 1027 if (dest) { 1028 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1029 msg_set_nameinst(hdr, dest->addr.name.name.instance); 1030 } else { 1031 msg_set_type(hdr, TIPC_GRP_BCAST_MSG); 1032 msg_set_nameinst(hdr, 0); 1033 } 1034 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1035 msg_set_destport(hdr, 0); 1036 msg_set_destnode(hdr, 0); 1037 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1038 1039 /* Avoid getting stuck with repeated forced replicasts */ 1040 msg_set_grp_bc_ack_req(hdr, ack); 1041 1042 /* Build message as chain of buffers */ 1043 skb_queue_head_init(&pkts); 1044 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1045 if (unlikely(rc != dlen)) 1046 return rc; 1047 1048 /* Send message */ 1049 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt); 1050 if (unlikely(rc)) 1051 return rc; 1052 1053 /* Update broadcast sequence number and send windows */ 1054 tipc_group_update_bc_members(tsk->group, blks, ack); 1055 1056 /* Broadcast link is now free to choose method for next broadcast */ 1057 method->mandatory = false; 1058 method->expires = jiffies; 1059 1060 return dlen; 1061 } 1062 1063 /** 1064 * tipc_send_group_mcast - send message to all members with given identity 1065 * @sock: socket structure 1066 * @m: message to send 1067 * @dlen: total length of message data 1068 * @timeout: timeout to wait for wakeup 1069 * 1070 * Called from function tipc_sendmsg(), which has done all sanity checks 1071 * Returns the number of bytes sent on success, or errno 1072 */ 1073 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, 1074 int dlen, long timeout) 1075 { 1076 struct sock *sk = sock->sk; 1077 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1078 struct tipc_sock *tsk = tipc_sk(sk); 1079 struct tipc_group *grp = tsk->group; 1080 struct tipc_msg *hdr = &tsk->phdr; 1081 struct net *net = sock_net(sk); 1082 u32 type, inst, scope, exclude; 1083 struct list_head dsts; 1084 u32 dstcnt; 1085 1086 INIT_LIST_HEAD(&dsts); 1087 1088 type = msg_nametype(hdr); 1089 inst = dest->addr.name.name.instance; 1090 scope = msg_lookup_scope(hdr); 1091 exclude = 
tipc_group_exclude(grp); 1092 1093 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 1094 &dstcnt, exclude, true)) 1095 return -EHOSTUNREACH; 1096 1097 if (dstcnt == 1) { 1098 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref); 1099 return tipc_send_group_unicast(sock, m, dlen, timeout); 1100 } 1101 1102 tipc_dest_list_purge(&dsts); 1103 return tipc_send_group_bcast(sock, m, dlen, timeout); 1104 } 1105 1106 /** 1107 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets 1108 * @arrvq: queue with arriving messages, to be cloned after destination lookup 1109 * @inputq: queue with cloned messages, delivered to socket after dest lookup 1110 * 1111 * Multi-threaded: parallel calls with reference to same queues may occur 1112 */ 1113 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, 1114 struct sk_buff_head *inputq) 1115 { 1116 u32 self = tipc_own_addr(net); 1117 u32 type, lower, upper, scope; 1118 struct sk_buff *skb, *_skb; 1119 u32 portid, onode; 1120 struct sk_buff_head tmpq; 1121 struct list_head dports; 1122 struct tipc_msg *hdr; 1123 int user, mtyp, hlen; 1124 bool exact; 1125 1126 __skb_queue_head_init(&tmpq); 1127 INIT_LIST_HEAD(&dports); 1128 1129 skb = tipc_skb_peek(arrvq, &inputq->lock); 1130 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { 1131 hdr = buf_msg(skb); 1132 user = msg_user(hdr); 1133 mtyp = msg_type(hdr); 1134 hlen = skb_headroom(skb) + msg_hdr_sz(hdr); 1135 onode = msg_orignode(hdr); 1136 type = msg_nametype(hdr); 1137 1138 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { 1139 spin_lock_bh(&inputq->lock); 1140 if (skb_peek(arrvq) == skb) { 1141 __skb_dequeue(arrvq); 1142 __skb_queue_tail(inputq, skb); 1143 } 1144 kfree_skb(skb); 1145 spin_unlock_bh(&inputq->lock); 1146 continue; 1147 } 1148 1149 /* Group messages require exact scope match */ 1150 if (msg_in_group(hdr)) { 1151 lower = 0; 1152 upper = ~0; 1153 scope = msg_lookup_scope(hdr); 1154 exact = true; 1155 } else { 1156 /* TIPC_NODE_SCOPE means "any scope" in this context */ 1157 if (onode == self) 1158 scope = TIPC_NODE_SCOPE; 1159 else 1160 scope = TIPC_CLUSTER_SCOPE; 1161 exact = false; 1162 lower = msg_namelower(hdr); 1163 upper = msg_nameupper(hdr); 1164 } 1165 1166 /* Create destination port list: */ 1167 tipc_nametbl_mc_lookup(net, type, lower, upper, 1168 scope, exact, &dports); 1169 1170 /* Clone message per destination */ 1171 while (tipc_dest_pop(&dports, NULL, &portid)) { 1172 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC); 1173 if (_skb) { 1174 msg_set_destport(buf_msg(_skb), portid); 1175 __skb_queue_tail(&tmpq, _skb); 1176 continue; 1177 } 1178 pr_warn("Failed to clone mcast rcv buffer\n"); 1179 } 1180 /* Append to inputq if not already done by other thread */ 1181 spin_lock_bh(&inputq->lock); 1182 if (skb_peek(arrvq) == skb) { 1183 skb_queue_splice_tail_init(&tmpq, inputq); 1184 kfree_skb(__skb_dequeue(arrvq)); 1185 } 1186 spin_unlock_bh(&inputq->lock); 1187 __skb_queue_purge(&tmpq); 1188 kfree_skb(skb); 1189 } 1190 tipc_sk_rcv(net, inputq); 1191 } 1192 1193 /** 1194 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message 1195 * @tsk: receiving socket 1196 * @skb: pointer to message buffer. 
1197 */ 1198 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, 1199 struct sk_buff_head *xmitq) 1200 { 1201 struct tipc_msg *hdr = buf_msg(skb); 1202 u32 onode = tsk_own_node(tsk); 1203 struct sock *sk = &tsk->sk; 1204 int mtyp = msg_type(hdr); 1205 bool conn_cong; 1206 1207 /* Ignore if connection cannot be validated: */ 1208 if (!tsk_peer_msg(tsk, hdr)) 1209 goto exit; 1210 1211 if (unlikely(msg_errcode(hdr))) { 1212 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1213 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), 1214 tsk_peer_port(tsk)); 1215 sk->sk_state_change(sk); 1216 goto exit; 1217 } 1218 1219 tsk->probe_unacked = false; 1220 1221 if (mtyp == CONN_PROBE) { 1222 msg_set_type(hdr, CONN_PROBE_REPLY); 1223 if (tipc_msg_reverse(onode, &skb, TIPC_OK)) 1224 __skb_queue_tail(xmitq, skb); 1225 return; 1226 } else if (mtyp == CONN_ACK) { 1227 conn_cong = tsk_conn_cong(tsk); 1228 tsk->snt_unacked -= msg_conn_ack(hdr); 1229 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1230 tsk->snd_win = msg_adv_win(hdr); 1231 if (conn_cong) 1232 sk->sk_write_space(sk); 1233 } else if (mtyp != CONN_PROBE_REPLY) { 1234 pr_warn("Received unknown CONN_PROTO msg\n"); 1235 } 1236 exit: 1237 kfree_skb(skb); 1238 } 1239 1240 /** 1241 * tipc_sendmsg - send message in connectionless manner 1242 * @sock: socket structure 1243 * @m: message to send 1244 * @dsz: amount of user data to be sent 1245 * 1246 * Message must have an destination specified explicitly. 1247 * Used for SOCK_RDM and SOCK_DGRAM messages, 1248 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 1249 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 1250 * 1251 * Returns the number of bytes sent on success, or errno otherwise 1252 */ 1253 static int tipc_sendmsg(struct socket *sock, 1254 struct msghdr *m, size_t dsz) 1255 { 1256 struct sock *sk = sock->sk; 1257 int ret; 1258 1259 lock_sock(sk); 1260 ret = __tipc_sendmsg(sock, m, dsz); 1261 release_sock(sk); 1262 1263 return ret; 1264 } 1265 1266 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) 1267 { 1268 struct sock *sk = sock->sk; 1269 struct net *net = sock_net(sk); 1270 struct tipc_sock *tsk = tipc_sk(sk); 1271 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1272 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1273 struct list_head *clinks = &tsk->cong_links; 1274 bool syn = !tipc_sk_type_connectionless(sk); 1275 struct tipc_group *grp = tsk->group; 1276 struct tipc_msg *hdr = &tsk->phdr; 1277 struct tipc_name_seq *seq; 1278 struct sk_buff_head pkts; 1279 u32 dport, dnode = 0; 1280 u32 type, inst; 1281 int mtu, rc; 1282 1283 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) 1284 return -EMSGSIZE; 1285 1286 if (likely(dest)) { 1287 if (unlikely(m->msg_namelen < sizeof(*dest))) 1288 return -EINVAL; 1289 if (unlikely(dest->family != AF_TIPC)) 1290 return -EINVAL; 1291 } 1292 1293 if (grp) { 1294 if (!dest) 1295 return tipc_send_group_bcast(sock, m, dlen, timeout); 1296 if (dest->addrtype == TIPC_ADDR_NAME) 1297 return tipc_send_group_anycast(sock, m, dlen, timeout); 1298 if (dest->addrtype == TIPC_ADDR_ID) 1299 return tipc_send_group_unicast(sock, m, dlen, timeout); 1300 if (dest->addrtype == TIPC_ADDR_MCAST) 1301 return tipc_send_group_mcast(sock, m, dlen, timeout); 1302 return -EINVAL; 1303 } 1304 1305 if (unlikely(!dest)) { 1306 dest = &tsk->peer; 1307 if (!syn || dest->family != AF_TIPC) 1308 return -EDESTADDRREQ; 1309 } 1310 1311 if (unlikely(syn)) { 1312 if (sk->sk_state == TIPC_LISTEN) 1313 
return -EPIPE; 1314 if (sk->sk_state != TIPC_OPEN) 1315 return -EISCONN; 1316 if (tsk->published) 1317 return -EOPNOTSUPP; 1318 if (dest->addrtype == TIPC_ADDR_NAME) { 1319 tsk->conn_type = dest->addr.name.name.type; 1320 tsk->conn_instance = dest->addr.name.name.instance; 1321 } 1322 } 1323 1324 seq = &dest->addr.nameseq; 1325 if (dest->addrtype == TIPC_ADDR_MCAST) 1326 return tipc_sendmcast(sock, seq, m, dlen, timeout); 1327 1328 if (dest->addrtype == TIPC_ADDR_NAME) { 1329 type = dest->addr.name.name.type; 1330 inst = dest->addr.name.name.instance; 1331 dnode = dest->addr.name.domain; 1332 msg_set_type(hdr, TIPC_NAMED_MSG); 1333 msg_set_hdr_sz(hdr, NAMED_H_SIZE); 1334 msg_set_nametype(hdr, type); 1335 msg_set_nameinst(hdr, inst); 1336 msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); 1337 dport = tipc_nametbl_translate(net, type, inst, &dnode); 1338 msg_set_destnode(hdr, dnode); 1339 msg_set_destport(hdr, dport); 1340 if (unlikely(!dport && !dnode)) 1341 return -EHOSTUNREACH; 1342 } else if (dest->addrtype == TIPC_ADDR_ID) { 1343 dnode = dest->addr.id.node; 1344 msg_set_type(hdr, TIPC_DIRECT_MSG); 1345 msg_set_lookup_scope(hdr, 0); 1346 msg_set_destnode(hdr, dnode); 1347 msg_set_destport(hdr, dest->addr.id.ref); 1348 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1349 } else { 1350 return -EINVAL; 1351 } 1352 1353 /* Block or return if destination link is congested */ 1354 rc = tipc_wait_for_cond(sock, &timeout, 1355 !tipc_dest_find(clinks, dnode, 0)); 1356 if (unlikely(rc)) 1357 return rc; 1358 1359 skb_queue_head_init(&pkts); 1360 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 1361 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1362 if (unlikely(rc != dlen)) 1363 return rc; 1364 1365 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1366 if (unlikely(rc == -ELINKCONG)) { 1367 tipc_dest_push(clinks, dnode, 0); 1368 tsk->cong_link_cnt++; 1369 rc = 0; 1370 } 1371 1372 if (unlikely(syn && !rc)) 1373 tipc_set_sk_state(sk, TIPC_CONNECTING); 1374 1375 return rc ? rc : dlen; 1376 } 1377 1378 /** 1379 * tipc_sendstream - send stream-oriented data 1380 * @sock: socket structure 1381 * @m: data to send 1382 * @dsz: total length of data to be transmitted 1383 * 1384 * Used for SOCK_STREAM data. 
1385 * 1386 * Returns the number of bytes sent on success (or partial success), 1387 * or errno if no data sent 1388 */ 1389 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz) 1390 { 1391 struct sock *sk = sock->sk; 1392 int ret; 1393 1394 lock_sock(sk); 1395 ret = __tipc_sendstream(sock, m, dsz); 1396 release_sock(sk); 1397 1398 return ret; 1399 } 1400 1401 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) 1402 { 1403 struct sock *sk = sock->sk; 1404 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1405 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1406 struct tipc_sock *tsk = tipc_sk(sk); 1407 struct tipc_msg *hdr = &tsk->phdr; 1408 struct net *net = sock_net(sk); 1409 struct sk_buff_head pkts; 1410 u32 dnode = tsk_peer_node(tsk); 1411 int send, sent = 0; 1412 int rc = 0; 1413 1414 skb_queue_head_init(&pkts); 1415 1416 if (unlikely(dlen > INT_MAX)) 1417 return -EMSGSIZE; 1418 1419 /* Handle implicit connection setup */ 1420 if (unlikely(dest)) { 1421 rc = __tipc_sendmsg(sock, m, dlen); 1422 if (dlen && (dlen == rc)) 1423 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); 1424 return rc; 1425 } 1426 1427 do { 1428 rc = tipc_wait_for_cond(sock, &timeout, 1429 (!tsk->cong_link_cnt && 1430 !tsk_conn_cong(tsk) && 1431 tipc_sk_connected(sk))); 1432 if (unlikely(rc)) 1433 break; 1434 1435 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); 1436 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts); 1437 if (unlikely(rc != send)) 1438 break; 1439 1440 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1441 if (unlikely(rc == -ELINKCONG)) { 1442 tsk->cong_link_cnt = 1; 1443 rc = 0; 1444 } 1445 if (likely(!rc)) { 1446 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE); 1447 sent += send; 1448 } 1449 } while (sent < dlen && !rc); 1450 1451 return sent ? sent : rc; 1452 } 1453 1454 /** 1455 * tipc_send_packet - send a connection-oriented message 1456 * @sock: socket structure 1457 * @m: message to send 1458 * @dsz: length of data to be transmitted 1459 * 1460 * Used for SOCK_SEQPACKET messages. 
1461 * 1462 * Returns the number of bytes sent on success, or errno otherwise 1463 */ 1464 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) 1465 { 1466 if (dsz > TIPC_MAX_USER_MSG_SIZE) 1467 return -EMSGSIZE; 1468 1469 return tipc_sendstream(sock, m, dsz); 1470 } 1471 1472 /* tipc_sk_finish_conn - complete the setup of a connection 1473 */ 1474 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1475 u32 peer_node) 1476 { 1477 struct sock *sk = &tsk->sk; 1478 struct net *net = sock_net(sk); 1479 struct tipc_msg *msg = &tsk->phdr; 1480 1481 msg_set_destnode(msg, peer_node); 1482 msg_set_destport(msg, peer_port); 1483 msg_set_type(msg, TIPC_CONN_MSG); 1484 msg_set_lookup_scope(msg, 0); 1485 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1486 1487 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 1488 tipc_set_sk_state(sk, TIPC_ESTABLISHED); 1489 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1490 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1491 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); 1492 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1493 return; 1494 1495 /* Fall back to message based flow control */ 1496 tsk->rcv_win = FLOWCTL_MSG_WIN; 1497 tsk->snd_win = FLOWCTL_MSG_WIN; 1498 } 1499 1500 /** 1501 * tipc_sk_set_orig_addr - capture sender's address for received message 1502 * @m: descriptor for message info 1503 * @hdr: received message header 1504 * 1505 * Note: Address is not captured if not requested by receiver. 1506 */ 1507 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) 1508 { 1509 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name); 1510 struct tipc_msg *hdr = buf_msg(skb); 1511 1512 if (!srcaddr) 1513 return; 1514 1515 srcaddr->sock.family = AF_TIPC; 1516 srcaddr->sock.addrtype = TIPC_ADDR_ID; 1517 srcaddr->sock.scope = 0; 1518 srcaddr->sock.addr.id.ref = msg_origport(hdr); 1519 srcaddr->sock.addr.id.node = msg_orignode(hdr); 1520 srcaddr->sock.addr.name.domain = 0; 1521 m->msg_namelen = sizeof(struct sockaddr_tipc); 1522 1523 if (!msg_in_group(hdr)) 1524 return; 1525 1526 /* Group message users may also want to know sending member's id */ 1527 srcaddr->member.family = AF_TIPC; 1528 srcaddr->member.addrtype = TIPC_ADDR_NAME; 1529 srcaddr->member.scope = 0; 1530 srcaddr->member.addr.name.name.type = msg_nametype(hdr); 1531 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; 1532 srcaddr->member.addr.name.domain = 0; 1533 m->msg_namelen = sizeof(*srcaddr); 1534 } 1535 1536 /** 1537 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1538 * @m: descriptor for message info 1539 * @msg: received message header 1540 * @tsk: TIPC port associated with message 1541 * 1542 * Note: Ancillary data is not captured if not requested by receiver. 1543 * 1544 * Returns 0 if successful, otherwise errno 1545 */ 1546 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1547 struct tipc_sock *tsk) 1548 { 1549 u32 anc_data[3]; 1550 u32 err; 1551 u32 dest_type; 1552 int has_name; 1553 int res; 1554 1555 if (likely(m->msg_controllen == 0)) 1556 return 0; 1557 1558 /* Optionally capture errored message object(s) */ 1559 err = msg ? 
	msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}
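
/* Illustrative userspace sketch (not part of the kernel build) of consuming
 * the ancillary data built by tipc_sk_anc_data_recv() above; buffer sizes
 * are arbitrary and error handling is omitted:
 *
 *	unsigned char ctrl[128], buf[1024];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = ctrl,
 *			    .msg_controllen = sizeof(ctrl) };
 *	struct cmsghdr *cm;
 *
 *	if (recvmsg(sd, &m, 0) >= 0) {
 *		for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *			if (cm->cmsg_level == SOL_TIPC &&
 *			    cm->cmsg_type == TIPC_DESTNAME) {
 *				__u32 *dname = (__u32 *)CMSG_DATA(cm);
 *				// dname[0], dname[1], dname[2]:
 *				// destination name type, lower, upper
 *			}
 *		}
 *	}
 */
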
1683 * 1684 * Returns size of returned message data, errno otherwise 1685 */ 1686 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, 1687 size_t buflen, int flags) 1688 { 1689 struct sock *sk = sock->sk; 1690 bool connected = !tipc_sk_type_connectionless(sk); 1691 struct tipc_sock *tsk = tipc_sk(sk); 1692 int rc, err, hlen, dlen, copy; 1693 struct sk_buff_head xmitq; 1694 struct tipc_msg *hdr; 1695 struct sk_buff *skb; 1696 bool grp_evt; 1697 long timeout; 1698 1699 /* Catch invalid receive requests */ 1700 if (unlikely(!buflen)) 1701 return -EINVAL; 1702 1703 lock_sock(sk); 1704 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) { 1705 rc = -ENOTCONN; 1706 goto exit; 1707 } 1708 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1709 1710 /* Step rcv queue to first msg with data or error; wait if necessary */ 1711 do { 1712 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1713 if (unlikely(rc)) 1714 goto exit; 1715 skb = skb_peek(&sk->sk_receive_queue); 1716 hdr = buf_msg(skb); 1717 dlen = msg_data_sz(hdr); 1718 hlen = msg_hdr_sz(hdr); 1719 err = msg_errcode(hdr); 1720 grp_evt = msg_is_grp_evt(hdr); 1721 if (likely(dlen || err)) 1722 break; 1723 tsk_advance_rx_queue(sk); 1724 } while (1); 1725 1726 /* Collect msg meta data, including error code and rejected data */ 1727 tipc_sk_set_orig_addr(m, skb); 1728 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1729 if (unlikely(rc)) 1730 goto exit; 1731 1732 /* Capture data if non-error msg, otherwise just set return value */ 1733 if (likely(!err)) { 1734 copy = min_t(int, dlen, buflen); 1735 if (unlikely(copy != dlen)) 1736 m->msg_flags |= MSG_TRUNC; 1737 rc = skb_copy_datagram_msg(skb, hlen, m, copy); 1738 } else { 1739 copy = 0; 1740 rc = 0; 1741 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) 1742 rc = -ECONNRESET; 1743 } 1744 if (unlikely(rc)) 1745 goto exit; 1746 1747 /* Mark message as group event if applicable */ 1748 if (unlikely(grp_evt)) { 1749 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN) 1750 m->msg_flags |= MSG_EOR; 1751 m->msg_flags |= MSG_OOB; 1752 copy = 0; 1753 } 1754 1755 /* Caption of data or error code/rejected data was successful */ 1756 if (unlikely(flags & MSG_PEEK)) 1757 goto exit; 1758 1759 /* Send group flow control advertisement when applicable */ 1760 if (tsk->group && msg_in_group(hdr) && !grp_evt) { 1761 skb_queue_head_init(&xmitq); 1762 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen), 1763 msg_orignode(hdr), msg_origport(hdr), 1764 &xmitq); 1765 tipc_node_distr_xmit(sock_net(sk), &xmitq); 1766 } 1767 1768 tsk_advance_rx_queue(sk); 1769 1770 if (likely(!connected)) 1771 goto exit; 1772 1773 /* Send connection flow control advertisement when applicable */ 1774 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1775 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE) 1776 tipc_sk_send_ack(tsk); 1777 exit: 1778 release_sock(sk); 1779 return rc ? rc : copy; 1780 } 1781 1782 /** 1783 * tipc_recvstream - receive stream-oriented data 1784 * @m: descriptor for message info 1785 * @buflen: total size of user buffer area 1786 * @flags: receive flags 1787 * 1788 * Used for SOCK_STREAM messages only. If not enough data is available 1789 * will optionally wait for more; never truncates data. 
1790 * 1791 * Returns size of returned message data, errno otherwise 1792 */ 1793 static int tipc_recvstream(struct socket *sock, struct msghdr *m, 1794 size_t buflen, int flags) 1795 { 1796 struct sock *sk = sock->sk; 1797 struct tipc_sock *tsk = tipc_sk(sk); 1798 struct sk_buff *skb; 1799 struct tipc_msg *hdr; 1800 struct tipc_skb_cb *skb_cb; 1801 bool peek = flags & MSG_PEEK; 1802 int offset, required, copy, copied = 0; 1803 int hlen, dlen, err, rc; 1804 long timeout; 1805 1806 /* Catch invalid receive attempts */ 1807 if (unlikely(!buflen)) 1808 return -EINVAL; 1809 1810 lock_sock(sk); 1811 1812 if (unlikely(sk->sk_state == TIPC_OPEN)) { 1813 rc = -ENOTCONN; 1814 goto exit; 1815 } 1816 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen); 1817 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1818 1819 do { 1820 /* Look at first msg in receive queue; wait if necessary */ 1821 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1822 if (unlikely(rc)) 1823 break; 1824 skb = skb_peek(&sk->sk_receive_queue); 1825 skb_cb = TIPC_SKB_CB(skb); 1826 hdr = buf_msg(skb); 1827 dlen = msg_data_sz(hdr); 1828 hlen = msg_hdr_sz(hdr); 1829 err = msg_errcode(hdr); 1830 1831 /* Discard any empty non-errored (SYN-) message */ 1832 if (unlikely(!dlen && !err)) { 1833 tsk_advance_rx_queue(sk); 1834 continue; 1835 } 1836 1837 /* Collect msg meta data, incl. error code and rejected data */ 1838 if (!copied) { 1839 tipc_sk_set_orig_addr(m, skb); 1840 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1841 if (rc) 1842 break; 1843 } 1844 1845 /* Copy data if msg ok, otherwise return error/partial data */ 1846 if (likely(!err)) { 1847 offset = skb_cb->bytes_read; 1848 copy = min_t(int, dlen - offset, buflen - copied); 1849 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); 1850 if (unlikely(rc)) 1851 break; 1852 copied += copy; 1853 offset += copy; 1854 if (unlikely(offset < dlen)) { 1855 if (!peek) 1856 skb_cb->bytes_read = offset; 1857 break; 1858 } 1859 } else { 1860 rc = 0; 1861 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control) 1862 rc = -ECONNRESET; 1863 if (copied || rc) 1864 break; 1865 } 1866 1867 if (unlikely(peek)) 1868 break; 1869 1870 tsk_advance_rx_queue(sk); 1871 1872 /* Send connection flow control advertisement when applicable */ 1873 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1874 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)) 1875 tipc_sk_send_ack(tsk); 1876 1877 /* Exit if all requested data or FIN/error received */ 1878 if (copied == buflen || err) 1879 break; 1880 1881 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required); 1882 exit: 1883 release_sock(sk); 1884 return copied ? 
copied : rc; 1885 } 1886 1887 /** 1888 * tipc_write_space - wake up thread if port congestion is released 1889 * @sk: socket 1890 */ 1891 static void tipc_write_space(struct sock *sk) 1892 { 1893 struct socket_wq *wq; 1894 1895 rcu_read_lock(); 1896 wq = rcu_dereference(sk->sk_wq); 1897 if (skwq_has_sleeper(wq)) 1898 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1899 EPOLLWRNORM | EPOLLWRBAND); 1900 rcu_read_unlock(); 1901 } 1902 1903 /** 1904 * tipc_data_ready - wake up threads to indicate messages have been received 1905 * @sk: socket 1906 * @len: the length of messages 1907 */ 1908 static void tipc_data_ready(struct sock *sk) 1909 { 1910 struct socket_wq *wq; 1911 1912 rcu_read_lock(); 1913 wq = rcu_dereference(sk->sk_wq); 1914 if (skwq_has_sleeper(wq)) 1915 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1916 EPOLLRDNORM | EPOLLRDBAND); 1917 rcu_read_unlock(); 1918 } 1919 1920 static void tipc_sock_destruct(struct sock *sk) 1921 { 1922 __skb_queue_purge(&sk->sk_receive_queue); 1923 } 1924 1925 static void tipc_sk_proto_rcv(struct sock *sk, 1926 struct sk_buff_head *inputq, 1927 struct sk_buff_head *xmitq) 1928 { 1929 struct sk_buff *skb = __skb_dequeue(inputq); 1930 struct tipc_sock *tsk = tipc_sk(sk); 1931 struct tipc_msg *hdr = buf_msg(skb); 1932 struct tipc_group *grp = tsk->group; 1933 bool wakeup = false; 1934 1935 switch (msg_user(hdr)) { 1936 case CONN_MANAGER: 1937 tipc_sk_conn_proto_rcv(tsk, skb, xmitq); 1938 return; 1939 case SOCK_WAKEUP: 1940 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); 1941 tsk->cong_link_cnt--; 1942 wakeup = true; 1943 break; 1944 case GROUP_PROTOCOL: 1945 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq); 1946 break; 1947 case TOP_SRV: 1948 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, 1949 hdr, inputq, xmitq); 1950 break; 1951 default: 1952 break; 1953 } 1954 1955 if (wakeup) 1956 sk->sk_write_space(sk); 1957 1958 kfree_skb(skb); 1959 } 1960 1961 /** 1962 * tipc_filter_connect - Handle incoming message for a connection-based socket 1963 * @tsk: TIPC socket 1964 * @skb: pointer to message buffer. 
Set to NULL if buffer is consumed 1965 * 1966 * Returns true if everything ok, false otherwise 1967 */ 1968 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1969 { 1970 struct sock *sk = &tsk->sk; 1971 struct net *net = sock_net(sk); 1972 struct tipc_msg *hdr = buf_msg(skb); 1973 u32 pport = msg_origport(hdr); 1974 u32 pnode = msg_orignode(hdr); 1975 1976 if (unlikely(msg_mcast(hdr))) 1977 return false; 1978 1979 switch (sk->sk_state) { 1980 case TIPC_CONNECTING: 1981 /* Accept only ACK or NACK message */ 1982 if (unlikely(!msg_connected(hdr))) { 1983 if (pport != tsk_peer_port(tsk) || 1984 pnode != tsk_peer_node(tsk)) 1985 return false; 1986 1987 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1988 sk->sk_err = ECONNREFUSED; 1989 sk->sk_state_change(sk); 1990 return true; 1991 } 1992 1993 if (unlikely(msg_errcode(hdr))) { 1994 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1995 sk->sk_err = ECONNREFUSED; 1996 sk->sk_state_change(sk); 1997 return true; 1998 } 1999 2000 if (unlikely(!msg_isdata(hdr))) { 2001 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2002 sk->sk_err = EINVAL; 2003 sk->sk_state_change(sk); 2004 return true; 2005 } 2006 2007 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 2008 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2009 2010 /* If 'ACK+' message, add to socket receive queue */ 2011 if (msg_data_sz(hdr)) 2012 return true; 2013 2014 /* If empty 'ACK-' message, wake up sleeping connect() */ 2015 sk->sk_data_ready(sk); 2016 2017 /* 'ACK-' message is neither accepted nor rejected: */ 2018 msg_set_dest_droppable(hdr, 1); 2019 return false; 2020 2021 case TIPC_OPEN: 2022 case TIPC_DISCONNECTING: 2023 break; 2024 case TIPC_LISTEN: 2025 /* Accept only SYN message */ 2026 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 2027 return true; 2028 break; 2029 case TIPC_ESTABLISHED: 2030 /* Accept only connection-based messages sent by peer */ 2031 if (unlikely(!tsk_peer_msg(tsk, hdr))) 2032 return false; 2033 2034 if (unlikely(msg_errcode(hdr))) { 2035 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2036 /* Let timer expire on it's own */ 2037 tipc_node_remove_conn(net, tsk_peer_node(tsk), 2038 tsk->portid); 2039 sk->sk_state_change(sk); 2040 } 2041 return true; 2042 default: 2043 pr_err("Unknown sk_state %u\n", sk->sk_state); 2044 } 2045 2046 return false; 2047 } 2048 2049 /** 2050 * rcvbuf_limit - get proper overload limit of socket receive queue 2051 * @sk: socket 2052 * @skb: message 2053 * 2054 * For connection oriented messages, irrespective of importance, 2055 * default queue limit is 2 MB. 2056 * 2057 * For connectionless messages, queue limits are based on message 2058 * importance as follows: 2059 * 2060 * TIPC_LOW_IMPORTANCE (2 MB) 2061 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2062 * TIPC_HIGH_IMPORTANCE (8 MB) 2063 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2064 * 2065 * Returns overload limit according to corresponding message importance 2066 */ 2067 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2068 { 2069 struct tipc_sock *tsk = tipc_sk(sk); 2070 struct tipc_msg *hdr = buf_msg(skb); 2071 2072 if (unlikely(msg_in_group(hdr))) 2073 return sk->sk_rcvbuf; 2074 2075 if (unlikely(!msg_connected(hdr))) 2076 return sk->sk_rcvbuf << msg_importance(hdr); 2077 2078 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2079 return sk->sk_rcvbuf; 2080 2081 return FLOWCTL_MSG_LIM; 2082 } 2083 2084 /** 2085 * tipc_sk_filter_rcv - validate incoming message 2086 * @sk: socket 2087 * @skb: pointer to message. 
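* @xmitq: queue for outgoing messages (protocol replies and rejected messages) to be sent by the caller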
2088 * 2089 * Enqueues message on receive queue if acceptable; optionally handles 2090 * disconnect indication for a connected socket. 2091 * 2092 * Called with socket lock already taken 2093 * 2094 */ 2095 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2096 struct sk_buff_head *xmitq) 2097 { 2098 bool sk_conn = !tipc_sk_type_connectionless(sk); 2099 struct tipc_sock *tsk = tipc_sk(sk); 2100 struct tipc_group *grp = tsk->group; 2101 struct tipc_msg *hdr = buf_msg(skb); 2102 struct net *net = sock_net(sk); 2103 struct sk_buff_head inputq; 2104 int limit, err = TIPC_OK; 2105 2106 TIPC_SKB_CB(skb)->bytes_read = 0; 2107 __skb_queue_head_init(&inputq); 2108 __skb_queue_tail(&inputq, skb); 2109 2110 if (unlikely(!msg_isdata(hdr))) 2111 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2112 2113 if (unlikely(grp)) 2114 tipc_group_filter_msg(grp, &inputq, xmitq); 2115 2116 /* Validate and add to receive buffer if there is space */ 2117 while ((skb = __skb_dequeue(&inputq))) { 2118 hdr = buf_msg(skb); 2119 limit = rcvbuf_limit(sk, skb); 2120 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2121 (!sk_conn && msg_connected(hdr)) || 2122 (!grp && msg_in_group(hdr))) 2123 err = TIPC_ERR_NO_PORT; 2124 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2125 atomic_inc(&sk->sk_drops); 2126 err = TIPC_ERR_OVERLOAD; 2127 } 2128 2129 if (unlikely(err)) { 2130 tipc_skb_reject(net, err, skb, xmitq); 2131 err = TIPC_OK; 2132 continue; 2133 } 2134 __skb_queue_tail(&sk->sk_receive_queue, skb); 2135 skb_set_owner_r(skb, sk); 2136 sk->sk_data_ready(sk); 2137 } 2138 } 2139 2140 /** 2141 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2142 * @sk: socket 2143 * @skb: message 2144 * 2145 * Caller must hold socket lock 2146 */ 2147 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2148 { 2149 unsigned int before = sk_rmem_alloc_get(sk); 2150 struct sk_buff_head xmitq; 2151 unsigned int added; 2152 2153 __skb_queue_head_init(&xmitq); 2154 2155 tipc_sk_filter_rcv(sk, skb, &xmitq); 2156 added = sk_rmem_alloc_get(sk) - before; 2157 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2158 2159 /* Send pending response/rejected messages, if any */ 2160 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2161 return 0; 2162 } 2163 2164 /** 2165 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2166 * inputq and try adding them to socket or backlog queue 2167 * @inputq: list of incoming buffers with potentially different destinations 2168 * @sk: socket where the buffers should be enqueued 2169 * @dport: port number for the socket 2170 * 2171 * Caller must hold socket lock 2172 */ 2173 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2174 u32 dport, struct sk_buff_head *xmitq) 2175 { 2176 unsigned long time_limit = jiffies + 2; 2177 struct sk_buff *skb; 2178 unsigned int lim; 2179 atomic_t *dcnt; 2180 u32 onode; 2181 2182 while (skb_queue_len(inputq)) { 2183 if (unlikely(time_after_eq(jiffies, time_limit))) 2184 return; 2185 2186 skb = tipc_skb_dequeue(inputq, dport); 2187 if (unlikely(!skb)) 2188 return; 2189 2190 /* Add message directly to receive queue if possible */ 2191 if (!sock_owned_by_user(sk)) { 2192 tipc_sk_filter_rcv(sk, skb, xmitq); 2193 continue; 2194 } 2195 2196 /* Try backlog, compensating for double-counted bytes */ 2197 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2198 if (!sk->sk_backlog.len) 2199 atomic_set(dcnt, 0); 2200 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2201 if (likely(!sk_add_backlog(sk, skb, lim))) 2202 
continue; 2203 2204 /* Overload => reject message back to sender */ 2205 onode = tipc_own_addr(sock_net(sk)); 2206 atomic_inc(&sk->sk_drops); 2207 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2208 __skb_queue_tail(xmitq, skb); 2209 break; 2210 } 2211 } 2212 2213 /** 2214 * tipc_sk_rcv - handle a chain of incoming buffers 2215 * @inputq: buffer list containing the buffers 2216 * Consumes all buffers in list until inputq is empty 2217 * Note: may be called in multiple threads referring to the same queue 2218 */ 2219 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2220 { 2221 struct sk_buff_head xmitq; 2222 u32 dnode, dport = 0; 2223 int err; 2224 struct tipc_sock *tsk; 2225 struct sock *sk; 2226 struct sk_buff *skb; 2227 2228 __skb_queue_head_init(&xmitq); 2229 while (skb_queue_len(inputq)) { 2230 dport = tipc_skb_peek_port(inputq, dport); 2231 tsk = tipc_sk_lookup(net, dport); 2232 2233 if (likely(tsk)) { 2234 sk = &tsk->sk; 2235 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2236 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2237 spin_unlock_bh(&sk->sk_lock.slock); 2238 } 2239 /* Send pending response/rejected messages, if any */ 2240 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2241 sock_put(sk); 2242 continue; 2243 } 2244 /* No destination socket => dequeue skb if still there */ 2245 skb = tipc_skb_dequeue(inputq, dport); 2246 if (!skb) 2247 return; 2248 2249 /* Try secondary lookup if unresolved named message */ 2250 err = TIPC_ERR_NO_PORT; 2251 if (tipc_msg_lookup_dest(net, skb, &err)) 2252 goto xmit; 2253 2254 /* Prepare for message rejection */ 2255 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2256 continue; 2257 xmit: 2258 dnode = msg_destnode(buf_msg(skb)); 2259 tipc_node_xmit_skb(net, skb, dnode, dport); 2260 } 2261 } 2262 2263 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2264 { 2265 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2266 struct sock *sk = sock->sk; 2267 int done; 2268 2269 do { 2270 int err = sock_error(sk); 2271 if (err) 2272 return err; 2273 if (!*timeo_p) 2274 return -ETIMEDOUT; 2275 if (signal_pending(current)) 2276 return sock_intr_errno(*timeo_p); 2277 2278 add_wait_queue(sk_sleep(sk), &wait); 2279 done = sk_wait_event(sk, timeo_p, 2280 sk->sk_state != TIPC_CONNECTING, &wait); 2281 remove_wait_queue(sk_sleep(sk), &wait); 2282 } while (!done); 2283 return 0; 2284 } 2285 2286 /** 2287 * tipc_connect - establish a connection to another TIPC port 2288 * @sock: socket structure 2289 * @dest: socket address for destination port 2290 * @destlen: size of socket address data structure 2291 * @flags: file-related flags associated with socket 2292 * 2293 * Returns 0 on success, errno otherwise 2294 */ 2295 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2296 int destlen, int flags) 2297 { 2298 struct sock *sk = sock->sk; 2299 struct tipc_sock *tsk = tipc_sk(sk); 2300 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2301 struct msghdr m = {NULL,}; 2302 long timeout = (flags & O_NONBLOCK) ? 
0 : tsk->conn_timeout; 2303 int previous; 2304 int res = 0; 2305 2306 if (destlen != sizeof(struct sockaddr_tipc)) 2307 return -EINVAL; 2308 2309 lock_sock(sk); 2310 2311 if (tsk->group) { 2312 res = -EINVAL; 2313 goto exit; 2314 } 2315 2316 if (dst->family == AF_UNSPEC) { 2317 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2318 if (!tipc_sk_type_connectionless(sk)) 2319 res = -EINVAL; 2320 goto exit; 2321 } else if (dst->family != AF_TIPC) { 2322 res = -EINVAL; 2323 } 2324 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2325 res = -EINVAL; 2326 if (res) 2327 goto exit; 2328 2329 /* DGRAM/RDM connect(), just save the destaddr */ 2330 if (tipc_sk_type_connectionless(sk)) { 2331 memcpy(&tsk->peer, dest, destlen); 2332 goto exit; 2333 } 2334 2335 previous = sk->sk_state; 2336 2337 switch (sk->sk_state) { 2338 case TIPC_OPEN: 2339 /* Send a 'SYN-' to destination */ 2340 m.msg_name = dest; 2341 m.msg_namelen = destlen; 2342 2343 /* If connect is in non-blocking case, set MSG_DONTWAIT to 2344 * indicate send_msg() is never blocked. 2345 */ 2346 if (!timeout) 2347 m.msg_flags = MSG_DONTWAIT; 2348 2349 res = __tipc_sendmsg(sock, &m, 0); 2350 if ((res < 0) && (res != -EWOULDBLOCK)) 2351 goto exit; 2352 2353 /* Just entered TIPC_CONNECTING state; the only 2354 * difference is that return value in non-blocking 2355 * case is EINPROGRESS, rather than EALREADY. 2356 */ 2357 res = -EINPROGRESS; 2358 /* fall thru' */ 2359 case TIPC_CONNECTING: 2360 if (!timeout) { 2361 if (previous == TIPC_CONNECTING) 2362 res = -EALREADY; 2363 goto exit; 2364 } 2365 timeout = msecs_to_jiffies(timeout); 2366 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2367 res = tipc_wait_for_connect(sock, &timeout); 2368 break; 2369 case TIPC_ESTABLISHED: 2370 res = -EISCONN; 2371 break; 2372 default: 2373 res = -EINVAL; 2374 } 2375 2376 exit: 2377 release_sock(sk); 2378 return res; 2379 } 2380 2381 /** 2382 * tipc_listen - allow socket to listen for incoming connections 2383 * @sock: socket structure 2384 * @len: (unused) 2385 * 2386 * Returns 0 on success, errno otherwise 2387 */ 2388 static int tipc_listen(struct socket *sock, int len) 2389 { 2390 struct sock *sk = sock->sk; 2391 int res; 2392 2393 lock_sock(sk); 2394 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2395 release_sock(sk); 2396 2397 return res; 2398 } 2399 2400 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2401 { 2402 struct sock *sk = sock->sk; 2403 DEFINE_WAIT(wait); 2404 int err; 2405 2406 /* True wake-one mechanism for incoming connections: only 2407 * one process gets woken up, not the 'whole herd'. 2408 * Since we do not 'race & poll' for established sockets 2409 * anymore, the common case will execute the loop only once. 
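* Incoming connection requests (SYN messages) are queued on the listening socket's receive queue, so we only have to wait for that queue to become non-empty.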
2410 */ 2411 for (;;) { 2412 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 2413 TASK_INTERRUPTIBLE); 2414 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 2415 release_sock(sk); 2416 timeo = schedule_timeout(timeo); 2417 lock_sock(sk); 2418 } 2419 err = 0; 2420 if (!skb_queue_empty(&sk->sk_receive_queue)) 2421 break; 2422 err = -EAGAIN; 2423 if (!timeo) 2424 break; 2425 err = sock_intr_errno(timeo); 2426 if (signal_pending(current)) 2427 break; 2428 } 2429 finish_wait(sk_sleep(sk), &wait); 2430 return err; 2431 } 2432 2433 /** 2434 * tipc_accept - wait for connection request 2435 * @sock: listening socket 2436 * @newsock: new socket that is to be connected 2437 * @flags: file-related flags associated with socket 2438 * 2439 * Returns 0 on success, errno otherwise 2440 */ 2441 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 2442 bool kern) 2443 { 2444 struct sock *new_sk, *sk = sock->sk; 2445 struct sk_buff *buf; 2446 struct tipc_sock *new_tsock; 2447 struct tipc_msg *msg; 2448 long timeo; 2449 int res; 2450 2451 lock_sock(sk); 2452 2453 if (sk->sk_state != TIPC_LISTEN) { 2454 res = -EINVAL; 2455 goto exit; 2456 } 2457 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2458 res = tipc_wait_for_accept(sock, timeo); 2459 if (res) 2460 goto exit; 2461 2462 buf = skb_peek(&sk->sk_receive_queue); 2463 2464 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); 2465 if (res) 2466 goto exit; 2467 security_sk_clone(sock->sk, new_sock->sk); 2468 2469 new_sk = new_sock->sk; 2470 new_tsock = tipc_sk(new_sk); 2471 msg = buf_msg(buf); 2472 2473 /* we lock on new_sk; but lockdep sees the lock on sk */ 2474 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2475 2476 /* 2477 * Reject any stray messages received by new socket 2478 * before the socket lock was taken (very, very unlikely) 2479 */ 2480 tsk_rej_rx_queue(new_sk); 2481 2482 /* Connect new socket to it's peer */ 2483 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2484 2485 tsk_set_importance(new_tsock, msg_importance(msg)); 2486 if (msg_named(msg)) { 2487 new_tsock->conn_type = msg_nametype(msg); 2488 new_tsock->conn_instance = msg_nameinst(msg); 2489 } 2490 2491 /* 2492 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 2493 * Respond to 'SYN+' by queuing it on new socket. 2494 */ 2495 if (!msg_data_sz(msg)) { 2496 struct msghdr m = {NULL,}; 2497 2498 tsk_advance_rx_queue(sk); 2499 __tipc_sendstream(new_sock, &m, 0); 2500 } else { 2501 __skb_dequeue(&sk->sk_receive_queue); 2502 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2503 skb_set_owner_r(buf, new_sk); 2504 } 2505 release_sock(new_sk); 2506 exit: 2507 release_sock(sk); 2508 return res; 2509 } 2510 2511 /** 2512 * tipc_shutdown - shutdown socket connection 2513 * @sock: socket structure 2514 * @how: direction to close (must be SHUT_RDWR) 2515 * 2516 * Terminates connection (if necessary), then purges socket's receive queue. 
2517 * 2518 * Returns 0 on success, errno otherwise 2519 */ 2520 static int tipc_shutdown(struct socket *sock, int how) 2521 { 2522 struct sock *sk = sock->sk; 2523 int res; 2524 2525 if (how != SHUT_RDWR) 2526 return -EINVAL; 2527 2528 lock_sock(sk); 2529 2530 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2531 sk->sk_shutdown = SEND_SHUTDOWN; 2532 2533 if (sk->sk_state == TIPC_DISCONNECTING) { 2534 /* Discard any unreceived messages */ 2535 __skb_queue_purge(&sk->sk_receive_queue); 2536 2537 /* Wake up anyone sleeping in poll */ 2538 sk->sk_state_change(sk); 2539 res = 0; 2540 } else { 2541 res = -ENOTCONN; 2542 } 2543 2544 release_sock(sk); 2545 return res; 2546 } 2547 2548 static void tipc_sk_timeout(struct timer_list *t) 2549 { 2550 struct sock *sk = from_timer(sk, t, sk_timer); 2551 struct tipc_sock *tsk = tipc_sk(sk); 2552 u32 peer_port = tsk_peer_port(tsk); 2553 u32 peer_node = tsk_peer_node(tsk); 2554 u32 own_node = tsk_own_node(tsk); 2555 u32 own_port = tsk->portid; 2556 struct net *net = sock_net(sk); 2557 struct sk_buff *skb = NULL; 2558 2559 bh_lock_sock(sk); 2560 if (!tipc_sk_connected(sk)) 2561 goto exit; 2562 2563 /* Try again later if socket is busy */ 2564 if (sock_owned_by_user(sk)) { 2565 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2566 goto exit; 2567 } 2568 2569 if (tsk->probe_unacked) { 2570 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2571 tipc_node_remove_conn(net, peer_node, peer_port); 2572 sk->sk_state_change(sk); 2573 goto exit; 2574 } 2575 /* Send new probe */ 2576 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2577 peer_node, own_node, peer_port, own_port, 2578 TIPC_OK); 2579 tsk->probe_unacked = true; 2580 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2581 exit: 2582 bh_unlock_sock(sk); 2583 if (skb) 2584 tipc_node_xmit_skb(net, skb, peer_node, own_port); 2585 sock_put(sk); 2586 } 2587 2588 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2589 struct tipc_name_seq const *seq) 2590 { 2591 struct sock *sk = &tsk->sk; 2592 struct net *net = sock_net(sk); 2593 struct publication *publ; 2594 u32 key; 2595 2596 if (scope != TIPC_NODE_SCOPE) 2597 scope = TIPC_CLUSTER_SCOPE; 2598 2599 if (tipc_sk_connected(sk)) 2600 return -EINVAL; 2601 key = tsk->portid + tsk->pub_count + 1; 2602 if (key == tsk->portid) 2603 return -EADDRINUSE; 2604 2605 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2606 scope, tsk->portid, key); 2607 if (unlikely(!publ)) 2608 return -EINVAL; 2609 2610 list_add(&publ->binding_sock, &tsk->publications); 2611 tsk->pub_count++; 2612 tsk->published = 1; 2613 return 0; 2614 } 2615 2616 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2617 struct tipc_name_seq const *seq) 2618 { 2619 struct net *net = sock_net(&tsk->sk); 2620 struct publication *publ; 2621 struct publication *safe; 2622 int rc = -EINVAL; 2623 2624 if (scope != TIPC_NODE_SCOPE) 2625 scope = TIPC_CLUSTER_SCOPE; 2626 2627 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2628 if (seq) { 2629 if (publ->scope != scope) 2630 continue; 2631 if (publ->type != seq->type) 2632 continue; 2633 if (publ->lower != seq->lower) 2634 continue; 2635 if (publ->upper != seq->upper) 2636 break; 2637 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2638 publ->upper, publ->key); 2639 rc = 0; 2640 break; 2641 } 2642 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2643 publ->upper, publ->key); 2644 rc = 0; 2645 } 2646 if (list_empty(&tsk->publications)) 2647 tsk->published = 0; 2648 return rc; 
2649 } 2650 2651 /* tipc_sk_reinit: set non-zero address in all existing sockets 2652 * when we go from standalone to network mode. 2653 */ 2654 void tipc_sk_reinit(struct net *net) 2655 { 2656 struct tipc_net *tn = net_generic(net, tipc_net_id); 2657 struct rhashtable_iter iter; 2658 struct tipc_sock *tsk; 2659 struct tipc_msg *msg; 2660 2661 rhashtable_walk_enter(&tn->sk_rht, &iter); 2662 2663 do { 2664 rhashtable_walk_start(&iter); 2665 2666 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2667 spin_lock_bh(&tsk->sk.sk_lock.slock); 2668 msg = &tsk->phdr; 2669 msg_set_prevnode(msg, tipc_own_addr(net)); 2670 msg_set_orignode(msg, tipc_own_addr(net)); 2671 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2672 } 2673 2674 rhashtable_walk_stop(&iter); 2675 } while (tsk == ERR_PTR(-EAGAIN)); 2676 2677 rhashtable_walk_exit(&iter); 2678 } 2679 2680 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2681 { 2682 struct tipc_net *tn = net_generic(net, tipc_net_id); 2683 struct tipc_sock *tsk; 2684 2685 rcu_read_lock(); 2686 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2687 if (tsk) 2688 sock_hold(&tsk->sk); 2689 rcu_read_unlock(); 2690 2691 return tsk; 2692 } 2693 2694 static int tipc_sk_insert(struct tipc_sock *tsk) 2695 { 2696 struct sock *sk = &tsk->sk; 2697 struct net *net = sock_net(sk); 2698 struct tipc_net *tn = net_generic(net, tipc_net_id); 2699 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2700 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2701 2702 while (remaining--) { 2703 portid++; 2704 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2705 portid = TIPC_MIN_PORT; 2706 tsk->portid = portid; 2707 sock_hold(&tsk->sk); 2708 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2709 tsk_rht_params)) 2710 return 0; 2711 sock_put(&tsk->sk); 2712 } 2713 2714 return -1; 2715 } 2716 2717 static void tipc_sk_remove(struct tipc_sock *tsk) 2718 { 2719 struct sock *sk = &tsk->sk; 2720 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2721 2722 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2723 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2724 __sock_put(sk); 2725 } 2726 } 2727 2728 static const struct rhashtable_params tsk_rht_params = { 2729 .nelem_hint = 192, 2730 .head_offset = offsetof(struct tipc_sock, node), 2731 .key_offset = offsetof(struct tipc_sock, portid), 2732 .key_len = sizeof(u32), /* portid */ 2733 .max_size = 1048576, 2734 .min_size = 256, 2735 .automatic_shrinking = true, 2736 }; 2737 2738 int tipc_sk_rht_init(struct net *net) 2739 { 2740 struct tipc_net *tn = net_generic(net, tipc_net_id); 2741 2742 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2743 } 2744 2745 void tipc_sk_rht_destroy(struct net *net) 2746 { 2747 struct tipc_net *tn = net_generic(net, tipc_net_id); 2748 2749 /* Wait for socket readers to complete */ 2750 synchronize_net(); 2751 2752 rhashtable_destroy(&tn->sk_rht); 2753 } 2754 2755 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2756 { 2757 struct net *net = sock_net(&tsk->sk); 2758 struct tipc_group *grp = tsk->group; 2759 struct tipc_msg *hdr = &tsk->phdr; 2760 struct tipc_name_seq seq; 2761 int rc; 2762 2763 if (mreq->type < TIPC_RESERVED_TYPES) 2764 return -EACCES; 2765 if (mreq->scope > TIPC_NODE_SCOPE) 2766 return -EINVAL; 2767 if (grp) 2768 return -EACCES; 2769 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2770 if (!grp) 2771 return -ENOMEM; 2772 tsk->group = grp; 2773 
msg_set_lookup_scope(hdr, mreq->scope); 2774 msg_set_nametype(hdr, mreq->type); 2775 msg_set_dest_droppable(hdr, true); 2776 seq.type = mreq->type; 2777 seq.lower = mreq->instance; 2778 seq.upper = seq.lower; 2779 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2780 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2781 if (rc) { 2782 tipc_group_delete(net, grp); 2783 tsk->group = NULL; 2784 return rc; 2785 } 2786 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2787 tsk->mc_method.rcast = true; 2788 tsk->mc_method.mandatory = true; 2789 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2790 return rc; 2791 } 2792 2793 static int tipc_sk_leave(struct tipc_sock *tsk) 2794 { 2795 struct net *net = sock_net(&tsk->sk); 2796 struct tipc_group *grp = tsk->group; 2797 struct tipc_name_seq seq; 2798 int scope; 2799 2800 if (!grp) 2801 return -EINVAL; 2802 tipc_group_self(grp, &seq, &scope); 2803 tipc_group_delete(net, grp); 2804 tsk->group = NULL; 2805 tipc_sk_withdraw(tsk, scope, &seq); 2806 return 0; 2807 } 2808 2809 /** 2810 * tipc_setsockopt - set socket option 2811 * @sock: socket structure 2812 * @lvl: option level 2813 * @opt: option identifier 2814 * @ov: pointer to new option value 2815 * @ol: length of option value 2816 * 2817 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2818 * (to ease compatibility). 2819 * 2820 * Returns 0 on success, errno otherwise 2821 */ 2822 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2823 char __user *ov, unsigned int ol) 2824 { 2825 struct sock *sk = sock->sk; 2826 struct tipc_sock *tsk = tipc_sk(sk); 2827 struct tipc_group_req mreq; 2828 u32 value = 0; 2829 int res = 0; 2830 2831 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2832 return 0; 2833 if (lvl != SOL_TIPC) 2834 return -ENOPROTOOPT; 2835 2836 switch (opt) { 2837 case TIPC_IMPORTANCE: 2838 case TIPC_SRC_DROPPABLE: 2839 case TIPC_DEST_DROPPABLE: 2840 case TIPC_CONN_TIMEOUT: 2841 if (ol < sizeof(value)) 2842 return -EINVAL; 2843 if (get_user(value, (u32 __user *)ov)) 2844 return -EFAULT; 2845 break; 2846 case TIPC_GROUP_JOIN: 2847 if (ol < sizeof(mreq)) 2848 return -EINVAL; 2849 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2850 return -EFAULT; 2851 break; 2852 default: 2853 if (ov || ol) 2854 return -EINVAL; 2855 } 2856 2857 lock_sock(sk); 2858 2859 switch (opt) { 2860 case TIPC_IMPORTANCE: 2861 res = tsk_set_importance(tsk, value); 2862 break; 2863 case TIPC_SRC_DROPPABLE: 2864 if (sock->type != SOCK_STREAM) 2865 tsk_set_unreliable(tsk, value); 2866 else 2867 res = -ENOPROTOOPT; 2868 break; 2869 case TIPC_DEST_DROPPABLE: 2870 tsk_set_unreturnable(tsk, value); 2871 break; 2872 case TIPC_CONN_TIMEOUT: 2873 tipc_sk(sk)->conn_timeout = value; 2874 break; 2875 case TIPC_MCAST_BROADCAST: 2876 tsk->mc_method.rcast = false; 2877 tsk->mc_method.mandatory = true; 2878 break; 2879 case TIPC_MCAST_REPLICAST: 2880 tsk->mc_method.rcast = true; 2881 tsk->mc_method.mandatory = true; 2882 break; 2883 case TIPC_GROUP_JOIN: 2884 res = tipc_sk_join(tsk, &mreq); 2885 break; 2886 case TIPC_GROUP_LEAVE: 2887 res = tipc_sk_leave(tsk); 2888 break; 2889 default: 2890 res = -EINVAL; 2891 } 2892 2893 release_sock(sk); 2894 2895 return res; 2896 } 2897 2898 /** 2899 * tipc_getsockopt - get socket option 2900 * @sock: socket structure 2901 * @lvl: option level 2902 * @opt: option identifier 2903 * @ov: receptacle for option value 2904 * @ol: receptacle for length of option value 2905 * 2906 * For stream sockets only, returns 0 length result for all 
IPPROTO_TCP options 2907 * (to ease compatibility). 2908 * 2909 * Returns 0 on success, errno otherwise 2910 */ 2911 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2912 char __user *ov, int __user *ol) 2913 { 2914 struct sock *sk = sock->sk; 2915 struct tipc_sock *tsk = tipc_sk(sk); 2916 struct tipc_name_seq seq; 2917 int len, scope; 2918 u32 value; 2919 int res; 2920 2921 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2922 return put_user(0, ol); 2923 if (lvl != SOL_TIPC) 2924 return -ENOPROTOOPT; 2925 res = get_user(len, ol); 2926 if (res) 2927 return res; 2928 2929 lock_sock(sk); 2930 2931 switch (opt) { 2932 case TIPC_IMPORTANCE: 2933 value = tsk_importance(tsk); 2934 break; 2935 case TIPC_SRC_DROPPABLE: 2936 value = tsk_unreliable(tsk); 2937 break; 2938 case TIPC_DEST_DROPPABLE: 2939 value = tsk_unreturnable(tsk); 2940 break; 2941 case TIPC_CONN_TIMEOUT: 2942 value = tsk->conn_timeout; 2943 /* no need to set "res", since already 0 at this point */ 2944 break; 2945 case TIPC_NODE_RECVQ_DEPTH: 2946 value = 0; /* was tipc_queue_size, now obsolete */ 2947 break; 2948 case TIPC_SOCK_RECVQ_DEPTH: 2949 value = skb_queue_len(&sk->sk_receive_queue); 2950 break; 2951 case TIPC_GROUP_JOIN: 2952 seq.type = 0; 2953 if (tsk->group) 2954 tipc_group_self(tsk->group, &seq, &scope); 2955 value = seq.type; 2956 break; 2957 default: 2958 res = -EINVAL; 2959 } 2960 2961 release_sock(sk); 2962 2963 if (res) 2964 return res; /* "get" failed */ 2965 2966 if (len < sizeof(value)) 2967 return -EINVAL; 2968 2969 if (copy_to_user(ov, &value, sizeof(value))) 2970 return -EFAULT; 2971 2972 return put_user(sizeof(value), ol); 2973 } 2974 2975 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2976 { 2977 struct net *net = sock_net(sock->sk); 2978 struct tipc_sioc_nodeid_req nr = {0}; 2979 struct tipc_sioc_ln_req lnr; 2980 void __user *argp = (void __user *)arg; 2981 2982 switch (cmd) { 2983 case SIOCGETLINKNAME: 2984 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2985 return -EFAULT; 2986 if (!tipc_node_get_linkname(net, 2987 lnr.bearer_id & 0xffff, lnr.peer, 2988 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2989 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2990 return -EFAULT; 2991 return 0; 2992 } 2993 return -EADDRNOTAVAIL; 2994 case SIOCGETNODEID: 2995 if (copy_from_user(&nr, argp, sizeof(nr))) 2996 return -EFAULT; 2997 if (!tipc_node_get_id(net, nr.peer, nr.node_id)) 2998 return -EADDRNOTAVAIL; 2999 if (copy_to_user(argp, &nr, sizeof(nr))) 3000 return -EFAULT; 3001 return 0; 3002 default: 3003 return -ENOIOCTLCMD; 3004 } 3005 } 3006 3007 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 3008 { 3009 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 3010 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 3011 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3012 3013 tsk1->peer.family = AF_TIPC; 3014 tsk1->peer.addrtype = TIPC_ADDR_ID; 3015 tsk1->peer.scope = TIPC_NODE_SCOPE; 3016 tsk1->peer.addr.id.ref = tsk2->portid; 3017 tsk1->peer.addr.id.node = onode; 3018 tsk2->peer.family = AF_TIPC; 3019 tsk2->peer.addrtype = TIPC_ADDR_ID; 3020 tsk2->peer.scope = TIPC_NODE_SCOPE; 3021 tsk2->peer.addr.id.ref = tsk1->portid; 3022 tsk2->peer.addr.id.node = onode; 3023 3024 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3025 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3026 return 0; 3027 } 3028 3029 /* Protocol switches for the various types of TIPC sockets */ 3030 3031 static const struct proto_ops msg_ops = { 3032 .owner = THIS_MODULE, 3033 .family = AF_TIPC, 3034 
.release = tipc_release, 3035 .bind = tipc_bind, 3036 .connect = tipc_connect, 3037 .socketpair = tipc_socketpair, 3038 .accept = sock_no_accept, 3039 .getname = tipc_getname, 3040 .poll = tipc_poll, 3041 .ioctl = tipc_ioctl, 3042 .listen = sock_no_listen, 3043 .shutdown = tipc_shutdown, 3044 .setsockopt = tipc_setsockopt, 3045 .getsockopt = tipc_getsockopt, 3046 .sendmsg = tipc_sendmsg, 3047 .recvmsg = tipc_recvmsg, 3048 .mmap = sock_no_mmap, 3049 .sendpage = sock_no_sendpage 3050 }; 3051 3052 static const struct proto_ops packet_ops = { 3053 .owner = THIS_MODULE, 3054 .family = AF_TIPC, 3055 .release = tipc_release, 3056 .bind = tipc_bind, 3057 .connect = tipc_connect, 3058 .socketpair = tipc_socketpair, 3059 .accept = tipc_accept, 3060 .getname = tipc_getname, 3061 .poll = tipc_poll, 3062 .ioctl = tipc_ioctl, 3063 .listen = tipc_listen, 3064 .shutdown = tipc_shutdown, 3065 .setsockopt = tipc_setsockopt, 3066 .getsockopt = tipc_getsockopt, 3067 .sendmsg = tipc_send_packet, 3068 .recvmsg = tipc_recvmsg, 3069 .mmap = sock_no_mmap, 3070 .sendpage = sock_no_sendpage 3071 }; 3072 3073 static const struct proto_ops stream_ops = { 3074 .owner = THIS_MODULE, 3075 .family = AF_TIPC, 3076 .release = tipc_release, 3077 .bind = tipc_bind, 3078 .connect = tipc_connect, 3079 .socketpair = tipc_socketpair, 3080 .accept = tipc_accept, 3081 .getname = tipc_getname, 3082 .poll = tipc_poll, 3083 .ioctl = tipc_ioctl, 3084 .listen = tipc_listen, 3085 .shutdown = tipc_shutdown, 3086 .setsockopt = tipc_setsockopt, 3087 .getsockopt = tipc_getsockopt, 3088 .sendmsg = tipc_sendstream, 3089 .recvmsg = tipc_recvstream, 3090 .mmap = sock_no_mmap, 3091 .sendpage = sock_no_sendpage 3092 }; 3093 3094 static const struct net_proto_family tipc_family_ops = { 3095 .owner = THIS_MODULE, 3096 .family = AF_TIPC, 3097 .create = tipc_sk_create 3098 }; 3099 3100 static struct proto tipc_proto = { 3101 .name = "TIPC", 3102 .owner = THIS_MODULE, 3103 .obj_size = sizeof(struct tipc_sock), 3104 .sysctl_rmem = sysctl_tipc_rmem 3105 }; 3106 3107 /** 3108 * tipc_socket_init - initialize TIPC socket interface 3109 * 3110 * Returns 0 on success, errno otherwise 3111 */ 3112 int tipc_socket_init(void) 3113 { 3114 int res; 3115 3116 res = proto_register(&tipc_proto, 1); 3117 if (res) { 3118 pr_err("Failed to register TIPC protocol type\n"); 3119 goto out; 3120 } 3121 3122 res = sock_register(&tipc_family_ops); 3123 if (res) { 3124 pr_err("Failed to register TIPC socket type\n"); 3125 proto_unregister(&tipc_proto); 3126 goto out; 3127 } 3128 out: 3129 return res; 3130 } 3131 3132 /** 3133 * tipc_socket_stop - stop TIPC socket interface 3134 */ 3135 void tipc_socket_stop(void) 3136 { 3137 sock_unregister(tipc_family_ops.family); 3138 proto_unregister(&tipc_proto); 3139 } 3140 3141 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3142 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3143 { 3144 u32 peer_node; 3145 u32 peer_port; 3146 struct nlattr *nest; 3147 3148 peer_node = tsk_peer_node(tsk); 3149 peer_port = tsk_peer_port(tsk); 3150 3151 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3152 3153 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3154 goto msg_full; 3155 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3156 goto msg_full; 3157 3158 if (tsk->conn_type != 0) { 3159 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3160 goto msg_full; 3161 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3162 goto msg_full; 3163 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3164 goto msg_full; 3165 } 3166 nla_nest_end(skb, nest); 3167 3168 return 0; 3169 3170 msg_full: 3171 nla_nest_cancel(skb, nest); 3172 3173 return -EMSGSIZE; 3174 } 3175 3176 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3177 *tsk) 3178 { 3179 struct net *net = sock_net(skb->sk); 3180 struct sock *sk = &tsk->sk; 3181 3182 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3183 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3184 return -EMSGSIZE; 3185 3186 if (tipc_sk_connected(sk)) { 3187 if (__tipc_nl_add_sk_con(skb, tsk)) 3188 return -EMSGSIZE; 3189 } else if (!list_empty(&tsk->publications)) { 3190 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3191 return -EMSGSIZE; 3192 } 3193 return 0; 3194 } 3195 3196 /* Caller should hold socket lock for the passed tipc socket. */ 3197 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3198 struct tipc_sock *tsk) 3199 { 3200 struct nlattr *attrs; 3201 void *hdr; 3202 3203 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3204 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3205 if (!hdr) 3206 goto msg_cancel; 3207 3208 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3209 if (!attrs) 3210 goto genlmsg_cancel; 3211 3212 if (__tipc_nl_add_sk_info(skb, tsk)) 3213 goto attr_msg_cancel; 3214 3215 nla_nest_end(skb, attrs); 3216 genlmsg_end(skb, hdr); 3217 3218 return 0; 3219 3220 attr_msg_cancel: 3221 nla_nest_cancel(skb, attrs); 3222 genlmsg_cancel: 3223 genlmsg_cancel(skb, hdr); 3224 msg_cancel: 3225 return -EMSGSIZE; 3226 } 3227 3228 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3229 int (*skb_handler)(struct sk_buff *skb, 3230 struct netlink_callback *cb, 3231 struct tipc_sock *tsk)) 3232 { 3233 struct rhashtable_iter *iter = (void *)cb->args[4]; 3234 struct tipc_sock *tsk; 3235 int err; 3236 3237 rhashtable_walk_start(iter); 3238 while ((tsk = rhashtable_walk_next(iter)) != NULL) { 3239 if (IS_ERR(tsk)) { 3240 err = PTR_ERR(tsk); 3241 if (err == -EAGAIN) { 3242 err = 0; 3243 continue; 3244 } 3245 break; 3246 } 3247 3248 sock_hold(&tsk->sk); 3249 rhashtable_walk_stop(iter); 3250 lock_sock(&tsk->sk); 3251 err = skb_handler(skb, cb, tsk); 3252 if (err) { 3253 release_sock(&tsk->sk); 3254 sock_put(&tsk->sk); 3255 goto out; 3256 } 3257 release_sock(&tsk->sk); 3258 rhashtable_walk_start(iter); 3259 sock_put(&tsk->sk); 3260 } 3261 rhashtable_walk_stop(iter); 3262 out: 3263 return skb->len; 3264 } 3265 EXPORT_SYMBOL(tipc_nl_sk_walk); 3266 3267 int tipc_dump_start(struct netlink_callback *cb) 3268 { 3269 return __tipc_dump_start(cb, sock_net(cb->skb->sk)); 3270 } 3271 EXPORT_SYMBOL(tipc_dump_start); 3272 3273 int __tipc_dump_start(struct netlink_callback *cb, struct net *net) 3274 { 3275 /* tipc_nl_name_table_dump() uses cb->args[0...3]. 
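* The rhashtable walk iterator is therefore kept in cb->args[4].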
*/ 3276 struct rhashtable_iter *iter = (void *)cb->args[4]; 3277 struct tipc_net *tn = tipc_net(net); 3278 3279 if (!iter) { 3280 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 3281 if (!iter) 3282 return -ENOMEM; 3283 3284 cb->args[4] = (long)iter; 3285 } 3286 3287 rhashtable_walk_enter(&tn->sk_rht, iter); 3288 return 0; 3289 } 3290 3291 int tipc_dump_done(struct netlink_callback *cb) 3292 { 3293 struct rhashtable_iter *hti = (void *)cb->args[4]; 3294 3295 rhashtable_walk_exit(hti); 3296 kfree(hti); 3297 return 0; 3298 } 3299 EXPORT_SYMBOL(tipc_dump_done); 3300 3301 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 3302 struct tipc_sock *tsk, u32 sk_filter_state, 3303 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3304 { 3305 struct sock *sk = &tsk->sk; 3306 struct nlattr *attrs; 3307 struct nlattr *stat; 3308 3309 /*filter response w.r.t sk_state*/ 3310 if (!(sk_filter_state & (1 << sk->sk_state))) 3311 return 0; 3312 3313 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3314 if (!attrs) 3315 goto msg_cancel; 3316 3317 if (__tipc_nl_add_sk_info(skb, tsk)) 3318 goto attr_msg_cancel; 3319 3320 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3321 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3322 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3323 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3324 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3325 sock_i_uid(sk))) || 3326 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3327 tipc_diag_gen_cookie(sk), 3328 TIPC_NLA_SOCK_PAD)) 3329 goto attr_msg_cancel; 3330 3331 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3332 if (!stat) 3333 goto attr_msg_cancel; 3334 3335 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3336 skb_queue_len(&sk->sk_receive_queue)) || 3337 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3338 skb_queue_len(&sk->sk_write_queue)) || 3339 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3340 atomic_read(&sk->sk_drops))) 3341 goto stat_msg_cancel; 3342 3343 if (tsk->cong_link_cnt && 3344 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3345 goto stat_msg_cancel; 3346 3347 if (tsk_conn_cong(tsk) && 3348 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3349 goto stat_msg_cancel; 3350 3351 nla_nest_end(skb, stat); 3352 3353 if (tsk->group) 3354 if (tipc_group_fill_sock_diag(tsk->group, skb)) 3355 goto stat_msg_cancel; 3356 3357 nla_nest_end(skb, attrs); 3358 3359 return 0; 3360 3361 stat_msg_cancel: 3362 nla_nest_cancel(skb, stat); 3363 attr_msg_cancel: 3364 nla_nest_cancel(skb, attrs); 3365 msg_cancel: 3366 return -EMSGSIZE; 3367 } 3368 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3369 3370 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3371 { 3372 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3373 } 3374 3375 /* Caller should hold socket lock for the passed tipc socket. 
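* Adds one publication as a nested TIPC_NLA_PUBL attribute to the netlink dump message.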
*/ 3376 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 3377 struct netlink_callback *cb, 3378 struct publication *publ) 3379 { 3380 void *hdr; 3381 struct nlattr *attrs; 3382 3383 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3384 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 3385 if (!hdr) 3386 goto msg_cancel; 3387 3388 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 3389 if (!attrs) 3390 goto genlmsg_cancel; 3391 3392 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 3393 goto attr_msg_cancel; 3394 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 3395 goto attr_msg_cancel; 3396 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 3397 goto attr_msg_cancel; 3398 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 3399 goto attr_msg_cancel; 3400 3401 nla_nest_end(skb, attrs); 3402 genlmsg_end(skb, hdr); 3403 3404 return 0; 3405 3406 attr_msg_cancel: 3407 nla_nest_cancel(skb, attrs); 3408 genlmsg_cancel: 3409 genlmsg_cancel(skb, hdr); 3410 msg_cancel: 3411 return -EMSGSIZE; 3412 } 3413 3414 /* Caller should hold socket lock for the passed tipc socket. */ 3415 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3416 struct netlink_callback *cb, 3417 struct tipc_sock *tsk, u32 *last_publ) 3418 { 3419 int err; 3420 struct publication *p; 3421 3422 if (*last_publ) { 3423 list_for_each_entry(p, &tsk->publications, binding_sock) { 3424 if (p->key == *last_publ) 3425 break; 3426 } 3427 if (p->key != *last_publ) { 3428 /* We never set seq or call nl_dump_check_consistent() 3429 * this means that setting prev_seq here will cause the 3430 * consistence check to fail in the netlink callback 3431 * handler. Resulting in the last NLMSG_DONE message 3432 * having the NLM_F_DUMP_INTR flag set. 3433 */ 3434 cb->prev_seq = 1; 3435 *last_publ = 0; 3436 return -EPIPE; 3437 } 3438 } else { 3439 p = list_first_entry(&tsk->publications, struct publication, 3440 binding_sock); 3441 } 3442 3443 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3444 err = __tipc_nl_add_sk_publ(skb, cb, p); 3445 if (err) { 3446 *last_publ = p->key; 3447 return err; 3448 } 3449 } 3450 *last_publ = 0; 3451 3452 return 0; 3453 } 3454 3455 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3456 { 3457 int err; 3458 u32 tsk_portid = cb->args[0]; 3459 u32 last_publ = cb->args[1]; 3460 u32 done = cb->args[2]; 3461 struct net *net = sock_net(skb->sk); 3462 struct tipc_sock *tsk; 3463 3464 if (!tsk_portid) { 3465 struct nlattr **attrs; 3466 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3467 3468 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3469 if (err) 3470 return err; 3471 3472 if (!attrs[TIPC_NLA_SOCK]) 3473 return -EINVAL; 3474 3475 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3476 attrs[TIPC_NLA_SOCK], 3477 tipc_nl_sock_policy, NULL); 3478 if (err) 3479 return err; 3480 3481 if (!sock[TIPC_NLA_SOCK_REF]) 3482 return -EINVAL; 3483 3484 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3485 } 3486 3487 if (done) 3488 return 0; 3489 3490 tsk = tipc_sk_lookup(net, tsk_portid); 3491 if (!tsk) 3492 return -EINVAL; 3493 3494 lock_sock(&tsk->sk); 3495 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3496 if (!err) 3497 done = 1; 3498 release_sock(&tsk->sk); 3499 sock_put(&tsk->sk); 3500 3501 cb->args[0] = tsk_portid; 3502 cb->args[1] = last_publ; 3503 cb->args[2] = done; 3504 3505 return skb->len; 3506 } 3507
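/*
 * Illustrative userspace sketch (not part of the kernel build) showing how the
 * TIPC_GROUP_JOIN and TIPC_GROUP_LEAVE options handled by tipc_setsockopt()
 * above are used. The service type 4711 and instance 0 are arbitrary example
 * values; any type >= TIPC_RESERVED_TYPES passes the permission check in
 * tipc_sk_join(). Assumes the UAPI definitions from <linux/tipc.h> and
 * AF_TIPC/SOL_TIPC support in libc.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int join_example_group(void)
 *	{
 *		struct tipc_group_req req;
 *		int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *		if (sd < 0)
 *			return -1;
 *		memset(&req, 0, sizeof(req));
 *		req.type = 4711;                    // example service type
 *		req.instance = 0;                   // member instance in the group
 *		req.scope = TIPC_CLUSTER_SCOPE;     // cluster-wide visibility
 *		req.flags = TIPC_GROUP_MEMBER_EVTS; // deliver join/leave events
 *		if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req)))
 *			return -1;
 *
 *		// ... exchange group messages with sendto()/recvmsg() ...
 *
 *		// Leaving takes no option value, cf. the validation switch
 *		// in tipc_setsockopt():
 *		setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
 *		return 0;
 *	}
 *
 * Leaving deletes the group state and withdraws the name binding that
 * tipc_sk_join() published, cf. tipc_sk_leave() above.
 */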