/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
#define CONN_PROBING_INTV       msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG            1
#define TIPC_MAX_PORT           0xffffffff
#define TIPC_MIN_PORT           1
#define TIPC_ACK_RATE           4       /* ACK at 1/4 of rcv window size */

enum {
        TIPC_LISTEN = TCP_LISTEN,
        TIPC_ESTABLISHED = TCP_ESTABLISHED,
        TIPC_OPEN = TCP_CLOSE,
        TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
        TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
        struct sockaddr_tipc sock;
        struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @probe_unacked: connection probe sent and not yet answered by peer
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
        struct sock sk;
        u32 conn_type;
        u32 conn_instance;
        int published;
        u32 max_pkt;
        u32 portid;
        struct tipc_msg phdr;
        struct list_head cong_links;
        struct list_head publications;
        u32 pub_count;
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool probe_unacked;
        u16 cong_link_cnt;
        u16 snt_unacked;
        u16 snd_win;
        u16 peer_caps;
        u16 rcv_unacked;
        u16 rcv_win;
        struct sockaddr_tipc peer;
        struct rhash_head node;
        struct tipc_mc_method mc_method;
        struct rcu_head rcu;
        struct tipc_group *group;
        bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
                       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct
tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); 136 static int tipc_sk_insert(struct tipc_sock *tsk); 137 static void tipc_sk_remove(struct tipc_sock *tsk); 138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz); 139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); 140 141 static const struct proto_ops packet_ops; 142 static const struct proto_ops stream_ops; 143 static const struct proto_ops msg_ops; 144 static struct proto tipc_proto; 145 static const struct rhashtable_params tsk_rht_params; 146 147 static u32 tsk_own_node(struct tipc_sock *tsk) 148 { 149 return msg_prevnode(&tsk->phdr); 150 } 151 152 static u32 tsk_peer_node(struct tipc_sock *tsk) 153 { 154 return msg_destnode(&tsk->phdr); 155 } 156 157 static u32 tsk_peer_port(struct tipc_sock *tsk) 158 { 159 return msg_destport(&tsk->phdr); 160 } 161 162 static bool tsk_unreliable(struct tipc_sock *tsk) 163 { 164 return msg_src_droppable(&tsk->phdr) != 0; 165 } 166 167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) 168 { 169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); 170 } 171 172 static bool tsk_unreturnable(struct tipc_sock *tsk) 173 { 174 return msg_dest_droppable(&tsk->phdr) != 0; 175 } 176 177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) 178 { 179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0); 180 } 181 182 static int tsk_importance(struct tipc_sock *tsk) 183 { 184 return msg_importance(&tsk->phdr); 185 } 186 187 static int tsk_set_importance(struct tipc_sock *tsk, int imp) 188 { 189 if (imp > TIPC_CRITICAL_IMPORTANCE) 190 return -EINVAL; 191 msg_set_importance(&tsk->phdr, (u32)imp); 192 return 0; 193 } 194 195 static struct tipc_sock *tipc_sk(const struct sock *sk) 196 { 197 return container_of(sk, struct tipc_sock, sk); 198 } 199 200 static bool tsk_conn_cong(struct tipc_sock *tsk) 201 { 202 return tsk->snt_unacked > tsk->snd_win; 203 } 204 205 static u16 tsk_blocks(int len) 206 { 207 return ((len / FLOWCTL_BLK_SZ) + 1); 208 } 209 210 /* tsk_blocks(): translate a buffer size in bytes to number of 211 * advertisable blocks, taking into account the ratio truesize(len)/len 212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ 213 */ 214 static u16 tsk_adv_blocks(int len) 215 { 216 return len / FLOWCTL_BLK_SZ / 4; 217 } 218 219 /* tsk_inc(): increment counter for sent or received data 220 * - If block based flow control is not supported by peer we 221 * fall back to message based ditto, incrementing the counter 222 */ 223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen) 224 { 225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 226 return ((msglen / FLOWCTL_BLK_SZ) + 1); 227 return 1; 228 } 229 230 /** 231 * tsk_advance_rx_queue - discard first buffer in socket receive queue 232 * 233 * Caller must hold socket lock 234 */ 235 static void tsk_advance_rx_queue(struct sock *sk) 236 { 237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 238 } 239 240 /* tipc_sk_respond() : send response message back to sender 241 */ 242 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) 243 { 244 u32 selector; 245 u32 dnode; 246 u32 onode = tipc_own_addr(sock_net(sk)); 247 248 if (!tipc_msg_reverse(onode, &skb, err)) 249 return; 250 251 dnode = msg_destnode(buf_msg(skb)); 252 selector = msg_origport(buf_msg(skb)); 253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 254 } 255 256 /** 257 * tsk_rej_rx_queue - reject all buffers in socket receive queue 258 * 
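 * A worked example of the block-based flow control arithmetic done by
 * tsk_adv_blocks() and tsk_inc() above; this is only a sketch, and it
 * assumes FLOWCTL_BLK_SZ is 1024 as defined in socket.h:
 *
 *	- advertised window: tsk_adv_blocks() divides the buffer size by
 *	  4 * FLOWCTL_BLK_SZ, relying on truesize(len)/len < 4 for
 *	  len >= FLOWCTL_BLK_SZ, so a 524288 byte receive buffer yields
 *	  524288 / 1024 / 4 = 128 advertisable blocks
 *	- consumption: tsk_inc(tsk, 5000) charges 5000 / 1024 + 1 = 5
 *	  blocks against that window, or a single unit per message when
 *	  the peer lacks TIPC_BLOCK_FLOWCTL support
 *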
259 * Caller must hold socket lock 260 */ 261 static void tsk_rej_rx_queue(struct sock *sk) 262 { 263 struct sk_buff *skb; 264 265 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 267 } 268 269 static bool tipc_sk_connected(struct sock *sk) 270 { 271 return sk->sk_state == TIPC_ESTABLISHED; 272 } 273 274 /* tipc_sk_type_connectionless - check if the socket is datagram socket 275 * @sk: socket 276 * 277 * Returns true if connection less, false otherwise 278 */ 279 static bool tipc_sk_type_connectionless(struct sock *sk) 280 { 281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM; 282 } 283 284 /* tsk_peer_msg - verify if message was sent by connected port's peer 285 * 286 * Handles cases where the node's network address has changed from 287 * the default of <0.0.0> to its configured setting. 288 */ 289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 290 { 291 struct sock *sk = &tsk->sk; 292 u32 self = tipc_own_addr(sock_net(sk)); 293 u32 peer_port = tsk_peer_port(tsk); 294 u32 orig_node, peer_node; 295 296 if (unlikely(!tipc_sk_connected(sk))) 297 return false; 298 299 if (unlikely(msg_origport(msg) != peer_port)) 300 return false; 301 302 orig_node = msg_orignode(msg); 303 peer_node = tsk_peer_node(tsk); 304 305 if (likely(orig_node == peer_node)) 306 return true; 307 308 if (!orig_node && peer_node == self) 309 return true; 310 311 if (!peer_node && orig_node == self) 312 return true; 313 314 return false; 315 } 316 317 /* tipc_set_sk_state - set the sk_state of the socket 318 * @sk: socket 319 * 320 * Caller must hold socket lock 321 * 322 * Returns 0 on success, errno otherwise 323 */ 324 static int tipc_set_sk_state(struct sock *sk, int state) 325 { 326 int oldsk_state = sk->sk_state; 327 int res = -EINVAL; 328 329 switch (state) { 330 case TIPC_OPEN: 331 res = 0; 332 break; 333 case TIPC_LISTEN: 334 case TIPC_CONNECTING: 335 if (oldsk_state == TIPC_OPEN) 336 res = 0; 337 break; 338 case TIPC_ESTABLISHED: 339 if (oldsk_state == TIPC_CONNECTING || 340 oldsk_state == TIPC_OPEN) 341 res = 0; 342 break; 343 case TIPC_DISCONNECTING: 344 if (oldsk_state == TIPC_CONNECTING || 345 oldsk_state == TIPC_ESTABLISHED) 346 res = 0; 347 break; 348 } 349 350 if (!res) 351 sk->sk_state = state; 352 353 return res; 354 } 355 356 static int tipc_sk_sock_err(struct socket *sock, long *timeout) 357 { 358 struct sock *sk = sock->sk; 359 int err = sock_error(sk); 360 int typ = sock->type; 361 362 if (err) 363 return err; 364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) { 365 if (sk->sk_state == TIPC_DISCONNECTING) 366 return -EPIPE; 367 else if (!tipc_sk_connected(sk)) 368 return -ENOTCONN; 369 } 370 if (!*timeout) 371 return -EAGAIN; 372 if (signal_pending(current)) 373 return sock_intr_errno(*timeout); 374 375 return 0; 376 } 377 378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \ 379 ({ \ 380 struct sock *sk_; \ 381 int rc_; \ 382 \ 383 while ((rc_ = !(condition_))) { \ 384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 385 sk_ = (sock_)->sk; \ 386 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 387 if (rc_) \ 388 break; \ 389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 390 release_sock(sk_); \ 391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 392 sched_annotate_sleep(); \ 393 lock_sock(sk_); \ 394 remove_wait_queue(sk_sleep(sk_), &wait_); \ 395 } \ 396 rc_; \ 397 }) 398 399 /** 400 * tipc_sk_create - create a TIPC socket 401 * @net: network namespace (must be default network) 
402 * @sock: pre-allocated socket structure 403 * @protocol: protocol indicator (must be 0) 404 * @kern: caused by kernel or by userspace? 405 * 406 * This routine creates additional data structures used by the TIPC socket, 407 * initializes them, and links them together. 408 * 409 * Returns 0 on success, errno otherwise 410 */ 411 static int tipc_sk_create(struct net *net, struct socket *sock, 412 int protocol, int kern) 413 { 414 struct tipc_net *tn; 415 const struct proto_ops *ops; 416 struct sock *sk; 417 struct tipc_sock *tsk; 418 struct tipc_msg *msg; 419 420 /* Validate arguments */ 421 if (unlikely(protocol != 0)) 422 return -EPROTONOSUPPORT; 423 424 switch (sock->type) { 425 case SOCK_STREAM: 426 ops = &stream_ops; 427 break; 428 case SOCK_SEQPACKET: 429 ops = &packet_ops; 430 break; 431 case SOCK_DGRAM: 432 case SOCK_RDM: 433 ops = &msg_ops; 434 break; 435 default: 436 return -EPROTOTYPE; 437 } 438 439 /* Allocate socket's protocol area */ 440 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern); 441 if (sk == NULL) 442 return -ENOMEM; 443 444 tsk = tipc_sk(sk); 445 tsk->max_pkt = MAX_PKT_DEFAULT; 446 INIT_LIST_HEAD(&tsk->publications); 447 INIT_LIST_HEAD(&tsk->cong_links); 448 msg = &tsk->phdr; 449 tn = net_generic(sock_net(sk), tipc_net_id); 450 451 /* Finish initializing socket data structures */ 452 sock->ops = ops; 453 sock_init_data(sock, sk); 454 tipc_set_sk_state(sk, TIPC_OPEN); 455 if (tipc_sk_insert(tsk)) { 456 pr_warn("Socket create failed; port number exhausted\n"); 457 return -EINVAL; 458 } 459 460 /* Ensure tsk is visible before we read own_addr. */ 461 smp_mb(); 462 463 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE, 464 TIPC_NAMED_MSG, NAMED_H_SIZE, 0); 465 466 msg_set_origport(msg, tsk->portid); 467 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0); 468 sk->sk_shutdown = 0; 469 sk->sk_backlog_rcv = tipc_sk_backlog_rcv; 470 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 471 sk->sk_data_ready = tipc_data_ready; 472 sk->sk_write_space = tipc_write_space; 473 sk->sk_destruct = tipc_sock_destruct; 474 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 475 tsk->group_is_open = true; 476 atomic_set(&tsk->dupl_rcvcnt, 0); 477 478 /* Start out with safe limits until we receive an advertised window */ 479 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); 480 tsk->rcv_win = tsk->snd_win; 481 482 if (tipc_sk_type_connectionless(sk)) { 483 tsk_set_unreturnable(tsk, true); 484 if (sock->type == SOCK_DGRAM) 485 tsk_set_unreliable(tsk, true); 486 } 487 488 return 0; 489 } 490 491 static void tipc_sk_callback(struct rcu_head *head) 492 { 493 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); 494 495 sock_put(&tsk->sk); 496 } 497 498 /* Caller should hold socket lock for the socket. */ 499 static void __tipc_shutdown(struct socket *sock, int error) 500 { 501 struct sock *sk = sock->sk; 502 struct tipc_sock *tsk = tipc_sk(sk); 503 struct net *net = sock_net(sk); 504 long timeout = CONN_TIMEOUT_DEFAULT; 505 u32 dnode = tsk_peer_node(tsk); 506 struct sk_buff *skb; 507 508 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */ 509 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && 510 !tsk_conn_cong(tsk))); 511 512 /* Reject all unreceived messages, except on an active connection 513 * (which disconnects locally & sends a 'FIN+' to peer). 
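 * Buffers that were already partially read (bytes_read set in the skb
 * control block) are simply freed; every other queued message is bounced
 * back to its sender with 'error' via tipc_sk_respond().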
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
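 *
 * A minimal user-space sketch of the scope semantics described above;
 * this is illustrative only, and the service type and instance range
 * used here are assumed values, not anything defined in this file:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 100, .upper = 199 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));     - publish the names
 *	a.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));     - withdraw them again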
605 */ 606 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, 607 int uaddr_len) 608 { 609 struct sock *sk = sock->sk; 610 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 611 struct tipc_sock *tsk = tipc_sk(sk); 612 int res = -EINVAL; 613 614 lock_sock(sk); 615 if (unlikely(!uaddr_len)) { 616 res = tipc_sk_withdraw(tsk, 0, NULL); 617 goto exit; 618 } 619 if (tsk->group) { 620 res = -EACCES; 621 goto exit; 622 } 623 if (uaddr_len < sizeof(struct sockaddr_tipc)) { 624 res = -EINVAL; 625 goto exit; 626 } 627 if (addr->family != AF_TIPC) { 628 res = -EAFNOSUPPORT; 629 goto exit; 630 } 631 632 if (addr->addrtype == TIPC_ADDR_NAME) 633 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 634 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 635 res = -EAFNOSUPPORT; 636 goto exit; 637 } 638 639 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && 640 (addr->addr.nameseq.type != TIPC_TOP_SRV) && 641 (addr->addr.nameseq.type != TIPC_CFG_SRV)) { 642 res = -EACCES; 643 goto exit; 644 } 645 646 res = (addr->scope >= 0) ? 647 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : 648 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); 649 exit: 650 release_sock(sk); 651 return res; 652 } 653 654 /** 655 * tipc_getname - get port ID of socket or peer socket 656 * @sock: socket structure 657 * @uaddr: area for returned socket address 658 * @uaddr_len: area for returned length of socket address 659 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID 660 * 661 * Returns 0 on success, errno otherwise 662 * 663 * NOTE: This routine doesn't need to take the socket lock since it only 664 * accesses socket information that is unchanging (or which changes in 665 * a completely predictable manner). 666 */ 667 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, 668 int peer) 669 { 670 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 671 struct sock *sk = sock->sk; 672 struct tipc_sock *tsk = tipc_sk(sk); 673 674 memset(addr, 0, sizeof(*addr)); 675 if (peer) { 676 if ((!tipc_sk_connected(sk)) && 677 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING))) 678 return -ENOTCONN; 679 addr->addr.id.ref = tsk_peer_port(tsk); 680 addr->addr.id.node = tsk_peer_node(tsk); 681 } else { 682 addr->addr.id.ref = tsk->portid; 683 addr->addr.id.node = tipc_own_addr(sock_net(sk)); 684 } 685 686 addr->addrtype = TIPC_ADDR_ID; 687 addr->family = AF_TIPC; 688 addr->scope = 0; 689 addr->addr.name.domain = 0; 690 691 return sizeof(*addr); 692 } 693 694 /** 695 * tipc_poll - read and possibly block on pollmask 696 * @file: file structure associated with the socket 697 * @sock: socket for which to calculate the poll bits 698 * @wait: ??? 699 * 700 * Returns pollmask value 701 * 702 * COMMENTARY: 703 * It appears that the usual socket locking mechanisms are not useful here 704 * since the pollmask info is potentially out-of-date the moment this routine 705 * exits. TCP and other protocols seem to rely on higher level poll routines 706 * to handle any preventable race conditions, so TIPC will do the same ... 707 * 708 * IMPORTANT: The fact that a read or write operation is indicated does NOT 709 * imply that the operation will succeed, merely that it should be performed 710 * and will not block. 
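 *
 * A small user-space sketch of how these bits are typically consumed;
 * the descriptor 'sd', the msghdr 'msg' and the timeout are assumptions
 * made for illustration only:
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		recvmsg(sd, &msg, MSG_DONTWAIT);    - data or a FIN is queued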
711 */ 712 static __poll_t tipc_poll(struct file *file, struct socket *sock, 713 poll_table *wait) 714 { 715 struct sock *sk = sock->sk; 716 struct tipc_sock *tsk = tipc_sk(sk); 717 __poll_t revents = 0; 718 719 sock_poll_wait(file, sk_sleep(sk), wait); 720 721 if (sk->sk_shutdown & RCV_SHUTDOWN) 722 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 723 if (sk->sk_shutdown == SHUTDOWN_MASK) 724 revents |= EPOLLHUP; 725 726 switch (sk->sk_state) { 727 case TIPC_ESTABLISHED: 728 case TIPC_CONNECTING: 729 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 730 revents |= EPOLLOUT; 731 /* fall thru' */ 732 case TIPC_LISTEN: 733 if (!skb_queue_empty(&sk->sk_receive_queue)) 734 revents |= EPOLLIN | EPOLLRDNORM; 735 break; 736 case TIPC_OPEN: 737 if (tsk->group_is_open && !tsk->cong_link_cnt) 738 revents |= EPOLLOUT; 739 if (!tipc_sk_type_connectionless(sk)) 740 break; 741 if (skb_queue_empty(&sk->sk_receive_queue)) 742 break; 743 revents |= EPOLLIN | EPOLLRDNORM; 744 break; 745 case TIPC_DISCONNECTING: 746 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; 747 break; 748 } 749 return revents; 750 } 751 752 /** 753 * tipc_sendmcast - send multicast message 754 * @sock: socket structure 755 * @seq: destination address 756 * @msg: message to send 757 * @dlen: length of data to send 758 * @timeout: timeout to wait for wakeup 759 * 760 * Called from function tipc_sendmsg(), which has done all sanity checks 761 * Returns the number of bytes sent on success, or errno 762 */ 763 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 764 struct msghdr *msg, size_t dlen, long timeout) 765 { 766 struct sock *sk = sock->sk; 767 struct tipc_sock *tsk = tipc_sk(sk); 768 struct tipc_msg *hdr = &tsk->phdr; 769 struct net *net = sock_net(sk); 770 int mtu = tipc_bcast_get_mtu(net); 771 struct tipc_mc_method *method = &tsk->mc_method; 772 struct sk_buff_head pkts; 773 struct tipc_nlist dsts; 774 int rc; 775 776 if (tsk->group) 777 return -EACCES; 778 779 /* Block or return if any destination link is congested */ 780 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); 781 if (unlikely(rc)) 782 return rc; 783 784 /* Lookup destination nodes */ 785 tipc_nlist_init(&dsts, tipc_own_addr(net)); 786 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower, 787 seq->upper, &dsts); 788 if (!dsts.local && !dsts.remote) 789 return -EHOSTUNREACH; 790 791 /* Build message header */ 792 msg_set_type(hdr, TIPC_MCAST_MSG); 793 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 794 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 795 msg_set_destport(hdr, 0); 796 msg_set_destnode(hdr, 0); 797 msg_set_nametype(hdr, seq->type); 798 msg_set_namelower(hdr, seq->lower); 799 msg_set_nameupper(hdr, seq->upper); 800 801 /* Build message as chain of buffers */ 802 skb_queue_head_init(&pkts); 803 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 804 805 /* Send message if build was successful */ 806 if (unlikely(rc == dlen)) 807 rc = tipc_mcast_xmit(net, &pkts, method, &dsts, 808 &tsk->cong_link_cnt); 809 810 tipc_nlist_purge(&dsts); 811 812 return rc ? 
rc : dlen; 813 } 814 815 /** 816 * tipc_send_group_msg - send a message to a member in the group 817 * @net: network namespace 818 * @m: message to send 819 * @mb: group member 820 * @dnode: destination node 821 * @dport: destination port 822 * @dlen: total length of message data 823 */ 824 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk, 825 struct msghdr *m, struct tipc_member *mb, 826 u32 dnode, u32 dport, int dlen) 827 { 828 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group); 829 struct tipc_mc_method *method = &tsk->mc_method; 830 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 831 struct tipc_msg *hdr = &tsk->phdr; 832 struct sk_buff_head pkts; 833 int mtu, rc; 834 835 /* Complete message header */ 836 msg_set_type(hdr, TIPC_GRP_UCAST_MSG); 837 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 838 msg_set_destport(hdr, dport); 839 msg_set_destnode(hdr, dnode); 840 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 841 842 /* Build message as chain of buffers */ 843 skb_queue_head_init(&pkts); 844 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 845 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 846 if (unlikely(rc != dlen)) 847 return rc; 848 849 /* Send message */ 850 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 851 if (unlikely(rc == -ELINKCONG)) { 852 tipc_dest_push(&tsk->cong_links, dnode, 0); 853 tsk->cong_link_cnt++; 854 } 855 856 /* Update send window */ 857 tipc_group_update_member(mb, blks); 858 859 /* A broadcast sent within next EXPIRE period must follow same path */ 860 method->rcast = true; 861 method->mandatory = true; 862 return dlen; 863 } 864 865 /** 866 * tipc_send_group_unicast - send message to a member in the group 867 * @sock: socket structure 868 * @m: message to send 869 * @dlen: total length of message data 870 * @timeout: timeout to wait for wakeup 871 * 872 * Called from function tipc_sendmsg(), which has done all sanity checks 873 * Returns the number of bytes sent on success, or errno 874 */ 875 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, 876 int dlen, long timeout) 877 { 878 struct sock *sk = sock->sk; 879 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 880 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 881 struct tipc_sock *tsk = tipc_sk(sk); 882 struct tipc_group *grp = tsk->group; 883 struct net *net = sock_net(sk); 884 struct tipc_member *mb = NULL; 885 u32 node, port; 886 int rc; 887 888 node = dest->addr.id.node; 889 port = dest->addr.id.ref; 890 if (!port && !node) 891 return -EHOSTUNREACH; 892 893 /* Block or return if destination link or member is congested */ 894 rc = tipc_wait_for_cond(sock, &timeout, 895 !tipc_dest_find(&tsk->cong_links, node, 0) && 896 !tipc_group_cong(grp, node, port, blks, &mb)); 897 if (unlikely(rc)) 898 return rc; 899 900 if (unlikely(!mb)) 901 return -EHOSTUNREACH; 902 903 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen); 904 905 return rc ? 
rc : dlen; 906 } 907 908 /** 909 * tipc_send_group_anycast - send message to any member with given identity 910 * @sock: socket structure 911 * @m: message to send 912 * @dlen: total length of message data 913 * @timeout: timeout to wait for wakeup 914 * 915 * Called from function tipc_sendmsg(), which has done all sanity checks 916 * Returns the number of bytes sent on success, or errno 917 */ 918 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, 919 int dlen, long timeout) 920 { 921 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 922 struct sock *sk = sock->sk; 923 struct tipc_sock *tsk = tipc_sk(sk); 924 struct list_head *cong_links = &tsk->cong_links; 925 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 926 struct tipc_group *grp = tsk->group; 927 struct tipc_msg *hdr = &tsk->phdr; 928 struct tipc_member *first = NULL; 929 struct tipc_member *mbr = NULL; 930 struct net *net = sock_net(sk); 931 u32 node, port, exclude; 932 struct list_head dsts; 933 u32 type, inst, scope; 934 int lookups = 0; 935 int dstcnt, rc; 936 bool cong; 937 938 INIT_LIST_HEAD(&dsts); 939 940 type = msg_nametype(hdr); 941 inst = dest->addr.name.name.instance; 942 scope = msg_lookup_scope(hdr); 943 exclude = tipc_group_exclude(grp); 944 945 while (++lookups < 4) { 946 first = NULL; 947 948 /* Look for a non-congested destination member, if any */ 949 while (1) { 950 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 951 &dstcnt, exclude, false)) 952 return -EHOSTUNREACH; 953 tipc_dest_pop(&dsts, &node, &port); 954 cong = tipc_group_cong(grp, node, port, blks, &mbr); 955 if (!cong) 956 break; 957 if (mbr == first) 958 break; 959 if (!first) 960 first = mbr; 961 } 962 963 /* Start over if destination was not in member list */ 964 if (unlikely(!mbr)) 965 continue; 966 967 if (likely(!cong && !tipc_dest_find(cong_links, node, 0))) 968 break; 969 970 /* Block or return if destination link or member is congested */ 971 rc = tipc_wait_for_cond(sock, &timeout, 972 !tipc_dest_find(cong_links, node, 0) && 973 !tipc_group_cong(grp, node, port, 974 blks, &mbr)); 975 if (unlikely(rc)) 976 return rc; 977 978 /* Send, unless destination disappeared while waiting */ 979 if (likely(mbr)) 980 break; 981 } 982 983 if (unlikely(lookups >= 4)) 984 return -EHOSTUNREACH; 985 986 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen); 987 988 return rc ? 
rc : dlen; 989 } 990 991 /** 992 * tipc_send_group_bcast - send message to all members in communication group 993 * @sk: socket structure 994 * @m: message to send 995 * @dlen: total length of message data 996 * @timeout: timeout to wait for wakeup 997 * 998 * Called from function tipc_sendmsg(), which has done all sanity checks 999 * Returns the number of bytes sent on success, or errno 1000 */ 1001 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, 1002 int dlen, long timeout) 1003 { 1004 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1005 struct sock *sk = sock->sk; 1006 struct net *net = sock_net(sk); 1007 struct tipc_sock *tsk = tipc_sk(sk); 1008 struct tipc_group *grp = tsk->group; 1009 struct tipc_nlist *dsts = tipc_group_dests(grp); 1010 struct tipc_mc_method *method = &tsk->mc_method; 1011 bool ack = method->mandatory && method->rcast; 1012 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1013 struct tipc_msg *hdr = &tsk->phdr; 1014 int mtu = tipc_bcast_get_mtu(net); 1015 struct sk_buff_head pkts; 1016 int rc = -EHOSTUNREACH; 1017 1018 if (!dsts->local && !dsts->remote) 1019 return -EHOSTUNREACH; 1020 1021 /* Block or return if any destination link or member is congested */ 1022 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1023 !tipc_group_bc_cong(grp, blks)); 1024 if (unlikely(rc)) 1025 return rc; 1026 1027 /* Complete message header */ 1028 if (dest) { 1029 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1030 msg_set_nameinst(hdr, dest->addr.name.name.instance); 1031 } else { 1032 msg_set_type(hdr, TIPC_GRP_BCAST_MSG); 1033 msg_set_nameinst(hdr, 0); 1034 } 1035 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1036 msg_set_destport(hdr, 0); 1037 msg_set_destnode(hdr, 0); 1038 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1039 1040 /* Avoid getting stuck with repeated forced replicasts */ 1041 msg_set_grp_bc_ack_req(hdr, ack); 1042 1043 /* Build message as chain of buffers */ 1044 skb_queue_head_init(&pkts); 1045 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1046 if (unlikely(rc != dlen)) 1047 return rc; 1048 1049 /* Send message */ 1050 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt); 1051 if (unlikely(rc)) 1052 return rc; 1053 1054 /* Update broadcast sequence number and send windows */ 1055 tipc_group_update_bc_members(tsk->group, blks, ack); 1056 1057 /* Broadcast link is now free to choose method for next broadcast */ 1058 method->mandatory = false; 1059 method->expires = jiffies; 1060 1061 return dlen; 1062 } 1063 1064 /** 1065 * tipc_send_group_mcast - send message to all members with given identity 1066 * @sock: socket structure 1067 * @m: message to send 1068 * @dlen: total length of message data 1069 * @timeout: timeout to wait for wakeup 1070 * 1071 * Called from function tipc_sendmsg(), which has done all sanity checks 1072 * Returns the number of bytes sent on success, or errno 1073 */ 1074 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, 1075 int dlen, long timeout) 1076 { 1077 struct sock *sk = sock->sk; 1078 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1079 struct tipc_sock *tsk = tipc_sk(sk); 1080 struct tipc_group *grp = tsk->group; 1081 struct tipc_msg *hdr = &tsk->phdr; 1082 struct net *net = sock_net(sk); 1083 u32 type, inst, scope, exclude; 1084 struct list_head dsts; 1085 u32 dstcnt; 1086 1087 INIT_LIST_HEAD(&dsts); 1088 1089 type = msg_nametype(hdr); 1090 inst = dest->addr.name.name.instance; 1091 scope = msg_lookup_scope(hdr); 1092 exclude = 
tipc_group_exclude(grp); 1093 1094 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 1095 &dstcnt, exclude, true)) 1096 return -EHOSTUNREACH; 1097 1098 if (dstcnt == 1) { 1099 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref); 1100 return tipc_send_group_unicast(sock, m, dlen, timeout); 1101 } 1102 1103 tipc_dest_list_purge(&dsts); 1104 return tipc_send_group_bcast(sock, m, dlen, timeout); 1105 } 1106 1107 /** 1108 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets 1109 * @arrvq: queue with arriving messages, to be cloned after destination lookup 1110 * @inputq: queue with cloned messages, delivered to socket after dest lookup 1111 * 1112 * Multi-threaded: parallel calls with reference to same queues may occur 1113 */ 1114 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, 1115 struct sk_buff_head *inputq) 1116 { 1117 u32 self = tipc_own_addr(net); 1118 u32 type, lower, upper, scope; 1119 struct sk_buff *skb, *_skb; 1120 u32 portid, oport, onode; 1121 struct sk_buff_head tmpq; 1122 struct list_head dports; 1123 struct tipc_msg *hdr; 1124 int user, mtyp, hlen; 1125 bool exact; 1126 1127 __skb_queue_head_init(&tmpq); 1128 INIT_LIST_HEAD(&dports); 1129 1130 skb = tipc_skb_peek(arrvq, &inputq->lock); 1131 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { 1132 hdr = buf_msg(skb); 1133 user = msg_user(hdr); 1134 mtyp = msg_type(hdr); 1135 hlen = skb_headroom(skb) + msg_hdr_sz(hdr); 1136 oport = msg_origport(hdr); 1137 onode = msg_orignode(hdr); 1138 type = msg_nametype(hdr); 1139 1140 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { 1141 spin_lock_bh(&inputq->lock); 1142 if (skb_peek(arrvq) == skb) { 1143 __skb_dequeue(arrvq); 1144 __skb_queue_tail(inputq, skb); 1145 } 1146 kfree_skb(skb); 1147 spin_unlock_bh(&inputq->lock); 1148 continue; 1149 } 1150 1151 /* Group messages require exact scope match */ 1152 if (msg_in_group(hdr)) { 1153 lower = 0; 1154 upper = ~0; 1155 scope = msg_lookup_scope(hdr); 1156 exact = true; 1157 } else { 1158 /* TIPC_NODE_SCOPE means "any scope" in this context */ 1159 if (onode == self) 1160 scope = TIPC_NODE_SCOPE; 1161 else 1162 scope = TIPC_CLUSTER_SCOPE; 1163 exact = false; 1164 lower = msg_namelower(hdr); 1165 upper = msg_nameupper(hdr); 1166 } 1167 1168 /* Create destination port list: */ 1169 tipc_nametbl_mc_lookup(net, type, lower, upper, 1170 scope, exact, &dports); 1171 1172 /* Clone message per destination */ 1173 while (tipc_dest_pop(&dports, NULL, &portid)) { 1174 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC); 1175 if (_skb) { 1176 msg_set_destport(buf_msg(_skb), portid); 1177 __skb_queue_tail(&tmpq, _skb); 1178 continue; 1179 } 1180 pr_warn("Failed to clone mcast rcv buffer\n"); 1181 } 1182 /* Append to inputq if not already done by other thread */ 1183 spin_lock_bh(&inputq->lock); 1184 if (skb_peek(arrvq) == skb) { 1185 skb_queue_splice_tail_init(&tmpq, inputq); 1186 kfree_skb(__skb_dequeue(arrvq)); 1187 } 1188 spin_unlock_bh(&inputq->lock); 1189 __skb_queue_purge(&tmpq); 1190 kfree_skb(skb); 1191 } 1192 tipc_sk_rcv(net, inputq); 1193 } 1194 1195 /** 1196 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message 1197 * @tsk: receiving socket 1198 * @skb: pointer to message buffer. 
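 * @xmitq: queue for protocol messages to be sent in response
 *
 * Three CONN_MANAGER message types are handled below: CONN_PROBE, which
 * is turned into a CONN_PROBE_REPLY on @xmitq; CONN_ACK, which lowers
 * snt_unacked (and refreshes snd_win for TIPC_BLOCK_FLOWCTL peers) and
 * may wake up a sender blocked on connection congestion; and
 * CONN_PROBE_REPLY, which only clears the probe_unacked flag. A message
 * carrying an error code switches the socket to TIPC_DISCONNECTING.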
1199 */ 1200 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, 1201 struct sk_buff_head *xmitq) 1202 { 1203 struct tipc_msg *hdr = buf_msg(skb); 1204 u32 onode = tsk_own_node(tsk); 1205 struct sock *sk = &tsk->sk; 1206 int mtyp = msg_type(hdr); 1207 bool conn_cong; 1208 1209 /* Ignore if connection cannot be validated: */ 1210 if (!tsk_peer_msg(tsk, hdr)) 1211 goto exit; 1212 1213 if (unlikely(msg_errcode(hdr))) { 1214 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1215 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), 1216 tsk_peer_port(tsk)); 1217 sk->sk_state_change(sk); 1218 goto exit; 1219 } 1220 1221 tsk->probe_unacked = false; 1222 1223 if (mtyp == CONN_PROBE) { 1224 msg_set_type(hdr, CONN_PROBE_REPLY); 1225 if (tipc_msg_reverse(onode, &skb, TIPC_OK)) 1226 __skb_queue_tail(xmitq, skb); 1227 return; 1228 } else if (mtyp == CONN_ACK) { 1229 conn_cong = tsk_conn_cong(tsk); 1230 tsk->snt_unacked -= msg_conn_ack(hdr); 1231 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1232 tsk->snd_win = msg_adv_win(hdr); 1233 if (conn_cong) 1234 sk->sk_write_space(sk); 1235 } else if (mtyp != CONN_PROBE_REPLY) { 1236 pr_warn("Received unknown CONN_PROTO msg\n"); 1237 } 1238 exit: 1239 kfree_skb(skb); 1240 } 1241 1242 /** 1243 * tipc_sendmsg - send message in connectionless manner 1244 * @sock: socket structure 1245 * @m: message to send 1246 * @dsz: amount of user data to be sent 1247 * 1248 * Message must have an destination specified explicitly. 1249 * Used for SOCK_RDM and SOCK_DGRAM messages, 1250 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 1251 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 1252 * 1253 * Returns the number of bytes sent on success, or errno otherwise 1254 */ 1255 static int tipc_sendmsg(struct socket *sock, 1256 struct msghdr *m, size_t dsz) 1257 { 1258 struct sock *sk = sock->sk; 1259 int ret; 1260 1261 lock_sock(sk); 1262 ret = __tipc_sendmsg(sock, m, dsz); 1263 release_sock(sk); 1264 1265 return ret; 1266 } 1267 1268 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) 1269 { 1270 struct sock *sk = sock->sk; 1271 struct net *net = sock_net(sk); 1272 struct tipc_sock *tsk = tipc_sk(sk); 1273 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1274 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1275 struct list_head *clinks = &tsk->cong_links; 1276 bool syn = !tipc_sk_type_connectionless(sk); 1277 struct tipc_group *grp = tsk->group; 1278 struct tipc_msg *hdr = &tsk->phdr; 1279 struct tipc_name_seq *seq; 1280 struct sk_buff_head pkts; 1281 u32 dnode, dport; 1282 u32 type, inst; 1283 int mtu, rc; 1284 1285 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) 1286 return -EMSGSIZE; 1287 1288 if (likely(dest)) { 1289 if (unlikely(m->msg_namelen < sizeof(*dest))) 1290 return -EINVAL; 1291 if (unlikely(dest->family != AF_TIPC)) 1292 return -EINVAL; 1293 } 1294 1295 if (grp) { 1296 if (!dest) 1297 return tipc_send_group_bcast(sock, m, dlen, timeout); 1298 if (dest->addrtype == TIPC_ADDR_NAME) 1299 return tipc_send_group_anycast(sock, m, dlen, timeout); 1300 if (dest->addrtype == TIPC_ADDR_ID) 1301 return tipc_send_group_unicast(sock, m, dlen, timeout); 1302 if (dest->addrtype == TIPC_ADDR_MCAST) 1303 return tipc_send_group_mcast(sock, m, dlen, timeout); 1304 return -EINVAL; 1305 } 1306 1307 if (unlikely(!dest)) { 1308 dest = &tsk->peer; 1309 if (!syn || dest->family != AF_TIPC) 1310 return -EDESTADDRREQ; 1311 } 1312 1313 if (unlikely(syn)) { 1314 if (sk->sk_state == TIPC_LISTEN) 1315 return 
-EPIPE; 1316 if (sk->sk_state != TIPC_OPEN) 1317 return -EISCONN; 1318 if (tsk->published) 1319 return -EOPNOTSUPP; 1320 if (dest->addrtype == TIPC_ADDR_NAME) { 1321 tsk->conn_type = dest->addr.name.name.type; 1322 tsk->conn_instance = dest->addr.name.name.instance; 1323 } 1324 } 1325 1326 seq = &dest->addr.nameseq; 1327 if (dest->addrtype == TIPC_ADDR_MCAST) 1328 return tipc_sendmcast(sock, seq, m, dlen, timeout); 1329 1330 if (dest->addrtype == TIPC_ADDR_NAME) { 1331 type = dest->addr.name.name.type; 1332 inst = dest->addr.name.name.instance; 1333 dnode = dest->addr.name.domain; 1334 msg_set_type(hdr, TIPC_NAMED_MSG); 1335 msg_set_hdr_sz(hdr, NAMED_H_SIZE); 1336 msg_set_nametype(hdr, type); 1337 msg_set_nameinst(hdr, inst); 1338 msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); 1339 dport = tipc_nametbl_translate(net, type, inst, &dnode); 1340 msg_set_destnode(hdr, dnode); 1341 msg_set_destport(hdr, dport); 1342 if (unlikely(!dport && !dnode)) 1343 return -EHOSTUNREACH; 1344 } else if (dest->addrtype == TIPC_ADDR_ID) { 1345 dnode = dest->addr.id.node; 1346 msg_set_type(hdr, TIPC_DIRECT_MSG); 1347 msg_set_lookup_scope(hdr, 0); 1348 msg_set_destnode(hdr, dnode); 1349 msg_set_destport(hdr, dest->addr.id.ref); 1350 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1351 } 1352 1353 /* Block or return if destination link is congested */ 1354 rc = tipc_wait_for_cond(sock, &timeout, 1355 !tipc_dest_find(clinks, dnode, 0)); 1356 if (unlikely(rc)) 1357 return rc; 1358 1359 skb_queue_head_init(&pkts); 1360 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 1361 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1362 if (unlikely(rc != dlen)) 1363 return rc; 1364 1365 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1366 if (unlikely(rc == -ELINKCONG)) { 1367 tipc_dest_push(clinks, dnode, 0); 1368 tsk->cong_link_cnt++; 1369 rc = 0; 1370 } 1371 1372 if (unlikely(syn && !rc)) 1373 tipc_set_sk_state(sk, TIPC_CONNECTING); 1374 1375 return rc ? rc : dlen; 1376 } 1377 1378 /** 1379 * tipc_sendstream - send stream-oriented data 1380 * @sock: socket structure 1381 * @m: data to send 1382 * @dsz: total length of data to be transmitted 1383 * 1384 * Used for SOCK_STREAM data. 
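 *
 * If a destination address is supplied in @m the call is treated as an
 * implicit connection setup and handed to __tipc_sendmsg(); otherwise the
 * data is cut into chunks of at most TIPC_MAX_USER_MSG_SIZE bytes, each
 * sent once neither the connection nor any link is congested.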
1385 * 1386 * Returns the number of bytes sent on success (or partial success), 1387 * or errno if no data sent 1388 */ 1389 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz) 1390 { 1391 struct sock *sk = sock->sk; 1392 int ret; 1393 1394 lock_sock(sk); 1395 ret = __tipc_sendstream(sock, m, dsz); 1396 release_sock(sk); 1397 1398 return ret; 1399 } 1400 1401 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) 1402 { 1403 struct sock *sk = sock->sk; 1404 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1405 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1406 struct tipc_sock *tsk = tipc_sk(sk); 1407 struct tipc_msg *hdr = &tsk->phdr; 1408 struct net *net = sock_net(sk); 1409 struct sk_buff_head pkts; 1410 u32 dnode = tsk_peer_node(tsk); 1411 int send, sent = 0; 1412 int rc = 0; 1413 1414 skb_queue_head_init(&pkts); 1415 1416 if (unlikely(dlen > INT_MAX)) 1417 return -EMSGSIZE; 1418 1419 /* Handle implicit connection setup */ 1420 if (unlikely(dest)) { 1421 rc = __tipc_sendmsg(sock, m, dlen); 1422 if (dlen && (dlen == rc)) 1423 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); 1424 return rc; 1425 } 1426 1427 do { 1428 rc = tipc_wait_for_cond(sock, &timeout, 1429 (!tsk->cong_link_cnt && 1430 !tsk_conn_cong(tsk) && 1431 tipc_sk_connected(sk))); 1432 if (unlikely(rc)) 1433 break; 1434 1435 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); 1436 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts); 1437 if (unlikely(rc != send)) 1438 break; 1439 1440 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1441 if (unlikely(rc == -ELINKCONG)) { 1442 tsk->cong_link_cnt = 1; 1443 rc = 0; 1444 } 1445 if (likely(!rc)) { 1446 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE); 1447 sent += send; 1448 } 1449 } while (sent < dlen && !rc); 1450 1451 return sent ? sent : rc; 1452 } 1453 1454 /** 1455 * tipc_send_packet - send a connection-oriented message 1456 * @sock: socket structure 1457 * @m: message to send 1458 * @dsz: length of data to be transmitted 1459 * 1460 * Used for SOCK_SEQPACKET messages. 
1461 * 1462 * Returns the number of bytes sent on success, or errno otherwise 1463 */ 1464 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) 1465 { 1466 if (dsz > TIPC_MAX_USER_MSG_SIZE) 1467 return -EMSGSIZE; 1468 1469 return tipc_sendstream(sock, m, dsz); 1470 } 1471 1472 /* tipc_sk_finish_conn - complete the setup of a connection 1473 */ 1474 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1475 u32 peer_node) 1476 { 1477 struct sock *sk = &tsk->sk; 1478 struct net *net = sock_net(sk); 1479 struct tipc_msg *msg = &tsk->phdr; 1480 1481 msg_set_destnode(msg, peer_node); 1482 msg_set_destport(msg, peer_port); 1483 msg_set_type(msg, TIPC_CONN_MSG); 1484 msg_set_lookup_scope(msg, 0); 1485 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1486 1487 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 1488 tipc_set_sk_state(sk, TIPC_ESTABLISHED); 1489 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1490 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1491 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); 1492 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1493 return; 1494 1495 /* Fall back to message based flow control */ 1496 tsk->rcv_win = FLOWCTL_MSG_WIN; 1497 tsk->snd_win = FLOWCTL_MSG_WIN; 1498 } 1499 1500 /** 1501 * tipc_sk_set_orig_addr - capture sender's address for received message 1502 * @m: descriptor for message info 1503 * @hdr: received message header 1504 * 1505 * Note: Address is not captured if not requested by receiver. 1506 */ 1507 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) 1508 { 1509 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name); 1510 struct tipc_msg *hdr = buf_msg(skb); 1511 1512 if (!srcaddr) 1513 return; 1514 1515 srcaddr->sock.family = AF_TIPC; 1516 srcaddr->sock.addrtype = TIPC_ADDR_ID; 1517 srcaddr->sock.addr.id.ref = msg_origport(hdr); 1518 srcaddr->sock.addr.id.node = msg_orignode(hdr); 1519 srcaddr->sock.addr.name.domain = 0; 1520 srcaddr->sock.scope = 0; 1521 m->msg_namelen = sizeof(struct sockaddr_tipc); 1522 1523 if (!msg_in_group(hdr)) 1524 return; 1525 1526 /* Group message users may also want to know sending member's id */ 1527 srcaddr->member.family = AF_TIPC; 1528 srcaddr->member.addrtype = TIPC_ADDR_NAME; 1529 srcaddr->member.addr.name.name.type = msg_nametype(hdr); 1530 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; 1531 srcaddr->member.addr.name.domain = 0; 1532 m->msg_namelen = sizeof(*srcaddr); 1533 } 1534 1535 /** 1536 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1537 * @m: descriptor for message info 1538 * @msg: received message header 1539 * @tsk: TIPC port associated with message 1540 * 1541 * Note: Ancillary data is not captured if not requested by receiver. 1542 * 1543 * Returns 0 if successful, otherwise errno 1544 */ 1545 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1546 struct tipc_sock *tsk) 1547 { 1548 u32 anc_data[3]; 1549 u32 err; 1550 u32 dest_type; 1551 int has_name; 1552 int res; 1553 1554 if (likely(m->msg_controllen == 0)) 1555 return 0; 1556 1557 /* Optionally capture errored message object(s) */ 1558 err = msg ? 
msg_errcode(msg) : 0; 1559 if (unlikely(err)) { 1560 anc_data[0] = err; 1561 anc_data[1] = msg_data_sz(msg); 1562 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data); 1563 if (res) 1564 return res; 1565 if (anc_data[1]) { 1566 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1567 msg_data(msg)); 1568 if (res) 1569 return res; 1570 } 1571 } 1572 1573 /* Optionally capture message destination object */ 1574 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 1575 switch (dest_type) { 1576 case TIPC_NAMED_MSG: 1577 has_name = 1; 1578 anc_data[0] = msg_nametype(msg); 1579 anc_data[1] = msg_namelower(msg); 1580 anc_data[2] = msg_namelower(msg); 1581 break; 1582 case TIPC_MCAST_MSG: 1583 has_name = 1; 1584 anc_data[0] = msg_nametype(msg); 1585 anc_data[1] = msg_namelower(msg); 1586 anc_data[2] = msg_nameupper(msg); 1587 break; 1588 case TIPC_CONN_MSG: 1589 has_name = (tsk->conn_type != 0); 1590 anc_data[0] = tsk->conn_type; 1591 anc_data[1] = tsk->conn_instance; 1592 anc_data[2] = tsk->conn_instance; 1593 break; 1594 default: 1595 has_name = 0; 1596 } 1597 if (has_name) { 1598 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data); 1599 if (res) 1600 return res; 1601 } 1602 1603 return 0; 1604 } 1605 1606 static void tipc_sk_send_ack(struct tipc_sock *tsk) 1607 { 1608 struct sock *sk = &tsk->sk; 1609 struct net *net = sock_net(sk); 1610 struct sk_buff *skb = NULL; 1611 struct tipc_msg *msg; 1612 u32 peer_port = tsk_peer_port(tsk); 1613 u32 dnode = tsk_peer_node(tsk); 1614 1615 if (!tipc_sk_connected(sk)) 1616 return; 1617 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, 1618 dnode, tsk_own_node(tsk), peer_port, 1619 tsk->portid, TIPC_OK); 1620 if (!skb) 1621 return; 1622 msg = buf_msg(skb); 1623 msg_set_conn_ack(msg, tsk->rcv_unacked); 1624 tsk->rcv_unacked = 0; 1625 1626 /* Adjust to and advertize the correct window limit */ 1627 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) { 1628 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf); 1629 msg_set_adv_win(msg, tsk->rcv_win); 1630 } 1631 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg)); 1632 } 1633 1634 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1635 { 1636 struct sock *sk = sock->sk; 1637 DEFINE_WAIT(wait); 1638 long timeo = *timeop; 1639 int err = sock_error(sk); 1640 1641 if (err) 1642 return err; 1643 1644 for (;;) { 1645 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1646 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1647 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1648 err = -ENOTCONN; 1649 break; 1650 } 1651 release_sock(sk); 1652 timeo = schedule_timeout(timeo); 1653 lock_sock(sk); 1654 } 1655 err = 0; 1656 if (!skb_queue_empty(&sk->sk_receive_queue)) 1657 break; 1658 err = -EAGAIN; 1659 if (!timeo) 1660 break; 1661 err = sock_intr_errno(timeo); 1662 if (signal_pending(current)) 1663 break; 1664 1665 err = sock_error(sk); 1666 if (err) 1667 break; 1668 } 1669 finish_wait(sk_sleep(sk), &wait); 1670 *timeop = timeo; 1671 return err; 1672 } 1673 1674 /** 1675 * tipc_recvmsg - receive packet-oriented message 1676 * @m: descriptor for message info 1677 * @buflen: length of user buffer area 1678 * @flags: receive flags 1679 * 1680 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 1681 * If the complete message doesn't fit in user area, truncate it. 
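 *
 * A sketch of how the ancillary data attached by tipc_sk_anc_data_recv()
 * can be read from user space; the layout follows the put_cmsg() calls
 * above, while the msghdr 'msg' and the handle_*() helpers are
 * illustrative placeholders:
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_DESTNAME)
 *			handle_name(CMSG_DATA(cm)); - three u32: type, lower, upper
 *		else if (cm->cmsg_type == TIPC_ERRINFO)
 *			handle_err(CMSG_DATA(cm));  - two u32: error code, data size
 *	}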
1682 * 1683 * Returns size of returned message data, errno otherwise 1684 */ 1685 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, 1686 size_t buflen, int flags) 1687 { 1688 struct sock *sk = sock->sk; 1689 bool connected = !tipc_sk_type_connectionless(sk); 1690 struct tipc_sock *tsk = tipc_sk(sk); 1691 int rc, err, hlen, dlen, copy; 1692 struct sk_buff_head xmitq; 1693 struct tipc_msg *hdr; 1694 struct sk_buff *skb; 1695 bool grp_evt; 1696 long timeout; 1697 1698 /* Catch invalid receive requests */ 1699 if (unlikely(!buflen)) 1700 return -EINVAL; 1701 1702 lock_sock(sk); 1703 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) { 1704 rc = -ENOTCONN; 1705 goto exit; 1706 } 1707 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1708 1709 /* Step rcv queue to first msg with data or error; wait if necessary */ 1710 do { 1711 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1712 if (unlikely(rc)) 1713 goto exit; 1714 skb = skb_peek(&sk->sk_receive_queue); 1715 hdr = buf_msg(skb); 1716 dlen = msg_data_sz(hdr); 1717 hlen = msg_hdr_sz(hdr); 1718 err = msg_errcode(hdr); 1719 grp_evt = msg_is_grp_evt(hdr); 1720 if (likely(dlen || err)) 1721 break; 1722 tsk_advance_rx_queue(sk); 1723 } while (1); 1724 1725 /* Collect msg meta data, including error code and rejected data */ 1726 tipc_sk_set_orig_addr(m, skb); 1727 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1728 if (unlikely(rc)) 1729 goto exit; 1730 1731 /* Capture data if non-error msg, otherwise just set return value */ 1732 if (likely(!err)) { 1733 copy = min_t(int, dlen, buflen); 1734 if (unlikely(copy != dlen)) 1735 m->msg_flags |= MSG_TRUNC; 1736 rc = skb_copy_datagram_msg(skb, hlen, m, copy); 1737 } else { 1738 copy = 0; 1739 rc = 0; 1740 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) 1741 rc = -ECONNRESET; 1742 } 1743 if (unlikely(rc)) 1744 goto exit; 1745 1746 /* Mark message as group event if applicable */ 1747 if (unlikely(grp_evt)) { 1748 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN) 1749 m->msg_flags |= MSG_EOR; 1750 m->msg_flags |= MSG_OOB; 1751 copy = 0; 1752 } 1753 1754 /* Caption of data or error code/rejected data was successful */ 1755 if (unlikely(flags & MSG_PEEK)) 1756 goto exit; 1757 1758 /* Send group flow control advertisement when applicable */ 1759 if (tsk->group && msg_in_group(hdr) && !grp_evt) { 1760 skb_queue_head_init(&xmitq); 1761 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen), 1762 msg_orignode(hdr), msg_origport(hdr), 1763 &xmitq); 1764 tipc_node_distr_xmit(sock_net(sk), &xmitq); 1765 } 1766 1767 tsk_advance_rx_queue(sk); 1768 1769 if (likely(!connected)) 1770 goto exit; 1771 1772 /* Send connection flow control advertisement when applicable */ 1773 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1774 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE) 1775 tipc_sk_send_ack(tsk); 1776 exit: 1777 release_sock(sk); 1778 return rc ? rc : copy; 1779 } 1780 1781 /** 1782 * tipc_recvstream - receive stream-oriented data 1783 * @m: descriptor for message info 1784 * @buflen: total size of user buffer area 1785 * @flags: receive flags 1786 * 1787 * Used for SOCK_STREAM messages only. If not enough data is available 1788 * will optionally wait for more; never truncates data. 
1789 * 1790 * Returns size of returned message data, errno otherwise 1791 */ 1792 static int tipc_recvstream(struct socket *sock, struct msghdr *m, 1793 size_t buflen, int flags) 1794 { 1795 struct sock *sk = sock->sk; 1796 struct tipc_sock *tsk = tipc_sk(sk); 1797 struct sk_buff *skb; 1798 struct tipc_msg *hdr; 1799 struct tipc_skb_cb *skb_cb; 1800 bool peek = flags & MSG_PEEK; 1801 int offset, required, copy, copied = 0; 1802 int hlen, dlen, err, rc; 1803 long timeout; 1804 1805 /* Catch invalid receive attempts */ 1806 if (unlikely(!buflen)) 1807 return -EINVAL; 1808 1809 lock_sock(sk); 1810 1811 if (unlikely(sk->sk_state == TIPC_OPEN)) { 1812 rc = -ENOTCONN; 1813 goto exit; 1814 } 1815 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen); 1816 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1817 1818 do { 1819 /* Look at first msg in receive queue; wait if necessary */ 1820 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1821 if (unlikely(rc)) 1822 break; 1823 skb = skb_peek(&sk->sk_receive_queue); 1824 skb_cb = TIPC_SKB_CB(skb); 1825 hdr = buf_msg(skb); 1826 dlen = msg_data_sz(hdr); 1827 hlen = msg_hdr_sz(hdr); 1828 err = msg_errcode(hdr); 1829 1830 /* Discard any empty non-errored (SYN-) message */ 1831 if (unlikely(!dlen && !err)) { 1832 tsk_advance_rx_queue(sk); 1833 continue; 1834 } 1835 1836 /* Collect msg meta data, incl. error code and rejected data */ 1837 if (!copied) { 1838 tipc_sk_set_orig_addr(m, skb); 1839 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1840 if (rc) 1841 break; 1842 } 1843 1844 /* Copy data if msg ok, otherwise return error/partial data */ 1845 if (likely(!err)) { 1846 offset = skb_cb->bytes_read; 1847 copy = min_t(int, dlen - offset, buflen - copied); 1848 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); 1849 if (unlikely(rc)) 1850 break; 1851 copied += copy; 1852 offset += copy; 1853 if (unlikely(offset < dlen)) { 1854 if (!peek) 1855 skb_cb->bytes_read = offset; 1856 break; 1857 } 1858 } else { 1859 rc = 0; 1860 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control) 1861 rc = -ECONNRESET; 1862 if (copied || rc) 1863 break; 1864 } 1865 1866 if (unlikely(peek)) 1867 break; 1868 1869 tsk_advance_rx_queue(sk); 1870 1871 /* Send connection flow control advertisement when applicable */ 1872 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1873 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)) 1874 tipc_sk_send_ack(tsk); 1875 1876 /* Exit if all requested data or FIN/error received */ 1877 if (copied == buflen || err) 1878 break; 1879 1880 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required); 1881 exit: 1882 release_sock(sk); 1883 return copied ? 
copied : rc; 1884 } 1885 1886 /** 1887 * tipc_write_space - wake up thread if port congestion is released 1888 * @sk: socket 1889 */ 1890 static void tipc_write_space(struct sock *sk) 1891 { 1892 struct socket_wq *wq; 1893 1894 rcu_read_lock(); 1895 wq = rcu_dereference(sk->sk_wq); 1896 if (skwq_has_sleeper(wq)) 1897 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1898 EPOLLWRNORM | EPOLLWRBAND); 1899 rcu_read_unlock(); 1900 } 1901 1902 /** 1903 * tipc_data_ready - wake up threads to indicate messages have been received 1904 * @sk: socket 1906 */ 1907 static void tipc_data_ready(struct sock *sk) 1908 { 1909 struct socket_wq *wq; 1910 1911 rcu_read_lock(); 1912 wq = rcu_dereference(sk->sk_wq); 1913 if (skwq_has_sleeper(wq)) 1914 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1915 EPOLLRDNORM | EPOLLRDBAND); 1916 rcu_read_unlock(); 1917 } 1918 1919 static void tipc_sock_destruct(struct sock *sk) 1920 { 1921 __skb_queue_purge(&sk->sk_receive_queue); 1922 } 1923 1924 static void tipc_sk_proto_rcv(struct sock *sk, 1925 struct sk_buff_head *inputq, 1926 struct sk_buff_head *xmitq) 1927 { 1928 struct sk_buff *skb = __skb_dequeue(inputq); 1929 struct tipc_sock *tsk = tipc_sk(sk); 1930 struct tipc_msg *hdr = buf_msg(skb); 1931 struct tipc_group *grp = tsk->group; 1932 bool wakeup = false; 1933 1934 switch (msg_user(hdr)) { 1935 case CONN_MANAGER: 1936 tipc_sk_conn_proto_rcv(tsk, skb, xmitq); 1937 return; 1938 case SOCK_WAKEUP: 1939 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); 1940 tsk->cong_link_cnt--; 1941 wakeup = true; 1942 break; 1943 case GROUP_PROTOCOL: 1944 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq); 1945 break; 1946 case TOP_SRV: 1947 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, 1948 hdr, inputq, xmitq); 1949 break; 1950 default: 1951 break; 1952 } 1953 1954 if (wakeup) 1955 sk->sk_write_space(sk); 1956 1957 kfree_skb(skb); 1958 } 1959 1960 /** 1961 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket 1962 * @tsk: TIPC socket 1963 * @skb: pointer to message buffer.
Set to NULL if buffer is consumed 1964 * 1965 * Returns true if everything ok, false otherwise 1966 */ 1967 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1968 { 1969 struct sock *sk = &tsk->sk; 1970 struct net *net = sock_net(sk); 1971 struct tipc_msg *hdr = buf_msg(skb); 1972 u32 pport = msg_origport(hdr); 1973 u32 pnode = msg_orignode(hdr); 1974 1975 if (unlikely(msg_mcast(hdr))) 1976 return false; 1977 1978 switch (sk->sk_state) { 1979 case TIPC_CONNECTING: 1980 /* Accept only ACK or NACK message */ 1981 if (unlikely(!msg_connected(hdr))) { 1982 if (pport != tsk_peer_port(tsk) || 1983 pnode != tsk_peer_node(tsk)) 1984 return false; 1985 1986 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1987 sk->sk_err = ECONNREFUSED; 1988 sk->sk_state_change(sk); 1989 return true; 1990 } 1991 1992 if (unlikely(msg_errcode(hdr))) { 1993 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1994 sk->sk_err = ECONNREFUSED; 1995 sk->sk_state_change(sk); 1996 return true; 1997 } 1998 1999 if (unlikely(!msg_isdata(hdr))) { 2000 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2001 sk->sk_err = EINVAL; 2002 sk->sk_state_change(sk); 2003 return true; 2004 } 2005 2006 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 2007 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2008 2009 /* If 'ACK+' message, add to socket receive queue */ 2010 if (msg_data_sz(hdr)) 2011 return true; 2012 2013 /* If empty 'ACK-' message, wake up sleeping connect() */ 2014 sk->sk_data_ready(sk); 2015 2016 /* 'ACK-' message is neither accepted nor rejected: */ 2017 msg_set_dest_droppable(hdr, 1); 2018 return false; 2019 2020 case TIPC_OPEN: 2021 case TIPC_DISCONNECTING: 2022 break; 2023 case TIPC_LISTEN: 2024 /* Accept only SYN message */ 2025 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 2026 return true; 2027 break; 2028 case TIPC_ESTABLISHED: 2029 /* Accept only connection-based messages sent by peer */ 2030 if (unlikely(!tsk_peer_msg(tsk, hdr))) 2031 return false; 2032 2033 if (unlikely(msg_errcode(hdr))) { 2034 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2035 /* Let timer expire on it's own */ 2036 tipc_node_remove_conn(net, tsk_peer_node(tsk), 2037 tsk->portid); 2038 sk->sk_state_change(sk); 2039 } 2040 return true; 2041 default: 2042 pr_err("Unknown sk_state %u\n", sk->sk_state); 2043 } 2044 2045 return false; 2046 } 2047 2048 /** 2049 * rcvbuf_limit - get proper overload limit of socket receive queue 2050 * @sk: socket 2051 * @skb: message 2052 * 2053 * For connection oriented messages, irrespective of importance, 2054 * default queue limit is 2 MB. 2055 * 2056 * For connectionless messages, queue limits are based on message 2057 * importance as follows: 2058 * 2059 * TIPC_LOW_IMPORTANCE (2 MB) 2060 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2061 * TIPC_HIGH_IMPORTANCE (8 MB) 2062 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2063 * 2064 * Returns overload limit according to corresponding message importance 2065 */ 2066 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2067 { 2068 struct tipc_sock *tsk = tipc_sk(sk); 2069 struct tipc_msg *hdr = buf_msg(skb); 2070 2071 if (unlikely(msg_in_group(hdr))) 2072 return sk->sk_rcvbuf; 2073 2074 if (unlikely(!msg_connected(hdr))) 2075 return sk->sk_rcvbuf << msg_importance(hdr); 2076 2077 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2078 return sk->sk_rcvbuf; 2079 2080 return FLOWCTL_MSG_LIM; 2081 } 2082 2083 /** 2084 * tipc_sk_filter_rcv - validate incoming message 2085 * @sk: socket 2086 * @skb: pointer to message. 
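 * @xmitq: queue for protocol responses and rejected messages produced while filtering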
2087 * 2088 * Enqueues message on receive queue if acceptable; optionally handles 2089 * disconnect indication for a connected socket. 2090 * 2091 * Called with socket lock already taken 2092 * 2093 */ 2094 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2095 struct sk_buff_head *xmitq) 2096 { 2097 bool sk_conn = !tipc_sk_type_connectionless(sk); 2098 struct tipc_sock *tsk = tipc_sk(sk); 2099 struct tipc_group *grp = tsk->group; 2100 struct tipc_msg *hdr = buf_msg(skb); 2101 struct net *net = sock_net(sk); 2102 struct sk_buff_head inputq; 2103 int limit, err = TIPC_OK; 2104 2105 TIPC_SKB_CB(skb)->bytes_read = 0; 2106 __skb_queue_head_init(&inputq); 2107 __skb_queue_tail(&inputq, skb); 2108 2109 if (unlikely(!msg_isdata(hdr))) 2110 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2111 2112 if (unlikely(grp)) 2113 tipc_group_filter_msg(grp, &inputq, xmitq); 2114 2115 /* Validate and add to receive buffer if there is space */ 2116 while ((skb = __skb_dequeue(&inputq))) { 2117 hdr = buf_msg(skb); 2118 limit = rcvbuf_limit(sk, skb); 2119 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2120 (!sk_conn && msg_connected(hdr)) || 2121 (!grp && msg_in_group(hdr))) 2122 err = TIPC_ERR_NO_PORT; 2123 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2124 atomic_inc(&sk->sk_drops); 2125 err = TIPC_ERR_OVERLOAD; 2126 } 2127 2128 if (unlikely(err)) { 2129 tipc_skb_reject(net, err, skb, xmitq); 2130 err = TIPC_OK; 2131 continue; 2132 } 2133 __skb_queue_tail(&sk->sk_receive_queue, skb); 2134 skb_set_owner_r(skb, sk); 2135 sk->sk_data_ready(sk); 2136 } 2137 } 2138 2139 /** 2140 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2141 * @sk: socket 2142 * @skb: message 2143 * 2144 * Caller must hold socket lock 2145 */ 2146 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2147 { 2148 unsigned int before = sk_rmem_alloc_get(sk); 2149 struct sk_buff_head xmitq; 2150 unsigned int added; 2151 2152 __skb_queue_head_init(&xmitq); 2153 2154 tipc_sk_filter_rcv(sk, skb, &xmitq); 2155 added = sk_rmem_alloc_get(sk) - before; 2156 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2157 2158 /* Send pending response/rejected messages, if any */ 2159 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2160 return 0; 2161 } 2162 2163 /** 2164 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2165 * inputq and try adding them to socket or backlog queue 2166 * @inputq: list of incoming buffers with potentially different destinations 2167 * @sk: socket where the buffers should be enqueued 2168 * @dport: port number for the socket 2169 * 2170 * Caller must hold socket lock 2171 */ 2172 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2173 u32 dport, struct sk_buff_head *xmitq) 2174 { 2175 unsigned long time_limit = jiffies + 2; 2176 struct sk_buff *skb; 2177 unsigned int lim; 2178 atomic_t *dcnt; 2179 u32 onode; 2180 2181 while (skb_queue_len(inputq)) { 2182 if (unlikely(time_after_eq(jiffies, time_limit))) 2183 return; 2184 2185 skb = tipc_skb_dequeue(inputq, dport); 2186 if (unlikely(!skb)) 2187 return; 2188 2189 /* Add message directly to receive queue if possible */ 2190 if (!sock_owned_by_user(sk)) { 2191 tipc_sk_filter_rcv(sk, skb, xmitq); 2192 continue; 2193 } 2194 2195 /* Try backlog, compensating for double-counted bytes */ 2196 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2197 if (!sk->sk_backlog.len) 2198 atomic_set(dcnt, 0); 2199 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2200 if (likely(!sk_add_backlog(sk, skb, lim))) 2201 
continue; 2202 2203 /* Overload => reject message back to sender */ 2204 onode = tipc_own_addr(sock_net(sk)); 2205 atomic_inc(&sk->sk_drops); 2206 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2207 __skb_queue_tail(xmitq, skb); 2208 break; 2209 } 2210 } 2211 2212 /** 2213 * tipc_sk_rcv - handle a chain of incoming buffers 2214 * @inputq: buffer list containing the buffers 2215 * Consumes all buffers in list until inputq is empty 2216 * Note: may be called in multiple threads referring to the same queue 2217 */ 2218 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2219 { 2220 struct sk_buff_head xmitq; 2221 u32 dnode, dport = 0; 2222 int err; 2223 struct tipc_sock *tsk; 2224 struct sock *sk; 2225 struct sk_buff *skb; 2226 2227 __skb_queue_head_init(&xmitq); 2228 while (skb_queue_len(inputq)) { 2229 dport = tipc_skb_peek_port(inputq, dport); 2230 tsk = tipc_sk_lookup(net, dport); 2231 2232 if (likely(tsk)) { 2233 sk = &tsk->sk; 2234 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2235 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2236 spin_unlock_bh(&sk->sk_lock.slock); 2237 } 2238 /* Send pending response/rejected messages, if any */ 2239 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2240 sock_put(sk); 2241 continue; 2242 } 2243 /* No destination socket => dequeue skb if still there */ 2244 skb = tipc_skb_dequeue(inputq, dport); 2245 if (!skb) 2246 return; 2247 2248 /* Try secondary lookup if unresolved named message */ 2249 err = TIPC_ERR_NO_PORT; 2250 if (tipc_msg_lookup_dest(net, skb, &err)) 2251 goto xmit; 2252 2253 /* Prepare for message rejection */ 2254 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2255 continue; 2256 xmit: 2257 dnode = msg_destnode(buf_msg(skb)); 2258 tipc_node_xmit_skb(net, skb, dnode, dport); 2259 } 2260 } 2261 2262 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2263 { 2264 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2265 struct sock *sk = sock->sk; 2266 int done; 2267 2268 do { 2269 int err = sock_error(sk); 2270 if (err) 2271 return err; 2272 if (!*timeo_p) 2273 return -ETIMEDOUT; 2274 if (signal_pending(current)) 2275 return sock_intr_errno(*timeo_p); 2276 2277 add_wait_queue(sk_sleep(sk), &wait); 2278 done = sk_wait_event(sk, timeo_p, 2279 sk->sk_state != TIPC_CONNECTING, &wait); 2280 remove_wait_queue(sk_sleep(sk), &wait); 2281 } while (!done); 2282 return 0; 2283 } 2284 2285 /** 2286 * tipc_connect - establish a connection to another TIPC port 2287 * @sock: socket structure 2288 * @dest: socket address for destination port 2289 * @destlen: size of socket address data structure 2290 * @flags: file-related flags associated with socket 2291 * 2292 * Returns 0 on success, errno otherwise 2293 */ 2294 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2295 int destlen, int flags) 2296 { 2297 struct sock *sk = sock->sk; 2298 struct tipc_sock *tsk = tipc_sk(sk); 2299 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2300 struct msghdr m = {NULL,}; 2301 long timeout = (flags & O_NONBLOCK) ? 
0 : tsk->conn_timeout; 2302 int previous; 2303 int res = 0; 2304 2305 if (destlen != sizeof(struct sockaddr_tipc)) 2306 return -EINVAL; 2307 2308 lock_sock(sk); 2309 2310 if (tsk->group) { 2311 res = -EINVAL; 2312 goto exit; 2313 } 2314 2315 if (dst->family == AF_UNSPEC) { 2316 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2317 if (!tipc_sk_type_connectionless(sk)) 2318 res = -EINVAL; 2319 goto exit; 2320 } else if (dst->family != AF_TIPC) { 2321 res = -EINVAL; 2322 } 2323 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2324 res = -EINVAL; 2325 if (res) 2326 goto exit; 2327 2328 /* DGRAM/RDM connect(), just save the destaddr */ 2329 if (tipc_sk_type_connectionless(sk)) { 2330 memcpy(&tsk->peer, dest, destlen); 2331 goto exit; 2332 } 2333 2334 previous = sk->sk_state; 2335 2336 switch (sk->sk_state) { 2337 case TIPC_OPEN: 2338 /* Send a 'SYN-' to destination */ 2339 m.msg_name = dest; 2340 m.msg_namelen = destlen; 2341 2342 /* If connect is in non-blocking case, set MSG_DONTWAIT to 2343 * indicate send_msg() is never blocked. 2344 */ 2345 if (!timeout) 2346 m.msg_flags = MSG_DONTWAIT; 2347 2348 res = __tipc_sendmsg(sock, &m, 0); 2349 if ((res < 0) && (res != -EWOULDBLOCK)) 2350 goto exit; 2351 2352 /* Just entered TIPC_CONNECTING state; the only 2353 * difference is that return value in non-blocking 2354 * case is EINPROGRESS, rather than EALREADY. 2355 */ 2356 res = -EINPROGRESS; 2357 /* fall thru' */ 2358 case TIPC_CONNECTING: 2359 if (!timeout) { 2360 if (previous == TIPC_CONNECTING) 2361 res = -EALREADY; 2362 goto exit; 2363 } 2364 timeout = msecs_to_jiffies(timeout); 2365 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2366 res = tipc_wait_for_connect(sock, &timeout); 2367 break; 2368 case TIPC_ESTABLISHED: 2369 res = -EISCONN; 2370 break; 2371 default: 2372 res = -EINVAL; 2373 } 2374 2375 exit: 2376 release_sock(sk); 2377 return res; 2378 } 2379 2380 /** 2381 * tipc_listen - allow socket to listen for incoming connections 2382 * @sock: socket structure 2383 * @len: (unused) 2384 * 2385 * Returns 0 on success, errno otherwise 2386 */ 2387 static int tipc_listen(struct socket *sock, int len) 2388 { 2389 struct sock *sk = sock->sk; 2390 int res; 2391 2392 lock_sock(sk); 2393 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2394 release_sock(sk); 2395 2396 return res; 2397 } 2398 2399 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2400 { 2401 struct sock *sk = sock->sk; 2402 DEFINE_WAIT(wait); 2403 int err; 2404 2405 /* True wake-one mechanism for incoming connections: only 2406 * one process gets woken up, not the 'whole herd'. 2407 * Since we do not 'race & poll' for established sockets 2408 * anymore, the common case will execute the loop only once. 
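 * The exclusive wait below (prepare_to_wait_exclusive()) is what provides
 * that wake-one behaviour.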
2409 */ 2410 for (;;) { 2411 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 2412 TASK_INTERRUPTIBLE); 2413 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 2414 release_sock(sk); 2415 timeo = schedule_timeout(timeo); 2416 lock_sock(sk); 2417 } 2418 err = 0; 2419 if (!skb_queue_empty(&sk->sk_receive_queue)) 2420 break; 2421 err = -EAGAIN; 2422 if (!timeo) 2423 break; 2424 err = sock_intr_errno(timeo); 2425 if (signal_pending(current)) 2426 break; 2427 } 2428 finish_wait(sk_sleep(sk), &wait); 2429 return err; 2430 } 2431 2432 /** 2433 * tipc_accept - wait for connection request 2434 * @sock: listening socket 2435 * @new_sock: new socket that is to be connected 2436 * @flags: file-related flags associated with socket * @kern: set when the new socket is created by the kernel rather than from user space 2437 * 2438 * Returns 0 on success, errno otherwise 2439 */ 2440 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 2441 bool kern) 2442 { 2443 struct sock *new_sk, *sk = sock->sk; 2444 struct sk_buff *buf; 2445 struct tipc_sock *new_tsock; 2446 struct tipc_msg *msg; 2447 long timeo; 2448 int res; 2449 2450 lock_sock(sk); 2451 2452 if (sk->sk_state != TIPC_LISTEN) { 2453 res = -EINVAL; 2454 goto exit; 2455 } 2456 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2457 res = tipc_wait_for_accept(sock, timeo); 2458 if (res) 2459 goto exit; 2460 2461 buf = skb_peek(&sk->sk_receive_queue); 2462 2463 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); 2464 if (res) 2465 goto exit; 2466 security_sk_clone(sock->sk, new_sock->sk); 2467 2468 new_sk = new_sock->sk; 2469 new_tsock = tipc_sk(new_sk); 2470 msg = buf_msg(buf); 2471 2472 /* we lock on new_sk; but lockdep sees the lock on sk */ 2473 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2474 2475 /* 2476 * Reject any stray messages received by new socket 2477 * before the socket lock was taken (very, very unlikely) 2478 */ 2479 tsk_rej_rx_queue(new_sk); 2480 2481 /* Connect new socket to its peer */ 2482 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2483 2484 tsk_set_importance(new_tsock, msg_importance(msg)); 2485 if (msg_named(msg)) { 2486 new_tsock->conn_type = msg_nametype(msg); 2487 new_tsock->conn_instance = msg_nameinst(msg); 2488 } 2489 2490 /* 2491 * Respond to 'SYN-' by discarding it & returning 'ACK-'. 2492 * Respond to 'SYN+' by queuing it on new socket. 2493 */ 2494 if (!msg_data_sz(msg)) { 2495 struct msghdr m = {NULL,}; 2496 2497 tsk_advance_rx_queue(sk); 2498 __tipc_sendstream(new_sock, &m, 0); 2499 } else { 2500 __skb_dequeue(&sk->sk_receive_queue); 2501 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2502 skb_set_owner_r(buf, new_sk); 2503 } 2504 release_sock(new_sk); 2505 exit: 2506 release_sock(sk); 2507 return res; 2508 } 2509 2510 /** 2511 * tipc_shutdown - shutdown socket connection 2512 * @sock: socket structure 2513 * @how: direction to close (must be SHUT_RDWR) 2514 * 2515 * Terminates connection (if necessary), then purges socket's receive queue.
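 *
 * TIPC only supports full-duplex shutdown, so any @how other than SHUT_RDWR
 * is rejected with -EINVAL before the socket lock is taken.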
2516 * 2517 * Returns 0 on success, errno otherwise 2518 */ 2519 static int tipc_shutdown(struct socket *sock, int how) 2520 { 2521 struct sock *sk = sock->sk; 2522 int res; 2523 2524 if (how != SHUT_RDWR) 2525 return -EINVAL; 2526 2527 lock_sock(sk); 2528 2529 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2530 sk->sk_shutdown = SEND_SHUTDOWN; 2531 2532 if (sk->sk_state == TIPC_DISCONNECTING) { 2533 /* Discard any unreceived messages */ 2534 __skb_queue_purge(&sk->sk_receive_queue); 2535 2536 /* Wake up anyone sleeping in poll */ 2537 sk->sk_state_change(sk); 2538 res = 0; 2539 } else { 2540 res = -ENOTCONN; 2541 } 2542 2543 release_sock(sk); 2544 return res; 2545 } 2546 2547 static void tipc_sk_timeout(struct timer_list *t) 2548 { 2549 struct sock *sk = from_timer(sk, t, sk_timer); 2550 struct tipc_sock *tsk = tipc_sk(sk); 2551 u32 peer_port = tsk_peer_port(tsk); 2552 u32 peer_node = tsk_peer_node(tsk); 2553 u32 own_node = tsk_own_node(tsk); 2554 u32 own_port = tsk->portid; 2555 struct net *net = sock_net(sk); 2556 struct sk_buff *skb = NULL; 2557 2558 bh_lock_sock(sk); 2559 if (!tipc_sk_connected(sk)) 2560 goto exit; 2561 2562 /* Try again later if socket is busy */ 2563 if (sock_owned_by_user(sk)) { 2564 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2565 goto exit; 2566 } 2567 2568 if (tsk->probe_unacked) { 2569 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2570 tipc_node_remove_conn(net, peer_node, peer_port); 2571 sk->sk_state_change(sk); 2572 goto exit; 2573 } 2574 /* Send new probe */ 2575 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2576 peer_node, own_node, peer_port, own_port, 2577 TIPC_OK); 2578 tsk->probe_unacked = true; 2579 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2580 exit: 2581 bh_unlock_sock(sk); 2582 if (skb) 2583 tipc_node_xmit_skb(net, skb, peer_node, own_port); 2584 sock_put(sk); 2585 } 2586 2587 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2588 struct tipc_name_seq const *seq) 2589 { 2590 struct sock *sk = &tsk->sk; 2591 struct net *net = sock_net(sk); 2592 struct publication *publ; 2593 u32 key; 2594 2595 if (scope != TIPC_NODE_SCOPE) 2596 scope = TIPC_CLUSTER_SCOPE; 2597 2598 if (tipc_sk_connected(sk)) 2599 return -EINVAL; 2600 key = tsk->portid + tsk->pub_count + 1; 2601 if (key == tsk->portid) 2602 return -EADDRINUSE; 2603 2604 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2605 scope, tsk->portid, key); 2606 if (unlikely(!publ)) 2607 return -EINVAL; 2608 2609 list_add(&publ->binding_sock, &tsk->publications); 2610 tsk->pub_count++; 2611 tsk->published = 1; 2612 return 0; 2613 } 2614 2615 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2616 struct tipc_name_seq const *seq) 2617 { 2618 struct net *net = sock_net(&tsk->sk); 2619 struct publication *publ; 2620 struct publication *safe; 2621 int rc = -EINVAL; 2622 2623 if (scope != TIPC_NODE_SCOPE) 2624 scope = TIPC_CLUSTER_SCOPE; 2625 2626 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2627 if (seq) { 2628 if (publ->scope != scope) 2629 continue; 2630 if (publ->type != seq->type) 2631 continue; 2632 if (publ->lower != seq->lower) 2633 continue; 2634 if (publ->upper != seq->upper) 2635 break; 2636 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2637 publ->upper, publ->key); 2638 rc = 0; 2639 break; 2640 } 2641 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2642 publ->upper, publ->key); 2643 rc = 0; 2644 } 2645 if (list_empty(&tsk->publications)) 2646 tsk->published = 0; 2647 return rc; 
2648 } 2649 2650 /* tipc_sk_reinit: set non-zero address in all existing sockets 2651 * when we go from standalone to network mode. 2652 */ 2653 void tipc_sk_reinit(struct net *net) 2654 { 2655 struct tipc_net *tn = net_generic(net, tipc_net_id); 2656 struct rhashtable_iter iter; 2657 struct tipc_sock *tsk; 2658 struct tipc_msg *msg; 2659 2660 rhashtable_walk_enter(&tn->sk_rht, &iter); 2661 2662 do { 2663 rhashtable_walk_start(&iter); 2664 2665 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2666 spin_lock_bh(&tsk->sk.sk_lock.slock); 2667 msg = &tsk->phdr; 2668 msg_set_prevnode(msg, tipc_own_addr(net)); 2669 msg_set_orignode(msg, tipc_own_addr(net)); 2670 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2671 } 2672 2673 rhashtable_walk_stop(&iter); 2674 } while (tsk == ERR_PTR(-EAGAIN)); 2675 } 2676 2677 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2678 { 2679 struct tipc_net *tn = net_generic(net, tipc_net_id); 2680 struct tipc_sock *tsk; 2681 2682 rcu_read_lock(); 2683 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2684 if (tsk) 2685 sock_hold(&tsk->sk); 2686 rcu_read_unlock(); 2687 2688 return tsk; 2689 } 2690 2691 static int tipc_sk_insert(struct tipc_sock *tsk) 2692 { 2693 struct sock *sk = &tsk->sk; 2694 struct net *net = sock_net(sk); 2695 struct tipc_net *tn = net_generic(net, tipc_net_id); 2696 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2697 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2698 2699 while (remaining--) { 2700 portid++; 2701 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2702 portid = TIPC_MIN_PORT; 2703 tsk->portid = portid; 2704 sock_hold(&tsk->sk); 2705 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2706 tsk_rht_params)) 2707 return 0; 2708 sock_put(&tsk->sk); 2709 } 2710 2711 return -1; 2712 } 2713 2714 static void tipc_sk_remove(struct tipc_sock *tsk) 2715 { 2716 struct sock *sk = &tsk->sk; 2717 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2718 2719 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2720 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2721 __sock_put(sk); 2722 } 2723 } 2724 2725 static const struct rhashtable_params tsk_rht_params = { 2726 .nelem_hint = 192, 2727 .head_offset = offsetof(struct tipc_sock, node), 2728 .key_offset = offsetof(struct tipc_sock, portid), 2729 .key_len = sizeof(u32), /* portid */ 2730 .max_size = 1048576, 2731 .min_size = 256, 2732 .automatic_shrinking = true, 2733 }; 2734 2735 int tipc_sk_rht_init(struct net *net) 2736 { 2737 struct tipc_net *tn = net_generic(net, tipc_net_id); 2738 2739 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2740 } 2741 2742 void tipc_sk_rht_destroy(struct net *net) 2743 { 2744 struct tipc_net *tn = net_generic(net, tipc_net_id); 2745 2746 /* Wait for socket readers to complete */ 2747 synchronize_net(); 2748 2749 rhashtable_destroy(&tn->sk_rht); 2750 } 2751 2752 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2753 { 2754 struct net *net = sock_net(&tsk->sk); 2755 struct tipc_group *grp = tsk->group; 2756 struct tipc_msg *hdr = &tsk->phdr; 2757 struct tipc_name_seq seq; 2758 int rc; 2759 2760 if (mreq->type < TIPC_RESERVED_TYPES) 2761 return -EACCES; 2762 if (mreq->scope > TIPC_NODE_SCOPE) 2763 return -EINVAL; 2764 if (grp) 2765 return -EACCES; 2766 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2767 if (!grp) 2768 return -ENOMEM; 2769 tsk->group = grp; 2770 msg_set_lookup_scope(hdr, mreq->scope); 2771 
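/* Preset the socket's message header with the group's name type */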
msg_set_nametype(hdr, mreq->type); 2772 msg_set_dest_droppable(hdr, true); 2773 seq.type = mreq->type; 2774 seq.lower = mreq->instance; 2775 seq.upper = seq.lower; 2776 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2777 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2778 if (rc) { 2779 tipc_group_delete(net, grp); 2780 tsk->group = NULL; 2781 return rc; 2782 } 2783 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2784 tsk->mc_method.rcast = true; 2785 tsk->mc_method.mandatory = true; 2786 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2787 return rc; 2788 } 2789 2790 static int tipc_sk_leave(struct tipc_sock *tsk) 2791 { 2792 struct net *net = sock_net(&tsk->sk); 2793 struct tipc_group *grp = tsk->group; 2794 struct tipc_name_seq seq; 2795 int scope; 2796 2797 if (!grp) 2798 return -EINVAL; 2799 tipc_group_self(grp, &seq, &scope); 2800 tipc_group_delete(net, grp); 2801 tsk->group = NULL; 2802 tipc_sk_withdraw(tsk, scope, &seq); 2803 return 0; 2804 } 2805 2806 /** 2807 * tipc_setsockopt - set socket option 2808 * @sock: socket structure 2809 * @lvl: option level 2810 * @opt: option identifier 2811 * @ov: pointer to new option value 2812 * @ol: length of option value 2813 * 2814 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2815 * (to ease compatibility). 2816 * 2817 * Returns 0 on success, errno otherwise 2818 */ 2819 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2820 char __user *ov, unsigned int ol) 2821 { 2822 struct sock *sk = sock->sk; 2823 struct tipc_sock *tsk = tipc_sk(sk); 2824 struct tipc_group_req mreq; 2825 u32 value = 0; 2826 int res = 0; 2827 2828 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2829 return 0; 2830 if (lvl != SOL_TIPC) 2831 return -ENOPROTOOPT; 2832 2833 switch (opt) { 2834 case TIPC_IMPORTANCE: 2835 case TIPC_SRC_DROPPABLE: 2836 case TIPC_DEST_DROPPABLE: 2837 case TIPC_CONN_TIMEOUT: 2838 if (ol < sizeof(value)) 2839 return -EINVAL; 2840 if (get_user(value, (u32 __user *)ov)) 2841 return -EFAULT; 2842 break; 2843 case TIPC_GROUP_JOIN: 2844 if (ol < sizeof(mreq)) 2845 return -EINVAL; 2846 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2847 return -EFAULT; 2848 break; 2849 default: 2850 if (ov || ol) 2851 return -EINVAL; 2852 } 2853 2854 lock_sock(sk); 2855 2856 switch (opt) { 2857 case TIPC_IMPORTANCE: 2858 res = tsk_set_importance(tsk, value); 2859 break; 2860 case TIPC_SRC_DROPPABLE: 2861 if (sock->type != SOCK_STREAM) 2862 tsk_set_unreliable(tsk, value); 2863 else 2864 res = -ENOPROTOOPT; 2865 break; 2866 case TIPC_DEST_DROPPABLE: 2867 tsk_set_unreturnable(tsk, value); 2868 break; 2869 case TIPC_CONN_TIMEOUT: 2870 tipc_sk(sk)->conn_timeout = value; 2871 break; 2872 case TIPC_MCAST_BROADCAST: 2873 tsk->mc_method.rcast = false; 2874 tsk->mc_method.mandatory = true; 2875 break; 2876 case TIPC_MCAST_REPLICAST: 2877 tsk->mc_method.rcast = true; 2878 tsk->mc_method.mandatory = true; 2879 break; 2880 case TIPC_GROUP_JOIN: 2881 res = tipc_sk_join(tsk, &mreq); 2882 break; 2883 case TIPC_GROUP_LEAVE: 2884 res = tipc_sk_leave(tsk); 2885 break; 2886 default: 2887 res = -EINVAL; 2888 } 2889 2890 release_sock(sk); 2891 2892 return res; 2893 } 2894 2895 /** 2896 * tipc_getsockopt - get socket option 2897 * @sock: socket structure 2898 * @lvl: option level 2899 * @opt: option identifier 2900 * @ov: receptacle for option value 2901 * @ol: receptacle for length of option value 2902 * 2903 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 2904 * (to ease compatibility). 
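 *
 * A minimal user-space sketch (illustrative only, not part of this file),
 * reading the connect timeout in milliseconds on an AF_TIPC socket 'fd';
 * assumes <sys/socket.h>, <linux/tipc.h> and <stdio.h>:
 *
 *	__u32 timeout;
 *	socklen_t len = sizeof(timeout);
 *
 *	if (!getsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout, &len))
 *		printf("connect timeout: %u ms\n", timeout);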
2905 * 2906 * Returns 0 on success, errno otherwise 2907 */ 2908 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2909 char __user *ov, int __user *ol) 2910 { 2911 struct sock *sk = sock->sk; 2912 struct tipc_sock *tsk = tipc_sk(sk); 2913 struct tipc_name_seq seq; 2914 int len, scope; 2915 u32 value; 2916 int res; 2917 2918 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2919 return put_user(0, ol); 2920 if (lvl != SOL_TIPC) 2921 return -ENOPROTOOPT; 2922 res = get_user(len, ol); 2923 if (res) 2924 return res; 2925 2926 lock_sock(sk); 2927 2928 switch (opt) { 2929 case TIPC_IMPORTANCE: 2930 value = tsk_importance(tsk); 2931 break; 2932 case TIPC_SRC_DROPPABLE: 2933 value = tsk_unreliable(tsk); 2934 break; 2935 case TIPC_DEST_DROPPABLE: 2936 value = tsk_unreturnable(tsk); 2937 break; 2938 case TIPC_CONN_TIMEOUT: 2939 value = tsk->conn_timeout; 2940 /* no need to set "res", since already 0 at this point */ 2941 break; 2942 case TIPC_NODE_RECVQ_DEPTH: 2943 value = 0; /* was tipc_queue_size, now obsolete */ 2944 break; 2945 case TIPC_SOCK_RECVQ_DEPTH: 2946 value = skb_queue_len(&sk->sk_receive_queue); 2947 break; 2948 case TIPC_GROUP_JOIN: 2949 seq.type = 0; 2950 if (tsk->group) 2951 tipc_group_self(tsk->group, &seq, &scope); 2952 value = seq.type; 2953 break; 2954 default: 2955 res = -EINVAL; 2956 } 2957 2958 release_sock(sk); 2959 2960 if (res) 2961 return res; /* "get" failed */ 2962 2963 if (len < sizeof(value)) 2964 return -EINVAL; 2965 2966 if (copy_to_user(ov, &value, sizeof(value))) 2967 return -EFAULT; 2968 2969 return put_user(sizeof(value), ol); 2970 } 2971 2972 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2973 { 2974 struct sock *sk = sock->sk; 2975 struct tipc_sioc_ln_req lnr; 2976 void __user *argp = (void __user *)arg; 2977 2978 switch (cmd) { 2979 case SIOCGETLINKNAME: 2980 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2981 return -EFAULT; 2982 if (!tipc_node_get_linkname(sock_net(sk), 2983 lnr.bearer_id & 0xffff, lnr.peer, 2984 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2985 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2986 return -EFAULT; 2987 return 0; 2988 } 2989 return -EADDRNOTAVAIL; 2990 default: 2991 return -ENOIOCTLCMD; 2992 } 2993 } 2994 2995 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 2996 { 2997 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 2998 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 2999 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 3000 3001 tsk1->peer.family = AF_TIPC; 3002 tsk1->peer.addrtype = TIPC_ADDR_ID; 3003 tsk1->peer.scope = TIPC_NODE_SCOPE; 3004 tsk1->peer.addr.id.ref = tsk2->portid; 3005 tsk1->peer.addr.id.node = onode; 3006 tsk2->peer.family = AF_TIPC; 3007 tsk2->peer.addrtype = TIPC_ADDR_ID; 3008 tsk2->peer.scope = TIPC_NODE_SCOPE; 3009 tsk2->peer.addr.id.ref = tsk1->portid; 3010 tsk2->peer.addr.id.node = onode; 3011 3012 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3013 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3014 return 0; 3015 } 3016 3017 /* Protocol switches for the various types of TIPC sockets */ 3018 3019 static const struct proto_ops msg_ops = { 3020 .owner = THIS_MODULE, 3021 .family = AF_TIPC, 3022 .release = tipc_release, 3023 .bind = tipc_bind, 3024 .connect = tipc_connect, 3025 .socketpair = tipc_socketpair, 3026 .accept = sock_no_accept, 3027 .getname = tipc_getname, 3028 .poll = tipc_poll, 3029 .ioctl = tipc_ioctl, 3030 .listen = sock_no_listen, 3031 .shutdown = tipc_shutdown, 3032 .setsockopt = tipc_setsockopt, 3033 .getsockopt = 
tipc_getsockopt, 3034 .sendmsg = tipc_sendmsg, 3035 .recvmsg = tipc_recvmsg, 3036 .mmap = sock_no_mmap, 3037 .sendpage = sock_no_sendpage 3038 }; 3039 3040 static const struct proto_ops packet_ops = { 3041 .owner = THIS_MODULE, 3042 .family = AF_TIPC, 3043 .release = tipc_release, 3044 .bind = tipc_bind, 3045 .connect = tipc_connect, 3046 .socketpair = tipc_socketpair, 3047 .accept = tipc_accept, 3048 .getname = tipc_getname, 3049 .poll = tipc_poll, 3050 .ioctl = tipc_ioctl, 3051 .listen = tipc_listen, 3052 .shutdown = tipc_shutdown, 3053 .setsockopt = tipc_setsockopt, 3054 .getsockopt = tipc_getsockopt, 3055 .sendmsg = tipc_send_packet, 3056 .recvmsg = tipc_recvmsg, 3057 .mmap = sock_no_mmap, 3058 .sendpage = sock_no_sendpage 3059 }; 3060 3061 static const struct proto_ops stream_ops = { 3062 .owner = THIS_MODULE, 3063 .family = AF_TIPC, 3064 .release = tipc_release, 3065 .bind = tipc_bind, 3066 .connect = tipc_connect, 3067 .socketpair = tipc_socketpair, 3068 .accept = tipc_accept, 3069 .getname = tipc_getname, 3070 .poll = tipc_poll, 3071 .ioctl = tipc_ioctl, 3072 .listen = tipc_listen, 3073 .shutdown = tipc_shutdown, 3074 .setsockopt = tipc_setsockopt, 3075 .getsockopt = tipc_getsockopt, 3076 .sendmsg = tipc_sendstream, 3077 .recvmsg = tipc_recvstream, 3078 .mmap = sock_no_mmap, 3079 .sendpage = sock_no_sendpage 3080 }; 3081 3082 static const struct net_proto_family tipc_family_ops = { 3083 .owner = THIS_MODULE, 3084 .family = AF_TIPC, 3085 .create = tipc_sk_create 3086 }; 3087 3088 static struct proto tipc_proto = { 3089 .name = "TIPC", 3090 .owner = THIS_MODULE, 3091 .obj_size = sizeof(struct tipc_sock), 3092 .sysctl_rmem = sysctl_tipc_rmem 3093 }; 3094 3095 /** 3096 * tipc_socket_init - initialize TIPC socket interface 3097 * 3098 * Returns 0 on success, errno otherwise 3099 */ 3100 int tipc_socket_init(void) 3101 { 3102 int res; 3103 3104 res = proto_register(&tipc_proto, 1); 3105 if (res) { 3106 pr_err("Failed to register TIPC protocol type\n"); 3107 goto out; 3108 } 3109 3110 res = sock_register(&tipc_family_ops); 3111 if (res) { 3112 pr_err("Failed to register TIPC socket type\n"); 3113 proto_unregister(&tipc_proto); 3114 goto out; 3115 } 3116 out: 3117 return res; 3118 } 3119 3120 /** 3121 * tipc_socket_stop - stop TIPC socket interface 3122 */ 3123 void tipc_socket_stop(void) 3124 { 3125 sock_unregister(tipc_family_ops.family); 3126 proto_unregister(&tipc_proto); 3127 } 3128 3129 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3130 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3131 { 3132 u32 peer_node; 3133 u32 peer_port; 3134 struct nlattr *nest; 3135 3136 peer_node = tsk_peer_node(tsk); 3137 peer_port = tsk_peer_port(tsk); 3138 3139 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3140 3141 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3142 goto msg_full; 3143 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3144 goto msg_full; 3145 3146 if (tsk->conn_type != 0) { 3147 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3148 goto msg_full; 3149 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3150 goto msg_full; 3151 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3152 goto msg_full; 3153 } 3154 nla_nest_end(skb, nest); 3155 3156 return 0; 3157 3158 msg_full: 3159 nla_nest_cancel(skb, nest); 3160 3161 return -EMSGSIZE; 3162 } 3163 3164 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3165 *tsk) 3166 { 3167 struct net *net = sock_net(skb->sk); 3168 struct sock *sk = &tsk->sk; 3169 3170 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3171 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3172 return -EMSGSIZE; 3173 3174 if (tipc_sk_connected(sk)) { 3175 if (__tipc_nl_add_sk_con(skb, tsk)) 3176 return -EMSGSIZE; 3177 } else if (!list_empty(&tsk->publications)) { 3178 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3179 return -EMSGSIZE; 3180 } 3181 return 0; 3182 } 3183 3184 /* Caller should hold socket lock for the passed tipc socket. */ 3185 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3186 struct tipc_sock *tsk) 3187 { 3188 struct nlattr *attrs; 3189 void *hdr; 3190 3191 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3192 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3193 if (!hdr) 3194 goto msg_cancel; 3195 3196 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3197 if (!attrs) 3198 goto genlmsg_cancel; 3199 3200 if (__tipc_nl_add_sk_info(skb, tsk)) 3201 goto attr_msg_cancel; 3202 3203 nla_nest_end(skb, attrs); 3204 genlmsg_end(skb, hdr); 3205 3206 return 0; 3207 3208 attr_msg_cancel: 3209 nla_nest_cancel(skb, attrs); 3210 genlmsg_cancel: 3211 genlmsg_cancel(skb, hdr); 3212 msg_cancel: 3213 return -EMSGSIZE; 3214 } 3215 3216 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3217 int (*skb_handler)(struct sk_buff *skb, 3218 struct netlink_callback *cb, 3219 struct tipc_sock *tsk)) 3220 { 3221 struct net *net = sock_net(skb->sk); 3222 struct tipc_net *tn = tipc_net(net); 3223 const struct bucket_table *tbl; 3224 u32 prev_portid = cb->args[1]; 3225 u32 tbl_id = cb->args[0]; 3226 struct rhash_head *pos; 3227 struct tipc_sock *tsk; 3228 int err; 3229 3230 rcu_read_lock(); 3231 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 3232 for (; tbl_id < tbl->size; tbl_id++) { 3233 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { 3234 spin_lock_bh(&tsk->sk.sk_lock.slock); 3235 if (prev_portid && prev_portid != tsk->portid) { 3236 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3237 continue; 3238 } 3239 3240 err = skb_handler(skb, cb, tsk); 3241 if (err) { 3242 prev_portid = tsk->portid; 3243 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3244 goto out; 3245 } 3246 3247 prev_portid = 0; 3248 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3249 } 3250 } 3251 out: 3252 rcu_read_unlock(); 3253 cb->args[0] = tbl_id; 3254 cb->args[1] = prev_portid; 3255 3256 return skb->len; 3257 } 3258 EXPORT_SYMBOL(tipc_nl_sk_walk); 3259 3260 int tipc_sk_fill_sock_diag(struct sk_buff 
*skb, struct netlink_callback *cb, 3261 struct tipc_sock *tsk, u32 sk_filter_state, 3262 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3263 { 3264 struct sock *sk = &tsk->sk; 3265 struct nlattr *attrs; 3266 struct nlattr *stat; 3267 3268 /*filter response w.r.t sk_state*/ 3269 if (!(sk_filter_state & (1 << sk->sk_state))) 3270 return 0; 3271 3272 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3273 if (!attrs) 3274 goto msg_cancel; 3275 3276 if (__tipc_nl_add_sk_info(skb, tsk)) 3277 goto attr_msg_cancel; 3278 3279 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3280 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3281 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3282 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3283 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3284 sock_i_uid(sk))) || 3285 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3286 tipc_diag_gen_cookie(sk), 3287 TIPC_NLA_SOCK_PAD)) 3288 goto attr_msg_cancel; 3289 3290 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3291 if (!stat) 3292 goto attr_msg_cancel; 3293 3294 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3295 skb_queue_len(&sk->sk_receive_queue)) || 3296 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3297 skb_queue_len(&sk->sk_write_queue)) || 3298 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3299 atomic_read(&sk->sk_drops))) 3300 goto stat_msg_cancel; 3301 3302 if (tsk->cong_link_cnt && 3303 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3304 goto stat_msg_cancel; 3305 3306 if (tsk_conn_cong(tsk) && 3307 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3308 goto stat_msg_cancel; 3309 3310 nla_nest_end(skb, stat); 3311 nla_nest_end(skb, attrs); 3312 3313 return 0; 3314 3315 stat_msg_cancel: 3316 nla_nest_cancel(skb, stat); 3317 attr_msg_cancel: 3318 nla_nest_cancel(skb, attrs); 3319 msg_cancel: 3320 return -EMSGSIZE; 3321 } 3322 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3323 3324 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3325 { 3326 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3327 } 3328 3329 /* Caller should hold socket lock for the passed tipc socket. */ 3330 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 3331 struct netlink_callback *cb, 3332 struct publication *publ) 3333 { 3334 void *hdr; 3335 struct nlattr *attrs; 3336 3337 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3338 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 3339 if (!hdr) 3340 goto msg_cancel; 3341 3342 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 3343 if (!attrs) 3344 goto genlmsg_cancel; 3345 3346 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 3347 goto attr_msg_cancel; 3348 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 3349 goto attr_msg_cancel; 3350 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 3351 goto attr_msg_cancel; 3352 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 3353 goto attr_msg_cancel; 3354 3355 nla_nest_end(skb, attrs); 3356 genlmsg_end(skb, hdr); 3357 3358 return 0; 3359 3360 attr_msg_cancel: 3361 nla_nest_cancel(skb, attrs); 3362 genlmsg_cancel: 3363 genlmsg_cancel(skb, hdr); 3364 msg_cancel: 3365 return -EMSGSIZE; 3366 } 3367 3368 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3369 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3370 struct netlink_callback *cb, 3371 struct tipc_sock *tsk, u32 *last_publ) 3372 { 3373 int err; 3374 struct publication *p; 3375 3376 if (*last_publ) { 3377 list_for_each_entry(p, &tsk->publications, binding_sock) { 3378 if (p->key == *last_publ) 3379 break; 3380 } 3381 if (p->key != *last_publ) { 3382 /* We never set seq or call nl_dump_check_consistent() 3383 * this means that setting prev_seq here will cause the 3384 * consistence check to fail in the netlink callback 3385 * handler. Resulting in the last NLMSG_DONE message 3386 * having the NLM_F_DUMP_INTR flag set. 3387 */ 3388 cb->prev_seq = 1; 3389 *last_publ = 0; 3390 return -EPIPE; 3391 } 3392 } else { 3393 p = list_first_entry(&tsk->publications, struct publication, 3394 binding_sock); 3395 } 3396 3397 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3398 err = __tipc_nl_add_sk_publ(skb, cb, p); 3399 if (err) { 3400 *last_publ = p->key; 3401 return err; 3402 } 3403 } 3404 *last_publ = 0; 3405 3406 return 0; 3407 } 3408 3409 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3410 { 3411 int err; 3412 u32 tsk_portid = cb->args[0]; 3413 u32 last_publ = cb->args[1]; 3414 u32 done = cb->args[2]; 3415 struct net *net = sock_net(skb->sk); 3416 struct tipc_sock *tsk; 3417 3418 if (!tsk_portid) { 3419 struct nlattr **attrs; 3420 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3421 3422 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3423 if (err) 3424 return err; 3425 3426 if (!attrs[TIPC_NLA_SOCK]) 3427 return -EINVAL; 3428 3429 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3430 attrs[TIPC_NLA_SOCK], 3431 tipc_nl_sock_policy, NULL); 3432 if (err) 3433 return err; 3434 3435 if (!sock[TIPC_NLA_SOCK_REF]) 3436 return -EINVAL; 3437 3438 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3439 } 3440 3441 if (done) 3442 return 0; 3443 3444 tsk = tipc_sk_lookup(net, tsk_portid); 3445 if (!tsk) 3446 return -EINVAL; 3447 3448 lock_sock(&tsk->sk); 3449 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3450 if (!err) 3451 done = 1; 3452 release_sock(&tsk->sk); 3453 sock_put(&tsk->sk); 3454 3455 cb->args[0] = tsk_portid; 3456 cb->args[1] = last_publ; 3457 cb->args[2] = done; 3458 3459 return skb->len; 3460 } 3461
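/*
 * Illustrative user-space sketch (not part of this file): joining a TIPC
 * communication group with setsockopt(TIPC_GROUP_JOIN), i.e. the path that
 * ends up in tipc_sk_join() above. Assumes <sys/socket.h>, <linux/tipc.h>
 * and <stdio.h>; the group type 4711 and instance 17 are made-up example
 * values.
 *
 *	int fd = socket(AF_TIPC, SOCK_RDM, 0);
 *	struct tipc_group_req req = {
 *		.type = 4711,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	if (fd < 0 || setsockopt(fd, SOL_TIPC, TIPC_GROUP_JOIN,
 *				 &req, sizeof(req)) < 0)
 *		perror("TIPC_GROUP_JOIN");
 */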