1 /* 2 * net/tipc/socket.c: TIPC socket API 3 * 4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state:
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct
tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); 136 static int tipc_sk_insert(struct tipc_sock *tsk); 137 static void tipc_sk_remove(struct tipc_sock *tsk); 138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz); 139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); 140 141 static const struct proto_ops packet_ops; 142 static const struct proto_ops stream_ops; 143 static const struct proto_ops msg_ops; 144 static struct proto tipc_proto; 145 static const struct rhashtable_params tsk_rht_params; 146 147 static u32 tsk_own_node(struct tipc_sock *tsk) 148 { 149 return msg_prevnode(&tsk->phdr); 150 } 151 152 static u32 tsk_peer_node(struct tipc_sock *tsk) 153 { 154 return msg_destnode(&tsk->phdr); 155 } 156 157 static u32 tsk_peer_port(struct tipc_sock *tsk) 158 { 159 return msg_destport(&tsk->phdr); 160 } 161 162 static bool tsk_unreliable(struct tipc_sock *tsk) 163 { 164 return msg_src_droppable(&tsk->phdr) != 0; 165 } 166 167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) 168 { 169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); 170 } 171 172 static bool tsk_unreturnable(struct tipc_sock *tsk) 173 { 174 return msg_dest_droppable(&tsk->phdr) != 0; 175 } 176 177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) 178 { 179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0); 180 } 181 182 static int tsk_importance(struct tipc_sock *tsk) 183 { 184 return msg_importance(&tsk->phdr); 185 } 186 187 static int tsk_set_importance(struct tipc_sock *tsk, int imp) 188 { 189 if (imp > TIPC_CRITICAL_IMPORTANCE) 190 return -EINVAL; 191 msg_set_importance(&tsk->phdr, (u32)imp); 192 return 0; 193 } 194 195 static struct tipc_sock *tipc_sk(const struct sock *sk) 196 { 197 return container_of(sk, struct tipc_sock, sk); 198 } 199 200 static bool tsk_conn_cong(struct tipc_sock *tsk) 201 { 202 return tsk->snt_unacked > tsk->snd_win; 203 } 204 205 static u16 tsk_blocks(int len) 206 { 207 return ((len / FLOWCTL_BLK_SZ) + 1); 208 } 209 210 /* tsk_blocks(): translate a buffer size in bytes to number of 211 * advertisable blocks, taking into account the ratio truesize(len)/len 212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ 213 */ 214 static u16 tsk_adv_blocks(int len) 215 { 216 return len / FLOWCTL_BLK_SZ / 4; 217 } 218 219 /* tsk_inc(): increment counter for sent or received data 220 * - If block based flow control is not supported by peer we 221 * fall back to message based ditto, incrementing the counter 222 */ 223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen) 224 { 225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 226 return ((msglen / FLOWCTL_BLK_SZ) + 1); 227 return 1; 228 } 229 230 /** 231 * tsk_advance_rx_queue - discard first buffer in socket receive queue 232 * 233 * Caller must hold socket lock 234 */ 235 static void tsk_advance_rx_queue(struct sock *sk) 236 { 237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 238 } 239 240 /* tipc_sk_respond() : send response message back to sender 241 */ 242 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) 243 { 244 u32 selector; 245 u32 dnode; 246 u32 onode = tipc_own_addr(sock_net(sk)); 247 248 if (!tipc_msg_reverse(onode, &skb, err)) 249 return; 250 251 dnode = msg_destnode(buf_msg(skb)); 252 selector = msg_origport(buf_msg(skb)); 253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 254 } 255 256 /** 257 * tsk_rej_rx_queue - reject all buffers in socket receive queue 258 * 
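 * Each buffer still queued is bounced back towards its sender via
 * tipc_sk_respond(): the header is reversed with tipc_msg_reverse() and the
 * buffer is returned with error code TIPC_ERR_NO_PORT, or freed if the
 * sender marked it as destination-droppable.
 *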
259 * Caller must hold socket lock 260 */ 261 static void tsk_rej_rx_queue(struct sock *sk) 262 { 263 struct sk_buff *skb; 264 265 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 267 } 268 269 static bool tipc_sk_connected(struct sock *sk) 270 { 271 return sk->sk_state == TIPC_ESTABLISHED; 272 } 273 274 /* tipc_sk_type_connectionless - check if the socket is datagram socket 275 * @sk: socket 276 * 277 * Returns true if connection less, false otherwise 278 */ 279 static bool tipc_sk_type_connectionless(struct sock *sk) 280 { 281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM; 282 } 283 284 /* tsk_peer_msg - verify if message was sent by connected port's peer 285 * 286 * Handles cases where the node's network address has changed from 287 * the default of <0.0.0> to its configured setting. 288 */ 289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 290 { 291 struct sock *sk = &tsk->sk; 292 u32 self = tipc_own_addr(sock_net(sk)); 293 u32 peer_port = tsk_peer_port(tsk); 294 u32 orig_node, peer_node; 295 296 if (unlikely(!tipc_sk_connected(sk))) 297 return false; 298 299 if (unlikely(msg_origport(msg) != peer_port)) 300 return false; 301 302 orig_node = msg_orignode(msg); 303 peer_node = tsk_peer_node(tsk); 304 305 if (likely(orig_node == peer_node)) 306 return true; 307 308 if (!orig_node && peer_node == self) 309 return true; 310 311 if (!peer_node && orig_node == self) 312 return true; 313 314 return false; 315 } 316 317 /* tipc_set_sk_state - set the sk_state of the socket 318 * @sk: socket 319 * 320 * Caller must hold socket lock 321 * 322 * Returns 0 on success, errno otherwise 323 */ 324 static int tipc_set_sk_state(struct sock *sk, int state) 325 { 326 int oldsk_state = sk->sk_state; 327 int res = -EINVAL; 328 329 switch (state) { 330 case TIPC_OPEN: 331 res = 0; 332 break; 333 case TIPC_LISTEN: 334 case TIPC_CONNECTING: 335 if (oldsk_state == TIPC_OPEN) 336 res = 0; 337 break; 338 case TIPC_ESTABLISHED: 339 if (oldsk_state == TIPC_CONNECTING || 340 oldsk_state == TIPC_OPEN) 341 res = 0; 342 break; 343 case TIPC_DISCONNECTING: 344 if (oldsk_state == TIPC_CONNECTING || 345 oldsk_state == TIPC_ESTABLISHED) 346 res = 0; 347 break; 348 } 349 350 if (!res) 351 sk->sk_state = state; 352 353 return res; 354 } 355 356 static int tipc_sk_sock_err(struct socket *sock, long *timeout) 357 { 358 struct sock *sk = sock->sk; 359 int err = sock_error(sk); 360 int typ = sock->type; 361 362 if (err) 363 return err; 364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) { 365 if (sk->sk_state == TIPC_DISCONNECTING) 366 return -EPIPE; 367 else if (!tipc_sk_connected(sk)) 368 return -ENOTCONN; 369 } 370 if (!*timeout) 371 return -EAGAIN; 372 if (signal_pending(current)) 373 return sock_intr_errno(*timeout); 374 375 return 0; 376 } 377 378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \ 379 ({ \ 380 struct sock *sk_; \ 381 int rc_; \ 382 \ 383 while ((rc_ = !(condition_))) { \ 384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 385 sk_ = (sock_)->sk; \ 386 rc_ = tipc_sk_sock_err((sock_), timeo_); \ 387 if (rc_) \ 388 break; \ 389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ 390 release_sock(sk_); \ 391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ 392 sched_annotate_sleep(); \ 393 lock_sock(sk_); \ 394 remove_wait_queue(sk_sleep(sk_), &wait_); \ 395 } \ 396 rc_; \ 397 }) 398 399 /** 400 * tipc_sk_create - create a TIPC socket 401 * @net: network namespace (must be default network) 
402 * @sock: pre-allocated socket structure 403 * @protocol: protocol indicator (must be 0) 404 * @kern: caused by kernel or by userspace? 405 * 406 * This routine creates additional data structures used by the TIPC socket, 407 * initializes them, and links them together. 408 * 409 * Returns 0 on success, errno otherwise 410 */ 411 static int tipc_sk_create(struct net *net, struct socket *sock, 412 int protocol, int kern) 413 { 414 struct tipc_net *tn; 415 const struct proto_ops *ops; 416 struct sock *sk; 417 struct tipc_sock *tsk; 418 struct tipc_msg *msg; 419 420 /* Validate arguments */ 421 if (unlikely(protocol != 0)) 422 return -EPROTONOSUPPORT; 423 424 switch (sock->type) { 425 case SOCK_STREAM: 426 ops = &stream_ops; 427 break; 428 case SOCK_SEQPACKET: 429 ops = &packet_ops; 430 break; 431 case SOCK_DGRAM: 432 case SOCK_RDM: 433 ops = &msg_ops; 434 break; 435 default: 436 return -EPROTOTYPE; 437 } 438 439 /* Allocate socket's protocol area */ 440 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern); 441 if (sk == NULL) 442 return -ENOMEM; 443 444 tsk = tipc_sk(sk); 445 tsk->max_pkt = MAX_PKT_DEFAULT; 446 INIT_LIST_HEAD(&tsk->publications); 447 INIT_LIST_HEAD(&tsk->cong_links); 448 msg = &tsk->phdr; 449 tn = net_generic(sock_net(sk), tipc_net_id); 450 451 /* Finish initializing socket data structures */ 452 sock->ops = ops; 453 sock_init_data(sock, sk); 454 tipc_set_sk_state(sk, TIPC_OPEN); 455 if (tipc_sk_insert(tsk)) { 456 pr_warn("Socket create failed; port number exhausted\n"); 457 return -EINVAL; 458 } 459 460 /* Ensure tsk is visible before we read own_addr. */ 461 smp_mb(); 462 463 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE, 464 TIPC_NAMED_MSG, NAMED_H_SIZE, 0); 465 466 msg_set_origport(msg, tsk->portid); 467 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0); 468 sk->sk_shutdown = 0; 469 sk->sk_backlog_rcv = tipc_sk_backlog_rcv; 470 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 471 sk->sk_data_ready = tipc_data_ready; 472 sk->sk_write_space = tipc_write_space; 473 sk->sk_destruct = tipc_sock_destruct; 474 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 475 tsk->group_is_open = true; 476 atomic_set(&tsk->dupl_rcvcnt, 0); 477 478 /* Start out with safe limits until we receive an advertised window */ 479 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN); 480 tsk->rcv_win = tsk->snd_win; 481 482 if (tipc_sk_type_connectionless(sk)) { 483 tsk_set_unreturnable(tsk, true); 484 if (sock->type == SOCK_DGRAM) 485 tsk_set_unreliable(tsk, true); 486 } 487 488 return 0; 489 } 490 491 static void tipc_sk_callback(struct rcu_head *head) 492 { 493 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); 494 495 sock_put(&tsk->sk); 496 } 497 498 /* Caller should hold socket lock for the socket. */ 499 static void __tipc_shutdown(struct socket *sock, int error) 500 { 501 struct sock *sk = sock->sk; 502 struct tipc_sock *tsk = tipc_sk(sk); 503 struct net *net = sock_net(sk); 504 long timeout = CONN_TIMEOUT_DEFAULT; 505 u32 dnode = tsk_peer_node(tsk); 506 struct sk_buff *skb; 507 508 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */ 509 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt && 510 !tsk_conn_cong(tsk))); 511 512 /* Reject all unreceived messages, except on an active connection 513 * (which disconnects locally & sends a 'FIN+' to peer). 
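	 * The 'FIN' is the empty TIPC_CONN_MSG built further down, carrying
	 * the error code passed in by the caller; buffers that were already
	 * partly read are simply freed instead of being rejected.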
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
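 *
 * Example (a minimal userspace sketch, not part of this file; the service
 * type 18888, the instance range and the error handling are illustrative
 * only):
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	if (bind(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0)
 *		perror("bind");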
605 */ 606 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, 607 int uaddr_len) 608 { 609 struct sock *sk = sock->sk; 610 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 611 struct tipc_sock *tsk = tipc_sk(sk); 612 int res = -EINVAL; 613 614 lock_sock(sk); 615 if (unlikely(!uaddr_len)) { 616 res = tipc_sk_withdraw(tsk, 0, NULL); 617 goto exit; 618 } 619 if (tsk->group) { 620 res = -EACCES; 621 goto exit; 622 } 623 if (uaddr_len < sizeof(struct sockaddr_tipc)) { 624 res = -EINVAL; 625 goto exit; 626 } 627 if (addr->family != AF_TIPC) { 628 res = -EAFNOSUPPORT; 629 goto exit; 630 } 631 632 if (addr->addrtype == TIPC_ADDR_NAME) 633 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 634 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 635 res = -EAFNOSUPPORT; 636 goto exit; 637 } 638 639 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && 640 (addr->addr.nameseq.type != TIPC_TOP_SRV) && 641 (addr->addr.nameseq.type != TIPC_CFG_SRV)) { 642 res = -EACCES; 643 goto exit; 644 } 645 646 res = (addr->scope >= 0) ? 647 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : 648 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); 649 exit: 650 release_sock(sk); 651 return res; 652 } 653 654 /** 655 * tipc_getname - get port ID of socket or peer socket 656 * @sock: socket structure 657 * @uaddr: area for returned socket address 658 * @uaddr_len: area for returned length of socket address 659 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID 660 * 661 * Returns 0 on success, errno otherwise 662 * 663 * NOTE: This routine doesn't need to take the socket lock since it only 664 * accesses socket information that is unchanging (or which changes in 665 * a completely predictable manner). 666 */ 667 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, 668 int peer) 669 { 670 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 671 struct sock *sk = sock->sk; 672 struct tipc_sock *tsk = tipc_sk(sk); 673 674 memset(addr, 0, sizeof(*addr)); 675 if (peer) { 676 if ((!tipc_sk_connected(sk)) && 677 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING))) 678 return -ENOTCONN; 679 addr->addr.id.ref = tsk_peer_port(tsk); 680 addr->addr.id.node = tsk_peer_node(tsk); 681 } else { 682 addr->addr.id.ref = tsk->portid; 683 addr->addr.id.node = tipc_own_addr(sock_net(sk)); 684 } 685 686 addr->addrtype = TIPC_ADDR_ID; 687 addr->family = AF_TIPC; 688 addr->scope = 0; 689 addr->addr.name.domain = 0; 690 691 return sizeof(*addr); 692 } 693 694 /** 695 * tipc_poll - read pollmask 696 * @file: file structure associated with the socket 697 * @sock: socket for which to calculate the poll bits 698 * 699 * Returns pollmask value 700 * 701 * COMMENTARY: 702 * It appears that the usual socket locking mechanisms are not useful here 703 * since the pollmask info is potentially out-of-date the moment this routine 704 * exits. TCP and other protocols seem to rely on higher level poll routines 705 * to handle any preventable race conditions, so TIPC will do the same ... 706 * 707 * IMPORTANT: The fact that a read or write operation is indicated does NOT 708 * imply that the operation will succeed, merely that it should be performed 709 * and will not block. 
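 *
 * A typical user therefore just (e)polls and retries the operation, e.g.
 * (illustrative userspace sketch only; 'sd' is an already created socket):
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 *
 * Here POLLOUT being reported means link and peer congestion have cleared,
 * so a previously blocked send can be retried.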
710 */ 711 static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events) 712 { 713 struct sock *sk = sock->sk; 714 struct tipc_sock *tsk = tipc_sk(sk); 715 __poll_t revents = 0; 716 717 if (sk->sk_shutdown & RCV_SHUTDOWN) 718 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 719 if (sk->sk_shutdown == SHUTDOWN_MASK) 720 revents |= EPOLLHUP; 721 722 switch (sk->sk_state) { 723 case TIPC_ESTABLISHED: 724 case TIPC_CONNECTING: 725 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 726 revents |= EPOLLOUT; 727 /* fall thru' */ 728 case TIPC_LISTEN: 729 if (!skb_queue_empty(&sk->sk_receive_queue)) 730 revents |= EPOLLIN | EPOLLRDNORM; 731 break; 732 case TIPC_OPEN: 733 if (tsk->group_is_open && !tsk->cong_link_cnt) 734 revents |= EPOLLOUT; 735 if (!tipc_sk_type_connectionless(sk)) 736 break; 737 if (skb_queue_empty(&sk->sk_receive_queue)) 738 break; 739 revents |= EPOLLIN | EPOLLRDNORM; 740 break; 741 case TIPC_DISCONNECTING: 742 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; 743 break; 744 } 745 return revents; 746 } 747 748 /** 749 * tipc_sendmcast - send multicast message 750 * @sock: socket structure 751 * @seq: destination address 752 * @msg: message to send 753 * @dlen: length of data to send 754 * @timeout: timeout to wait for wakeup 755 * 756 * Called from function tipc_sendmsg(), which has done all sanity checks 757 * Returns the number of bytes sent on success, or errno 758 */ 759 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 760 struct msghdr *msg, size_t dlen, long timeout) 761 { 762 struct sock *sk = sock->sk; 763 struct tipc_sock *tsk = tipc_sk(sk); 764 struct tipc_msg *hdr = &tsk->phdr; 765 struct net *net = sock_net(sk); 766 int mtu = tipc_bcast_get_mtu(net); 767 struct tipc_mc_method *method = &tsk->mc_method; 768 struct sk_buff_head pkts; 769 struct tipc_nlist dsts; 770 int rc; 771 772 if (tsk->group) 773 return -EACCES; 774 775 /* Block or return if any destination link is congested */ 776 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt); 777 if (unlikely(rc)) 778 return rc; 779 780 /* Lookup destination nodes */ 781 tipc_nlist_init(&dsts, tipc_own_addr(net)); 782 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower, 783 seq->upper, &dsts); 784 if (!dsts.local && !dsts.remote) 785 return -EHOSTUNREACH; 786 787 /* Build message header */ 788 msg_set_type(hdr, TIPC_MCAST_MSG); 789 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 790 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 791 msg_set_destport(hdr, 0); 792 msg_set_destnode(hdr, 0); 793 msg_set_nametype(hdr, seq->type); 794 msg_set_namelower(hdr, seq->lower); 795 msg_set_nameupper(hdr, seq->upper); 796 797 /* Build message as chain of buffers */ 798 skb_queue_head_init(&pkts); 799 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 800 801 /* Send message if build was successful */ 802 if (unlikely(rc == dlen)) 803 rc = tipc_mcast_xmit(net, &pkts, method, &dsts, 804 &tsk->cong_link_cnt); 805 806 tipc_nlist_purge(&dsts); 807 808 return rc ? 
rc : dlen; 809 } 810 811 /** 812 * tipc_send_group_msg - send a message to a member in the group 813 * @net: network namespace 814 * @m: message to send 815 * @mb: group member 816 * @dnode: destination node 817 * @dport: destination port 818 * @dlen: total length of message data 819 */ 820 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk, 821 struct msghdr *m, struct tipc_member *mb, 822 u32 dnode, u32 dport, int dlen) 823 { 824 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group); 825 struct tipc_mc_method *method = &tsk->mc_method; 826 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 827 struct tipc_msg *hdr = &tsk->phdr; 828 struct sk_buff_head pkts; 829 int mtu, rc; 830 831 /* Complete message header */ 832 msg_set_type(hdr, TIPC_GRP_UCAST_MSG); 833 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 834 msg_set_destport(hdr, dport); 835 msg_set_destnode(hdr, dnode); 836 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 837 838 /* Build message as chain of buffers */ 839 skb_queue_head_init(&pkts); 840 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 841 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 842 if (unlikely(rc != dlen)) 843 return rc; 844 845 /* Send message */ 846 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 847 if (unlikely(rc == -ELINKCONG)) { 848 tipc_dest_push(&tsk->cong_links, dnode, 0); 849 tsk->cong_link_cnt++; 850 } 851 852 /* Update send window */ 853 tipc_group_update_member(mb, blks); 854 855 /* A broadcast sent within next EXPIRE period must follow same path */ 856 method->rcast = true; 857 method->mandatory = true; 858 return dlen; 859 } 860 861 /** 862 * tipc_send_group_unicast - send message to a member in the group 863 * @sock: socket structure 864 * @m: message to send 865 * @dlen: total length of message data 866 * @timeout: timeout to wait for wakeup 867 * 868 * Called from function tipc_sendmsg(), which has done all sanity checks 869 * Returns the number of bytes sent on success, or errno 870 */ 871 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, 872 int dlen, long timeout) 873 { 874 struct sock *sk = sock->sk; 875 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 876 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 877 struct tipc_sock *tsk = tipc_sk(sk); 878 struct tipc_group *grp = tsk->group; 879 struct net *net = sock_net(sk); 880 struct tipc_member *mb = NULL; 881 u32 node, port; 882 int rc; 883 884 node = dest->addr.id.node; 885 port = dest->addr.id.ref; 886 if (!port && !node) 887 return -EHOSTUNREACH; 888 889 /* Block or return if destination link or member is congested */ 890 rc = tipc_wait_for_cond(sock, &timeout, 891 !tipc_dest_find(&tsk->cong_links, node, 0) && 892 !tipc_group_cong(grp, node, port, blks, &mb)); 893 if (unlikely(rc)) 894 return rc; 895 896 if (unlikely(!mb)) 897 return -EHOSTUNREACH; 898 899 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen); 900 901 return rc ? 
rc : dlen; 902 } 903 904 /** 905 * tipc_send_group_anycast - send message to any member with given identity 906 * @sock: socket structure 907 * @m: message to send 908 * @dlen: total length of message data 909 * @timeout: timeout to wait for wakeup 910 * 911 * Called from function tipc_sendmsg(), which has done all sanity checks 912 * Returns the number of bytes sent on success, or errno 913 */ 914 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, 915 int dlen, long timeout) 916 { 917 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 918 struct sock *sk = sock->sk; 919 struct tipc_sock *tsk = tipc_sk(sk); 920 struct list_head *cong_links = &tsk->cong_links; 921 int blks = tsk_blocks(GROUP_H_SIZE + dlen); 922 struct tipc_group *grp = tsk->group; 923 struct tipc_msg *hdr = &tsk->phdr; 924 struct tipc_member *first = NULL; 925 struct tipc_member *mbr = NULL; 926 struct net *net = sock_net(sk); 927 u32 node, port, exclude; 928 struct list_head dsts; 929 u32 type, inst, scope; 930 int lookups = 0; 931 int dstcnt, rc; 932 bool cong; 933 934 INIT_LIST_HEAD(&dsts); 935 936 type = msg_nametype(hdr); 937 inst = dest->addr.name.name.instance; 938 scope = msg_lookup_scope(hdr); 939 exclude = tipc_group_exclude(grp); 940 941 while (++lookups < 4) { 942 first = NULL; 943 944 /* Look for a non-congested destination member, if any */ 945 while (1) { 946 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 947 &dstcnt, exclude, false)) 948 return -EHOSTUNREACH; 949 tipc_dest_pop(&dsts, &node, &port); 950 cong = tipc_group_cong(grp, node, port, blks, &mbr); 951 if (!cong) 952 break; 953 if (mbr == first) 954 break; 955 if (!first) 956 first = mbr; 957 } 958 959 /* Start over if destination was not in member list */ 960 if (unlikely(!mbr)) 961 continue; 962 963 if (likely(!cong && !tipc_dest_find(cong_links, node, 0))) 964 break; 965 966 /* Block or return if destination link or member is congested */ 967 rc = tipc_wait_for_cond(sock, &timeout, 968 !tipc_dest_find(cong_links, node, 0) && 969 !tipc_group_cong(grp, node, port, 970 blks, &mbr)); 971 if (unlikely(rc)) 972 return rc; 973 974 /* Send, unless destination disappeared while waiting */ 975 if (likely(mbr)) 976 break; 977 } 978 979 if (unlikely(lookups >= 4)) 980 return -EHOSTUNREACH; 981 982 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen); 983 984 return rc ? 
rc : dlen; 985 } 986 987 /** 988 * tipc_send_group_bcast - send message to all members in communication group 989 * @sk: socket structure 990 * @m: message to send 991 * @dlen: total length of message data 992 * @timeout: timeout to wait for wakeup 993 * 994 * Called from function tipc_sendmsg(), which has done all sanity checks 995 * Returns the number of bytes sent on success, or errno 996 */ 997 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, 998 int dlen, long timeout) 999 { 1000 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1001 struct sock *sk = sock->sk; 1002 struct net *net = sock_net(sk); 1003 struct tipc_sock *tsk = tipc_sk(sk); 1004 struct tipc_group *grp = tsk->group; 1005 struct tipc_nlist *dsts = tipc_group_dests(grp); 1006 struct tipc_mc_method *method = &tsk->mc_method; 1007 bool ack = method->mandatory && method->rcast; 1008 int blks = tsk_blocks(MCAST_H_SIZE + dlen); 1009 struct tipc_msg *hdr = &tsk->phdr; 1010 int mtu = tipc_bcast_get_mtu(net); 1011 struct sk_buff_head pkts; 1012 int rc = -EHOSTUNREACH; 1013 1014 if (!dsts->local && !dsts->remote) 1015 return -EHOSTUNREACH; 1016 1017 /* Block or return if any destination link or member is congested */ 1018 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt && 1019 !tipc_group_bc_cong(grp, blks)); 1020 if (unlikely(rc)) 1021 return rc; 1022 1023 /* Complete message header */ 1024 if (dest) { 1025 msg_set_type(hdr, TIPC_GRP_MCAST_MSG); 1026 msg_set_nameinst(hdr, dest->addr.name.name.instance); 1027 } else { 1028 msg_set_type(hdr, TIPC_GRP_BCAST_MSG); 1029 msg_set_nameinst(hdr, 0); 1030 } 1031 msg_set_hdr_sz(hdr, GROUP_H_SIZE); 1032 msg_set_destport(hdr, 0); 1033 msg_set_destnode(hdr, 0); 1034 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp)); 1035 1036 /* Avoid getting stuck with repeated forced replicasts */ 1037 msg_set_grp_bc_ack_req(hdr, ack); 1038 1039 /* Build message as chain of buffers */ 1040 skb_queue_head_init(&pkts); 1041 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1042 if (unlikely(rc != dlen)) 1043 return rc; 1044 1045 /* Send message */ 1046 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt); 1047 if (unlikely(rc)) 1048 return rc; 1049 1050 /* Update broadcast sequence number and send windows */ 1051 tipc_group_update_bc_members(tsk->group, blks, ack); 1052 1053 /* Broadcast link is now free to choose method for next broadcast */ 1054 method->mandatory = false; 1055 method->expires = jiffies; 1056 1057 return dlen; 1058 } 1059 1060 /** 1061 * tipc_send_group_mcast - send message to all members with given identity 1062 * @sock: socket structure 1063 * @m: message to send 1064 * @dlen: total length of message data 1065 * @timeout: timeout to wait for wakeup 1066 * 1067 * Called from function tipc_sendmsg(), which has done all sanity checks 1068 * Returns the number of bytes sent on success, or errno 1069 */ 1070 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, 1071 int dlen, long timeout) 1072 { 1073 struct sock *sk = sock->sk; 1074 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1075 struct tipc_sock *tsk = tipc_sk(sk); 1076 struct tipc_group *grp = tsk->group; 1077 struct tipc_msg *hdr = &tsk->phdr; 1078 struct net *net = sock_net(sk); 1079 u32 type, inst, scope, exclude; 1080 struct list_head dsts; 1081 u32 dstcnt; 1082 1083 INIT_LIST_HEAD(&dsts); 1084 1085 type = msg_nametype(hdr); 1086 inst = dest->addr.name.name.instance; 1087 scope = msg_lookup_scope(hdr); 1088 exclude = 
tipc_group_exclude(grp); 1089 1090 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, 1091 &dstcnt, exclude, true)) 1092 return -EHOSTUNREACH; 1093 1094 if (dstcnt == 1) { 1095 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref); 1096 return tipc_send_group_unicast(sock, m, dlen, timeout); 1097 } 1098 1099 tipc_dest_list_purge(&dsts); 1100 return tipc_send_group_bcast(sock, m, dlen, timeout); 1101 } 1102 1103 /** 1104 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets 1105 * @arrvq: queue with arriving messages, to be cloned after destination lookup 1106 * @inputq: queue with cloned messages, delivered to socket after dest lookup 1107 * 1108 * Multi-threaded: parallel calls with reference to same queues may occur 1109 */ 1110 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, 1111 struct sk_buff_head *inputq) 1112 { 1113 u32 self = tipc_own_addr(net); 1114 u32 type, lower, upper, scope; 1115 struct sk_buff *skb, *_skb; 1116 u32 portid, oport, onode; 1117 struct sk_buff_head tmpq; 1118 struct list_head dports; 1119 struct tipc_msg *hdr; 1120 int user, mtyp, hlen; 1121 bool exact; 1122 1123 __skb_queue_head_init(&tmpq); 1124 INIT_LIST_HEAD(&dports); 1125 1126 skb = tipc_skb_peek(arrvq, &inputq->lock); 1127 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { 1128 hdr = buf_msg(skb); 1129 user = msg_user(hdr); 1130 mtyp = msg_type(hdr); 1131 hlen = skb_headroom(skb) + msg_hdr_sz(hdr); 1132 oport = msg_origport(hdr); 1133 onode = msg_orignode(hdr); 1134 type = msg_nametype(hdr); 1135 1136 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { 1137 spin_lock_bh(&inputq->lock); 1138 if (skb_peek(arrvq) == skb) { 1139 __skb_dequeue(arrvq); 1140 __skb_queue_tail(inputq, skb); 1141 } 1142 kfree_skb(skb); 1143 spin_unlock_bh(&inputq->lock); 1144 continue; 1145 } 1146 1147 /* Group messages require exact scope match */ 1148 if (msg_in_group(hdr)) { 1149 lower = 0; 1150 upper = ~0; 1151 scope = msg_lookup_scope(hdr); 1152 exact = true; 1153 } else { 1154 /* TIPC_NODE_SCOPE means "any scope" in this context */ 1155 if (onode == self) 1156 scope = TIPC_NODE_SCOPE; 1157 else 1158 scope = TIPC_CLUSTER_SCOPE; 1159 exact = false; 1160 lower = msg_namelower(hdr); 1161 upper = msg_nameupper(hdr); 1162 } 1163 1164 /* Create destination port list: */ 1165 tipc_nametbl_mc_lookup(net, type, lower, upper, 1166 scope, exact, &dports); 1167 1168 /* Clone message per destination */ 1169 while (tipc_dest_pop(&dports, NULL, &portid)) { 1170 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC); 1171 if (_skb) { 1172 msg_set_destport(buf_msg(_skb), portid); 1173 __skb_queue_tail(&tmpq, _skb); 1174 continue; 1175 } 1176 pr_warn("Failed to clone mcast rcv buffer\n"); 1177 } 1178 /* Append to inputq if not already done by other thread */ 1179 spin_lock_bh(&inputq->lock); 1180 if (skb_peek(arrvq) == skb) { 1181 skb_queue_splice_tail_init(&tmpq, inputq); 1182 kfree_skb(__skb_dequeue(arrvq)); 1183 } 1184 spin_unlock_bh(&inputq->lock); 1185 __skb_queue_purge(&tmpq); 1186 kfree_skb(skb); 1187 } 1188 tipc_sk_rcv(net, inputq); 1189 } 1190 1191 /** 1192 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message 1193 * @tsk: receiving socket 1194 * @skb: pointer to message buffer. 
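 * @xmitq: queue for outgoing buffers, e.g. a generated CONN_PROBE_REPLY
 *
 * A CONN_PROBE is answered by reversing the buffer into a CONN_PROBE_REPLY;
 * a CONN_ACK reduces snt_unacked (and, if the peer supports block flow
 * control, refreshes snd_win), waking up a sender that was blocked on
 * connection congestion. A message carrying an error code disconnects the
 * socket.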
1195 */ 1196 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, 1197 struct sk_buff_head *xmitq) 1198 { 1199 struct tipc_msg *hdr = buf_msg(skb); 1200 u32 onode = tsk_own_node(tsk); 1201 struct sock *sk = &tsk->sk; 1202 int mtyp = msg_type(hdr); 1203 bool conn_cong; 1204 1205 /* Ignore if connection cannot be validated: */ 1206 if (!tsk_peer_msg(tsk, hdr)) 1207 goto exit; 1208 1209 if (unlikely(msg_errcode(hdr))) { 1210 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1211 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), 1212 tsk_peer_port(tsk)); 1213 sk->sk_state_change(sk); 1214 goto exit; 1215 } 1216 1217 tsk->probe_unacked = false; 1218 1219 if (mtyp == CONN_PROBE) { 1220 msg_set_type(hdr, CONN_PROBE_REPLY); 1221 if (tipc_msg_reverse(onode, &skb, TIPC_OK)) 1222 __skb_queue_tail(xmitq, skb); 1223 return; 1224 } else if (mtyp == CONN_ACK) { 1225 conn_cong = tsk_conn_cong(tsk); 1226 tsk->snt_unacked -= msg_conn_ack(hdr); 1227 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1228 tsk->snd_win = msg_adv_win(hdr); 1229 if (conn_cong) 1230 sk->sk_write_space(sk); 1231 } else if (mtyp != CONN_PROBE_REPLY) { 1232 pr_warn("Received unknown CONN_PROTO msg\n"); 1233 } 1234 exit: 1235 kfree_skb(skb); 1236 } 1237 1238 /** 1239 * tipc_sendmsg - send message in connectionless manner 1240 * @sock: socket structure 1241 * @m: message to send 1242 * @dsz: amount of user data to be sent 1243 * 1244 * Message must have an destination specified explicitly. 1245 * Used for SOCK_RDM and SOCK_DGRAM messages, 1246 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 1247 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 1248 * 1249 * Returns the number of bytes sent on success, or errno otherwise 1250 */ 1251 static int tipc_sendmsg(struct socket *sock, 1252 struct msghdr *m, size_t dsz) 1253 { 1254 struct sock *sk = sock->sk; 1255 int ret; 1256 1257 lock_sock(sk); 1258 ret = __tipc_sendmsg(sock, m, dsz); 1259 release_sock(sk); 1260 1261 return ret; 1262 } 1263 1264 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) 1265 { 1266 struct sock *sk = sock->sk; 1267 struct net *net = sock_net(sk); 1268 struct tipc_sock *tsk = tipc_sk(sk); 1269 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1270 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1271 struct list_head *clinks = &tsk->cong_links; 1272 bool syn = !tipc_sk_type_connectionless(sk); 1273 struct tipc_group *grp = tsk->group; 1274 struct tipc_msg *hdr = &tsk->phdr; 1275 struct tipc_name_seq *seq; 1276 struct sk_buff_head pkts; 1277 u32 dport, dnode = 0; 1278 u32 type, inst; 1279 int mtu, rc; 1280 1281 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) 1282 return -EMSGSIZE; 1283 1284 if (likely(dest)) { 1285 if (unlikely(m->msg_namelen < sizeof(*dest))) 1286 return -EINVAL; 1287 if (unlikely(dest->family != AF_TIPC)) 1288 return -EINVAL; 1289 } 1290 1291 if (grp) { 1292 if (!dest) 1293 return tipc_send_group_bcast(sock, m, dlen, timeout); 1294 if (dest->addrtype == TIPC_ADDR_NAME) 1295 return tipc_send_group_anycast(sock, m, dlen, timeout); 1296 if (dest->addrtype == TIPC_ADDR_ID) 1297 return tipc_send_group_unicast(sock, m, dlen, timeout); 1298 if (dest->addrtype == TIPC_ADDR_MCAST) 1299 return tipc_send_group_mcast(sock, m, dlen, timeout); 1300 return -EINVAL; 1301 } 1302 1303 if (unlikely(!dest)) { 1304 dest = &tsk->peer; 1305 if (!syn || dest->family != AF_TIPC) 1306 return -EDESTADDRREQ; 1307 } 1308 1309 if (unlikely(syn)) { 1310 if (sk->sk_state == TIPC_LISTEN) 1311 
return -EPIPE; 1312 if (sk->sk_state != TIPC_OPEN) 1313 return -EISCONN; 1314 if (tsk->published) 1315 return -EOPNOTSUPP; 1316 if (dest->addrtype == TIPC_ADDR_NAME) { 1317 tsk->conn_type = dest->addr.name.name.type; 1318 tsk->conn_instance = dest->addr.name.name.instance; 1319 } 1320 } 1321 1322 seq = &dest->addr.nameseq; 1323 if (dest->addrtype == TIPC_ADDR_MCAST) 1324 return tipc_sendmcast(sock, seq, m, dlen, timeout); 1325 1326 if (dest->addrtype == TIPC_ADDR_NAME) { 1327 type = dest->addr.name.name.type; 1328 inst = dest->addr.name.name.instance; 1329 dnode = dest->addr.name.domain; 1330 msg_set_type(hdr, TIPC_NAMED_MSG); 1331 msg_set_hdr_sz(hdr, NAMED_H_SIZE); 1332 msg_set_nametype(hdr, type); 1333 msg_set_nameinst(hdr, inst); 1334 msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); 1335 dport = tipc_nametbl_translate(net, type, inst, &dnode); 1336 msg_set_destnode(hdr, dnode); 1337 msg_set_destport(hdr, dport); 1338 if (unlikely(!dport && !dnode)) 1339 return -EHOSTUNREACH; 1340 } else if (dest->addrtype == TIPC_ADDR_ID) { 1341 dnode = dest->addr.id.node; 1342 msg_set_type(hdr, TIPC_DIRECT_MSG); 1343 msg_set_lookup_scope(hdr, 0); 1344 msg_set_destnode(hdr, dnode); 1345 msg_set_destport(hdr, dest->addr.id.ref); 1346 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1347 } else { 1348 return -EINVAL; 1349 } 1350 1351 /* Block or return if destination link is congested */ 1352 rc = tipc_wait_for_cond(sock, &timeout, 1353 !tipc_dest_find(clinks, dnode, 0)); 1354 if (unlikely(rc)) 1355 return rc; 1356 1357 skb_queue_head_init(&pkts); 1358 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 1359 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1360 if (unlikely(rc != dlen)) 1361 return rc; 1362 1363 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1364 if (unlikely(rc == -ELINKCONG)) { 1365 tipc_dest_push(clinks, dnode, 0); 1366 tsk->cong_link_cnt++; 1367 rc = 0; 1368 } 1369 1370 if (unlikely(syn && !rc)) 1371 tipc_set_sk_state(sk, TIPC_CONNECTING); 1372 1373 return rc ? rc : dlen; 1374 } 1375 1376 /** 1377 * tipc_sendstream - send stream-oriented data 1378 * @sock: socket structure 1379 * @m: data to send 1380 * @dsz: total length of data to be transmitted 1381 * 1382 * Used for SOCK_STREAM data. 
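 * A destination address supplied with the first message is treated as an
 * implicit connection setup (see __tipc_sendstream() below). Since a call
 * may also succeed only partially, a userspace sender typically loops, e.g.
 * (illustrative sketch; 'sd', 'buf' and 'len' are assumed to exist):
 *
 *	size_t done = 0;
 *	ssize_t n;
 *
 *	while (done < len) {
 *		n = send(sd, buf + done, len - done, 0);
 *		if (n < 0)
 *			break;
 *		done += n;
 *	}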
1383 * 1384 * Returns the number of bytes sent on success (or partial success), 1385 * or errno if no data sent 1386 */ 1387 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz) 1388 { 1389 struct sock *sk = sock->sk; 1390 int ret; 1391 1392 lock_sock(sk); 1393 ret = __tipc_sendstream(sock, m, dsz); 1394 release_sock(sk); 1395 1396 return ret; 1397 } 1398 1399 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) 1400 { 1401 struct sock *sk = sock->sk; 1402 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1403 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1404 struct tipc_sock *tsk = tipc_sk(sk); 1405 struct tipc_msg *hdr = &tsk->phdr; 1406 struct net *net = sock_net(sk); 1407 struct sk_buff_head pkts; 1408 u32 dnode = tsk_peer_node(tsk); 1409 int send, sent = 0; 1410 int rc = 0; 1411 1412 skb_queue_head_init(&pkts); 1413 1414 if (unlikely(dlen > INT_MAX)) 1415 return -EMSGSIZE; 1416 1417 /* Handle implicit connection setup */ 1418 if (unlikely(dest)) { 1419 rc = __tipc_sendmsg(sock, m, dlen); 1420 if (dlen && (dlen == rc)) 1421 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); 1422 return rc; 1423 } 1424 1425 do { 1426 rc = tipc_wait_for_cond(sock, &timeout, 1427 (!tsk->cong_link_cnt && 1428 !tsk_conn_cong(tsk) && 1429 tipc_sk_connected(sk))); 1430 if (unlikely(rc)) 1431 break; 1432 1433 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); 1434 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts); 1435 if (unlikely(rc != send)) 1436 break; 1437 1438 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); 1439 if (unlikely(rc == -ELINKCONG)) { 1440 tsk->cong_link_cnt = 1; 1441 rc = 0; 1442 } 1443 if (likely(!rc)) { 1444 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE); 1445 sent += send; 1446 } 1447 } while (sent < dlen && !rc); 1448 1449 return sent ? sent : rc; 1450 } 1451 1452 /** 1453 * tipc_send_packet - send a connection-oriented message 1454 * @sock: socket structure 1455 * @m: message to send 1456 * @dsz: length of data to be transmitted 1457 * 1458 * Used for SOCK_SEQPACKET messages. 
1459 * 1460 * Returns the number of bytes sent on success, or errno otherwise 1461 */ 1462 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) 1463 { 1464 if (dsz > TIPC_MAX_USER_MSG_SIZE) 1465 return -EMSGSIZE; 1466 1467 return tipc_sendstream(sock, m, dsz); 1468 } 1469 1470 /* tipc_sk_finish_conn - complete the setup of a connection 1471 */ 1472 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1473 u32 peer_node) 1474 { 1475 struct sock *sk = &tsk->sk; 1476 struct net *net = sock_net(sk); 1477 struct tipc_msg *msg = &tsk->phdr; 1478 1479 msg_set_destnode(msg, peer_node); 1480 msg_set_destport(msg, peer_port); 1481 msg_set_type(msg, TIPC_CONN_MSG); 1482 msg_set_lookup_scope(msg, 0); 1483 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1484 1485 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 1486 tipc_set_sk_state(sk, TIPC_ESTABLISHED); 1487 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1488 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1489 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node); 1490 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) 1491 return; 1492 1493 /* Fall back to message based flow control */ 1494 tsk->rcv_win = FLOWCTL_MSG_WIN; 1495 tsk->snd_win = FLOWCTL_MSG_WIN; 1496 } 1497 1498 /** 1499 * tipc_sk_set_orig_addr - capture sender's address for received message 1500 * @m: descriptor for message info 1501 * @hdr: received message header 1502 * 1503 * Note: Address is not captured if not requested by receiver. 1504 */ 1505 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) 1506 { 1507 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name); 1508 struct tipc_msg *hdr = buf_msg(skb); 1509 1510 if (!srcaddr) 1511 return; 1512 1513 srcaddr->sock.family = AF_TIPC; 1514 srcaddr->sock.addrtype = TIPC_ADDR_ID; 1515 srcaddr->sock.addr.id.ref = msg_origport(hdr); 1516 srcaddr->sock.addr.id.node = msg_orignode(hdr); 1517 srcaddr->sock.addr.name.domain = 0; 1518 srcaddr->sock.scope = 0; 1519 m->msg_namelen = sizeof(struct sockaddr_tipc); 1520 1521 if (!msg_in_group(hdr)) 1522 return; 1523 1524 /* Group message users may also want to know sending member's id */ 1525 srcaddr->member.family = AF_TIPC; 1526 srcaddr->member.addrtype = TIPC_ADDR_NAME; 1527 srcaddr->member.addr.name.name.type = msg_nametype(hdr); 1528 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; 1529 srcaddr->member.addr.name.domain = 0; 1530 m->msg_namelen = sizeof(*srcaddr); 1531 } 1532 1533 /** 1534 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1535 * @m: descriptor for message info 1536 * @msg: received message header 1537 * @tsk: TIPC port associated with message 1538 * 1539 * Note: Ancillary data is not captured if not requested by receiver. 1540 * 1541 * Returns 0 if successful, otherwise errno 1542 */ 1543 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1544 struct tipc_sock *tsk) 1545 { 1546 u32 anc_data[3]; 1547 u32 err; 1548 u32 dest_type; 1549 int has_name; 1550 int res; 1551 1552 if (likely(m->msg_controllen == 0)) 1553 return 0; 1554 1555 /* Optionally capture errored message object(s) */ 1556 err = msg ? 
msg_errcode(msg) : 0; 1557 if (unlikely(err)) { 1558 anc_data[0] = err; 1559 anc_data[1] = msg_data_sz(msg); 1560 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data); 1561 if (res) 1562 return res; 1563 if (anc_data[1]) { 1564 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1565 msg_data(msg)); 1566 if (res) 1567 return res; 1568 } 1569 } 1570 1571 /* Optionally capture message destination object */ 1572 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 1573 switch (dest_type) { 1574 case TIPC_NAMED_MSG: 1575 has_name = 1; 1576 anc_data[0] = msg_nametype(msg); 1577 anc_data[1] = msg_namelower(msg); 1578 anc_data[2] = msg_namelower(msg); 1579 break; 1580 case TIPC_MCAST_MSG: 1581 has_name = 1; 1582 anc_data[0] = msg_nametype(msg); 1583 anc_data[1] = msg_namelower(msg); 1584 anc_data[2] = msg_nameupper(msg); 1585 break; 1586 case TIPC_CONN_MSG: 1587 has_name = (tsk->conn_type != 0); 1588 anc_data[0] = tsk->conn_type; 1589 anc_data[1] = tsk->conn_instance; 1590 anc_data[2] = tsk->conn_instance; 1591 break; 1592 default: 1593 has_name = 0; 1594 } 1595 if (has_name) { 1596 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data); 1597 if (res) 1598 return res; 1599 } 1600 1601 return 0; 1602 } 1603 1604 static void tipc_sk_send_ack(struct tipc_sock *tsk) 1605 { 1606 struct sock *sk = &tsk->sk; 1607 struct net *net = sock_net(sk); 1608 struct sk_buff *skb = NULL; 1609 struct tipc_msg *msg; 1610 u32 peer_port = tsk_peer_port(tsk); 1611 u32 dnode = tsk_peer_node(tsk); 1612 1613 if (!tipc_sk_connected(sk)) 1614 return; 1615 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, 1616 dnode, tsk_own_node(tsk), peer_port, 1617 tsk->portid, TIPC_OK); 1618 if (!skb) 1619 return; 1620 msg = buf_msg(skb); 1621 msg_set_conn_ack(msg, tsk->rcv_unacked); 1622 tsk->rcv_unacked = 0; 1623 1624 /* Adjust to and advertize the correct window limit */ 1625 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) { 1626 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf); 1627 msg_set_adv_win(msg, tsk->rcv_win); 1628 } 1629 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg)); 1630 } 1631 1632 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1633 { 1634 struct sock *sk = sock->sk; 1635 DEFINE_WAIT(wait); 1636 long timeo = *timeop; 1637 int err = sock_error(sk); 1638 1639 if (err) 1640 return err; 1641 1642 for (;;) { 1643 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1644 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1645 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1646 err = -ENOTCONN; 1647 break; 1648 } 1649 release_sock(sk); 1650 timeo = schedule_timeout(timeo); 1651 lock_sock(sk); 1652 } 1653 err = 0; 1654 if (!skb_queue_empty(&sk->sk_receive_queue)) 1655 break; 1656 err = -EAGAIN; 1657 if (!timeo) 1658 break; 1659 err = sock_intr_errno(timeo); 1660 if (signal_pending(current)) 1661 break; 1662 1663 err = sock_error(sk); 1664 if (err) 1665 break; 1666 } 1667 finish_wait(sk_sleep(sk), &wait); 1668 *timeop = timeo; 1669 return err; 1670 } 1671 1672 /** 1673 * tipc_recvmsg - receive packet-oriented message 1674 * @m: descriptor for message info 1675 * @buflen: length of user buffer area 1676 * @flags: receive flags 1677 * 1678 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 1679 * If the complete message doesn't fit in user area, truncate it. 
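 *
 * Ancillary data (see tipc_sk_anc_data_recv() above) is only returned when
 * the caller supplies a control buffer. A minimal userspace sketch, with
 * 'sd', 'iov' and all error handling omitted for brevity, and the control
 * buffer sized for TIPC_DESTNAME only:
 *
 *	char cbuf[CMSG_SPACE(12)];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *	__u32 dname[3];
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm))
 *		if (cm->cmsg_level == SOL_TIPC && cm->cmsg_type == TIPC_DESTNAME)
 *			memcpy(dname, CMSG_DATA(cm), sizeof(dname));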
1680 * 1681 * Returns size of returned message data, errno otherwise 1682 */ 1683 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, 1684 size_t buflen, int flags) 1685 { 1686 struct sock *sk = sock->sk; 1687 bool connected = !tipc_sk_type_connectionless(sk); 1688 struct tipc_sock *tsk = tipc_sk(sk); 1689 int rc, err, hlen, dlen, copy; 1690 struct sk_buff_head xmitq; 1691 struct tipc_msg *hdr; 1692 struct sk_buff *skb; 1693 bool grp_evt; 1694 long timeout; 1695 1696 /* Catch invalid receive requests */ 1697 if (unlikely(!buflen)) 1698 return -EINVAL; 1699 1700 lock_sock(sk); 1701 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) { 1702 rc = -ENOTCONN; 1703 goto exit; 1704 } 1705 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1706 1707 /* Step rcv queue to first msg with data or error; wait if necessary */ 1708 do { 1709 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1710 if (unlikely(rc)) 1711 goto exit; 1712 skb = skb_peek(&sk->sk_receive_queue); 1713 hdr = buf_msg(skb); 1714 dlen = msg_data_sz(hdr); 1715 hlen = msg_hdr_sz(hdr); 1716 err = msg_errcode(hdr); 1717 grp_evt = msg_is_grp_evt(hdr); 1718 if (likely(dlen || err)) 1719 break; 1720 tsk_advance_rx_queue(sk); 1721 } while (1); 1722 1723 /* Collect msg meta data, including error code and rejected data */ 1724 tipc_sk_set_orig_addr(m, skb); 1725 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1726 if (unlikely(rc)) 1727 goto exit; 1728 1729 /* Capture data if non-error msg, otherwise just set return value */ 1730 if (likely(!err)) { 1731 copy = min_t(int, dlen, buflen); 1732 if (unlikely(copy != dlen)) 1733 m->msg_flags |= MSG_TRUNC; 1734 rc = skb_copy_datagram_msg(skb, hlen, m, copy); 1735 } else { 1736 copy = 0; 1737 rc = 0; 1738 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) 1739 rc = -ECONNRESET; 1740 } 1741 if (unlikely(rc)) 1742 goto exit; 1743 1744 /* Mark message as group event if applicable */ 1745 if (unlikely(grp_evt)) { 1746 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN) 1747 m->msg_flags |= MSG_EOR; 1748 m->msg_flags |= MSG_OOB; 1749 copy = 0; 1750 } 1751 1752 /* Caption of data or error code/rejected data was successful */ 1753 if (unlikely(flags & MSG_PEEK)) 1754 goto exit; 1755 1756 /* Send group flow control advertisement when applicable */ 1757 if (tsk->group && msg_in_group(hdr) && !grp_evt) { 1758 skb_queue_head_init(&xmitq); 1759 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen), 1760 msg_orignode(hdr), msg_origport(hdr), 1761 &xmitq); 1762 tipc_node_distr_xmit(sock_net(sk), &xmitq); 1763 } 1764 1765 tsk_advance_rx_queue(sk); 1766 1767 if (likely(!connected)) 1768 goto exit; 1769 1770 /* Send connection flow control advertisement when applicable */ 1771 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1772 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE) 1773 tipc_sk_send_ack(tsk); 1774 exit: 1775 release_sock(sk); 1776 return rc ? rc : copy; 1777 } 1778 1779 /** 1780 * tipc_recvstream - receive stream-oriented data 1781 * @m: descriptor for message info 1782 * @buflen: total size of user buffer area 1783 * @flags: receive flags 1784 * 1785 * Used for SOCK_STREAM messages only. If not enough data is available 1786 * will optionally wait for more; never truncates data. 
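 * Data is copied for as long as messages are queued; if needed, the call
 * blocks until at least the SO_RCVLOWAT watermark (or, with MSG_WAITALL,
 * the full @buflen) has been received. With MSG_PEEK only the first queued
 * message is inspected and nothing is consumed from the receive queue.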
1787 * 1788 * Returns size of returned message data, errno otherwise 1789 */ 1790 static int tipc_recvstream(struct socket *sock, struct msghdr *m, 1791 size_t buflen, int flags) 1792 { 1793 struct sock *sk = sock->sk; 1794 struct tipc_sock *tsk = tipc_sk(sk); 1795 struct sk_buff *skb; 1796 struct tipc_msg *hdr; 1797 struct tipc_skb_cb *skb_cb; 1798 bool peek = flags & MSG_PEEK; 1799 int offset, required, copy, copied = 0; 1800 int hlen, dlen, err, rc; 1801 long timeout; 1802 1803 /* Catch invalid receive attempts */ 1804 if (unlikely(!buflen)) 1805 return -EINVAL; 1806 1807 lock_sock(sk); 1808 1809 if (unlikely(sk->sk_state == TIPC_OPEN)) { 1810 rc = -ENOTCONN; 1811 goto exit; 1812 } 1813 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen); 1814 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1815 1816 do { 1817 /* Look at first msg in receive queue; wait if necessary */ 1818 rc = tipc_wait_for_rcvmsg(sock, &timeout); 1819 if (unlikely(rc)) 1820 break; 1821 skb = skb_peek(&sk->sk_receive_queue); 1822 skb_cb = TIPC_SKB_CB(skb); 1823 hdr = buf_msg(skb); 1824 dlen = msg_data_sz(hdr); 1825 hlen = msg_hdr_sz(hdr); 1826 err = msg_errcode(hdr); 1827 1828 /* Discard any empty non-errored (SYN-) message */ 1829 if (unlikely(!dlen && !err)) { 1830 tsk_advance_rx_queue(sk); 1831 continue; 1832 } 1833 1834 /* Collect msg meta data, incl. error code and rejected data */ 1835 if (!copied) { 1836 tipc_sk_set_orig_addr(m, skb); 1837 rc = tipc_sk_anc_data_recv(m, hdr, tsk); 1838 if (rc) 1839 break; 1840 } 1841 1842 /* Copy data if msg ok, otherwise return error/partial data */ 1843 if (likely(!err)) { 1844 offset = skb_cb->bytes_read; 1845 copy = min_t(int, dlen - offset, buflen - copied); 1846 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); 1847 if (unlikely(rc)) 1848 break; 1849 copied += copy; 1850 offset += copy; 1851 if (unlikely(offset < dlen)) { 1852 if (!peek) 1853 skb_cb->bytes_read = offset; 1854 break; 1855 } 1856 } else { 1857 rc = 0; 1858 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control) 1859 rc = -ECONNRESET; 1860 if (copied || rc) 1861 break; 1862 } 1863 1864 if (unlikely(peek)) 1865 break; 1866 1867 tsk_advance_rx_queue(sk); 1868 1869 /* Send connection flow control advertisement when applicable */ 1870 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); 1871 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)) 1872 tipc_sk_send_ack(tsk); 1873 1874 /* Exit if all requested data or FIN/error received */ 1875 if (copied == buflen || err) 1876 break; 1877 1878 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required); 1879 exit: 1880 release_sock(sk); 1881 return copied ? 
copied : rc; 1882 } 1883 1884 /** 1885 * tipc_write_space - wake up thread if port congestion is released 1886 * @sk: socket 1887 */ 1888 static void tipc_write_space(struct sock *sk) 1889 { 1890 struct socket_wq *wq; 1891 1892 rcu_read_lock(); 1893 wq = rcu_dereference(sk->sk_wq); 1894 if (skwq_has_sleeper(wq)) 1895 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1896 EPOLLWRNORM | EPOLLWRBAND); 1897 rcu_read_unlock(); 1898 } 1899 1900 /** 1901 * tipc_data_ready - wake up threads to indicate messages have been received 1902 * @sk: socket 1903 * 1904 */ 1905 static void tipc_data_ready(struct sock *sk) 1906 { 1907 struct socket_wq *wq; 1908 1909 rcu_read_lock(); 1910 wq = rcu_dereference(sk->sk_wq); 1911 if (skwq_has_sleeper(wq)) 1912 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1913 EPOLLRDNORM | EPOLLRDBAND); 1914 rcu_read_unlock(); 1915 } 1916 1917 static void tipc_sock_destruct(struct sock *sk) 1918 { 1919 __skb_queue_purge(&sk->sk_receive_queue); 1920 } 1921 1922 static void tipc_sk_proto_rcv(struct sock *sk, 1923 struct sk_buff_head *inputq, 1924 struct sk_buff_head *xmitq) 1925 { 1926 struct sk_buff *skb = __skb_dequeue(inputq); 1927 struct tipc_sock *tsk = tipc_sk(sk); 1928 struct tipc_msg *hdr = buf_msg(skb); 1929 struct tipc_group *grp = tsk->group; 1930 bool wakeup = false; 1931 1932 switch (msg_user(hdr)) { 1933 case CONN_MANAGER: 1934 tipc_sk_conn_proto_rcv(tsk, skb, xmitq); 1935 return; 1936 case SOCK_WAKEUP: 1937 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); 1938 tsk->cong_link_cnt--; 1939 wakeup = true; 1940 break; 1941 case GROUP_PROTOCOL: 1942 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq); 1943 break; 1944 case TOP_SRV: 1945 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, 1946 hdr, inputq, xmitq); 1947 break; 1948 default: 1949 break; 1950 } 1951 1952 if (wakeup) 1953 sk->sk_write_space(sk); 1954 1955 kfree_skb(skb); 1956 } 1957 1958 /** 1959 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket 1960 * @tsk: TIPC socket 1961 * @skb: pointer to message buffer.
Set to NULL if buffer is consumed 1962 * 1963 * Returns true if everything ok, false otherwise 1964 */ 1965 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1966 { 1967 struct sock *sk = &tsk->sk; 1968 struct net *net = sock_net(sk); 1969 struct tipc_msg *hdr = buf_msg(skb); 1970 u32 pport = msg_origport(hdr); 1971 u32 pnode = msg_orignode(hdr); 1972 1973 if (unlikely(msg_mcast(hdr))) 1974 return false; 1975 1976 switch (sk->sk_state) { 1977 case TIPC_CONNECTING: 1978 /* Accept only ACK or NACK message */ 1979 if (unlikely(!msg_connected(hdr))) { 1980 if (pport != tsk_peer_port(tsk) || 1981 pnode != tsk_peer_node(tsk)) 1982 return false; 1983 1984 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1985 sk->sk_err = ECONNREFUSED; 1986 sk->sk_state_change(sk); 1987 return true; 1988 } 1989 1990 if (unlikely(msg_errcode(hdr))) { 1991 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1992 sk->sk_err = ECONNREFUSED; 1993 sk->sk_state_change(sk); 1994 return true; 1995 } 1996 1997 if (unlikely(!msg_isdata(hdr))) { 1998 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 1999 sk->sk_err = EINVAL; 2000 sk->sk_state_change(sk); 2001 return true; 2002 } 2003 2004 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 2005 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 2006 2007 /* If 'ACK+' message, add to socket receive queue */ 2008 if (msg_data_sz(hdr)) 2009 return true; 2010 2011 /* If empty 'ACK-' message, wake up sleeping connect() */ 2012 sk->sk_data_ready(sk); 2013 2014 /* 'ACK-' message is neither accepted nor rejected: */ 2015 msg_set_dest_droppable(hdr, 1); 2016 return false; 2017 2018 case TIPC_OPEN: 2019 case TIPC_DISCONNECTING: 2020 break; 2021 case TIPC_LISTEN: 2022 /* Accept only SYN message */ 2023 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 2024 return true; 2025 break; 2026 case TIPC_ESTABLISHED: 2027 /* Accept only connection-based messages sent by peer */ 2028 if (unlikely(!tsk_peer_msg(tsk, hdr))) 2029 return false; 2030 2031 if (unlikely(msg_errcode(hdr))) { 2032 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2033 /* Let timer expire on its own */ 2034 tipc_node_remove_conn(net, tsk_peer_node(tsk), 2035 tsk->portid); 2036 sk->sk_state_change(sk); 2037 } 2038 return true; 2039 default: 2040 pr_err("Unknown sk_state %u\n", sk->sk_state); 2041 } 2042 2043 return false; 2044 } 2045 2046 /** 2047 * rcvbuf_limit - get proper overload limit of socket receive queue 2048 * @sk: socket 2049 * @skb: message 2050 * 2051 * For connection oriented messages, irrespective of importance, 2052 * default queue limit is 2 MB. 2053 * 2054 * For connectionless messages, queue limits are based on message 2055 * importance as follows: 2056 * 2057 * TIPC_LOW_IMPORTANCE (2 MB) 2058 * TIPC_MEDIUM_IMPORTANCE (4 MB) 2059 * TIPC_HIGH_IMPORTANCE (8 MB) 2060 * TIPC_CRITICAL_IMPORTANCE (16 MB) 2061 * 2062 * Returns overload limit according to corresponding message importance 2063 */ 2064 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) 2065 { 2066 struct tipc_sock *tsk = tipc_sk(sk); 2067 struct tipc_msg *hdr = buf_msg(skb); 2068 2069 if (unlikely(msg_in_group(hdr))) 2070 return sk->sk_rcvbuf; 2071 2072 if (unlikely(!msg_connected(hdr))) 2073 return sk->sk_rcvbuf << msg_importance(hdr); 2074 2075 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) 2076 return sk->sk_rcvbuf; 2077 2078 return FLOWCTL_MSG_LIM; 2079 } 2080 2081 /** 2082 * tipc_sk_filter_rcv - validate incoming message 2083 * @sk: socket 2084 * @skb: pointer to message.
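* @xmitq: output queue for protocol replies and rejected messages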
2085 * 2086 * Enqueues message on receive queue if acceptable; optionally handles 2087 * disconnect indication for a connected socket. 2088 * 2089 * Called with socket lock already taken 2090 * 2091 */ 2092 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb, 2093 struct sk_buff_head *xmitq) 2094 { 2095 bool sk_conn = !tipc_sk_type_connectionless(sk); 2096 struct tipc_sock *tsk = tipc_sk(sk); 2097 struct tipc_group *grp = tsk->group; 2098 struct tipc_msg *hdr = buf_msg(skb); 2099 struct net *net = sock_net(sk); 2100 struct sk_buff_head inputq; 2101 int limit, err = TIPC_OK; 2102 2103 TIPC_SKB_CB(skb)->bytes_read = 0; 2104 __skb_queue_head_init(&inputq); 2105 __skb_queue_tail(&inputq, skb); 2106 2107 if (unlikely(!msg_isdata(hdr))) 2108 tipc_sk_proto_rcv(sk, &inputq, xmitq); 2109 2110 if (unlikely(grp)) 2111 tipc_group_filter_msg(grp, &inputq, xmitq); 2112 2113 /* Validate and add to receive buffer if there is space */ 2114 while ((skb = __skb_dequeue(&inputq))) { 2115 hdr = buf_msg(skb); 2116 limit = rcvbuf_limit(sk, skb); 2117 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) || 2118 (!sk_conn && msg_connected(hdr)) || 2119 (!grp && msg_in_group(hdr))) 2120 err = TIPC_ERR_NO_PORT; 2121 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) { 2122 atomic_inc(&sk->sk_drops); 2123 err = TIPC_ERR_OVERLOAD; 2124 } 2125 2126 if (unlikely(err)) { 2127 tipc_skb_reject(net, err, skb, xmitq); 2128 err = TIPC_OK; 2129 continue; 2130 } 2131 __skb_queue_tail(&sk->sk_receive_queue, skb); 2132 skb_set_owner_r(skb, sk); 2133 sk->sk_data_ready(sk); 2134 } 2135 } 2136 2137 /** 2138 * tipc_sk_backlog_rcv - handle incoming message from backlog queue 2139 * @sk: socket 2140 * @skb: message 2141 * 2142 * Caller must hold socket lock 2143 */ 2144 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 2145 { 2146 unsigned int before = sk_rmem_alloc_get(sk); 2147 struct sk_buff_head xmitq; 2148 unsigned int added; 2149 2150 __skb_queue_head_init(&xmitq); 2151 2152 tipc_sk_filter_rcv(sk, skb, &xmitq); 2153 added = sk_rmem_alloc_get(sk) - before; 2154 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt); 2155 2156 /* Send pending response/rejected messages, if any */ 2157 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2158 return 0; 2159 } 2160 2161 /** 2162 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 2163 * inputq and try adding them to socket or backlog queue 2164 * @inputq: list of incoming buffers with potentially different destinations 2165 * @sk: socket where the buffers should be enqueued 2166 * @dport: port number for the socket 2167 * 2168 * Caller must hold socket lock 2169 */ 2170 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 2171 u32 dport, struct sk_buff_head *xmitq) 2172 { 2173 unsigned long time_limit = jiffies + 2; 2174 struct sk_buff *skb; 2175 unsigned int lim; 2176 atomic_t *dcnt; 2177 u32 onode; 2178 2179 while (skb_queue_len(inputq)) { 2180 if (unlikely(time_after_eq(jiffies, time_limit))) 2181 return; 2182 2183 skb = tipc_skb_dequeue(inputq, dport); 2184 if (unlikely(!skb)) 2185 return; 2186 2187 /* Add message directly to receive queue if possible */ 2188 if (!sock_owned_by_user(sk)) { 2189 tipc_sk_filter_rcv(sk, skb, xmitq); 2190 continue; 2191 } 2192 2193 /* Try backlog, compensating for double-counted bytes */ 2194 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 2195 if (!sk->sk_backlog.len) 2196 atomic_set(dcnt, 0); 2197 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 2198 if (likely(!sk_add_backlog(sk, skb, lim))) 2199 
continue; 2200 2201 /* Overload => reject message back to sender */ 2202 onode = tipc_own_addr(sock_net(sk)); 2203 atomic_inc(&sk->sk_drops); 2204 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2205 __skb_queue_tail(xmitq, skb); 2206 break; 2207 } 2208 } 2209 2210 /** 2211 * tipc_sk_rcv - handle a chain of incoming buffers 2212 * @inputq: buffer list containing the buffers 2213 * Consumes all buffers in list until inputq is empty 2214 * Note: may be called in multiple threads referring to the same queue 2215 */ 2216 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 2217 { 2218 struct sk_buff_head xmitq; 2219 u32 dnode, dport = 0; 2220 int err; 2221 struct tipc_sock *tsk; 2222 struct sock *sk; 2223 struct sk_buff *skb; 2224 2225 __skb_queue_head_init(&xmitq); 2226 while (skb_queue_len(inputq)) { 2227 dport = tipc_skb_peek_port(inputq, dport); 2228 tsk = tipc_sk_lookup(net, dport); 2229 2230 if (likely(tsk)) { 2231 sk = &tsk->sk; 2232 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 2233 tipc_sk_enqueue(inputq, sk, dport, &xmitq); 2234 spin_unlock_bh(&sk->sk_lock.slock); 2235 } 2236 /* Send pending response/rejected messages, if any */ 2237 tipc_node_distr_xmit(sock_net(sk), &xmitq); 2238 sock_put(sk); 2239 continue; 2240 } 2241 /* No destination socket => dequeue skb if still there */ 2242 skb = tipc_skb_dequeue(inputq, dport); 2243 if (!skb) 2244 return; 2245 2246 /* Try secondary lookup if unresolved named message */ 2247 err = TIPC_ERR_NO_PORT; 2248 if (tipc_msg_lookup_dest(net, skb, &err)) 2249 goto xmit; 2250 2251 /* Prepare for message rejection */ 2252 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 2253 continue; 2254 xmit: 2255 dnode = msg_destnode(buf_msg(skb)); 2256 tipc_node_xmit_skb(net, skb, dnode, dport); 2257 } 2258 } 2259 2260 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 2261 { 2262 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2263 struct sock *sk = sock->sk; 2264 int done; 2265 2266 do { 2267 int err = sock_error(sk); 2268 if (err) 2269 return err; 2270 if (!*timeo_p) 2271 return -ETIMEDOUT; 2272 if (signal_pending(current)) 2273 return sock_intr_errno(*timeo_p); 2274 2275 add_wait_queue(sk_sleep(sk), &wait); 2276 done = sk_wait_event(sk, timeo_p, 2277 sk->sk_state != TIPC_CONNECTING, &wait); 2278 remove_wait_queue(sk_sleep(sk), &wait); 2279 } while (!done); 2280 return 0; 2281 } 2282 2283 /** 2284 * tipc_connect - establish a connection to another TIPC port 2285 * @sock: socket structure 2286 * @dest: socket address for destination port 2287 * @destlen: size of socket address data structure 2288 * @flags: file-related flags associated with socket 2289 * 2290 * Returns 0 on success, errno otherwise 2291 */ 2292 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 2293 int destlen, int flags) 2294 { 2295 struct sock *sk = sock->sk; 2296 struct tipc_sock *tsk = tipc_sk(sk); 2297 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 2298 struct msghdr m = {NULL,}; 2299 long timeout = (flags & O_NONBLOCK) ? 
0 : tsk->conn_timeout; 2300 int previous; 2301 int res = 0; 2302 2303 if (destlen != sizeof(struct sockaddr_tipc)) 2304 return -EINVAL; 2305 2306 lock_sock(sk); 2307 2308 if (tsk->group) { 2309 res = -EINVAL; 2310 goto exit; 2311 } 2312 2313 if (dst->family == AF_UNSPEC) { 2314 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc)); 2315 if (!tipc_sk_type_connectionless(sk)) 2316 res = -EINVAL; 2317 goto exit; 2318 } else if (dst->family != AF_TIPC) { 2319 res = -EINVAL; 2320 } 2321 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2322 res = -EINVAL; 2323 if (res) 2324 goto exit; 2325 2326 /* DGRAM/RDM connect(), just save the destaddr */ 2327 if (tipc_sk_type_connectionless(sk)) { 2328 memcpy(&tsk->peer, dest, destlen); 2329 goto exit; 2330 } 2331 2332 previous = sk->sk_state; 2333 2334 switch (sk->sk_state) { 2335 case TIPC_OPEN: 2336 /* Send a 'SYN-' to destination */ 2337 m.msg_name = dest; 2338 m.msg_namelen = destlen; 2339 2340 /* If connect is in non-blocking case, set MSG_DONTWAIT to 2341 * indicate send_msg() is never blocked. 2342 */ 2343 if (!timeout) 2344 m.msg_flags = MSG_DONTWAIT; 2345 2346 res = __tipc_sendmsg(sock, &m, 0); 2347 if ((res < 0) && (res != -EWOULDBLOCK)) 2348 goto exit; 2349 2350 /* Just entered TIPC_CONNECTING state; the only 2351 * difference is that return value in non-blocking 2352 * case is EINPROGRESS, rather than EALREADY. 2353 */ 2354 res = -EINPROGRESS; 2355 /* fall thru' */ 2356 case TIPC_CONNECTING: 2357 if (!timeout) { 2358 if (previous == TIPC_CONNECTING) 2359 res = -EALREADY; 2360 goto exit; 2361 } 2362 timeout = msecs_to_jiffies(timeout); 2363 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 2364 res = tipc_wait_for_connect(sock, &timeout); 2365 break; 2366 case TIPC_ESTABLISHED: 2367 res = -EISCONN; 2368 break; 2369 default: 2370 res = -EINVAL; 2371 } 2372 2373 exit: 2374 release_sock(sk); 2375 return res; 2376 } 2377 2378 /** 2379 * tipc_listen - allow socket to listen for incoming connections 2380 * @sock: socket structure 2381 * @len: (unused) 2382 * 2383 * Returns 0 on success, errno otherwise 2384 */ 2385 static int tipc_listen(struct socket *sock, int len) 2386 { 2387 struct sock *sk = sock->sk; 2388 int res; 2389 2390 lock_sock(sk); 2391 res = tipc_set_sk_state(sk, TIPC_LISTEN); 2392 release_sock(sk); 2393 2394 return res; 2395 } 2396 2397 static int tipc_wait_for_accept(struct socket *sock, long timeo) 2398 { 2399 struct sock *sk = sock->sk; 2400 DEFINE_WAIT(wait); 2401 int err; 2402 2403 /* True wake-one mechanism for incoming connections: only 2404 * one process gets woken up, not the 'whole herd'. 2405 * Since we do not 'race & poll' for established sockets 2406 * anymore, the common case will execute the loop only once. 
2407 */ 2408 for (;;) { 2409 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 2410 TASK_INTERRUPTIBLE); 2411 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 2412 release_sock(sk); 2413 timeo = schedule_timeout(timeo); 2414 lock_sock(sk); 2415 } 2416 err = 0; 2417 if (!skb_queue_empty(&sk->sk_receive_queue)) 2418 break; 2419 err = -EAGAIN; 2420 if (!timeo) 2421 break; 2422 err = sock_intr_errno(timeo); 2423 if (signal_pending(current)) 2424 break; 2425 } 2426 finish_wait(sk_sleep(sk), &wait); 2427 return err; 2428 } 2429 2430 /** 2431 * tipc_accept - wait for connection request 2432 * @sock: listening socket 2433 * @new_sock: new socket that is to be connected 2434 * @flags: file-related flags associated with socket 2435 * 2436 * Returns 0 on success, errno otherwise 2437 */ 2438 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, 2439 bool kern) 2440 { 2441 struct sock *new_sk, *sk = sock->sk; 2442 struct sk_buff *buf; 2443 struct tipc_sock *new_tsock; 2444 struct tipc_msg *msg; 2445 long timeo; 2446 int res; 2447 2448 lock_sock(sk); 2449 2450 if (sk->sk_state != TIPC_LISTEN) { 2451 res = -EINVAL; 2452 goto exit; 2453 } 2454 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2455 res = tipc_wait_for_accept(sock, timeo); 2456 if (res) 2457 goto exit; 2458 2459 buf = skb_peek(&sk->sk_receive_queue); 2460 2461 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern); 2462 if (res) 2463 goto exit; 2464 security_sk_clone(sock->sk, new_sock->sk); 2465 2466 new_sk = new_sock->sk; 2467 new_tsock = tipc_sk(new_sk); 2468 msg = buf_msg(buf); 2469 2470 /* we lock on new_sk; but lockdep sees the lock on sk */ 2471 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2472 2473 /* 2474 * Reject any stray messages received by new socket 2475 * before the socket lock was taken (very, very unlikely) 2476 */ 2477 tsk_rej_rx_queue(new_sk); 2478 2479 /* Connect new socket to its peer */ 2480 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2481 2482 tsk_set_importance(new_tsock, msg_importance(msg)); 2483 if (msg_named(msg)) { 2484 new_tsock->conn_type = msg_nametype(msg); 2485 new_tsock->conn_instance = msg_nameinst(msg); 2486 } 2487 2488 /* 2489 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 2490 * Respond to 'SYN+' by queuing it on new socket. 2491 */ 2492 if (!msg_data_sz(msg)) { 2493 struct msghdr m = {NULL,}; 2494 2495 tsk_advance_rx_queue(sk); 2496 __tipc_sendstream(new_sock, &m, 0); 2497 } else { 2498 __skb_dequeue(&sk->sk_receive_queue); 2499 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2500 skb_set_owner_r(buf, new_sk); 2501 } 2502 release_sock(new_sk); 2503 exit: 2504 release_sock(sk); 2505 return res; 2506 } 2507 2508 /** 2509 * tipc_shutdown - shutdown socket connection 2510 * @sock: socket structure 2511 * @how: direction to close (must be SHUT_RDWR) 2512 * 2513 * Terminates connection (if necessary), then purges socket's receive queue.
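*
* Userspace usage sketch (illustrative only; 'fd' is an assumed, connected
* AF_TIPC socket descriptor):
*
*	if (shutdown(fd, SHUT_RDWR) < 0)
*		perror("shutdown");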
2514 * 2515 * Returns 0 on success, errno otherwise 2516 */ 2517 static int tipc_shutdown(struct socket *sock, int how) 2518 { 2519 struct sock *sk = sock->sk; 2520 int res; 2521 2522 if (how != SHUT_RDWR) 2523 return -EINVAL; 2524 2525 lock_sock(sk); 2526 2527 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); 2528 sk->sk_shutdown = SEND_SHUTDOWN; 2529 2530 if (sk->sk_state == TIPC_DISCONNECTING) { 2531 /* Discard any unreceived messages */ 2532 __skb_queue_purge(&sk->sk_receive_queue); 2533 2534 /* Wake up anyone sleeping in poll */ 2535 sk->sk_state_change(sk); 2536 res = 0; 2537 } else { 2538 res = -ENOTCONN; 2539 } 2540 2541 release_sock(sk); 2542 return res; 2543 } 2544 2545 static void tipc_sk_timeout(struct timer_list *t) 2546 { 2547 struct sock *sk = from_timer(sk, t, sk_timer); 2548 struct tipc_sock *tsk = tipc_sk(sk); 2549 u32 peer_port = tsk_peer_port(tsk); 2550 u32 peer_node = tsk_peer_node(tsk); 2551 u32 own_node = tsk_own_node(tsk); 2552 u32 own_port = tsk->portid; 2553 struct net *net = sock_net(sk); 2554 struct sk_buff *skb = NULL; 2555 2556 bh_lock_sock(sk); 2557 if (!tipc_sk_connected(sk)) 2558 goto exit; 2559 2560 /* Try again later if socket is busy */ 2561 if (sock_owned_by_user(sk)) { 2562 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20); 2563 goto exit; 2564 } 2565 2566 if (tsk->probe_unacked) { 2567 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 2568 tipc_node_remove_conn(net, peer_node, peer_port); 2569 sk->sk_state_change(sk); 2570 goto exit; 2571 } 2572 /* Send new probe */ 2573 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, 2574 peer_node, own_node, peer_port, own_port, 2575 TIPC_OK); 2576 tsk->probe_unacked = true; 2577 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV); 2578 exit: 2579 bh_unlock_sock(sk); 2580 if (skb) 2581 tipc_node_xmit_skb(net, skb, peer_node, own_port); 2582 sock_put(sk); 2583 } 2584 2585 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2586 struct tipc_name_seq const *seq) 2587 { 2588 struct sock *sk = &tsk->sk; 2589 struct net *net = sock_net(sk); 2590 struct publication *publ; 2591 u32 key; 2592 2593 if (scope != TIPC_NODE_SCOPE) 2594 scope = TIPC_CLUSTER_SCOPE; 2595 2596 if (tipc_sk_connected(sk)) 2597 return -EINVAL; 2598 key = tsk->portid + tsk->pub_count + 1; 2599 if (key == tsk->portid) 2600 return -EADDRINUSE; 2601 2602 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2603 scope, tsk->portid, key); 2604 if (unlikely(!publ)) 2605 return -EINVAL; 2606 2607 list_add(&publ->binding_sock, &tsk->publications); 2608 tsk->pub_count++; 2609 tsk->published = 1; 2610 return 0; 2611 } 2612 2613 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2614 struct tipc_name_seq const *seq) 2615 { 2616 struct net *net = sock_net(&tsk->sk); 2617 struct publication *publ; 2618 struct publication *safe; 2619 int rc = -EINVAL; 2620 2621 if (scope != TIPC_NODE_SCOPE) 2622 scope = TIPC_CLUSTER_SCOPE; 2623 2624 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { 2625 if (seq) { 2626 if (publ->scope != scope) 2627 continue; 2628 if (publ->type != seq->type) 2629 continue; 2630 if (publ->lower != seq->lower) 2631 continue; 2632 if (publ->upper != seq->upper) 2633 break; 2634 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2635 publ->upper, publ->key); 2636 rc = 0; 2637 break; 2638 } 2639 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2640 publ->upper, publ->key); 2641 rc = 0; 2642 } 2643 if (list_empty(&tsk->publications)) 2644 tsk->published = 0; 2645 return rc; 
2646 } 2647 2648 /* tipc_sk_reinit: set non-zero address in all existing sockets 2649 * when we go from standalone to network mode. 2650 */ 2651 void tipc_sk_reinit(struct net *net) 2652 { 2653 struct tipc_net *tn = net_generic(net, tipc_net_id); 2654 struct rhashtable_iter iter; 2655 struct tipc_sock *tsk; 2656 struct tipc_msg *msg; 2657 2658 rhashtable_walk_enter(&tn->sk_rht, &iter); 2659 2660 do { 2661 rhashtable_walk_start(&iter); 2662 2663 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { 2664 spin_lock_bh(&tsk->sk.sk_lock.slock); 2665 msg = &tsk->phdr; 2666 msg_set_prevnode(msg, tipc_own_addr(net)); 2667 msg_set_orignode(msg, tipc_own_addr(net)); 2668 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2669 } 2670 2671 rhashtable_walk_stop(&iter); 2672 } while (tsk == ERR_PTR(-EAGAIN)); 2673 } 2674 2675 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2676 { 2677 struct tipc_net *tn = net_generic(net, tipc_net_id); 2678 struct tipc_sock *tsk; 2679 2680 rcu_read_lock(); 2681 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2682 if (tsk) 2683 sock_hold(&tsk->sk); 2684 rcu_read_unlock(); 2685 2686 return tsk; 2687 } 2688 2689 static int tipc_sk_insert(struct tipc_sock *tsk) 2690 { 2691 struct sock *sk = &tsk->sk; 2692 struct net *net = sock_net(sk); 2693 struct tipc_net *tn = net_generic(net, tipc_net_id); 2694 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2695 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2696 2697 while (remaining--) { 2698 portid++; 2699 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2700 portid = TIPC_MIN_PORT; 2701 tsk->portid = portid; 2702 sock_hold(&tsk->sk); 2703 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2704 tsk_rht_params)) 2705 return 0; 2706 sock_put(&tsk->sk); 2707 } 2708 2709 return -1; 2710 } 2711 2712 static void tipc_sk_remove(struct tipc_sock *tsk) 2713 { 2714 struct sock *sk = &tsk->sk; 2715 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2716 2717 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2718 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 2719 __sock_put(sk); 2720 } 2721 } 2722 2723 static const struct rhashtable_params tsk_rht_params = { 2724 .nelem_hint = 192, 2725 .head_offset = offsetof(struct tipc_sock, node), 2726 .key_offset = offsetof(struct tipc_sock, portid), 2727 .key_len = sizeof(u32), /* portid */ 2728 .max_size = 1048576, 2729 .min_size = 256, 2730 .automatic_shrinking = true, 2731 }; 2732 2733 int tipc_sk_rht_init(struct net *net) 2734 { 2735 struct tipc_net *tn = net_generic(net, tipc_net_id); 2736 2737 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2738 } 2739 2740 void tipc_sk_rht_destroy(struct net *net) 2741 { 2742 struct tipc_net *tn = net_generic(net, tipc_net_id); 2743 2744 /* Wait for socket readers to complete */ 2745 synchronize_net(); 2746 2747 rhashtable_destroy(&tn->sk_rht); 2748 } 2749 2750 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) 2751 { 2752 struct net *net = sock_net(&tsk->sk); 2753 struct tipc_group *grp = tsk->group; 2754 struct tipc_msg *hdr = &tsk->phdr; 2755 struct tipc_name_seq seq; 2756 int rc; 2757 2758 if (mreq->type < TIPC_RESERVED_TYPES) 2759 return -EACCES; 2760 if (mreq->scope > TIPC_NODE_SCOPE) 2761 return -EINVAL; 2762 if (grp) 2763 return -EACCES; 2764 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open); 2765 if (!grp) 2766 return -ENOMEM; 2767 tsk->group = grp; 2768 msg_set_lookup_scope(hdr, mreq->scope); 2769 
msg_set_nametype(hdr, mreq->type); 2770 msg_set_dest_droppable(hdr, true); 2771 seq.type = mreq->type; 2772 seq.lower = mreq->instance; 2773 seq.upper = seq.lower; 2774 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); 2775 rc = tipc_sk_publish(tsk, mreq->scope, &seq); 2776 if (rc) { 2777 tipc_group_delete(net, grp); 2778 tsk->group = NULL; 2779 return rc; 2780 } 2781 /* Eliminate any risk that a broadcast overtakes sent JOINs */ 2782 tsk->mc_method.rcast = true; 2783 tsk->mc_method.mandatory = true; 2784 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); 2785 return rc; 2786 } 2787 2788 static int tipc_sk_leave(struct tipc_sock *tsk) 2789 { 2790 struct net *net = sock_net(&tsk->sk); 2791 struct tipc_group *grp = tsk->group; 2792 struct tipc_name_seq seq; 2793 int scope; 2794 2795 if (!grp) 2796 return -EINVAL; 2797 tipc_group_self(grp, &seq, &scope); 2798 tipc_group_delete(net, grp); 2799 tsk->group = NULL; 2800 tipc_sk_withdraw(tsk, scope, &seq); 2801 return 0; 2802 } 2803 2804 /** 2805 * tipc_setsockopt - set socket option 2806 * @sock: socket structure 2807 * @lvl: option level 2808 * @opt: option identifier 2809 * @ov: pointer to new option value 2810 * @ol: length of option value 2811 * 2812 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2813 * (to ease compatibility). 2814 * 2815 * Returns 0 on success, errno otherwise 2816 */ 2817 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2818 char __user *ov, unsigned int ol) 2819 { 2820 struct sock *sk = sock->sk; 2821 struct tipc_sock *tsk = tipc_sk(sk); 2822 struct tipc_group_req mreq; 2823 u32 value = 0; 2824 int res = 0; 2825 2826 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2827 return 0; 2828 if (lvl != SOL_TIPC) 2829 return -ENOPROTOOPT; 2830 2831 switch (opt) { 2832 case TIPC_IMPORTANCE: 2833 case TIPC_SRC_DROPPABLE: 2834 case TIPC_DEST_DROPPABLE: 2835 case TIPC_CONN_TIMEOUT: 2836 if (ol < sizeof(value)) 2837 return -EINVAL; 2838 if (get_user(value, (u32 __user *)ov)) 2839 return -EFAULT; 2840 break; 2841 case TIPC_GROUP_JOIN: 2842 if (ol < sizeof(mreq)) 2843 return -EINVAL; 2844 if (copy_from_user(&mreq, ov, sizeof(mreq))) 2845 return -EFAULT; 2846 break; 2847 default: 2848 if (ov || ol) 2849 return -EINVAL; 2850 } 2851 2852 lock_sock(sk); 2853 2854 switch (opt) { 2855 case TIPC_IMPORTANCE: 2856 res = tsk_set_importance(tsk, value); 2857 break; 2858 case TIPC_SRC_DROPPABLE: 2859 if (sock->type != SOCK_STREAM) 2860 tsk_set_unreliable(tsk, value); 2861 else 2862 res = -ENOPROTOOPT; 2863 break; 2864 case TIPC_DEST_DROPPABLE: 2865 tsk_set_unreturnable(tsk, value); 2866 break; 2867 case TIPC_CONN_TIMEOUT: 2868 tipc_sk(sk)->conn_timeout = value; 2869 break; 2870 case TIPC_MCAST_BROADCAST: 2871 tsk->mc_method.rcast = false; 2872 tsk->mc_method.mandatory = true; 2873 break; 2874 case TIPC_MCAST_REPLICAST: 2875 tsk->mc_method.rcast = true; 2876 tsk->mc_method.mandatory = true; 2877 break; 2878 case TIPC_GROUP_JOIN: 2879 res = tipc_sk_join(tsk, &mreq); 2880 break; 2881 case TIPC_GROUP_LEAVE: 2882 res = tipc_sk_leave(tsk); 2883 break; 2884 default: 2885 res = -EINVAL; 2886 } 2887 2888 release_sock(sk); 2889 2890 return res; 2891 } 2892 2893 /** 2894 * tipc_getsockopt - get socket option 2895 * @sock: socket structure 2896 * @lvl: option level 2897 * @opt: option identifier 2898 * @ov: receptacle for option value 2899 * @ol: receptacle for length of option value 2900 * 2901 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 2902 * (to ease compatibility). 
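*
* Userspace usage sketch (illustrative only; 'fd' is an assumed AF_TIPC
* socket descriptor):
*
*	__u32 timeout_ms;
*	socklen_t optlen = sizeof(timeout_ms);
*
*	if (!getsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout_ms, &optlen))
*		printf("connect timeout: %u ms\n", timeout_ms);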
2903 * 2904 * Returns 0 on success, errno otherwise 2905 */ 2906 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2907 char __user *ov, int __user *ol) 2908 { 2909 struct sock *sk = sock->sk; 2910 struct tipc_sock *tsk = tipc_sk(sk); 2911 struct tipc_name_seq seq; 2912 int len, scope; 2913 u32 value; 2914 int res; 2915 2916 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2917 return put_user(0, ol); 2918 if (lvl != SOL_TIPC) 2919 return -ENOPROTOOPT; 2920 res = get_user(len, ol); 2921 if (res) 2922 return res; 2923 2924 lock_sock(sk); 2925 2926 switch (opt) { 2927 case TIPC_IMPORTANCE: 2928 value = tsk_importance(tsk); 2929 break; 2930 case TIPC_SRC_DROPPABLE: 2931 value = tsk_unreliable(tsk); 2932 break; 2933 case TIPC_DEST_DROPPABLE: 2934 value = tsk_unreturnable(tsk); 2935 break; 2936 case TIPC_CONN_TIMEOUT: 2937 value = tsk->conn_timeout; 2938 /* no need to set "res", since already 0 at this point */ 2939 break; 2940 case TIPC_NODE_RECVQ_DEPTH: 2941 value = 0; /* was tipc_queue_size, now obsolete */ 2942 break; 2943 case TIPC_SOCK_RECVQ_DEPTH: 2944 value = skb_queue_len(&sk->sk_receive_queue); 2945 break; 2946 case TIPC_GROUP_JOIN: 2947 seq.type = 0; 2948 if (tsk->group) 2949 tipc_group_self(tsk->group, &seq, &scope); 2950 value = seq.type; 2951 break; 2952 default: 2953 res = -EINVAL; 2954 } 2955 2956 release_sock(sk); 2957 2958 if (res) 2959 return res; /* "get" failed */ 2960 2961 if (len < sizeof(value)) 2962 return -EINVAL; 2963 2964 if (copy_to_user(ov, &value, sizeof(value))) 2965 return -EFAULT; 2966 2967 return put_user(sizeof(value), ol); 2968 } 2969 2970 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2971 { 2972 struct sock *sk = sock->sk; 2973 struct tipc_sioc_ln_req lnr; 2974 void __user *argp = (void __user *)arg; 2975 2976 switch (cmd) { 2977 case SIOCGETLINKNAME: 2978 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2979 return -EFAULT; 2980 if (!tipc_node_get_linkname(sock_net(sk), 2981 lnr.bearer_id & 0xffff, lnr.peer, 2982 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2983 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2984 return -EFAULT; 2985 return 0; 2986 } 2987 return -EADDRNOTAVAIL; 2988 default: 2989 return -ENOIOCTLCMD; 2990 } 2991 } 2992 2993 static int tipc_socketpair(struct socket *sock1, struct socket *sock2) 2994 { 2995 struct tipc_sock *tsk2 = tipc_sk(sock2->sk); 2996 struct tipc_sock *tsk1 = tipc_sk(sock1->sk); 2997 u32 onode = tipc_own_addr(sock_net(sock1->sk)); 2998 2999 tsk1->peer.family = AF_TIPC; 3000 tsk1->peer.addrtype = TIPC_ADDR_ID; 3001 tsk1->peer.scope = TIPC_NODE_SCOPE; 3002 tsk1->peer.addr.id.ref = tsk2->portid; 3003 tsk1->peer.addr.id.node = onode; 3004 tsk2->peer.family = AF_TIPC; 3005 tsk2->peer.addrtype = TIPC_ADDR_ID; 3006 tsk2->peer.scope = TIPC_NODE_SCOPE; 3007 tsk2->peer.addr.id.ref = tsk1->portid; 3008 tsk2->peer.addr.id.node = onode; 3009 3010 tipc_sk_finish_conn(tsk1, tsk2->portid, onode); 3011 tipc_sk_finish_conn(tsk2, tsk1->portid, onode); 3012 return 0; 3013 } 3014 3015 /* Protocol switches for the various types of TIPC sockets */ 3016 3017 static const struct proto_ops msg_ops = { 3018 .owner = THIS_MODULE, 3019 .family = AF_TIPC, 3020 .release = tipc_release, 3021 .bind = tipc_bind, 3022 .connect = tipc_connect, 3023 .socketpair = tipc_socketpair, 3024 .accept = sock_no_accept, 3025 .getname = tipc_getname, 3026 .poll_mask = tipc_poll_mask, 3027 .ioctl = tipc_ioctl, 3028 .listen = sock_no_listen, 3029 .shutdown = tipc_shutdown, 3030 .setsockopt = tipc_setsockopt, 3031 .getsockopt = 
tipc_getsockopt, 3032 .sendmsg = tipc_sendmsg, 3033 .recvmsg = tipc_recvmsg, 3034 .mmap = sock_no_mmap, 3035 .sendpage = sock_no_sendpage 3036 }; 3037 3038 static const struct proto_ops packet_ops = { 3039 .owner = THIS_MODULE, 3040 .family = AF_TIPC, 3041 .release = tipc_release, 3042 .bind = tipc_bind, 3043 .connect = tipc_connect, 3044 .socketpair = tipc_socketpair, 3045 .accept = tipc_accept, 3046 .getname = tipc_getname, 3047 .poll_mask = tipc_poll_mask, 3048 .ioctl = tipc_ioctl, 3049 .listen = tipc_listen, 3050 .shutdown = tipc_shutdown, 3051 .setsockopt = tipc_setsockopt, 3052 .getsockopt = tipc_getsockopt, 3053 .sendmsg = tipc_send_packet, 3054 .recvmsg = tipc_recvmsg, 3055 .mmap = sock_no_mmap, 3056 .sendpage = sock_no_sendpage 3057 }; 3058 3059 static const struct proto_ops stream_ops = { 3060 .owner = THIS_MODULE, 3061 .family = AF_TIPC, 3062 .release = tipc_release, 3063 .bind = tipc_bind, 3064 .connect = tipc_connect, 3065 .socketpair = tipc_socketpair, 3066 .accept = tipc_accept, 3067 .getname = tipc_getname, 3068 .poll_mask = tipc_poll_mask, 3069 .ioctl = tipc_ioctl, 3070 .listen = tipc_listen, 3071 .shutdown = tipc_shutdown, 3072 .setsockopt = tipc_setsockopt, 3073 .getsockopt = tipc_getsockopt, 3074 .sendmsg = tipc_sendstream, 3075 .recvmsg = tipc_recvstream, 3076 .mmap = sock_no_mmap, 3077 .sendpage = sock_no_sendpage 3078 }; 3079 3080 static const struct net_proto_family tipc_family_ops = { 3081 .owner = THIS_MODULE, 3082 .family = AF_TIPC, 3083 .create = tipc_sk_create 3084 }; 3085 3086 static struct proto tipc_proto = { 3087 .name = "TIPC", 3088 .owner = THIS_MODULE, 3089 .obj_size = sizeof(struct tipc_sock), 3090 .sysctl_rmem = sysctl_tipc_rmem 3091 }; 3092 3093 /** 3094 * tipc_socket_init - initialize TIPC socket interface 3095 * 3096 * Returns 0 on success, errno otherwise 3097 */ 3098 int tipc_socket_init(void) 3099 { 3100 int res; 3101 3102 res = proto_register(&tipc_proto, 1); 3103 if (res) { 3104 pr_err("Failed to register TIPC protocol type\n"); 3105 goto out; 3106 } 3107 3108 res = sock_register(&tipc_family_ops); 3109 if (res) { 3110 pr_err("Failed to register TIPC socket type\n"); 3111 proto_unregister(&tipc_proto); 3112 goto out; 3113 } 3114 out: 3115 return res; 3116 } 3117 3118 /** 3119 * tipc_socket_stop - stop TIPC socket interface 3120 */ 3121 void tipc_socket_stop(void) 3122 { 3123 sock_unregister(tipc_family_ops.family); 3124 proto_unregister(&tipc_proto); 3125 } 3126 3127 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3128 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 3129 { 3130 u32 peer_node; 3131 u32 peer_port; 3132 struct nlattr *nest; 3133 3134 peer_node = tsk_peer_node(tsk); 3135 peer_port = tsk_peer_port(tsk); 3136 3137 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3138 3139 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3140 goto msg_full; 3141 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 3142 goto msg_full; 3143 3144 if (tsk->conn_type != 0) { 3145 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 3146 goto msg_full; 3147 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 3148 goto msg_full; 3149 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 3150 goto msg_full; 3151 } 3152 nla_nest_end(skb, nest); 3153 3154 return 0; 3155 3156 msg_full: 3157 nla_nest_cancel(skb, nest); 3158 3159 return -EMSGSIZE; 3160 } 3161 3162 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock 3163 *tsk) 3164 { 3165 struct net *net = sock_net(skb->sk); 3166 struct sock *sk = &tsk->sk; 3167 3168 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) || 3169 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net))) 3170 return -EMSGSIZE; 3171 3172 if (tipc_sk_connected(sk)) { 3173 if (__tipc_nl_add_sk_con(skb, tsk)) 3174 return -EMSGSIZE; 3175 } else if (!list_empty(&tsk->publications)) { 3176 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 3177 return -EMSGSIZE; 3178 } 3179 return 0; 3180 } 3181 3182 /* Caller should hold socket lock for the passed tipc socket. */ 3183 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 3184 struct tipc_sock *tsk) 3185 { 3186 struct nlattr *attrs; 3187 void *hdr; 3188 3189 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3190 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 3191 if (!hdr) 3192 goto msg_cancel; 3193 3194 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3195 if (!attrs) 3196 goto genlmsg_cancel; 3197 3198 if (__tipc_nl_add_sk_info(skb, tsk)) 3199 goto attr_msg_cancel; 3200 3201 nla_nest_end(skb, attrs); 3202 genlmsg_end(skb, hdr); 3203 3204 return 0; 3205 3206 attr_msg_cancel: 3207 nla_nest_cancel(skb, attrs); 3208 genlmsg_cancel: 3209 genlmsg_cancel(skb, hdr); 3210 msg_cancel: 3211 return -EMSGSIZE; 3212 } 3213 3214 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, 3215 int (*skb_handler)(struct sk_buff *skb, 3216 struct netlink_callback *cb, 3217 struct tipc_sock *tsk)) 3218 { 3219 struct net *net = sock_net(skb->sk); 3220 struct tipc_net *tn = tipc_net(net); 3221 const struct bucket_table *tbl; 3222 u32 prev_portid = cb->args[1]; 3223 u32 tbl_id = cb->args[0]; 3224 struct rhash_head *pos; 3225 struct tipc_sock *tsk; 3226 int err; 3227 3228 rcu_read_lock(); 3229 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 3230 for (; tbl_id < tbl->size; tbl_id++) { 3231 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { 3232 spin_lock_bh(&tsk->sk.sk_lock.slock); 3233 if (prev_portid && prev_portid != tsk->portid) { 3234 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3235 continue; 3236 } 3237 3238 err = skb_handler(skb, cb, tsk); 3239 if (err) { 3240 prev_portid = tsk->portid; 3241 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3242 goto out; 3243 } 3244 3245 prev_portid = 0; 3246 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3247 } 3248 } 3249 out: 3250 rcu_read_unlock(); 3251 cb->args[0] = tbl_id; 3252 cb->args[1] = prev_portid; 3253 3254 return skb->len; 3255 } 3256 EXPORT_SYMBOL(tipc_nl_sk_walk); 3257 3258 int tipc_sk_fill_sock_diag(struct sk_buff 
*skb, struct netlink_callback *cb, 3259 struct tipc_sock *tsk, u32 sk_filter_state, 3260 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3261 { 3262 struct sock *sk = &tsk->sk; 3263 struct nlattr *attrs; 3264 struct nlattr *stat; 3265 3266 /*filter response w.r.t sk_state*/ 3267 if (!(sk_filter_state & (1 << sk->sk_state))) 3268 return 0; 3269 3270 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 3271 if (!attrs) 3272 goto msg_cancel; 3273 3274 if (__tipc_nl_add_sk_info(skb, tsk)) 3275 goto attr_msg_cancel; 3276 3277 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) || 3278 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3279 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3280 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3281 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk), 3282 sock_i_uid(sk))) || 3283 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3284 tipc_diag_gen_cookie(sk), 3285 TIPC_NLA_SOCK_PAD)) 3286 goto attr_msg_cancel; 3287 3288 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT); 3289 if (!stat) 3290 goto attr_msg_cancel; 3291 3292 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3293 skb_queue_len(&sk->sk_receive_queue)) || 3294 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3295 skb_queue_len(&sk->sk_write_queue)) || 3296 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP, 3297 atomic_read(&sk->sk_drops))) 3298 goto stat_msg_cancel; 3299 3300 if (tsk->cong_link_cnt && 3301 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG)) 3302 goto stat_msg_cancel; 3303 3304 if (tsk_conn_cong(tsk) && 3305 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG)) 3306 goto stat_msg_cancel; 3307 3308 nla_nest_end(skb, stat); 3309 nla_nest_end(skb, attrs); 3310 3311 return 0; 3312 3313 stat_msg_cancel: 3314 nla_nest_cancel(skb, stat); 3315 attr_msg_cancel: 3316 nla_nest_cancel(skb, attrs); 3317 msg_cancel: 3318 return -EMSGSIZE; 3319 } 3320 EXPORT_SYMBOL(tipc_sk_fill_sock_diag); 3321 3322 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 3323 { 3324 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk); 3325 } 3326 3327 /* Caller should hold socket lock for the passed tipc socket. */ 3328 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 3329 struct netlink_callback *cb, 3330 struct publication *publ) 3331 { 3332 void *hdr; 3333 struct nlattr *attrs; 3334 3335 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3336 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 3337 if (!hdr) 3338 goto msg_cancel; 3339 3340 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 3341 if (!attrs) 3342 goto genlmsg_cancel; 3343 3344 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 3345 goto attr_msg_cancel; 3346 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 3347 goto attr_msg_cancel; 3348 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 3349 goto attr_msg_cancel; 3350 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 3351 goto attr_msg_cancel; 3352 3353 nla_nest_end(skb, attrs); 3354 genlmsg_end(skb, hdr); 3355 3356 return 0; 3357 3358 attr_msg_cancel: 3359 nla_nest_cancel(skb, attrs); 3360 genlmsg_cancel: 3361 genlmsg_cancel(skb, hdr); 3362 msg_cancel: 3363 return -EMSGSIZE; 3364 } 3365 3366 /* Caller should hold socket lock for the passed tipc socket. 
*/ 3367 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 3368 struct netlink_callback *cb, 3369 struct tipc_sock *tsk, u32 *last_publ) 3370 { 3371 int err; 3372 struct publication *p; 3373 3374 if (*last_publ) { 3375 list_for_each_entry(p, &tsk->publications, binding_sock) { 3376 if (p->key == *last_publ) 3377 break; 3378 } 3379 if (p->key != *last_publ) { 3380 /* We never set seq or call nl_dump_check_consistent(); 3381 * this means that setting prev_seq here will cause the 3382 * consistency check to fail in the netlink callback 3383 * handler, resulting in the last NLMSG_DONE message 3384 * having the NLM_F_DUMP_INTR flag set. 3385 */ 3386 cb->prev_seq = 1; 3387 *last_publ = 0; 3388 return -EPIPE; 3389 } 3390 } else { 3391 p = list_first_entry(&tsk->publications, struct publication, 3392 binding_sock); 3393 } 3394 3395 list_for_each_entry_from(p, &tsk->publications, binding_sock) { 3396 err = __tipc_nl_add_sk_publ(skb, cb, p); 3397 if (err) { 3398 *last_publ = p->key; 3399 return err; 3400 } 3401 } 3402 *last_publ = 0; 3403 3404 return 0; 3405 } 3406 3407 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 3408 { 3409 int err; 3410 u32 tsk_portid = cb->args[0]; 3411 u32 last_publ = cb->args[1]; 3412 u32 done = cb->args[2]; 3413 struct net *net = sock_net(skb->sk); 3414 struct tipc_sock *tsk; 3415 3416 if (!tsk_portid) { 3417 struct nlattr **attrs; 3418 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 3419 3420 err = tipc_nlmsg_parse(cb->nlh, &attrs); 3421 if (err) 3422 return err; 3423 3424 if (!attrs[TIPC_NLA_SOCK]) 3425 return -EINVAL; 3426 3427 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 3428 attrs[TIPC_NLA_SOCK], 3429 tipc_nl_sock_policy, NULL); 3430 if (err) 3431 return err; 3432 3433 if (!sock[TIPC_NLA_SOCK_REF]) 3434 return -EINVAL; 3435 3436 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 3437 } 3438 3439 if (done) 3440 return 0; 3441 3442 tsk = tipc_sk_lookup(net, tsk_portid); 3443 if (!tsk) 3444 return -EINVAL; 3445 3446 lock_sock(&tsk->sk); 3447 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 3448 if (!err) 3449 done = 1; 3450 release_sock(&tsk->sk); 3451 sock_put(&tsk->sk); 3452 3453 cb->args[0] = tsk_portid; 3454 cb->args[1] = last_publ; 3455 cb->args[2] = done; 3456 3457 return skb->len; 3458 } 3459
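/*
* Userspace usage sketch (illustrative only, not part of the kernel build):
* joining a communication group via setsockopt(TIPC_GROUP_JOIN), which is
* handled by tipc_setsockopt() and tipc_sk_join() above. The service type
* and instance values below are arbitrary examples.
*
*	struct tipc_group_req req = {
*		.type     = 4711,
*		.instance = 17,
*		.scope    = TIPC_CLUSTER_SCOPE,
*	};
*	int fd = socket(AF_TIPC, SOCK_RDM, 0);
*
*	if (setsockopt(fd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req)) < 0)
*		perror("TIPC_GROUP_JOIN");
*/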