1 /* 2 * net/tipc/socket.c: TIPC socket API 3 * 4 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <linux/rhashtable.h> 38 #include "core.h" 39 #include "name_table.h" 40 #include "node.h" 41 #include "link.h" 42 #include "name_distr.h" 43 #include "socket.h" 44 #include "bcast.h" 45 46 #define SS_LISTENING -1 /* socket is listening */ 47 #define SS_READY -2 /* socket is connectionless */ 48 49 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 50 #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */ 51 #define TIPC_FWD_MSG 1 52 #define TIPC_CONN_OK 0 53 #define TIPC_CONN_PROBING 1 54 #define TIPC_MAX_PORT 0xffffffff 55 #define TIPC_MIN_PORT 1 56 57 /** 58 * struct tipc_sock - TIPC socket structure 59 * @sk: socket - interacts with 'port' and with user via the socket API 60 * @connected: non-zero if port is currently connected to a peer port 61 * @conn_type: TIPC type used when connection was established 62 * @conn_instance: TIPC instance used when connection was established 63 * @published: non-zero if port has one or more associated names 64 * @max_pkt: maximum packet size "hint" used when building messages sent by port 65 * @portid: unique port identity in TIPC socket hash table 66 * @phdr: preformatted message header used when sending messages 67 * @port_list: adjacent ports in TIPC's global list of ports 68 * @publications: list of publications for port 69 * @pub_count: total # of publications port has made during its lifetime 70 * @probing_state: 71 * @probing_intv: 72 * @conn_timeout: the time we can wait for an unresponded setup request 73 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue 74 * @link_cong: non-zero if owner must sleep because of link congestion 75 * @sent_unacked: # messages sent by socket, and not yet acked by peer 76 * @rcv_unacked: # messages read by user, but not yet acked back to peer 77 * @remote: 'connected' peer for dgram/rdm 78 * @node: hash table node 79 * @rcu: rcu struct for tipc_sock 80 */ 81 struct tipc_sock { 82 struct sock sk; 83 int connected; 84 u32 conn_type; 85 u32 conn_instance; 86 int published; 87 u32 max_pkt; 88 u32 portid; 89 struct tipc_msg phdr; 90 struct list_head sock_list; 91 struct list_head publications; 92 u32 pub_count; 93 u32 probing_state; 94 unsigned long probing_intv; 95 uint conn_timeout; 96 atomic_t dupl_rcvcnt; 97 bool link_cong; 98 uint sent_unacked; 99 uint rcv_unacked; 100 struct sockaddr_tipc remote; 101 struct rhash_head node; 102 struct rcu_head rcu; 103 }; 104 105 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); 106 static void tipc_data_ready(struct sock *sk); 107 static void tipc_write_space(struct sock *sk); 108 static int tipc_release(struct socket *sock); 109 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 110 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); 111 static void tipc_sk_timeout(unsigned long data); 112 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 113 struct tipc_name_seq const *seq); 114 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 115 struct tipc_name_seq const *seq); 116 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); 117 static int tipc_sk_insert(struct tipc_sock *tsk); 118 static void tipc_sk_remove(struct tipc_sock *tsk); 119 static int __tipc_send_stream(struct socket *sock, struct msghdr *m, 120 size_t dsz); 121 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); 122 123 static const struct proto_ops packet_ops; 124 static const struct proto_ops stream_ops; 125 static const struct proto_ops msg_ops; 126 static struct proto tipc_proto; 127 128 static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { 129 [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, 130 [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 }, 131 [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 }, 132 [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED }, 133 [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } 134 }; 135 136 static const struct rhashtable_params tsk_rht_params; 137 138 /* 139 * Revised TIPC socket locking policy: 140 * 141 * Most socket operations take the standard socket lock when they start 142 * and hold it until they finish (or until they need to sleep). Acquiring 143 * this lock grants the owner exclusive access to the fields of the socket 144 * data structures, with the exception of the backlog queue. A few socket 145 * operations can be done without taking the socket lock because they only 146 * read socket information that never changes during the life of the socket. 147 * 148 * Socket operations may acquire the lock for the associated TIPC port if they 149 * need to perform an operation on the port. If any routine needs to acquire 150 * both the socket lock and the port lock it must take the socket lock first 151 * to avoid the risk of deadlock. 152 * 153 * The dispatcher handling incoming messages cannot grab the socket lock in 154 * the standard fashion, since invoked it runs at the BH level and cannot block. 155 * Instead, it checks to see if the socket lock is currently owned by someone, 156 * and either handles the message itself or adds it to the socket's backlog 157 * queue; in the latter case the queued message is processed once the process 158 * owning the socket lock releases it. 159 * 160 * NOTE: Releasing the socket lock while an operation is sleeping overcomes 161 * the problem of a blocked socket operation preventing any other operations 162 * from occurring. However, applications must be careful if they have 163 * multiple threads trying to send (or receive) on the same socket, as these 164 * operations might interfere with each other. For example, doing a connect 165 * and a receive at the same time might allow the receive to consume the 166 * ACK message meant for the connect. While additional work could be done 167 * to try and overcome this, it doesn't seem to be worthwhile at the present. 168 * 169 * NOTE: Releasing the socket lock while an operation is sleeping also ensures 170 * that another operation that must be performed in a non-blocking manner is 171 * not delayed for very long because the lock has already been taken. 172 * 173 * NOTE: This code assumes that certain fields of a port/socket pair are 174 * constant over its lifetime; such fields can be examined without taking 175 * the socket lock and/or port lock, and do not need to be re-read even 176 * after resuming processing after waiting. These fields include: 177 * - socket type 178 * - pointer to socket sk structure (aka tipc_sock structure) 179 * - pointer to port structure 180 * - port reference 181 */ 182 183 static u32 tsk_own_node(struct tipc_sock *tsk) 184 { 185 return msg_prevnode(&tsk->phdr); 186 } 187 188 static u32 tsk_peer_node(struct tipc_sock *tsk) 189 { 190 return msg_destnode(&tsk->phdr); 191 } 192 193 static u32 tsk_peer_port(struct tipc_sock *tsk) 194 { 195 return msg_destport(&tsk->phdr); 196 } 197 198 static bool tsk_unreliable(struct tipc_sock *tsk) 199 { 200 return msg_src_droppable(&tsk->phdr) != 0; 201 } 202 203 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) 204 { 205 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); 206 } 207 208 static bool tsk_unreturnable(struct tipc_sock *tsk) 209 { 210 return msg_dest_droppable(&tsk->phdr) != 0; 211 } 212 213 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) 214 { 215 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0); 216 } 217 218 static int tsk_importance(struct tipc_sock *tsk) 219 { 220 return msg_importance(&tsk->phdr); 221 } 222 223 static int tsk_set_importance(struct tipc_sock *tsk, int imp) 224 { 225 if (imp > TIPC_CRITICAL_IMPORTANCE) 226 return -EINVAL; 227 msg_set_importance(&tsk->phdr, (u32)imp); 228 return 0; 229 } 230 231 static struct tipc_sock *tipc_sk(const struct sock *sk) 232 { 233 return container_of(sk, struct tipc_sock, sk); 234 } 235 236 static int tsk_conn_cong(struct tipc_sock *tsk) 237 { 238 return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN; 239 } 240 241 /** 242 * tsk_advance_rx_queue - discard first buffer in socket receive queue 243 * 244 * Caller must hold socket lock 245 */ 246 static void tsk_advance_rx_queue(struct sock *sk) 247 { 248 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 249 } 250 251 /* tipc_sk_respond() : send response message back to sender 252 */ 253 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) 254 { 255 u32 selector; 256 u32 dnode; 257 u32 onode = tipc_own_addr(sock_net(sk)); 258 259 if (!tipc_msg_reverse(onode, &skb, err)) 260 return; 261 262 dnode = msg_destnode(buf_msg(skb)); 263 selector = msg_origport(buf_msg(skb)); 264 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); 265 } 266 267 /** 268 * tsk_rej_rx_queue - reject all buffers in socket receive queue 269 * 270 * Caller must hold socket lock 271 */ 272 static void tsk_rej_rx_queue(struct sock *sk) 273 { 274 struct sk_buff *skb; 275 276 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) 277 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 278 } 279 280 /* tsk_peer_msg - verify if message was sent by connected port's peer 281 * 282 * Handles cases where the node's network address has changed from 283 * the default of <0.0.0> to its configured setting. 284 */ 285 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 286 { 287 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id); 288 u32 peer_port = tsk_peer_port(tsk); 289 u32 orig_node; 290 u32 peer_node; 291 292 if (unlikely(!tsk->connected)) 293 return false; 294 295 if (unlikely(msg_origport(msg) != peer_port)) 296 return false; 297 298 orig_node = msg_orignode(msg); 299 peer_node = tsk_peer_node(tsk); 300 301 if (likely(orig_node == peer_node)) 302 return true; 303 304 if (!orig_node && (peer_node == tn->own_addr)) 305 return true; 306 307 if (!peer_node && (orig_node == tn->own_addr)) 308 return true; 309 310 return false; 311 } 312 313 /** 314 * tipc_sk_create - create a TIPC socket 315 * @net: network namespace (must be default network) 316 * @sock: pre-allocated socket structure 317 * @protocol: protocol indicator (must be 0) 318 * @kern: caused by kernel or by userspace? 319 * 320 * This routine creates additional data structures used by the TIPC socket, 321 * initializes them, and links them together. 322 * 323 * Returns 0 on success, errno otherwise 324 */ 325 static int tipc_sk_create(struct net *net, struct socket *sock, 326 int protocol, int kern) 327 { 328 struct tipc_net *tn; 329 const struct proto_ops *ops; 330 socket_state state; 331 struct sock *sk; 332 struct tipc_sock *tsk; 333 struct tipc_msg *msg; 334 335 /* Validate arguments */ 336 if (unlikely(protocol != 0)) 337 return -EPROTONOSUPPORT; 338 339 switch (sock->type) { 340 case SOCK_STREAM: 341 ops = &stream_ops; 342 state = SS_UNCONNECTED; 343 break; 344 case SOCK_SEQPACKET: 345 ops = &packet_ops; 346 state = SS_UNCONNECTED; 347 break; 348 case SOCK_DGRAM: 349 case SOCK_RDM: 350 ops = &msg_ops; 351 state = SS_READY; 352 break; 353 default: 354 return -EPROTOTYPE; 355 } 356 357 /* Allocate socket's protocol area */ 358 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern); 359 if (sk == NULL) 360 return -ENOMEM; 361 362 tsk = tipc_sk(sk); 363 tsk->max_pkt = MAX_PKT_DEFAULT; 364 INIT_LIST_HEAD(&tsk->publications); 365 msg = &tsk->phdr; 366 tn = net_generic(sock_net(sk), tipc_net_id); 367 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, 368 NAMED_H_SIZE, 0); 369 370 /* Finish initializing socket data structures */ 371 sock->ops = ops; 372 sock->state = state; 373 sock_init_data(sock, sk); 374 if (tipc_sk_insert(tsk)) { 375 pr_warn("Socket create failed; port numbrer exhausted\n"); 376 return -EINVAL; 377 } 378 msg_set_origport(msg, tsk->portid); 379 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); 380 sk->sk_backlog_rcv = tipc_backlog_rcv; 381 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 382 sk->sk_data_ready = tipc_data_ready; 383 sk->sk_write_space = tipc_write_space; 384 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 385 tsk->sent_unacked = 0; 386 atomic_set(&tsk->dupl_rcvcnt, 0); 387 388 if (sock->state == SS_READY) { 389 tsk_set_unreturnable(tsk, true); 390 if (sock->type == SOCK_DGRAM) 391 tsk_set_unreliable(tsk, true); 392 } 393 return 0; 394 } 395 396 static void tipc_sk_callback(struct rcu_head *head) 397 { 398 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); 399 400 sock_put(&tsk->sk); 401 } 402 403 /** 404 * tipc_release - destroy a TIPC socket 405 * @sock: socket to destroy 406 * 407 * This routine cleans up any messages that are still queued on the socket. 408 * For DGRAM and RDM socket types, all queued messages are rejected. 409 * For SEQPACKET and STREAM socket types, the first message is rejected 410 * and any others are discarded. (If the first message on a STREAM socket 411 * is partially-read, it is discarded and the next one is rejected instead.) 412 * 413 * NOTE: Rejected messages are not necessarily returned to the sender! They 414 * are returned or discarded according to the "destination droppable" setting 415 * specified for the message by the sender. 416 * 417 * Returns 0 on success, errno otherwise 418 */ 419 static int tipc_release(struct socket *sock) 420 { 421 struct sock *sk = sock->sk; 422 struct net *net; 423 struct tipc_sock *tsk; 424 struct sk_buff *skb; 425 u32 dnode; 426 427 /* 428 * Exit if socket isn't fully initialized (occurs when a failed accept() 429 * releases a pre-allocated child socket that was never used) 430 */ 431 if (sk == NULL) 432 return 0; 433 434 net = sock_net(sk); 435 tsk = tipc_sk(sk); 436 lock_sock(sk); 437 438 /* 439 * Reject all unreceived messages, except on an active connection 440 * (which disconnects locally & sends a 'FIN+' to peer) 441 */ 442 dnode = tsk_peer_node(tsk); 443 while (sock->state != SS_DISCONNECTING) { 444 skb = __skb_dequeue(&sk->sk_receive_queue); 445 if (skb == NULL) 446 break; 447 if (TIPC_SKB_CB(skb)->handle != NULL) 448 kfree_skb(skb); 449 else { 450 if ((sock->state == SS_CONNECTING) || 451 (sock->state == SS_CONNECTED)) { 452 sock->state = SS_DISCONNECTING; 453 tsk->connected = 0; 454 tipc_node_remove_conn(net, dnode, tsk->portid); 455 } 456 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); 457 } 458 } 459 460 tipc_sk_withdraw(tsk, 0, NULL); 461 sk_stop_timer(sk, &sk->sk_timer); 462 tipc_sk_remove(tsk); 463 if (tsk->connected) { 464 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 465 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, 466 tsk_own_node(tsk), tsk_peer_port(tsk), 467 tsk->portid, TIPC_ERR_NO_PORT); 468 if (skb) 469 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 470 tipc_node_remove_conn(net, dnode, tsk->portid); 471 } 472 473 /* Discard any remaining (connection-based) messages in receive queue */ 474 __skb_queue_purge(&sk->sk_receive_queue); 475 476 /* Reject any messages that accumulated in backlog queue */ 477 sock->state = SS_DISCONNECTING; 478 release_sock(sk); 479 480 call_rcu(&tsk->rcu, tipc_sk_callback); 481 sock->sk = NULL; 482 483 return 0; 484 } 485 486 /** 487 * tipc_bind - associate or disassocate TIPC name(s) with a socket 488 * @sock: socket structure 489 * @uaddr: socket address describing name(s) and desired operation 490 * @uaddr_len: size of socket address data structure 491 * 492 * Name and name sequence binding is indicated using a positive scope value; 493 * a negative scope value unbinds the specified name. Specifying no name 494 * (i.e. a socket address length of 0) unbinds all names from the socket. 495 * 496 * Returns 0 on success, errno otherwise 497 * 498 * NOTE: This routine doesn't need to take the socket lock since it doesn't 499 * access any non-constant socket information. 500 */ 501 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, 502 int uaddr_len) 503 { 504 struct sock *sk = sock->sk; 505 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 506 struct tipc_sock *tsk = tipc_sk(sk); 507 int res = -EINVAL; 508 509 lock_sock(sk); 510 if (unlikely(!uaddr_len)) { 511 res = tipc_sk_withdraw(tsk, 0, NULL); 512 goto exit; 513 } 514 515 if (uaddr_len < sizeof(struct sockaddr_tipc)) { 516 res = -EINVAL; 517 goto exit; 518 } 519 if (addr->family != AF_TIPC) { 520 res = -EAFNOSUPPORT; 521 goto exit; 522 } 523 524 if (addr->addrtype == TIPC_ADDR_NAME) 525 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 526 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 527 res = -EAFNOSUPPORT; 528 goto exit; 529 } 530 531 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && 532 (addr->addr.nameseq.type != TIPC_TOP_SRV) && 533 (addr->addr.nameseq.type != TIPC_CFG_SRV)) { 534 res = -EACCES; 535 goto exit; 536 } 537 538 res = (addr->scope > 0) ? 539 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : 540 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); 541 exit: 542 release_sock(sk); 543 return res; 544 } 545 546 /** 547 * tipc_getname - get port ID of socket or peer socket 548 * @sock: socket structure 549 * @uaddr: area for returned socket address 550 * @uaddr_len: area for returned length of socket address 551 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID 552 * 553 * Returns 0 on success, errno otherwise 554 * 555 * NOTE: This routine doesn't need to take the socket lock since it only 556 * accesses socket information that is unchanging (or which changes in 557 * a completely predictable manner). 558 */ 559 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, 560 int *uaddr_len, int peer) 561 { 562 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 563 struct tipc_sock *tsk = tipc_sk(sock->sk); 564 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id); 565 566 memset(addr, 0, sizeof(*addr)); 567 if (peer) { 568 if ((sock->state != SS_CONNECTED) && 569 ((peer != 2) || (sock->state != SS_DISCONNECTING))) 570 return -ENOTCONN; 571 addr->addr.id.ref = tsk_peer_port(tsk); 572 addr->addr.id.node = tsk_peer_node(tsk); 573 } else { 574 addr->addr.id.ref = tsk->portid; 575 addr->addr.id.node = tn->own_addr; 576 } 577 578 *uaddr_len = sizeof(*addr); 579 addr->addrtype = TIPC_ADDR_ID; 580 addr->family = AF_TIPC; 581 addr->scope = 0; 582 addr->addr.name.domain = 0; 583 584 return 0; 585 } 586 587 /** 588 * tipc_poll - read and possibly block on pollmask 589 * @file: file structure associated with the socket 590 * @sock: socket for which to calculate the poll bits 591 * @wait: ??? 592 * 593 * Returns pollmask value 594 * 595 * COMMENTARY: 596 * It appears that the usual socket locking mechanisms are not useful here 597 * since the pollmask info is potentially out-of-date the moment this routine 598 * exits. TCP and other protocols seem to rely on higher level poll routines 599 * to handle any preventable race conditions, so TIPC will do the same ... 600 * 601 * TIPC sets the returned events as follows: 602 * 603 * socket state flags set 604 * ------------ --------- 605 * unconnected no read flags 606 * POLLOUT if port is not congested 607 * 608 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue 609 * no write flags 610 * 611 * connected POLLIN/POLLRDNORM if data in rx queue 612 * POLLOUT if port is not congested 613 * 614 * disconnecting POLLIN/POLLRDNORM/POLLHUP 615 * no write flags 616 * 617 * listening POLLIN if SYN in rx queue 618 * no write flags 619 * 620 * ready POLLIN/POLLRDNORM if data in rx queue 621 * [connectionless] POLLOUT (since port cannot be congested) 622 * 623 * IMPORTANT: The fact that a read or write operation is indicated does NOT 624 * imply that the operation will succeed, merely that it should be performed 625 * and will not block. 626 */ 627 static unsigned int tipc_poll(struct file *file, struct socket *sock, 628 poll_table *wait) 629 { 630 struct sock *sk = sock->sk; 631 struct tipc_sock *tsk = tipc_sk(sk); 632 u32 mask = 0; 633 634 sock_poll_wait(file, sk_sleep(sk), wait); 635 636 switch ((int)sock->state) { 637 case SS_UNCONNECTED: 638 if (!tsk->link_cong) 639 mask |= POLLOUT; 640 break; 641 case SS_READY: 642 case SS_CONNECTED: 643 if (!tsk->link_cong && !tsk_conn_cong(tsk)) 644 mask |= POLLOUT; 645 /* fall thru' */ 646 case SS_CONNECTING: 647 case SS_LISTENING: 648 if (!skb_queue_empty(&sk->sk_receive_queue)) 649 mask |= (POLLIN | POLLRDNORM); 650 break; 651 case SS_DISCONNECTING: 652 mask = (POLLIN | POLLRDNORM | POLLHUP); 653 break; 654 } 655 656 return mask; 657 } 658 659 /** 660 * tipc_sendmcast - send multicast message 661 * @sock: socket structure 662 * @seq: destination address 663 * @msg: message to send 664 * @dsz: total length of message data 665 * @timeo: timeout to wait for wakeup 666 * 667 * Called from function tipc_sendmsg(), which has done all sanity checks 668 * Returns the number of bytes sent on success, or errno 669 */ 670 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 671 struct msghdr *msg, size_t dsz, long timeo) 672 { 673 struct sock *sk = sock->sk; 674 struct tipc_sock *tsk = tipc_sk(sk); 675 struct net *net = sock_net(sk); 676 struct tipc_msg *mhdr = &tsk->phdr; 677 struct sk_buff_head *pktchain = &sk->sk_write_queue; 678 struct iov_iter save = msg->msg_iter; 679 uint mtu; 680 int rc; 681 682 msg_set_type(mhdr, TIPC_MCAST_MSG); 683 msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE); 684 msg_set_destport(mhdr, 0); 685 msg_set_destnode(mhdr, 0); 686 msg_set_nametype(mhdr, seq->type); 687 msg_set_namelower(mhdr, seq->lower); 688 msg_set_nameupper(mhdr, seq->upper); 689 msg_set_hdr_sz(mhdr, MCAST_H_SIZE); 690 691 new_mtu: 692 mtu = tipc_bclink_get_mtu(); 693 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain); 694 if (unlikely(rc < 0)) 695 return rc; 696 697 do { 698 rc = tipc_bclink_xmit(net, pktchain); 699 if (likely(!rc)) 700 return dsz; 701 702 if (rc == -ELINKCONG) { 703 tsk->link_cong = 1; 704 rc = tipc_wait_for_sndmsg(sock, &timeo); 705 if (!rc) 706 continue; 707 } 708 __skb_queue_purge(pktchain); 709 if (rc == -EMSGSIZE) { 710 msg->msg_iter = save; 711 goto new_mtu; 712 } 713 break; 714 } while (1); 715 return rc; 716 } 717 718 /** 719 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets 720 * @arrvq: queue with arriving messages, to be cloned after destination lookup 721 * @inputq: queue with cloned messages, delivered to socket after dest lookup 722 * 723 * Multi-threaded: parallel calls with reference to same queues may occur 724 */ 725 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, 726 struct sk_buff_head *inputq) 727 { 728 struct tipc_msg *msg; 729 struct tipc_plist dports; 730 u32 portid; 731 u32 scope = TIPC_CLUSTER_SCOPE; 732 struct sk_buff_head tmpq; 733 uint hsz; 734 struct sk_buff *skb, *_skb; 735 736 __skb_queue_head_init(&tmpq); 737 tipc_plist_init(&dports); 738 739 skb = tipc_skb_peek(arrvq, &inputq->lock); 740 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { 741 msg = buf_msg(skb); 742 hsz = skb_headroom(skb) + msg_hdr_sz(msg); 743 744 if (in_own_node(net, msg_orignode(msg))) 745 scope = TIPC_NODE_SCOPE; 746 747 /* Create destination port list and message clones: */ 748 tipc_nametbl_mc_translate(net, 749 msg_nametype(msg), msg_namelower(msg), 750 msg_nameupper(msg), scope, &dports); 751 portid = tipc_plist_pop(&dports); 752 for (; portid; portid = tipc_plist_pop(&dports)) { 753 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC); 754 if (_skb) { 755 msg_set_destport(buf_msg(_skb), portid); 756 __skb_queue_tail(&tmpq, _skb); 757 continue; 758 } 759 pr_warn("Failed to clone mcast rcv buffer\n"); 760 } 761 /* Append to inputq if not already done by other thread */ 762 spin_lock_bh(&inputq->lock); 763 if (skb_peek(arrvq) == skb) { 764 skb_queue_splice_tail_init(&tmpq, inputq); 765 kfree_skb(__skb_dequeue(arrvq)); 766 } 767 spin_unlock_bh(&inputq->lock); 768 __skb_queue_purge(&tmpq); 769 kfree_skb(skb); 770 } 771 tipc_sk_rcv(net, inputq); 772 } 773 774 /** 775 * tipc_sk_proto_rcv - receive a connection mng protocol message 776 * @tsk: receiving socket 777 * @skb: pointer to message buffer. 778 */ 779 static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) 780 { 781 struct sock *sk = &tsk->sk; 782 struct tipc_msg *hdr = buf_msg(skb); 783 int mtyp = msg_type(hdr); 784 int conn_cong; 785 786 /* Ignore if connection cannot be validated: */ 787 if (!tsk_peer_msg(tsk, hdr)) 788 goto exit; 789 790 tsk->probing_state = TIPC_CONN_OK; 791 792 if (mtyp == CONN_PROBE) { 793 msg_set_type(hdr, CONN_PROBE_REPLY); 794 tipc_sk_respond(sk, skb, TIPC_OK); 795 return; 796 } else if (mtyp == CONN_ACK) { 797 conn_cong = tsk_conn_cong(tsk); 798 tsk->sent_unacked -= msg_msgcnt(hdr); 799 if (conn_cong) 800 sk->sk_write_space(sk); 801 } else if (mtyp != CONN_PROBE_REPLY) { 802 pr_warn("Received unknown CONN_PROTO msg\n"); 803 } 804 exit: 805 kfree_skb(skb); 806 } 807 808 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) 809 { 810 struct sock *sk = sock->sk; 811 struct tipc_sock *tsk = tipc_sk(sk); 812 DEFINE_WAIT(wait); 813 int done; 814 815 do { 816 int err = sock_error(sk); 817 if (err) 818 return err; 819 if (sock->state == SS_DISCONNECTING) 820 return -EPIPE; 821 if (!*timeo_p) 822 return -EAGAIN; 823 if (signal_pending(current)) 824 return sock_intr_errno(*timeo_p); 825 826 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 827 done = sk_wait_event(sk, timeo_p, !tsk->link_cong); 828 finish_wait(sk_sleep(sk), &wait); 829 } while (!done); 830 return 0; 831 } 832 833 /** 834 * tipc_sendmsg - send message in connectionless manner 835 * @sock: socket structure 836 * @m: message to send 837 * @dsz: amount of user data to be sent 838 * 839 * Message must have an destination specified explicitly. 840 * Used for SOCK_RDM and SOCK_DGRAM messages, 841 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 842 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 843 * 844 * Returns the number of bytes sent on success, or errno otherwise 845 */ 846 static int tipc_sendmsg(struct socket *sock, 847 struct msghdr *m, size_t dsz) 848 { 849 struct sock *sk = sock->sk; 850 int ret; 851 852 lock_sock(sk); 853 ret = __tipc_sendmsg(sock, m, dsz); 854 release_sock(sk); 855 856 return ret; 857 } 858 859 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) 860 { 861 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 862 struct sock *sk = sock->sk; 863 struct tipc_sock *tsk = tipc_sk(sk); 864 struct net *net = sock_net(sk); 865 struct tipc_msg *mhdr = &tsk->phdr; 866 u32 dnode, dport; 867 struct sk_buff_head *pktchain = &sk->sk_write_queue; 868 struct sk_buff *skb; 869 struct tipc_name_seq *seq; 870 struct iov_iter save; 871 u32 mtu; 872 long timeo; 873 int rc; 874 875 if (dsz > TIPC_MAX_USER_MSG_SIZE) 876 return -EMSGSIZE; 877 if (unlikely(!dest)) { 878 if (tsk->connected && sock->state == SS_READY) 879 dest = &tsk->remote; 880 else 881 return -EDESTADDRREQ; 882 } else if (unlikely(m->msg_namelen < sizeof(*dest)) || 883 dest->family != AF_TIPC) { 884 return -EINVAL; 885 } 886 if (unlikely(sock->state != SS_READY)) { 887 if (sock->state == SS_LISTENING) 888 return -EPIPE; 889 if (sock->state != SS_UNCONNECTED) 890 return -EISCONN; 891 if (tsk->published) 892 return -EOPNOTSUPP; 893 if (dest->addrtype == TIPC_ADDR_NAME) { 894 tsk->conn_type = dest->addr.name.name.type; 895 tsk->conn_instance = dest->addr.name.name.instance; 896 } 897 } 898 seq = &dest->addr.nameseq; 899 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 900 901 if (dest->addrtype == TIPC_ADDR_MCAST) { 902 return tipc_sendmcast(sock, seq, m, dsz, timeo); 903 } else if (dest->addrtype == TIPC_ADDR_NAME) { 904 u32 type = dest->addr.name.name.type; 905 u32 inst = dest->addr.name.name.instance; 906 u32 domain = dest->addr.name.domain; 907 908 dnode = domain; 909 msg_set_type(mhdr, TIPC_NAMED_MSG); 910 msg_set_hdr_sz(mhdr, NAMED_H_SIZE); 911 msg_set_nametype(mhdr, type); 912 msg_set_nameinst(mhdr, inst); 913 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain)); 914 dport = tipc_nametbl_translate(net, type, inst, &dnode); 915 msg_set_destnode(mhdr, dnode); 916 msg_set_destport(mhdr, dport); 917 if (unlikely(!dport && !dnode)) 918 return -EHOSTUNREACH; 919 } else if (dest->addrtype == TIPC_ADDR_ID) { 920 dnode = dest->addr.id.node; 921 msg_set_type(mhdr, TIPC_DIRECT_MSG); 922 msg_set_lookup_scope(mhdr, 0); 923 msg_set_destnode(mhdr, dnode); 924 msg_set_destport(mhdr, dest->addr.id.ref); 925 msg_set_hdr_sz(mhdr, BASIC_H_SIZE); 926 } 927 928 save = m->msg_iter; 929 new_mtu: 930 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 931 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); 932 if (rc < 0) 933 return rc; 934 935 do { 936 skb = skb_peek(pktchain); 937 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; 938 rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid); 939 if (likely(!rc)) { 940 if (sock->state != SS_READY) 941 sock->state = SS_CONNECTING; 942 return dsz; 943 } 944 if (rc == -ELINKCONG) { 945 tsk->link_cong = 1; 946 rc = tipc_wait_for_sndmsg(sock, &timeo); 947 if (!rc) 948 continue; 949 } 950 __skb_queue_purge(pktchain); 951 if (rc == -EMSGSIZE) { 952 m->msg_iter = save; 953 goto new_mtu; 954 } 955 break; 956 } while (1); 957 958 return rc; 959 } 960 961 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) 962 { 963 struct sock *sk = sock->sk; 964 struct tipc_sock *tsk = tipc_sk(sk); 965 DEFINE_WAIT(wait); 966 int done; 967 968 do { 969 int err = sock_error(sk); 970 if (err) 971 return err; 972 if (sock->state == SS_DISCONNECTING) 973 return -EPIPE; 974 else if (sock->state != SS_CONNECTED) 975 return -ENOTCONN; 976 if (!*timeo_p) 977 return -EAGAIN; 978 if (signal_pending(current)) 979 return sock_intr_errno(*timeo_p); 980 981 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 982 done = sk_wait_event(sk, timeo_p, 983 (!tsk->link_cong && 984 !tsk_conn_cong(tsk)) || 985 !tsk->connected); 986 finish_wait(sk_sleep(sk), &wait); 987 } while (!done); 988 return 0; 989 } 990 991 /** 992 * tipc_send_stream - send stream-oriented data 993 * @sock: socket structure 994 * @m: data to send 995 * @dsz: total length of data to be transmitted 996 * 997 * Used for SOCK_STREAM data. 998 * 999 * Returns the number of bytes sent on success (or partial success), 1000 * or errno if no data sent 1001 */ 1002 static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) 1003 { 1004 struct sock *sk = sock->sk; 1005 int ret; 1006 1007 lock_sock(sk); 1008 ret = __tipc_send_stream(sock, m, dsz); 1009 release_sock(sk); 1010 1011 return ret; 1012 } 1013 1014 static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) 1015 { 1016 struct sock *sk = sock->sk; 1017 struct net *net = sock_net(sk); 1018 struct tipc_sock *tsk = tipc_sk(sk); 1019 struct tipc_msg *mhdr = &tsk->phdr; 1020 struct sk_buff_head *pktchain = &sk->sk_write_queue; 1021 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1022 u32 portid = tsk->portid; 1023 int rc = -EINVAL; 1024 long timeo; 1025 u32 dnode; 1026 uint mtu, send, sent = 0; 1027 struct iov_iter save; 1028 1029 /* Handle implied connection establishment */ 1030 if (unlikely(dest)) { 1031 rc = __tipc_sendmsg(sock, m, dsz); 1032 if (dsz && (dsz == rc)) 1033 tsk->sent_unacked = 1; 1034 return rc; 1035 } 1036 if (dsz > (uint)INT_MAX) 1037 return -EMSGSIZE; 1038 1039 if (unlikely(sock->state != SS_CONNECTED)) { 1040 if (sock->state == SS_DISCONNECTING) 1041 return -EPIPE; 1042 else 1043 return -ENOTCONN; 1044 } 1045 1046 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1047 dnode = tsk_peer_node(tsk); 1048 1049 next: 1050 save = m->msg_iter; 1051 mtu = tsk->max_pkt; 1052 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); 1053 rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain); 1054 if (unlikely(rc < 0)) 1055 return rc; 1056 do { 1057 if (likely(!tsk_conn_cong(tsk))) { 1058 rc = tipc_node_xmit(net, pktchain, dnode, portid); 1059 if (likely(!rc)) { 1060 tsk->sent_unacked++; 1061 sent += send; 1062 if (sent == dsz) 1063 return dsz; 1064 goto next; 1065 } 1066 if (rc == -EMSGSIZE) { 1067 __skb_queue_purge(pktchain); 1068 tsk->max_pkt = tipc_node_get_mtu(net, dnode, 1069 portid); 1070 m->msg_iter = save; 1071 goto next; 1072 } 1073 if (rc != -ELINKCONG) 1074 break; 1075 1076 tsk->link_cong = 1; 1077 } 1078 rc = tipc_wait_for_sndpkt(sock, &timeo); 1079 } while (!rc); 1080 1081 __skb_queue_purge(pktchain); 1082 return sent ? sent : rc; 1083 } 1084 1085 /** 1086 * tipc_send_packet - send a connection-oriented message 1087 * @sock: socket structure 1088 * @m: message to send 1089 * @dsz: length of data to be transmitted 1090 * 1091 * Used for SOCK_SEQPACKET messages. 1092 * 1093 * Returns the number of bytes sent on success, or errno otherwise 1094 */ 1095 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz) 1096 { 1097 if (dsz > TIPC_MAX_USER_MSG_SIZE) 1098 return -EMSGSIZE; 1099 1100 return tipc_send_stream(sock, m, dsz); 1101 } 1102 1103 /* tipc_sk_finish_conn - complete the setup of a connection 1104 */ 1105 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1106 u32 peer_node) 1107 { 1108 struct sock *sk = &tsk->sk; 1109 struct net *net = sock_net(sk); 1110 struct tipc_msg *msg = &tsk->phdr; 1111 1112 msg_set_destnode(msg, peer_node); 1113 msg_set_destport(msg, peer_port); 1114 msg_set_type(msg, TIPC_CONN_MSG); 1115 msg_set_lookup_scope(msg, 0); 1116 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1117 1118 tsk->probing_intv = CONN_PROBING_INTERVAL; 1119 tsk->probing_state = TIPC_CONN_OK; 1120 tsk->connected = 1; 1121 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); 1122 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1123 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1124 } 1125 1126 /** 1127 * set_orig_addr - capture sender's address for received message 1128 * @m: descriptor for message info 1129 * @msg: received message header 1130 * 1131 * Note: Address is not captured if not requested by receiver. 1132 */ 1133 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) 1134 { 1135 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name); 1136 1137 if (addr) { 1138 addr->family = AF_TIPC; 1139 addr->addrtype = TIPC_ADDR_ID; 1140 memset(&addr->addr, 0, sizeof(addr->addr)); 1141 addr->addr.id.ref = msg_origport(msg); 1142 addr->addr.id.node = msg_orignode(msg); 1143 addr->addr.name.domain = 0; /* could leave uninitialized */ 1144 addr->scope = 0; /* could leave uninitialized */ 1145 m->msg_namelen = sizeof(struct sockaddr_tipc); 1146 } 1147 } 1148 1149 /** 1150 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message 1151 * @m: descriptor for message info 1152 * @msg: received message header 1153 * @tsk: TIPC port associated with message 1154 * 1155 * Note: Ancillary data is not captured if not requested by receiver. 1156 * 1157 * Returns 0 if successful, otherwise errno 1158 */ 1159 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1160 struct tipc_sock *tsk) 1161 { 1162 u32 anc_data[3]; 1163 u32 err; 1164 u32 dest_type; 1165 int has_name; 1166 int res; 1167 1168 if (likely(m->msg_controllen == 0)) 1169 return 0; 1170 1171 /* Optionally capture errored message object(s) */ 1172 err = msg ? msg_errcode(msg) : 0; 1173 if (unlikely(err)) { 1174 anc_data[0] = err; 1175 anc_data[1] = msg_data_sz(msg); 1176 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data); 1177 if (res) 1178 return res; 1179 if (anc_data[1]) { 1180 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 1181 msg_data(msg)); 1182 if (res) 1183 return res; 1184 } 1185 } 1186 1187 /* Optionally capture message destination object */ 1188 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 1189 switch (dest_type) { 1190 case TIPC_NAMED_MSG: 1191 has_name = 1; 1192 anc_data[0] = msg_nametype(msg); 1193 anc_data[1] = msg_namelower(msg); 1194 anc_data[2] = msg_namelower(msg); 1195 break; 1196 case TIPC_MCAST_MSG: 1197 has_name = 1; 1198 anc_data[0] = msg_nametype(msg); 1199 anc_data[1] = msg_namelower(msg); 1200 anc_data[2] = msg_nameupper(msg); 1201 break; 1202 case TIPC_CONN_MSG: 1203 has_name = (tsk->conn_type != 0); 1204 anc_data[0] = tsk->conn_type; 1205 anc_data[1] = tsk->conn_instance; 1206 anc_data[2] = tsk->conn_instance; 1207 break; 1208 default: 1209 has_name = 0; 1210 } 1211 if (has_name) { 1212 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data); 1213 if (res) 1214 return res; 1215 } 1216 1217 return 0; 1218 } 1219 1220 static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) 1221 { 1222 struct net *net = sock_net(&tsk->sk); 1223 struct sk_buff *skb = NULL; 1224 struct tipc_msg *msg; 1225 u32 peer_port = tsk_peer_port(tsk); 1226 u32 dnode = tsk_peer_node(tsk); 1227 1228 if (!tsk->connected) 1229 return; 1230 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, 1231 dnode, tsk_own_node(tsk), peer_port, 1232 tsk->portid, TIPC_OK); 1233 if (!skb) 1234 return; 1235 msg = buf_msg(skb); 1236 msg_set_msgcnt(msg, ack); 1237 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg)); 1238 } 1239 1240 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1241 { 1242 struct sock *sk = sock->sk; 1243 DEFINE_WAIT(wait); 1244 long timeo = *timeop; 1245 int err; 1246 1247 for (;;) { 1248 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1249 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1250 if (sock->state == SS_DISCONNECTING) { 1251 err = -ENOTCONN; 1252 break; 1253 } 1254 release_sock(sk); 1255 timeo = schedule_timeout(timeo); 1256 lock_sock(sk); 1257 } 1258 err = 0; 1259 if (!skb_queue_empty(&sk->sk_receive_queue)) 1260 break; 1261 err = -EAGAIN; 1262 if (!timeo) 1263 break; 1264 err = sock_intr_errno(timeo); 1265 if (signal_pending(current)) 1266 break; 1267 } 1268 finish_wait(sk_sleep(sk), &wait); 1269 *timeop = timeo; 1270 return err; 1271 } 1272 1273 /** 1274 * tipc_recvmsg - receive packet-oriented message 1275 * @m: descriptor for message info 1276 * @buf_len: total size of user buffer area 1277 * @flags: receive flags 1278 * 1279 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 1280 * If the complete message doesn't fit in user area, truncate it. 1281 * 1282 * Returns size of returned message data, errno otherwise 1283 */ 1284 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, 1285 int flags) 1286 { 1287 struct sock *sk = sock->sk; 1288 struct tipc_sock *tsk = tipc_sk(sk); 1289 struct sk_buff *buf; 1290 struct tipc_msg *msg; 1291 long timeo; 1292 unsigned int sz; 1293 u32 err; 1294 int res; 1295 1296 /* Catch invalid receive requests */ 1297 if (unlikely(!buf_len)) 1298 return -EINVAL; 1299 1300 lock_sock(sk); 1301 1302 if (unlikely(sock->state == SS_UNCONNECTED)) { 1303 res = -ENOTCONN; 1304 goto exit; 1305 } 1306 1307 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1308 restart: 1309 1310 /* Look for a message in receive queue; wait if necessary */ 1311 res = tipc_wait_for_rcvmsg(sock, &timeo); 1312 if (res) 1313 goto exit; 1314 1315 /* Look at first message in receive queue */ 1316 buf = skb_peek(&sk->sk_receive_queue); 1317 msg = buf_msg(buf); 1318 sz = msg_data_sz(msg); 1319 err = msg_errcode(msg); 1320 1321 /* Discard an empty non-errored message & try again */ 1322 if ((!sz) && (!err)) { 1323 tsk_advance_rx_queue(sk); 1324 goto restart; 1325 } 1326 1327 /* Capture sender's address (optional) */ 1328 set_orig_addr(m, msg); 1329 1330 /* Capture ancillary data (optional) */ 1331 res = tipc_sk_anc_data_recv(m, msg, tsk); 1332 if (res) 1333 goto exit; 1334 1335 /* Capture message data (if valid) & compute return value (always) */ 1336 if (!err) { 1337 if (unlikely(buf_len < sz)) { 1338 sz = buf_len; 1339 m->msg_flags |= MSG_TRUNC; 1340 } 1341 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz); 1342 if (res) 1343 goto exit; 1344 res = sz; 1345 } else { 1346 if ((sock->state == SS_READY) || 1347 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)) 1348 res = 0; 1349 else 1350 res = -ECONNRESET; 1351 } 1352 1353 /* Consume received message (optional) */ 1354 if (likely(!(flags & MSG_PEEK))) { 1355 if ((sock->state != SS_READY) && 1356 (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { 1357 tipc_sk_send_ack(tsk, tsk->rcv_unacked); 1358 tsk->rcv_unacked = 0; 1359 } 1360 tsk_advance_rx_queue(sk); 1361 } 1362 exit: 1363 release_sock(sk); 1364 return res; 1365 } 1366 1367 /** 1368 * tipc_recv_stream - receive stream-oriented data 1369 * @m: descriptor for message info 1370 * @buf_len: total size of user buffer area 1371 * @flags: receive flags 1372 * 1373 * Used for SOCK_STREAM messages only. If not enough data is available 1374 * will optionally wait for more; never truncates data. 1375 * 1376 * Returns size of returned message data, errno otherwise 1377 */ 1378 static int tipc_recv_stream(struct socket *sock, struct msghdr *m, 1379 size_t buf_len, int flags) 1380 { 1381 struct sock *sk = sock->sk; 1382 struct tipc_sock *tsk = tipc_sk(sk); 1383 struct sk_buff *buf; 1384 struct tipc_msg *msg; 1385 long timeo; 1386 unsigned int sz; 1387 int sz_to_copy, target, needed; 1388 int sz_copied = 0; 1389 u32 err; 1390 int res = 0; 1391 1392 /* Catch invalid receive attempts */ 1393 if (unlikely(!buf_len)) 1394 return -EINVAL; 1395 1396 lock_sock(sk); 1397 1398 if (unlikely(sock->state == SS_UNCONNECTED)) { 1399 res = -ENOTCONN; 1400 goto exit; 1401 } 1402 1403 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); 1404 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1405 1406 restart: 1407 /* Look for a message in receive queue; wait if necessary */ 1408 res = tipc_wait_for_rcvmsg(sock, &timeo); 1409 if (res) 1410 goto exit; 1411 1412 /* Look at first message in receive queue */ 1413 buf = skb_peek(&sk->sk_receive_queue); 1414 msg = buf_msg(buf); 1415 sz = msg_data_sz(msg); 1416 err = msg_errcode(msg); 1417 1418 /* Discard an empty non-errored message & try again */ 1419 if ((!sz) && (!err)) { 1420 tsk_advance_rx_queue(sk); 1421 goto restart; 1422 } 1423 1424 /* Optionally capture sender's address & ancillary data of first msg */ 1425 if (sz_copied == 0) { 1426 set_orig_addr(m, msg); 1427 res = tipc_sk_anc_data_recv(m, msg, tsk); 1428 if (res) 1429 goto exit; 1430 } 1431 1432 /* Capture message data (if valid) & compute return value (always) */ 1433 if (!err) { 1434 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); 1435 1436 sz -= offset; 1437 needed = (buf_len - sz_copied); 1438 sz_to_copy = (sz <= needed) ? sz : needed; 1439 1440 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset, 1441 m, sz_to_copy); 1442 if (res) 1443 goto exit; 1444 1445 sz_copied += sz_to_copy; 1446 1447 if (sz_to_copy < sz) { 1448 if (!(flags & MSG_PEEK)) 1449 TIPC_SKB_CB(buf)->handle = 1450 (void *)(unsigned long)(offset + sz_to_copy); 1451 goto exit; 1452 } 1453 } else { 1454 if (sz_copied != 0) 1455 goto exit; /* can't add error msg to valid data */ 1456 1457 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control) 1458 res = 0; 1459 else 1460 res = -ECONNRESET; 1461 } 1462 1463 /* Consume received message (optional) */ 1464 if (likely(!(flags & MSG_PEEK))) { 1465 if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { 1466 tipc_sk_send_ack(tsk, tsk->rcv_unacked); 1467 tsk->rcv_unacked = 0; 1468 } 1469 tsk_advance_rx_queue(sk); 1470 } 1471 1472 /* Loop around if more data is required */ 1473 if ((sz_copied < buf_len) && /* didn't get all requested data */ 1474 (!skb_queue_empty(&sk->sk_receive_queue) || 1475 (sz_copied < target)) && /* and more is ready or required */ 1476 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */ 1477 (!err)) /* and haven't reached a FIN */ 1478 goto restart; 1479 1480 exit: 1481 release_sock(sk); 1482 return sz_copied ? sz_copied : res; 1483 } 1484 1485 /** 1486 * tipc_write_space - wake up thread if port congestion is released 1487 * @sk: socket 1488 */ 1489 static void tipc_write_space(struct sock *sk) 1490 { 1491 struct socket_wq *wq; 1492 1493 rcu_read_lock(); 1494 wq = rcu_dereference(sk->sk_wq); 1495 if (wq_has_sleeper(wq)) 1496 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 1497 POLLWRNORM | POLLWRBAND); 1498 rcu_read_unlock(); 1499 } 1500 1501 /** 1502 * tipc_data_ready - wake up threads to indicate messages have been received 1503 * @sk: socket 1504 * @len: the length of messages 1505 */ 1506 static void tipc_data_ready(struct sock *sk) 1507 { 1508 struct socket_wq *wq; 1509 1510 rcu_read_lock(); 1511 wq = rcu_dereference(sk->sk_wq); 1512 if (wq_has_sleeper(wq)) 1513 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 1514 POLLRDNORM | POLLRDBAND); 1515 rcu_read_unlock(); 1516 } 1517 1518 /** 1519 * filter_connect - Handle all incoming messages for a connection-based socket 1520 * @tsk: TIPC socket 1521 * @skb: pointer to message buffer. Set to NULL if buffer is consumed 1522 * 1523 * Returns true if everything ok, false otherwise 1524 */ 1525 static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb) 1526 { 1527 struct sock *sk = &tsk->sk; 1528 struct net *net = sock_net(sk); 1529 struct socket *sock = sk->sk_socket; 1530 struct tipc_msg *hdr = buf_msg(skb); 1531 1532 if (unlikely(msg_mcast(hdr))) 1533 return false; 1534 1535 switch ((int)sock->state) { 1536 case SS_CONNECTED: 1537 1538 /* Accept only connection-based messages sent by peer */ 1539 if (unlikely(!tsk_peer_msg(tsk, hdr))) 1540 return false; 1541 1542 if (unlikely(msg_errcode(hdr))) { 1543 sock->state = SS_DISCONNECTING; 1544 tsk->connected = 0; 1545 /* Let timer expire on it's own */ 1546 tipc_node_remove_conn(net, tsk_peer_node(tsk), 1547 tsk->portid); 1548 } 1549 return true; 1550 1551 case SS_CONNECTING: 1552 1553 /* Accept only ACK or NACK message */ 1554 if (unlikely(!msg_connected(hdr))) 1555 return false; 1556 1557 if (unlikely(msg_errcode(hdr))) { 1558 sock->state = SS_DISCONNECTING; 1559 sk->sk_err = ECONNREFUSED; 1560 return true; 1561 } 1562 1563 if (unlikely(!msg_isdata(hdr))) { 1564 sock->state = SS_DISCONNECTING; 1565 sk->sk_err = EINVAL; 1566 return true; 1567 } 1568 1569 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr)); 1570 msg_set_importance(&tsk->phdr, msg_importance(hdr)); 1571 sock->state = SS_CONNECTED; 1572 1573 /* If 'ACK+' message, add to socket receive queue */ 1574 if (msg_data_sz(hdr)) 1575 return true; 1576 1577 /* If empty 'ACK-' message, wake up sleeping connect() */ 1578 if (waitqueue_active(sk_sleep(sk))) 1579 wake_up_interruptible(sk_sleep(sk)); 1580 1581 /* 'ACK-' message is neither accepted nor rejected: */ 1582 msg_set_dest_droppable(hdr, 1); 1583 return false; 1584 1585 case SS_LISTENING: 1586 case SS_UNCONNECTED: 1587 1588 /* Accept only SYN message */ 1589 if (!msg_connected(hdr) && !(msg_errcode(hdr))) 1590 return true; 1591 break; 1592 case SS_DISCONNECTING: 1593 break; 1594 default: 1595 pr_err("Unknown socket state %u\n", sock->state); 1596 } 1597 return false; 1598 } 1599 1600 /** 1601 * rcvbuf_limit - get proper overload limit of socket receive queue 1602 * @sk: socket 1603 * @buf: message 1604 * 1605 * For all connection oriented messages, irrespective of importance, 1606 * the default overload value (i.e. 67MB) is set as limit. 1607 * 1608 * For all connectionless messages, by default new queue limits are 1609 * as belows: 1610 * 1611 * TIPC_LOW_IMPORTANCE (4 MB) 1612 * TIPC_MEDIUM_IMPORTANCE (8 MB) 1613 * TIPC_HIGH_IMPORTANCE (16 MB) 1614 * TIPC_CRITICAL_IMPORTANCE (32 MB) 1615 * 1616 * Returns overload limit according to corresponding message importance 1617 */ 1618 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) 1619 { 1620 struct tipc_msg *msg = buf_msg(buf); 1621 1622 if (msg_connected(msg)) 1623 return sysctl_tipc_rmem[2]; 1624 1625 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << 1626 msg_importance(msg); 1627 } 1628 1629 /** 1630 * filter_rcv - validate incoming message 1631 * @sk: socket 1632 * @skb: pointer to message. 1633 * 1634 * Enqueues message on receive queue if acceptable; optionally handles 1635 * disconnect indication for a connected socket. 1636 * 1637 * Called with socket lock already taken 1638 * 1639 * Returns true if message was added to socket receive queue, otherwise false 1640 */ 1641 static bool filter_rcv(struct sock *sk, struct sk_buff *skb) 1642 { 1643 struct socket *sock = sk->sk_socket; 1644 struct tipc_sock *tsk = tipc_sk(sk); 1645 struct tipc_msg *hdr = buf_msg(skb); 1646 unsigned int limit = rcvbuf_limit(sk, skb); 1647 int err = TIPC_OK; 1648 int usr = msg_user(hdr); 1649 1650 if (unlikely(msg_user(hdr) == CONN_MANAGER)) { 1651 tipc_sk_proto_rcv(tsk, skb); 1652 return false; 1653 } 1654 1655 if (unlikely(usr == SOCK_WAKEUP)) { 1656 kfree_skb(skb); 1657 tsk->link_cong = 0; 1658 sk->sk_write_space(sk); 1659 return false; 1660 } 1661 1662 /* Drop if illegal message type */ 1663 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) { 1664 kfree_skb(skb); 1665 return false; 1666 } 1667 1668 /* Reject if wrong message type for current socket state */ 1669 if (unlikely(sock->state == SS_READY)) { 1670 if (msg_connected(hdr)) { 1671 err = TIPC_ERR_NO_PORT; 1672 goto reject; 1673 } 1674 } else if (unlikely(!filter_connect(tsk, skb))) { 1675 err = TIPC_ERR_NO_PORT; 1676 goto reject; 1677 } 1678 1679 /* Reject message if there isn't room to queue it */ 1680 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) { 1681 err = TIPC_ERR_OVERLOAD; 1682 goto reject; 1683 } 1684 1685 /* Enqueue message */ 1686 TIPC_SKB_CB(skb)->handle = NULL; 1687 __skb_queue_tail(&sk->sk_receive_queue, skb); 1688 skb_set_owner_r(skb, sk); 1689 1690 sk->sk_data_ready(sk); 1691 return true; 1692 1693 reject: 1694 tipc_sk_respond(sk, skb, err); 1695 return false; 1696 } 1697 1698 /** 1699 * tipc_backlog_rcv - handle incoming message from backlog queue 1700 * @sk: socket 1701 * @skb: message 1702 * 1703 * Caller must hold socket lock 1704 * 1705 * Returns 0 1706 */ 1707 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) 1708 { 1709 unsigned int truesize = skb->truesize; 1710 1711 if (likely(filter_rcv(sk, skb))) 1712 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt); 1713 return 0; 1714 } 1715 1716 /** 1717 * tipc_sk_enqueue - extract all buffers with destination 'dport' from 1718 * inputq and try adding them to socket or backlog queue 1719 * @inputq: list of incoming buffers with potentially different destinations 1720 * @sk: socket where the buffers should be enqueued 1721 * @dport: port number for the socket 1722 * 1723 * Caller must hold socket lock 1724 */ 1725 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 1726 u32 dport) 1727 { 1728 unsigned int lim; 1729 atomic_t *dcnt; 1730 struct sk_buff *skb; 1731 unsigned long time_limit = jiffies + 2; 1732 1733 while (skb_queue_len(inputq)) { 1734 if (unlikely(time_after_eq(jiffies, time_limit))) 1735 return; 1736 1737 skb = tipc_skb_dequeue(inputq, dport); 1738 if (unlikely(!skb)) 1739 return; 1740 1741 /* Add message directly to receive queue if possible */ 1742 if (!sock_owned_by_user(sk)) { 1743 filter_rcv(sk, skb); 1744 continue; 1745 } 1746 1747 /* Try backlog, compensating for double-counted bytes */ 1748 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 1749 if (sk->sk_backlog.len) 1750 atomic_set(dcnt, 0); 1751 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 1752 if (likely(!sk_add_backlog(sk, skb, lim))) 1753 continue; 1754 1755 /* Overload => reject message back to sender */ 1756 tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD); 1757 break; 1758 } 1759 } 1760 1761 /** 1762 * tipc_sk_rcv - handle a chain of incoming buffers 1763 * @inputq: buffer list containing the buffers 1764 * Consumes all buffers in list until inputq is empty 1765 * Note: may be called in multiple threads referring to the same queue 1766 */ 1767 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 1768 { 1769 u32 dnode, dport = 0; 1770 int err; 1771 struct tipc_sock *tsk; 1772 struct sock *sk; 1773 struct sk_buff *skb; 1774 1775 while (skb_queue_len(inputq)) { 1776 dport = tipc_skb_peek_port(inputq, dport); 1777 tsk = tipc_sk_lookup(net, dport); 1778 1779 if (likely(tsk)) { 1780 sk = &tsk->sk; 1781 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 1782 tipc_sk_enqueue(inputq, sk, dport); 1783 spin_unlock_bh(&sk->sk_lock.slock); 1784 } 1785 sock_put(sk); 1786 continue; 1787 } 1788 1789 /* No destination socket => dequeue skb if still there */ 1790 skb = tipc_skb_dequeue(inputq, dport); 1791 if (!skb) 1792 return; 1793 1794 /* Try secondary lookup if unresolved named message */ 1795 err = TIPC_ERR_NO_PORT; 1796 if (tipc_msg_lookup_dest(net, skb, &err)) 1797 goto xmit; 1798 1799 /* Prepare for message rejection */ 1800 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err)) 1801 continue; 1802 xmit: 1803 dnode = msg_destnode(buf_msg(skb)); 1804 tipc_node_xmit_skb(net, skb, dnode, dport); 1805 } 1806 } 1807 1808 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 1809 { 1810 struct sock *sk = sock->sk; 1811 DEFINE_WAIT(wait); 1812 int done; 1813 1814 do { 1815 int err = sock_error(sk); 1816 if (err) 1817 return err; 1818 if (!*timeo_p) 1819 return -ETIMEDOUT; 1820 if (signal_pending(current)) 1821 return sock_intr_errno(*timeo_p); 1822 1823 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1824 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING); 1825 finish_wait(sk_sleep(sk), &wait); 1826 } while (!done); 1827 return 0; 1828 } 1829 1830 /** 1831 * tipc_connect - establish a connection to another TIPC port 1832 * @sock: socket structure 1833 * @dest: socket address for destination port 1834 * @destlen: size of socket address data structure 1835 * @flags: file-related flags associated with socket 1836 * 1837 * Returns 0 on success, errno otherwise 1838 */ 1839 static int tipc_connect(struct socket *sock, struct sockaddr *dest, 1840 int destlen, int flags) 1841 { 1842 struct sock *sk = sock->sk; 1843 struct tipc_sock *tsk = tipc_sk(sk); 1844 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 1845 struct msghdr m = {NULL,}; 1846 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout; 1847 socket_state previous; 1848 int res = 0; 1849 1850 lock_sock(sk); 1851 1852 /* DGRAM/RDM connect(), just save the destaddr */ 1853 if (sock->state == SS_READY) { 1854 if (dst->family == AF_UNSPEC) { 1855 memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc)); 1856 tsk->connected = 0; 1857 } else if (destlen != sizeof(struct sockaddr_tipc)) { 1858 res = -EINVAL; 1859 } else { 1860 memcpy(&tsk->remote, dest, destlen); 1861 tsk->connected = 1; 1862 } 1863 goto exit; 1864 } 1865 1866 /* 1867 * Reject connection attempt using multicast address 1868 * 1869 * Note: send_msg() validates the rest of the address fields, 1870 * so there's no need to do it here 1871 */ 1872 if (dst->addrtype == TIPC_ADDR_MCAST) { 1873 res = -EINVAL; 1874 goto exit; 1875 } 1876 1877 previous = sock->state; 1878 switch (sock->state) { 1879 case SS_UNCONNECTED: 1880 /* Send a 'SYN-' to destination */ 1881 m.msg_name = dest; 1882 m.msg_namelen = destlen; 1883 1884 /* If connect is in non-blocking case, set MSG_DONTWAIT to 1885 * indicate send_msg() is never blocked. 1886 */ 1887 if (!timeout) 1888 m.msg_flags = MSG_DONTWAIT; 1889 1890 res = __tipc_sendmsg(sock, &m, 0); 1891 if ((res < 0) && (res != -EWOULDBLOCK)) 1892 goto exit; 1893 1894 /* Just entered SS_CONNECTING state; the only 1895 * difference is that return value in non-blocking 1896 * case is EINPROGRESS, rather than EALREADY. 1897 */ 1898 res = -EINPROGRESS; 1899 case SS_CONNECTING: 1900 if (previous == SS_CONNECTING) 1901 res = -EALREADY; 1902 if (!timeout) 1903 goto exit; 1904 timeout = msecs_to_jiffies(timeout); 1905 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1906 res = tipc_wait_for_connect(sock, &timeout); 1907 break; 1908 case SS_CONNECTED: 1909 res = -EISCONN; 1910 break; 1911 default: 1912 res = -EINVAL; 1913 break; 1914 } 1915 exit: 1916 release_sock(sk); 1917 return res; 1918 } 1919 1920 /** 1921 * tipc_listen - allow socket to listen for incoming connections 1922 * @sock: socket structure 1923 * @len: (unused) 1924 * 1925 * Returns 0 on success, errno otherwise 1926 */ 1927 static int tipc_listen(struct socket *sock, int len) 1928 { 1929 struct sock *sk = sock->sk; 1930 int res; 1931 1932 lock_sock(sk); 1933 1934 if (sock->state != SS_UNCONNECTED) 1935 res = -EINVAL; 1936 else { 1937 sock->state = SS_LISTENING; 1938 res = 0; 1939 } 1940 1941 release_sock(sk); 1942 return res; 1943 } 1944 1945 static int tipc_wait_for_accept(struct socket *sock, long timeo) 1946 { 1947 struct sock *sk = sock->sk; 1948 DEFINE_WAIT(wait); 1949 int err; 1950 1951 /* True wake-one mechanism for incoming connections: only 1952 * one process gets woken up, not the 'whole herd'. 1953 * Since we do not 'race & poll' for established sockets 1954 * anymore, the common case will execute the loop only once. 1955 */ 1956 for (;;) { 1957 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 1958 TASK_INTERRUPTIBLE); 1959 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { 1960 release_sock(sk); 1961 timeo = schedule_timeout(timeo); 1962 lock_sock(sk); 1963 } 1964 err = 0; 1965 if (!skb_queue_empty(&sk->sk_receive_queue)) 1966 break; 1967 err = -EINVAL; 1968 if (sock->state != SS_LISTENING) 1969 break; 1970 err = -EAGAIN; 1971 if (!timeo) 1972 break; 1973 err = sock_intr_errno(timeo); 1974 if (signal_pending(current)) 1975 break; 1976 } 1977 finish_wait(sk_sleep(sk), &wait); 1978 return err; 1979 } 1980 1981 /** 1982 * tipc_accept - wait for connection request 1983 * @sock: listening socket 1984 * @newsock: new socket that is to be connected 1985 * @flags: file-related flags associated with socket 1986 * 1987 * Returns 0 on success, errno otherwise 1988 */ 1989 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) 1990 { 1991 struct sock *new_sk, *sk = sock->sk; 1992 struct sk_buff *buf; 1993 struct tipc_sock *new_tsock; 1994 struct tipc_msg *msg; 1995 long timeo; 1996 int res; 1997 1998 lock_sock(sk); 1999 2000 if (sock->state != SS_LISTENING) { 2001 res = -EINVAL; 2002 goto exit; 2003 } 2004 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 2005 res = tipc_wait_for_accept(sock, timeo); 2006 if (res) 2007 goto exit; 2008 2009 buf = skb_peek(&sk->sk_receive_queue); 2010 2011 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); 2012 if (res) 2013 goto exit; 2014 security_sk_clone(sock->sk, new_sock->sk); 2015 2016 new_sk = new_sock->sk; 2017 new_tsock = tipc_sk(new_sk); 2018 msg = buf_msg(buf); 2019 2020 /* we lock on new_sk; but lockdep sees the lock on sk */ 2021 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING); 2022 2023 /* 2024 * Reject any stray messages received by new socket 2025 * before the socket lock was taken (very, very unlikely) 2026 */ 2027 tsk_rej_rx_queue(new_sk); 2028 2029 /* Connect new socket to it's peer */ 2030 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); 2031 new_sock->state = SS_CONNECTED; 2032 2033 tsk_set_importance(new_tsock, msg_importance(msg)); 2034 if (msg_named(msg)) { 2035 new_tsock->conn_type = msg_nametype(msg); 2036 new_tsock->conn_instance = msg_nameinst(msg); 2037 } 2038 2039 /* 2040 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 2041 * Respond to 'SYN+' by queuing it on new socket. 2042 */ 2043 if (!msg_data_sz(msg)) { 2044 struct msghdr m = {NULL,}; 2045 2046 tsk_advance_rx_queue(sk); 2047 __tipc_send_stream(new_sock, &m, 0); 2048 } else { 2049 __skb_dequeue(&sk->sk_receive_queue); 2050 __skb_queue_head(&new_sk->sk_receive_queue, buf); 2051 skb_set_owner_r(buf, new_sk); 2052 } 2053 release_sock(new_sk); 2054 exit: 2055 release_sock(sk); 2056 return res; 2057 } 2058 2059 /** 2060 * tipc_shutdown - shutdown socket connection 2061 * @sock: socket structure 2062 * @how: direction to close (must be SHUT_RDWR) 2063 * 2064 * Terminates connection (if necessary), then purges socket's receive queue. 2065 * 2066 * Returns 0 on success, errno otherwise 2067 */ 2068 static int tipc_shutdown(struct socket *sock, int how) 2069 { 2070 struct sock *sk = sock->sk; 2071 struct net *net = sock_net(sk); 2072 struct tipc_sock *tsk = tipc_sk(sk); 2073 struct sk_buff *skb; 2074 u32 dnode = tsk_peer_node(tsk); 2075 u32 dport = tsk_peer_port(tsk); 2076 u32 onode = tipc_own_addr(net); 2077 u32 oport = tsk->portid; 2078 int res; 2079 2080 if (how != SHUT_RDWR) 2081 return -EINVAL; 2082 2083 lock_sock(sk); 2084 2085 switch (sock->state) { 2086 case SS_CONNECTING: 2087 case SS_CONNECTED: 2088 2089 restart: 2090 dnode = tsk_peer_node(tsk); 2091 2092 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ 2093 skb = __skb_dequeue(&sk->sk_receive_queue); 2094 if (skb) { 2095 if (TIPC_SKB_CB(skb)->handle != NULL) { 2096 kfree_skb(skb); 2097 goto restart; 2098 } 2099 tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN); 2100 } else { 2101 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 2102 TIPC_CONN_MSG, SHORT_H_SIZE, 2103 0, dnode, onode, dport, oport, 2104 TIPC_CONN_SHUTDOWN); 2105 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 2106 } 2107 tsk->connected = 0; 2108 sock->state = SS_DISCONNECTING; 2109 tipc_node_remove_conn(net, dnode, tsk->portid); 2110 /* fall through */ 2111 2112 case SS_DISCONNECTING: 2113 2114 /* Discard any unreceived messages */ 2115 __skb_queue_purge(&sk->sk_receive_queue); 2116 2117 /* Wake up anyone sleeping in poll */ 2118 sk->sk_state_change(sk); 2119 res = 0; 2120 break; 2121 2122 default: 2123 res = -ENOTCONN; 2124 } 2125 2126 release_sock(sk); 2127 return res; 2128 } 2129 2130 static void tipc_sk_timeout(unsigned long data) 2131 { 2132 struct tipc_sock *tsk = (struct tipc_sock *)data; 2133 struct sock *sk = &tsk->sk; 2134 struct sk_buff *skb = NULL; 2135 u32 peer_port, peer_node; 2136 u32 own_node = tsk_own_node(tsk); 2137 2138 bh_lock_sock(sk); 2139 if (!tsk->connected) { 2140 bh_unlock_sock(sk); 2141 goto exit; 2142 } 2143 peer_port = tsk_peer_port(tsk); 2144 peer_node = tsk_peer_node(tsk); 2145 2146 if (tsk->probing_state == TIPC_CONN_PROBING) { 2147 if (!sock_owned_by_user(sk)) { 2148 sk->sk_socket->state = SS_DISCONNECTING; 2149 tsk->connected = 0; 2150 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), 2151 tsk_peer_port(tsk)); 2152 sk->sk_state_change(sk); 2153 } else { 2154 /* Try again later */ 2155 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); 2156 } 2157 2158 } else { 2159 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, 2160 INT_H_SIZE, 0, peer_node, own_node, 2161 peer_port, tsk->portid, TIPC_OK); 2162 tsk->probing_state = TIPC_CONN_PROBING; 2163 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); 2164 } 2165 bh_unlock_sock(sk); 2166 if (skb) 2167 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid); 2168 exit: 2169 sock_put(sk); 2170 } 2171 2172 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2173 struct tipc_name_seq const *seq) 2174 { 2175 struct net *net = sock_net(&tsk->sk); 2176 struct publication *publ; 2177 u32 key; 2178 2179 if (tsk->connected) 2180 return -EINVAL; 2181 key = tsk->portid + tsk->pub_count + 1; 2182 if (key == tsk->portid) 2183 return -EADDRINUSE; 2184 2185 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2186 scope, tsk->portid, key); 2187 if (unlikely(!publ)) 2188 return -EINVAL; 2189 2190 list_add(&publ->pport_list, &tsk->publications); 2191 tsk->pub_count++; 2192 tsk->published = 1; 2193 return 0; 2194 } 2195 2196 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2197 struct tipc_name_seq const *seq) 2198 { 2199 struct net *net = sock_net(&tsk->sk); 2200 struct publication *publ; 2201 struct publication *safe; 2202 int rc = -EINVAL; 2203 2204 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) { 2205 if (seq) { 2206 if (publ->scope != scope) 2207 continue; 2208 if (publ->type != seq->type) 2209 continue; 2210 if (publ->lower != seq->lower) 2211 continue; 2212 if (publ->upper != seq->upper) 2213 break; 2214 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2215 publ->ref, publ->key); 2216 rc = 0; 2217 break; 2218 } 2219 tipc_nametbl_withdraw(net, publ->type, publ->lower, 2220 publ->ref, publ->key); 2221 rc = 0; 2222 } 2223 if (list_empty(&tsk->publications)) 2224 tsk->published = 0; 2225 return rc; 2226 } 2227 2228 /* tipc_sk_reinit: set non-zero address in all existing sockets 2229 * when we go from standalone to network mode. 2230 */ 2231 void tipc_sk_reinit(struct net *net) 2232 { 2233 struct tipc_net *tn = net_generic(net, tipc_net_id); 2234 const struct bucket_table *tbl; 2235 struct rhash_head *pos; 2236 struct tipc_sock *tsk; 2237 struct tipc_msg *msg; 2238 int i; 2239 2240 rcu_read_lock(); 2241 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 2242 for (i = 0; i < tbl->size; i++) { 2243 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { 2244 spin_lock_bh(&tsk->sk.sk_lock.slock); 2245 msg = &tsk->phdr; 2246 msg_set_prevnode(msg, tn->own_addr); 2247 msg_set_orignode(msg, tn->own_addr); 2248 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2249 } 2250 } 2251 rcu_read_unlock(); 2252 } 2253 2254 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2255 { 2256 struct tipc_net *tn = net_generic(net, tipc_net_id); 2257 struct tipc_sock *tsk; 2258 2259 rcu_read_lock(); 2260 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); 2261 if (tsk) 2262 sock_hold(&tsk->sk); 2263 rcu_read_unlock(); 2264 2265 return tsk; 2266 } 2267 2268 static int tipc_sk_insert(struct tipc_sock *tsk) 2269 { 2270 struct sock *sk = &tsk->sk; 2271 struct net *net = sock_net(sk); 2272 struct tipc_net *tn = net_generic(net, tipc_net_id); 2273 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1; 2274 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT; 2275 2276 while (remaining--) { 2277 portid++; 2278 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT)) 2279 portid = TIPC_MIN_PORT; 2280 tsk->portid = portid; 2281 sock_hold(&tsk->sk); 2282 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, 2283 tsk_rht_params)) 2284 return 0; 2285 sock_put(&tsk->sk); 2286 } 2287 2288 return -1; 2289 } 2290 2291 static void tipc_sk_remove(struct tipc_sock *tsk) 2292 { 2293 struct sock *sk = &tsk->sk; 2294 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); 2295 2296 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { 2297 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); 2298 __sock_put(sk); 2299 } 2300 } 2301 2302 static const struct rhashtable_params tsk_rht_params = { 2303 .nelem_hint = 192, 2304 .head_offset = offsetof(struct tipc_sock, node), 2305 .key_offset = offsetof(struct tipc_sock, portid), 2306 .key_len = sizeof(u32), /* portid */ 2307 .max_size = 1048576, 2308 .min_size = 256, 2309 .automatic_shrinking = true, 2310 }; 2311 2312 int tipc_sk_rht_init(struct net *net) 2313 { 2314 struct tipc_net *tn = net_generic(net, tipc_net_id); 2315 2316 return rhashtable_init(&tn->sk_rht, &tsk_rht_params); 2317 } 2318 2319 void tipc_sk_rht_destroy(struct net *net) 2320 { 2321 struct tipc_net *tn = net_generic(net, tipc_net_id); 2322 2323 /* Wait for socket readers to complete */ 2324 synchronize_net(); 2325 2326 rhashtable_destroy(&tn->sk_rht); 2327 } 2328 2329 /** 2330 * tipc_setsockopt - set socket option 2331 * @sock: socket structure 2332 * @lvl: option level 2333 * @opt: option identifier 2334 * @ov: pointer to new option value 2335 * @ol: length of option value 2336 * 2337 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 2338 * (to ease compatibility). 2339 * 2340 * Returns 0 on success, errno otherwise 2341 */ 2342 static int tipc_setsockopt(struct socket *sock, int lvl, int opt, 2343 char __user *ov, unsigned int ol) 2344 { 2345 struct sock *sk = sock->sk; 2346 struct tipc_sock *tsk = tipc_sk(sk); 2347 u32 value; 2348 int res; 2349 2350 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2351 return 0; 2352 if (lvl != SOL_TIPC) 2353 return -ENOPROTOOPT; 2354 if (ol < sizeof(value)) 2355 return -EINVAL; 2356 res = get_user(value, (u32 __user *)ov); 2357 if (res) 2358 return res; 2359 2360 lock_sock(sk); 2361 2362 switch (opt) { 2363 case TIPC_IMPORTANCE: 2364 res = tsk_set_importance(tsk, value); 2365 break; 2366 case TIPC_SRC_DROPPABLE: 2367 if (sock->type != SOCK_STREAM) 2368 tsk_set_unreliable(tsk, value); 2369 else 2370 res = -ENOPROTOOPT; 2371 break; 2372 case TIPC_DEST_DROPPABLE: 2373 tsk_set_unreturnable(tsk, value); 2374 break; 2375 case TIPC_CONN_TIMEOUT: 2376 tipc_sk(sk)->conn_timeout = value; 2377 /* no need to set "res", since already 0 at this point */ 2378 break; 2379 default: 2380 res = -EINVAL; 2381 } 2382 2383 release_sock(sk); 2384 2385 return res; 2386 } 2387 2388 /** 2389 * tipc_getsockopt - get socket option 2390 * @sock: socket structure 2391 * @lvl: option level 2392 * @opt: option identifier 2393 * @ov: receptacle for option value 2394 * @ol: receptacle for length of option value 2395 * 2396 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 2397 * (to ease compatibility). 2398 * 2399 * Returns 0 on success, errno otherwise 2400 */ 2401 static int tipc_getsockopt(struct socket *sock, int lvl, int opt, 2402 char __user *ov, int __user *ol) 2403 { 2404 struct sock *sk = sock->sk; 2405 struct tipc_sock *tsk = tipc_sk(sk); 2406 int len; 2407 u32 value; 2408 int res; 2409 2410 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 2411 return put_user(0, ol); 2412 if (lvl != SOL_TIPC) 2413 return -ENOPROTOOPT; 2414 res = get_user(len, ol); 2415 if (res) 2416 return res; 2417 2418 lock_sock(sk); 2419 2420 switch (opt) { 2421 case TIPC_IMPORTANCE: 2422 value = tsk_importance(tsk); 2423 break; 2424 case TIPC_SRC_DROPPABLE: 2425 value = tsk_unreliable(tsk); 2426 break; 2427 case TIPC_DEST_DROPPABLE: 2428 value = tsk_unreturnable(tsk); 2429 break; 2430 case TIPC_CONN_TIMEOUT: 2431 value = tsk->conn_timeout; 2432 /* no need to set "res", since already 0 at this point */ 2433 break; 2434 case TIPC_NODE_RECVQ_DEPTH: 2435 value = 0; /* was tipc_queue_size, now obsolete */ 2436 break; 2437 case TIPC_SOCK_RECVQ_DEPTH: 2438 value = skb_queue_len(&sk->sk_receive_queue); 2439 break; 2440 default: 2441 res = -EINVAL; 2442 } 2443 2444 release_sock(sk); 2445 2446 if (res) 2447 return res; /* "get" failed */ 2448 2449 if (len < sizeof(value)) 2450 return -EINVAL; 2451 2452 if (copy_to_user(ov, &value, sizeof(value))) 2453 return -EFAULT; 2454 2455 return put_user(sizeof(value), ol); 2456 } 2457 2458 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2459 { 2460 struct sock *sk = sock->sk; 2461 struct tipc_sioc_ln_req lnr; 2462 void __user *argp = (void __user *)arg; 2463 2464 switch (cmd) { 2465 case SIOCGETLINKNAME: 2466 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2467 return -EFAULT; 2468 if (!tipc_node_get_linkname(sock_net(sk), 2469 lnr.bearer_id & 0xffff, lnr.peer, 2470 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2471 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2472 return -EFAULT; 2473 return 0; 2474 } 2475 return -EADDRNOTAVAIL; 2476 default: 2477 return -ENOIOCTLCMD; 2478 } 2479 } 2480 2481 /* Protocol switches for the various types of TIPC sockets */ 2482 2483 static const struct proto_ops msg_ops = { 2484 .owner = THIS_MODULE, 2485 .family = AF_TIPC, 2486 .release = tipc_release, 2487 .bind = tipc_bind, 2488 .connect = tipc_connect, 2489 .socketpair = sock_no_socketpair, 2490 .accept = sock_no_accept, 2491 .getname = tipc_getname, 2492 .poll = tipc_poll, 2493 .ioctl = tipc_ioctl, 2494 .listen = sock_no_listen, 2495 .shutdown = tipc_shutdown, 2496 .setsockopt = tipc_setsockopt, 2497 .getsockopt = tipc_getsockopt, 2498 .sendmsg = tipc_sendmsg, 2499 .recvmsg = tipc_recvmsg, 2500 .mmap = sock_no_mmap, 2501 .sendpage = sock_no_sendpage 2502 }; 2503 2504 static const struct proto_ops packet_ops = { 2505 .owner = THIS_MODULE, 2506 .family = AF_TIPC, 2507 .release = tipc_release, 2508 .bind = tipc_bind, 2509 .connect = tipc_connect, 2510 .socketpair = sock_no_socketpair, 2511 .accept = tipc_accept, 2512 .getname = tipc_getname, 2513 .poll = tipc_poll, 2514 .ioctl = tipc_ioctl, 2515 .listen = tipc_listen, 2516 .shutdown = tipc_shutdown, 2517 .setsockopt = tipc_setsockopt, 2518 .getsockopt = tipc_getsockopt, 2519 .sendmsg = tipc_send_packet, 2520 .recvmsg = tipc_recvmsg, 2521 .mmap = sock_no_mmap, 2522 .sendpage = sock_no_sendpage 2523 }; 2524 2525 static const struct proto_ops stream_ops = { 2526 .owner = THIS_MODULE, 2527 .family = AF_TIPC, 2528 .release = tipc_release, 2529 .bind = tipc_bind, 2530 .connect = tipc_connect, 2531 .socketpair = sock_no_socketpair, 2532 .accept = tipc_accept, 2533 .getname = tipc_getname, 2534 .poll = tipc_poll, 2535 .ioctl = tipc_ioctl, 2536 .listen = tipc_listen, 2537 .shutdown = tipc_shutdown, 2538 .setsockopt = tipc_setsockopt, 2539 .getsockopt = tipc_getsockopt, 2540 .sendmsg = tipc_send_stream, 2541 .recvmsg = tipc_recv_stream, 2542 .mmap = sock_no_mmap, 2543 .sendpage = sock_no_sendpage 2544 }; 2545 2546 static const struct net_proto_family tipc_family_ops = { 2547 .owner = THIS_MODULE, 2548 .family = AF_TIPC, 2549 .create = tipc_sk_create 2550 }; 2551 2552 static struct proto tipc_proto = { 2553 .name = "TIPC", 2554 .owner = THIS_MODULE, 2555 .obj_size = sizeof(struct tipc_sock), 2556 .sysctl_rmem = sysctl_tipc_rmem 2557 }; 2558 2559 /** 2560 * tipc_socket_init - initialize TIPC socket interface 2561 * 2562 * Returns 0 on success, errno otherwise 2563 */ 2564 int tipc_socket_init(void) 2565 { 2566 int res; 2567 2568 res = proto_register(&tipc_proto, 1); 2569 if (res) { 2570 pr_err("Failed to register TIPC protocol type\n"); 2571 goto out; 2572 } 2573 2574 res = sock_register(&tipc_family_ops); 2575 if (res) { 2576 pr_err("Failed to register TIPC socket type\n"); 2577 proto_unregister(&tipc_proto); 2578 goto out; 2579 } 2580 out: 2581 return res; 2582 } 2583 2584 /** 2585 * tipc_socket_stop - stop TIPC socket interface 2586 */ 2587 void tipc_socket_stop(void) 2588 { 2589 sock_unregister(tipc_family_ops.family); 2590 proto_unregister(&tipc_proto); 2591 } 2592 2593 /* Caller should hold socket lock for the passed tipc socket. */ 2594 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) 2595 { 2596 u32 peer_node; 2597 u32 peer_port; 2598 struct nlattr *nest; 2599 2600 peer_node = tsk_peer_node(tsk); 2601 peer_port = tsk_peer_port(tsk); 2602 2603 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 2604 2605 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 2606 goto msg_full; 2607 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) 2608 goto msg_full; 2609 2610 if (tsk->conn_type != 0) { 2611 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) 2612 goto msg_full; 2613 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) 2614 goto msg_full; 2615 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) 2616 goto msg_full; 2617 } 2618 nla_nest_end(skb, nest); 2619 2620 return 0; 2621 2622 msg_full: 2623 nla_nest_cancel(skb, nest); 2624 2625 return -EMSGSIZE; 2626 } 2627 2628 /* Caller should hold socket lock for the passed tipc socket. */ 2629 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, 2630 struct tipc_sock *tsk) 2631 { 2632 int err; 2633 void *hdr; 2634 struct nlattr *attrs; 2635 struct net *net = sock_net(skb->sk); 2636 struct tipc_net *tn = net_generic(net, tipc_net_id); 2637 2638 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2639 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 2640 if (!hdr) 2641 goto msg_cancel; 2642 2643 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 2644 if (!attrs) 2645 goto genlmsg_cancel; 2646 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid)) 2647 goto attr_msg_cancel; 2648 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr)) 2649 goto attr_msg_cancel; 2650 2651 if (tsk->connected) { 2652 err = __tipc_nl_add_sk_con(skb, tsk); 2653 if (err) 2654 goto attr_msg_cancel; 2655 } else if (!list_empty(&tsk->publications)) { 2656 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) 2657 goto attr_msg_cancel; 2658 } 2659 nla_nest_end(skb, attrs); 2660 genlmsg_end(skb, hdr); 2661 2662 return 0; 2663 2664 attr_msg_cancel: 2665 nla_nest_cancel(skb, attrs); 2666 genlmsg_cancel: 2667 genlmsg_cancel(skb, hdr); 2668 msg_cancel: 2669 return -EMSGSIZE; 2670 } 2671 2672 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) 2673 { 2674 int err; 2675 struct tipc_sock *tsk; 2676 const struct bucket_table *tbl; 2677 struct rhash_head *pos; 2678 struct net *net = sock_net(skb->sk); 2679 struct tipc_net *tn = net_generic(net, tipc_net_id); 2680 u32 tbl_id = cb->args[0]; 2681 u32 prev_portid = cb->args[1]; 2682 2683 rcu_read_lock(); 2684 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 2685 for (; tbl_id < tbl->size; tbl_id++) { 2686 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { 2687 spin_lock_bh(&tsk->sk.sk_lock.slock); 2688 if (prev_portid && prev_portid != tsk->portid) { 2689 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2690 continue; 2691 } 2692 2693 err = __tipc_nl_add_sk(skb, cb, tsk); 2694 if (err) { 2695 prev_portid = tsk->portid; 2696 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2697 goto out; 2698 } 2699 prev_portid = 0; 2700 spin_unlock_bh(&tsk->sk.sk_lock.slock); 2701 } 2702 } 2703 out: 2704 rcu_read_unlock(); 2705 cb->args[0] = tbl_id; 2706 cb->args[1] = prev_portid; 2707 2708 return skb->len; 2709 } 2710 2711 /* Caller should hold socket lock for the passed tipc socket. */ 2712 static int __tipc_nl_add_sk_publ(struct sk_buff *skb, 2713 struct netlink_callback *cb, 2714 struct publication *publ) 2715 { 2716 void *hdr; 2717 struct nlattr *attrs; 2718 2719 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2720 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 2721 if (!hdr) 2722 goto msg_cancel; 2723 2724 attrs = nla_nest_start(skb, TIPC_NLA_PUBL); 2725 if (!attrs) 2726 goto genlmsg_cancel; 2727 2728 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) 2729 goto attr_msg_cancel; 2730 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) 2731 goto attr_msg_cancel; 2732 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) 2733 goto attr_msg_cancel; 2734 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) 2735 goto attr_msg_cancel; 2736 2737 nla_nest_end(skb, attrs); 2738 genlmsg_end(skb, hdr); 2739 2740 return 0; 2741 2742 attr_msg_cancel: 2743 nla_nest_cancel(skb, attrs); 2744 genlmsg_cancel: 2745 genlmsg_cancel(skb, hdr); 2746 msg_cancel: 2747 return -EMSGSIZE; 2748 } 2749 2750 /* Caller should hold socket lock for the passed tipc socket. */ 2751 static int __tipc_nl_list_sk_publ(struct sk_buff *skb, 2752 struct netlink_callback *cb, 2753 struct tipc_sock *tsk, u32 *last_publ) 2754 { 2755 int err; 2756 struct publication *p; 2757 2758 if (*last_publ) { 2759 list_for_each_entry(p, &tsk->publications, pport_list) { 2760 if (p->key == *last_publ) 2761 break; 2762 } 2763 if (p->key != *last_publ) { 2764 /* We never set seq or call nl_dump_check_consistent() 2765 * this means that setting prev_seq here will cause the 2766 * consistence check to fail in the netlink callback 2767 * handler. Resulting in the last NLMSG_DONE message 2768 * having the NLM_F_DUMP_INTR flag set. 2769 */ 2770 cb->prev_seq = 1; 2771 *last_publ = 0; 2772 return -EPIPE; 2773 } 2774 } else { 2775 p = list_first_entry(&tsk->publications, struct publication, 2776 pport_list); 2777 } 2778 2779 list_for_each_entry_from(p, &tsk->publications, pport_list) { 2780 err = __tipc_nl_add_sk_publ(skb, cb, p); 2781 if (err) { 2782 *last_publ = p->key; 2783 return err; 2784 } 2785 } 2786 *last_publ = 0; 2787 2788 return 0; 2789 } 2790 2791 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 2792 { 2793 int err; 2794 u32 tsk_portid = cb->args[0]; 2795 u32 last_publ = cb->args[1]; 2796 u32 done = cb->args[2]; 2797 struct net *net = sock_net(skb->sk); 2798 struct tipc_sock *tsk; 2799 2800 if (!tsk_portid) { 2801 struct nlattr **attrs; 2802 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 2803 2804 err = tipc_nlmsg_parse(cb->nlh, &attrs); 2805 if (err) 2806 return err; 2807 2808 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, 2809 attrs[TIPC_NLA_SOCK], 2810 tipc_nl_sock_policy); 2811 if (err) 2812 return err; 2813 2814 if (!sock[TIPC_NLA_SOCK_REF]) 2815 return -EINVAL; 2816 2817 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 2818 } 2819 2820 if (done) 2821 return 0; 2822 2823 tsk = tipc_sk_lookup(net, tsk_portid); 2824 if (!tsk) 2825 return -EINVAL; 2826 2827 lock_sock(&tsk->sk); 2828 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); 2829 if (!err) 2830 done = 1; 2831 release_sock(&tsk->sk); 2832 sock_put(&tsk->sk); 2833 2834 cb->args[0] = tsk_portid; 2835 cb->args[1] = last_publ; 2836 cb->args[2] = done; 2837 2838 return skb->len; 2839 } 2840