/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */
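
/*
 * Illustration (not part of this file, and the exact configfs layout is
 * an assumption about the userland cluster tooling): protocol selection
 * would look something like
 *
 *	mount -t configfs none /sys/kernel/config
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol	(0 = TCP, 1 = SCTP)
 *
 * lowcomms itself only reads the result back as dlm_config.ci_protocol
 * in dlm_lowcomms_start() below.
 */
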
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
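
/*
 * Worked example of the cbuf arithmetic above (illustrative numbers):
 * after cbuf_init(cb, 4096), base = len = 0 and mask = 0xfff.  Receiving
 * 100 bytes means cbuf_add(cb, 100), so cbuf_data() returns 100, the
 * next free offset.  If midcomms then consumes 60 bytes, cbuf_eat(cb, 60)
 * advances base to 60 and shrinks len to 40.  The "& mask" in cbuf_data()
 * and cbuf_eat() wraps both offsets inside the page, which is why the
 * size passed to cbuf_init() must be a power of two.
 */
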
struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	int sctp_assoc;
	struct connection *othercon;
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static DEFINE_IDR(connections_idr);
static DECLARE_MUTEX(connections_lock);
static int max_nodeid;
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;
	int n;

	con = idr_find(&connections_idr, nodeid);
	if (con || !alloc)
		return con;

	r = idr_pre_get(&connections_idr, alloc);
	if (!r)
		return NULL;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = idr_get_new_above(&connections_idr, con, nodeid, &n);
	if (r) {
		kmem_cache_free(con_cache, con);
		return NULL;
	}

	if (n != nodeid) {
		idr_remove(&connections_idr, n);
		kmem_cache_free(con_cache, con);
		return NULL;
	}

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = idr_find(&connections_idr, 0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	if (nodeid > max_nodeid)
		max_nodeid = nodeid;

	return con;
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	down(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	up(&connections_lock);

	return con;
}

/* This is a bit drastic, but only called when things go wrong */
static struct connection *assoc2con(int assoc_id)
{
	int i;
	struct connection *con;

	down(&connections_lock);
	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con && con->sctp_assoc == assoc_id) {
			up(&connections_lock);
			return con;
		}
	}
	up(&connections_lock);
	return NULL;
}

static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
{
	struct sockaddr_storage addr;
	int error;

	if (!dlm_local_count)
		return -1;

	error = dlm_nodeid_to_addr(nodeid, &addr);
	if (error)
		return error;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
		memcpy(&ret6->sin6_addr, &in6->sin6_addr,
		       sizeof(in6->sin6_addr));
	}

	return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
	struct connection *con = sock2con(sk);

	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}

static void lowcomms_state_change(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		lowcomms_write_space(sk);
}
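
/*
 * Summary of the callback plumbing above:
 *
 *	sk_data_ready  -> set CF_READ_PENDING  -> queue rwork on
 *			  recv_workqueue -> process_recv_sockets()
 *			  -> con->rx_action()
 *	sk_write_space -> set CF_WRITE_PENDING -> queue swork on
 *			  send_workqueue -> process_send_sockets()
 *			  -> send_to_sock()
 *
 * The test_and_set_bit() calls ensure that each connection sits on a
 * workqueue at most once per pending event.
 */
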
/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
	con->sock = sock;

	/* Install a data_ready callback */
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->sock->sk->sk_write_space = lowcomms_write_space;
	con->sock->sk->sk_state_change = lowcomms_state_change;
	con->sock->sk->sk_user_data = con;
	return 0;
}

/* Add the port number to an IPv4 or IPv6 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0,
	       sizeof(struct sockaddr_storage) - *addr_len);
}

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);

	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	/* If we are an 'othercon' then NULL the pointer to us
	   from the parent and tidy ourselves up */
	if (test_bit(CF_IS_OTHERCON, &con->flags)) {
		struct connection *parent = __nodeid2con(con->nodeid, 0);
		parent->othercon = NULL;
		kmem_cache_free(con_cache, con);
	}
	else {
		/* Parent connections get reused */
		con->retries = 0;
		mutex_unlock(&con->sock_mutex);
	}
}

/* We only send shutdown messages to nodes that are not part of the cluster */
static void sctp_send_shutdown(sctp_assoc_t associd)
{
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	int ret;
	struct connection *con;

	con = nodeid2con(0, 0);
	BUG_ON(con == NULL);

	outmessage.msg_name = NULL;
	outmessage.msg_namelen = 0;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outmessage.msg_controllen = cmsg->cmsg_len;
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));

	sinfo->sinfo_flags |= MSG_EOF;
	sinfo->sinfo_assoc_id = associd;

	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);

	if (ret != 0)
		log_print("send EOF to node failed: %d", ret);
}
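
/*
 * Note on the SCTP API used above (general socket-API behaviour, not
 * DLM-specific): a zero-length send whose sinfo_flags contain MSG_EOF
 * asks the stack to issue a graceful SHUTDOWN on that association; it
 * is the in-kernel equivalent of a userspace sendmsg() with SCTP_EOF.
 */
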
/* INIT failed but we don't know which node...
   restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
	int i;
	struct connection *con;

	down(&connections_lock);
	for (i = 1; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (!con)
			continue;
		con->sctp_assoc = 0;
		if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
			if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
				queue_work(send_workqueue, &con->swork);
			}
		}
	}
	up(&connections_lock);
}
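
/*
 * Quick map of the association events handled below:
 *
 *	SCTP_COMM_UP / SCTP_RESTART		verify the peer, peel off a socket
 *	SCTP_COMM_LOST / SCTP_SHUTDOWN_COMP	forget the association id
 *	SCTP_CANT_STR_ASSOC			restart INIT on all pending nodes
 */
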
/* Something happened to an association */
static void process_sctp_notification(struct connection *con,
				      struct msghdr *msg, char *buf)
{
	union sctp_notification *sn = (union sctp_notification *)buf;

	if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
		switch (sn->sn_assoc_change.sac_state) {

		case SCTP_COMM_UP:
		case SCTP_RESTART:
		{
			/* Check that the new node is in the lockspace */
			struct sctp_prim prim;
			int nodeid;
			int prim_len, ret;
			int addr_len;
			struct connection *new_con;
			struct file *file;
			sctp_peeloff_arg_t parg;
			int parglen = sizeof(parg);

			/*
			 * We get this before any data for an association.
			 * We verify that the node is in the cluster and
			 * then peel off a socket for it.
			 */
			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
				log_print("COMM_UP for invalid assoc ID %d",
					  (int)sn->sn_assoc_change.sac_assoc_id);
				sctp_init_failed();
				return;
			}
			memset(&prim, 0, sizeof(struct sctp_prim));
			prim_len = sizeof(struct sctp_prim);
			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;

			ret = kernel_getsockopt(con->sock,
						IPPROTO_SCTP,
						SCTP_PRIMARY_ADDR,
						(char *)&prim,
						&prim_len);
			if (ret < 0) {
				log_print("getsockopt/sctp_primary_addr on "
					  "new assoc %d failed : %d",
					  (int)sn->sn_assoc_change.sac_assoc_id,
					  ret);

				/* Retry INIT later */
				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
				if (new_con)
					clear_bit(CF_CONNECT_PENDING, &con->flags);
				return;
			}
			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
			if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
				int i;
				unsigned char *b = (unsigned char *)&prim.ssp_addr;
				log_print("reject connect from unknown addr");
				for (i = 0; i < sizeof(struct sockaddr_storage); i++)
					printk("%02x ", b[i]);
				printk("\n");
				sctp_send_shutdown(prim.ssp_assoc_id);
				return;
			}

			new_con = nodeid2con(nodeid, GFP_KERNEL);
			if (!new_con)
				return;

			/* Peel off a new sock */
			parg.associd = sn->sn_assoc_change.sac_assoc_id;
			ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
						SCTP_SOCKOPT_PEELOFF,
						(void *)&parg, &parglen);
			if (ret) {
				log_print("Can't peel off a socket for "
					  "connection %d to node %d: err=%d",
					  parg.associd, nodeid, ret);
			}
			file = fget(parg.sd);
			new_con->sock = SOCKET_I(file->f_dentry->d_inode);
			add_sock(new_con->sock, new_con);
			fput(file);
			put_unused_fd(parg.sd);

			log_print("got new/restarted association %d nodeid %d",
				  (int)sn->sn_assoc_change.sac_assoc_id, nodeid);

			/* Send any pending writes */
			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
			clear_bit(CF_INIT_PENDING, &con->flags);
			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
				queue_work(send_workqueue, &new_con->swork);
			}
			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
				queue_work(recv_workqueue, &new_con->rwork);
		}
		break;

		case SCTP_COMM_LOST:
		case SCTP_SHUTDOWN_COMP:
		{
			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
			if (con) {
				con->sctp_assoc = 0;
			}
		}
		break;

		/* We don't know which INIT failed, so clear the PENDING flags
		 * on them all.  if assoc_id is zero then it will then try
		 * again */

		case SCTP_CANT_STR_ASSOC:
		{
			log_print("Can't start SCTP association - retrying");
			sctp_init_failed();
		}
		break;

		default:
			log_print("unexpected SCTP assoc change id=%d state=%d",
				  (int)sn->sn_assoc_change.sac_assoc_id,
				  sn->sn_assoc_change.sac_state);
		}
	}
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;
	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
	}

	/* Only SCTP needs these really */
	memset(&incmsg, 0, sizeof(incmsg));
	msg.msg_control = incmsg;
	msg.msg_controllen = sizeof(incmsg);

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;

	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
				 MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;

	/* Process SCTP notifications */
	if (msg.msg_flags & MSG_NOTIFICATION) {
		msg.msg_control = incmsg;
		msg.msg_controllen = sizeof(incmsg);

		process_sctp_notification(con, &msg,
				page_address(con->rx_page) + con->cb.base);
		mutex_unlock(&con->sock_mutex);
		return 0;
	}
	BUG_ON(con->nodeid == 0);

	if (ret == len)
		call_again_soon = 1;
	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_CACHE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, "
			  "iov_len=%u, iov_base[0]=%p, read=%d",
			  page_address(con->rx_page), con->cb.base, con->cb.len,
			  len, iov[0].iov_base, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}
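
/*
 * Example of the two-iovec read above (illustrative numbers): with
 * PAGE_CACHE_SIZE = 4096, cb.base = 3000 and cb.len = 500, cbuf_data()
 * returns 3500.  iov[0] then covers offsets 3500..4095 (596 bytes) and
 * iov[1] covers offsets 0..2999, so a single kernel_recvmsg() can fill
 * all of the free space even though it wraps around the end of the page.
 */
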
/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	memset(&peeraddr, 0, sizeof(peeraddr));
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &newsock);
	if (result < 0)
		return -ENOMEM;

	mutex_lock_nested(&con->sock_mutex, 0);

	result = -ENOTCONN;
	if (con->sock == NULL)
		goto accept_err;

	newsock->type = con->sock->type;
	newsock->ops = con->sock->ops;

	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
				  &len, 2)) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
		log_print("connect from non cluster node");
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_KERNEL);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
			newcon->othercon = othercon;
			othercon->sock = newsock;
			newsock->sk->sk_user_data = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
		}
		else {
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	}
	else {
		newsock->sk->sk_user_data = newcon;
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept and adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}
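
/*
 * Crossed-connection example (illustrative): nodes 2 and 5 connect to
 * each other at the same moment.  By the time node 2 accepts node 5's
 * socket, newcon->sock already holds its own outgoing socket, so the
 * incoming one is parked in newcon->othercon.  An othercon is used only
 * for receiving; outgoing data always goes through the parent
 * connection's socket.
 */
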
static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_init_assoc(struct connection *con)
{
	struct sockaddr_storage rem_addr;
	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	struct connection *base_con;
	struct writequeue_entry *e;
	int len, offset;
	int ret;
	int addrlen;
	struct kvec iov[1];

	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
		return;

	if (con->retries++ > MAX_CONNECT_RETRIES)
		return;

	log_print("Initiating association with node %d", con->nodeid);

	if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
		log_print("no address for nodeid %d", con->nodeid);
		return;
	}
	base_con = nodeid2con(0, 0);
	BUG_ON(base_con == NULL);

	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);

	outmessage.msg_name = &rem_addr;
	outmessage.msg_namelen = addrlen;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.next, struct writequeue_entry,
		       list);

	BUG_ON((struct list_head *) e == &con->writequeue);

	len = e->len;
	offset = e->offset;
	spin_unlock(&con->writequeue_lock);
	kmap(e->page);

	/* Send the first block off the write queue */
	iov[0].iov_base = page_address(e->page)+offset;
	iov[0].iov_len = len;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
	sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid());
	outmessage.msg_controllen = cmsg->cmsg_len;

	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
	if (ret < 0) {
		log_print("Send first packet to node %d failed: %d",
			  con->nodeid, ret);

		/* Try again later */
		clear_bit(CF_CONNECT_PENDING, &con->flags);
		clear_bit(CF_INIT_PENDING, &con->flags);
	}
	else {
		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			kunmap(e->page);
			free_entry(e);
		}
		spin_unlock(&con->writequeue_lock);
	}
}
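
/*
 * Design note: on a one-to-many SCTP socket, sending to a peer address
 * with no existing association implicitly starts one (the INIT
 * exchange), so the first queued write above doubles as the connection
 * attempt.  The matching SCTP_COMM_UP arrives later in
 * process_sctp_notification(), which peels off a dedicated socket for
 * the new association.
 */
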
/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	int result = -EHOSTUNREACH;
	struct sockaddr_storage saddr;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock) {
		result = 0;
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	if (dlm_nodeid_to_addr(con->nodeid, &saddr))
		goto out_err;

	sock->sk->sk_user_data = con;
	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);
	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
	    result != -ENETDOWN && result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		lowcomms_connect_sock(con);
		result = 0;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));

	if (result < 0) {
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
	}
	sock->sk->sk_user_data = con;
	con->rx_action = tcp_accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	con->sock = sock;

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Set keepalive failed: %d", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmalloc(sizeof(*addr), GFP_KERNEL);
		if (!addr)
			break;
		memcpy(addr, &sas, sizeof(*addr));
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

/* Bind to an IP address. SCTP allows multiple addresses so it can do
   multi-homing */
static int add_sctp_bind_addr(struct connection *sctp_con,
			      struct sockaddr_storage *addr,
			      int addr_len, int num)
{
	int result = 0;

	if (num == 1)
		result = kernel_bind(sctp_con->sock,
				     (struct sockaddr *) addr,
				     addr_len);
	else
		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
					   SCTP_SOCKOPT_BINDX_ADD,
					   (char *)addr, addr_len);

	if (result < 0)
		log_print("Can't bind to port %d addr number %d",
			  dlm_config.ci_tcp_port, num);

	return result;
}
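
/*
 * Multi-homing example (illustrative addresses): if init_local() found
 * 10.0.0.1 and 192.168.0.1, the first is attached with kernel_bind()
 * and the second with SCTP_SOCKOPT_BINDX_ADD.  The association can then
 * fail over between the two interfaces without lowcomms being involved.
 */
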
/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct sockaddr_storage localaddr;
	struct sctp_event_subscribe subscribe;
	int result = -EINVAL, num = 1, i, addr_len;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int bufsize = NEEDED_RMEM;

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
				  IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	/* Listen for events */
	memset(&subscribe, 0, sizeof(subscribe));
	subscribe.sctp_data_io_event = 1;
	subscribe.sctp_association_event = 1;
	subscribe.sctp_send_failure_event = 1;
	subscribe.sctp_shutdown_event = 1;
	subscribe.sctp_partial_delivery_event = 1;

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
				   (char *)&bufsize, sizeof(bufsize));
	if (result)
		log_print("Error increasing buffer space on socket %d", result);

	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
				   (char *)&subscribe, sizeof(subscribe));
	if (result < 0) {
		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
			  result);
		goto create_delsock;
	}

	/* Init con struct */
	sock->sk->sk_user_data = con;
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = receive_from_sock;
	con->connect_action = sctp_init_assoc;

	/* Bind to all interfaces. */
	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);

		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
		if (result)
			goto create_delsock;
		++num;
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	}
	else {
		result = -EADDRINUSE;
	}

	return result;
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;
	int users = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_CACHE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		users = e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		if (users == 0)
			kmap(e->page);
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		users = e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	kunmap(e->page);
	spin_unlock(&con->writequeue_lock);

	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
		queue_work(send_workqueue, &con->swork);
	}
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}
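
/*
 * Sketch of how a caller drives the two functions above (midcomms-style;
 * 'len' and the message layout are the caller's business):
 *
 *	char *p;
 *	void *mh;
 *
 *	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_KERNEL, &p);
 *	if (!mh)
 *		return -ENOBUFS;
 *	memcpy(p, msg, len);		(fill in the message body)
 *	dlm_lowcomms_commit_buffer(mh);	(hand it to the send worker)
 */
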
/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	sendpage = con->sock->ops->sendpage;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);
		kmap(e->page);

		ret = 0;
		if (len) {
			ret = sendpage(con->sock, e->page, offset, len,
				       msg_flags);
			if (ret == -EAGAIN || ret == 0)
				goto out;
			if (ret <= 0)
				goto send_error;
		} else {
			/* Don't starve people filling buffers */
			cond_resched();
		}

		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			kunmap(e->page);
			free_entry(e);
			continue;
		}
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, false);
	lowcomms_connect_sock(con);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	if (!test_bit(CF_INIT_PENDING, &con->flags))
		lowcomms_connect_sock(con);
	return;
}

static void clean_one_writequeue(struct connection *con)
{
	struct list_head *list;
	struct list_head *temp;

	spin_lock(&con->writequeue_lock);
	list_for_each_safe(list, temp, &con->writequeue) {
		struct writequeue_entry *e =
			list_entry(list, struct writequeue_entry, list);
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		clean_one_writequeue(con);
		close_connection(con, true);
	}
	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
		con->connect_action(con);
	}
	clear_bit(CF_WRITE_PENDING, &con->flags);
	send_to_sock(con);
}

/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	int nodeid;

	for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
		struct connection *con = __nodeid2con(nodeid, 0);

		if (con)
			clean_one_writequeue(con);
	}
}

static void work_stop(void)
{
	destroy_workqueue(recv_workqueue);
	destroy_workqueue(send_workqueue);
}

static int work_start(void)
{
	/* create_workqueue() returns NULL on failure, not an ERR_PTR,
	   so test for that rather than IS_ERR() */
	recv_workqueue = create_workqueue("dlm_recv");
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = create_singlethread_workqueue("dlm_send");
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

void dlm_lowcomms_stop(void)
{
	int i;
	struct connection *con;

	/* Set all the flags to prevent any
	   socket activity.
	*/
	down(&connections_lock);
	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con) {
			con->flags |= 0x0F;
			if (con->sock)
				con->sock->sk->sk_user_data = NULL;
		}
	}
	up(&connections_lock);

	work_stop();

	down(&connections_lock);
	clean_writequeues();

	for (i = 0; i <= max_nodeid; i++) {
		con = __nodeid2con(i, 0);
		if (con) {
			close_connection(con, true);
			kmem_cache_free(con_cache, con);
		}
	}
	max_nodeid = 0;
	up(&connections_lock);
	kmem_cache_destroy(con_cache);
	idr_init(&connections_idr);
}

int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto out;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto out;

	/* Set some sysctl minima */
	if (sysctl_rmem_max < NEEDED_RMEM)
		sysctl_rmem_max = NEEDED_RMEM;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	error = work_start();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	con = nodeid2con(0, 0);
	if (con) {
		close_connection(con, false);
		kmem_cache_free(con_cache, con);
	}
	kmem_cache_destroy(con_cache);

out:
	return error;
}
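
/*
 * Lifecycle sketch (illustrative; the real callers are the DLM core and
 * recovery code):
 *
 *	error = dlm_lowcomms_start();	bring up the listen socket + workers
 *	...
 *	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_KERNEL, &p);
 *	...fill *p...
 *	dlm_lowcomms_commit_buffer(mh);
 *	...
 *	dlm_lowcomms_close(nodeid);	a node has left the cluster
 *	dlm_lowcomms_stop();		tear everything down
 */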