/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * to/from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
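 *
 * For example (an illustrative sketch, assuming the usual configfs mount
 * point and the 'protocol' attribute exported by the DLM config code),
 * SCTP could be selected on every node before the DLM is started:
 *
 *   mount -t configfs none /sys/kernel/config
 *   echo 1 > /sys/kernel/config/dlm/cluster/protocol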
 *
 */

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

struct cbuf {
        unsigned int base;
        unsigned int len;
        unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
        cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
        return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
        cb->base = cb->len = 0;
        cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
        cb->len -= n;
        cb->base += n;
        cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
        return cb->len == 0;
}
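/*
 * A worked example of the helpers above (illustrative): after
 * cbuf_init(cb, 4096) we have base = len = 0 and mask = 0xfff.  When
 * 100 bytes arrive, cbuf_add(cb, 100) sets len = 100, so cbuf_data()
 * returns (0 + 100) & 0xfff = 100, the offset where the next received
 * byte lands.  Once 60 of those bytes are consumed, cbuf_eat(cb, 60)
 * leaves base = 60, len = 40.  Offsets wrap modulo the buffer size via
 * the mask, which is why the size must be a power of two.
 */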
struct connection {
        struct socket *sock;    /* NULL if not connected */
        uint32_t nodeid;        /* So we know who we are in the list */
        struct mutex sock_mutex;
        unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
        struct list_head writequeue;  /* List of outgoing writequeue_entries */
        spinlock_t writequeue_lock;
        int (*rx_action) (struct connection *); /* What to do when active */
        void (*connect_action) (struct connection *); /* What to do to connect */
        struct page *rx_page;
        struct cbuf cb;
        int retries;
#define MAX_CONNECT_RETRIES 3
        int sctp_assoc;
        struct hlist_node list;
        struct connection *othercon;
        struct work_struct rwork; /* Receive workqueue */
        struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
        struct list_head list;
        struct page *page;
        int offset;
        int len;
        int end;
        int users;
        struct connection *con;
};

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
        return nodeid & (CONN_HASH_SIZE-1);
}

static struct connection *__find_con(int nodeid)
{
        int r;
        struct hlist_node *h;
        struct connection *con;

        r = nodeid_hash(nodeid);

        hlist_for_each_entry(con, h, &connection_hash[r], list) {
                if (con->nodeid == nodeid)
                        return con;
        }
        return NULL;
}

/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
        struct connection *con = NULL;
        int r;

        con = __find_con(nodeid);
        if (con || !alloc)
                return con;

        con = kmem_cache_zalloc(con_cache, alloc);
        if (!con)
                return NULL;

        r = nodeid_hash(nodeid);
        hlist_add_head(&con->list, &connection_hash[r]);

        con->nodeid = nodeid;
        mutex_init(&con->sock_mutex);
        INIT_LIST_HEAD(&con->writequeue);
        spin_lock_init(&con->writequeue_lock);
        INIT_WORK(&con->swork, process_send_sockets);
        INIT_WORK(&con->rwork, process_recv_sockets);

        /* Setup action pointers for child sockets */
        if (con->nodeid) {
                struct connection *zerocon = __find_con(0);

                con->connect_action = zerocon->connect_action;
                if (!con->rx_action)
                        con->rx_action = zerocon->rx_action;
        }

        return con;
}
/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
        int i;
        struct hlist_node *h, *n;
        struct connection *con;

        for (i = 0; i < CONN_HASH_SIZE; i++) {
                hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
                        conn_func(con);
                }
        }
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
        struct connection *con;

        mutex_lock(&connections_lock);
        con = __nodeid2con(nodeid, allocation);
        mutex_unlock(&connections_lock);

        return con;
}

/* This is a bit drastic, but only called when things go wrong */
static struct connection *assoc2con(int assoc_id)
{
        int i;
        struct hlist_node *h;
        struct connection *con;

        mutex_lock(&connections_lock);

        for (i = 0 ; i < CONN_HASH_SIZE; i++) {
                hlist_for_each_entry(con, h, &connection_hash[i], list) {
                        if (con && con->sctp_assoc == assoc_id) {
                                mutex_unlock(&connections_lock);
                                return con;
                        }
                }
        }
        mutex_unlock(&connections_lock);
        return NULL;
}

static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
{
        struct sockaddr_storage addr;
        int error;

        if (!dlm_local_count)
                return -1;

        error = dlm_nodeid_to_addr(nodeid, &addr);
        if (error)
                return error;

        if (dlm_local_addr[0]->ss_family == AF_INET) {
                struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
                struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
                ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
        } else {
                struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
                struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
                ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
        }

        return 0;
}

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
        struct connection *con = sock2con(sk);
        if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
        struct connection *con = sock2con(sk);

        if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
                queue_work(send_workqueue, &con->swork);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
        if (test_bit(CF_CLOSE, &con->flags))
                return;
        if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
                queue_work(send_workqueue, &con->swork);
}

static void lowcomms_state_change(struct sock *sk)
{
        if (sk->sk_state == TCP_ESTABLISHED)
                lowcomms_write_space(sk);
}

int dlm_lowcomms_connect_node(int nodeid)
{
        struct connection *con;

        if (nodeid == dlm_our_nodeid())
                return 0;

        con = nodeid2con(nodeid, GFP_NOFS);
        if (!con)
                return -ENOMEM;
        lowcomms_connect_sock(con);
        return 0;
}

/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
        con->sock = sock;

        /* Install a data_ready callback */
        con->sock->sk->sk_data_ready = lowcomms_data_ready;
        con->sock->sk->sk_write_space = lowcomms_write_space;
        con->sock->sk->sk_state_change = lowcomms_state_change;
        con->sock->sk->sk_user_data = con;
        con->sock->sk->sk_allocation = GFP_NOFS;
        return 0;
}

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
                          int *addr_len)
{
        saddr->ss_family = dlm_local_addr[0]->ss_family;
        if (saddr->ss_family == AF_INET) {
                struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
                in4_addr->sin_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in);
                memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
        } else {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
                in6_addr->sin6_port = cpu_to_be16(port);
                *addr_len = sizeof(struct sockaddr_in6);
        }
        memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
        mutex_lock(&con->sock_mutex);

        if (con->sock) {
                sock_release(con->sock);
                con->sock = NULL;
        }
        if (con->othercon && and_other) {
                /* Will only re-enter once. */
                close_connection(con->othercon, false);
        }
        if (con->rx_page) {
                __free_page(con->rx_page);
                con->rx_page = NULL;
        }

        con->retries = 0;
        mutex_unlock(&con->sock_mutex);
}

/* We only send shutdown messages to nodes that are not part of the cluster */
static void sctp_send_shutdown(sctp_assoc_t associd)
{
        static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
        struct msghdr outmessage;
        struct cmsghdr *cmsg;
        struct sctp_sndrcvinfo *sinfo;
        int ret;
        struct connection *con;

        con = nodeid2con(0,0);
        BUG_ON(con == NULL);

        outmessage.msg_name = NULL;
        outmessage.msg_namelen = 0;
        outmessage.msg_control = outcmsg;
        outmessage.msg_controllen = sizeof(outcmsg);
        outmessage.msg_flags = MSG_EOR;

        cmsg = CMSG_FIRSTHDR(&outmessage);
        cmsg->cmsg_level = IPPROTO_SCTP;
        cmsg->cmsg_type = SCTP_SNDRCV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
        outmessage.msg_controllen = cmsg->cmsg_len;
        sinfo = CMSG_DATA(cmsg);
        memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));

        sinfo->sinfo_flags |= MSG_EOF;
        sinfo->sinfo_assoc_id = associd;

        ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);

        if (ret != 0)
                log_print("send EOF to node failed: %d", ret);
}

static void sctp_init_failed_foreach(struct connection *con)
{
        con->sctp_assoc = 0;
        if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
                if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
                        queue_work(send_workqueue, &con->swork);
        }
}

/* INIT failed but we don't know which node...
   restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
        mutex_lock(&connections_lock);

        foreach_conn(sctp_init_failed_foreach);

        mutex_unlock(&connections_lock);
}

/* Something happened to an association */
static void process_sctp_notification(struct connection *con,
                                      struct msghdr *msg, char *buf)
{
        union sctp_notification *sn = (union sctp_notification *)buf;

        if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
                switch (sn->sn_assoc_change.sac_state) {

                case SCTP_COMM_UP:
                case SCTP_RESTART:
                {
                        /* Check that the new node is in the lockspace */
                        struct sctp_prim prim;
                        int nodeid;
                        int prim_len, ret;
                        int addr_len;
                        struct connection *new_con;
                        struct file *file;
                        sctp_peeloff_arg_t parg;
                        int parglen = sizeof(parg);

                        /*
                         * We get this before any data for an association.
                         * We verify that the node is in the cluster and
                         * then peel off a socket for it.
                         */
                        if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
                                log_print("COMM_UP for invalid assoc ID %d",
                                          (int)sn->sn_assoc_change.sac_assoc_id);
                                sctp_init_failed();
                                return;
                        }
                        memset(&prim, 0, sizeof(struct sctp_prim));
                        prim_len = sizeof(struct sctp_prim);
                        prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;

                        ret = kernel_getsockopt(con->sock,
                                                IPPROTO_SCTP,
                                                SCTP_PRIMARY_ADDR,
                                                (char*)&prim,
                                                &prim_len);
                        if (ret < 0) {
                                log_print("getsockopt/sctp_primary_addr on "
                                          "new assoc %d failed : %d",
                                          (int)sn->sn_assoc_change.sac_assoc_id,
                                          ret);

                                /* Retry INIT later */
                                new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
                                if (new_con)
                                        clear_bit(CF_CONNECT_PENDING, &con->flags);
                                return;
                        }
                        make_sockaddr(&prim.ssp_addr, 0, &addr_len);
                        if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
                                int i;
                                unsigned char *b = (unsigned char *)&prim.ssp_addr;
                                log_print("reject connect from unknown addr");
                                for (i = 0; i < sizeof(struct sockaddr_storage); i++)
                                        printk("%02x ", b[i]);
                                printk("\n");
                                sctp_send_shutdown(prim.ssp_assoc_id);
                                return;
                        }

                        new_con = nodeid2con(nodeid, GFP_NOFS);
                        if (!new_con)
                                return;

                        /* Peel off a new sock */
                        parg.associd = sn->sn_assoc_change.sac_assoc_id;
                        ret = kernel_getsockopt(con->sock, IPPROTO_SCTP,
                                                SCTP_SOCKOPT_PEELOFF,
                                                (void *)&parg, &parglen);
                        if (ret) {
                                log_print("Can't peel off a socket for "
                                          "connection %d to node %d: err=%d",
                                          parg.associd, nodeid, ret);
                                /* No usable socket; give up on this one */
                                return;
                        }
                        file = fget(parg.sd);
                        new_con->sock = SOCKET_I(file->f_dentry->d_inode);
                        add_sock(new_con->sock, new_con);
                        fput(file);
                        put_unused_fd(parg.sd);

                        log_print("got new/restarted association %d nodeid %d",
                                  (int)sn->sn_assoc_change.sac_assoc_id, nodeid);

                        /* Send any pending writes */
                        clear_bit(CF_CONNECT_PENDING, &new_con->flags);
                        clear_bit(CF_INIT_PENDING, &con->flags);
                        if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
                                queue_work(send_workqueue, &new_con->swork);
                        }
                        if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
                                queue_work(recv_workqueue, &new_con->rwork);
                }
                break;

                case SCTP_COMM_LOST:
                case SCTP_SHUTDOWN_COMP:
                {
                        con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
                        if (con) {
                                con->sctp_assoc = 0;
                        }
                }
                break;

                /* We don't know which INIT failed, so clear the PENDING flags
                 * on them all.  If the assoc_id is zero then it will try
                 * again */

                case SCTP_CANT_STR_ASSOC:
                {
                        log_print("Can't start SCTP association - retrying");
                        sctp_init_failed();
                }
                break;

                default:
                        log_print("unexpected SCTP assoc change id=%d state=%d",
                                  (int)sn->sn_assoc_change.sac_assoc_id,
                                  sn->sn_assoc_change.sac_state);
                }
        }
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
        int ret = 0;
        struct msghdr msg = {};
        struct kvec iov[2];
        unsigned len;
        int r;
        int call_again_soon = 0;
        int nvec;
        char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];

        mutex_lock(&con->sock_mutex);

        if (con->sock == NULL) {
                ret = -EAGAIN;
                goto out_close;
        }

        if (con->rx_page == NULL) {
                /*
                 * This doesn't need to be atomic, but I think it should
                 * improve performance if it is.
                 */
                con->rx_page = alloc_page(GFP_ATOMIC);
                if (con->rx_page == NULL)
                        goto out_resched;
                cbuf_init(&con->cb, PAGE_CACHE_SIZE);
        }

        /* Only SCTP needs these really */
        memset(&incmsg, 0, sizeof(incmsg));
        msg.msg_control = incmsg;
        msg.msg_controllen = sizeof(incmsg);

        /*
         * iov[0] is the bit of the circular buffer between the current end
         * point (cb.base + cb.len) and the end of the buffer.
         */
        iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
        iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
        iov[1].iov_len = 0;
        nvec = 1;

        /*
         * iov[1] is the bit of the circular buffer between the start of the
         * buffer and the start of the currently used section (cb.base)
         */
        if (cbuf_data(&con->cb) >= con->cb.base) {
                iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
                iov[1].iov_len = con->cb.base;
                iov[1].iov_base = page_address(con->rx_page);
                nvec = 2;
        }
        len = iov[0].iov_len + iov[1].iov_len;

        r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
                                 MSG_DONTWAIT | MSG_NOSIGNAL);
        if (ret <= 0)
                goto out_close;

        /* Process SCTP notifications */
        if (msg.msg_flags & MSG_NOTIFICATION) {
                msg.msg_control = incmsg;
                msg.msg_controllen = sizeof(incmsg);

                process_sctp_notification(con, &msg,
                                page_address(con->rx_page) + con->cb.base);
                mutex_unlock(&con->sock_mutex);
                return 0;
        }
        BUG_ON(con->nodeid == 0);

        if (ret == len)
                call_again_soon = 1;
        cbuf_add(&con->cb, ret);
        ret = dlm_process_incoming_buffer(con->nodeid,
                                          page_address(con->rx_page),
                                          con->cb.base, con->cb.len,
                                          PAGE_CACHE_SIZE);
        if (ret == -EBADMSG) {
                log_print("lowcomms: addr=%p, base=%u, len=%u, "
                          "iov_len=%u, iov_base[0]=%p, read=%d",
                          page_address(con->rx_page), con->cb.base, con->cb.len,
                          len, iov[0].iov_base, r);
        }
        if (ret < 0)
                goto out_close;
        cbuf_eat(&con->cb, ret);

        if (cbuf_empty(&con->cb) && !call_again_soon) {
                __free_page(con->rx_page);
                con->rx_page = NULL;
        }

        if (call_again_soon)
                goto out_resched;
        mutex_unlock(&con->sock_mutex);
        return 0;

out_resched:
        if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
                queue_work(recv_workqueue, &con->rwork);
        mutex_unlock(&con->sock_mutex);
        return -EAGAIN;

out_close:
        mutex_unlock(&con->sock_mutex);
        if (ret != -EAGAIN) {
                close_connection(con, false);
                /* Reconnect when there is something to send */
        }
        /* Don't return success if we really got EOF */
        if (ret == 0)
                ret = -EAGAIN;

        return ret;
}

/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
        int result;
        struct sockaddr_storage peeraddr;
        struct socket *newsock;
        int len;
        int nodeid;
        struct connection *newcon;
        struct connection *addcon;

        memset(&peeraddr, 0, sizeof(peeraddr));
        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &newsock);
        if (result < 0)
                return -ENOMEM;

        mutex_lock_nested(&con->sock_mutex, 0);

        result = -ENOTCONN;
        if (con->sock == NULL)
                goto accept_err;

        newsock->type = con->sock->type;
        newsock->ops = con->sock->ops;

        result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
        if (result < 0)
                goto accept_err;

        /* Get the connected socket's peer */
        memset(&peeraddr, 0, sizeof(peeraddr));
        if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
                                  &len, 2)) {
                result = -ECONNABORTED;
                goto accept_err;
        }

        /* Get the new node's NODEID */
        make_sockaddr(&peeraddr, 0, &len);
        if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
                log_print("connect from non cluster node");
                sock_release(newsock);
                mutex_unlock(&con->sock_mutex);
                return -1;
        }

        log_print("got connection from %d", nodeid);

        /*  Check to see if we already have a connection to this node. This
         *  could happen if the two nodes initiate a connection at roughly
         *  the same time and the connections cross on the wire.
         *  In this case we store the incoming one in "othercon"
         */
        newcon = nodeid2con(nodeid, GFP_NOFS);
        if (!newcon) {
                result = -ENOMEM;
                goto accept_err;
        }
        mutex_lock_nested(&newcon->sock_mutex, 1);
        if (newcon->sock) {
                struct connection *othercon = newcon->othercon;

                if (!othercon) {
                        othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
                        if (!othercon) {
                                log_print("failed to allocate incoming socket");
                                mutex_unlock(&newcon->sock_mutex);
                                result = -ENOMEM;
                                goto accept_err;
                        }
                        othercon->nodeid = nodeid;
                        othercon->rx_action = receive_from_sock;
                        mutex_init(&othercon->sock_mutex);
                        INIT_WORK(&othercon->swork, process_send_sockets);
                        INIT_WORK(&othercon->rwork, process_recv_sockets);
                        set_bit(CF_IS_OTHERCON, &othercon->flags);
                }
                if (!othercon->sock) {
                        newcon->othercon = othercon;
                        othercon->sock = newsock;
                        newsock->sk->sk_user_data = othercon;
                        add_sock(newsock, othercon);
                        addcon = othercon;
                }
                else {
                        printk("Extra connection from node %d attempted\n", nodeid);
                        result = -EAGAIN;
                        mutex_unlock(&newcon->sock_mutex);
                        goto accept_err;
                }
        }
        else {
                newsock->sk->sk_user_data = newcon;
                newcon->rx_action = receive_from_sock;
                add_sock(newsock, newcon);
                addcon = newcon;
        }

        mutex_unlock(&newcon->sock_mutex);

        /*
         * Add it to the active queue in case we got data
         * between processing the accept and adding the socket
         * to the read_sockets list
         */
        if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
                queue_work(recv_workqueue, &addcon->rwork);
        mutex_unlock(&con->sock_mutex);

        return 0;

accept_err:
        mutex_unlock(&con->sock_mutex);
        sock_release(newsock);

        if (result != -EAGAIN)
                log_print("error accepting connection from node: %d", result);
        return result;
}
static void free_entry(struct writequeue_entry *e)
{
        __free_page(e->page);
        kfree(e);
}

/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_init_assoc(struct connection *con)
{
        struct sockaddr_storage rem_addr;
        char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
        struct msghdr outmessage;
        struct cmsghdr *cmsg;
        struct sctp_sndrcvinfo *sinfo;
        struct connection *base_con;
        struct writequeue_entry *e;
        int len, offset;
        int ret;
        int addrlen;
        struct kvec iov[1];

        if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
                return;

        if (con->retries++ > MAX_CONNECT_RETRIES)
                return;

        log_print("Initiating association with node %d", con->nodeid);

        if (nodeid_to_addr(con->nodeid, (struct sockaddr *)&rem_addr)) {
                log_print("no address for nodeid %d", con->nodeid);
                return;
        }
        base_con = nodeid2con(0, 0);
        BUG_ON(base_con == NULL);

        make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);

        outmessage.msg_name = &rem_addr;
        outmessage.msg_namelen = addrlen;
        outmessage.msg_control = outcmsg;
        outmessage.msg_controllen = sizeof(outcmsg);
        outmessage.msg_flags = MSG_EOR;

        spin_lock(&con->writequeue_lock);
        e = list_entry(con->writequeue.next, struct writequeue_entry,
                       list);

        BUG_ON((struct list_head *) e == &con->writequeue);

        len = e->len;
        offset = e->offset;
        spin_unlock(&con->writequeue_lock);

        /* Send the first block off the write queue */
        iov[0].iov_base = page_address(e->page)+offset;
        iov[0].iov_len = len;

        cmsg = CMSG_FIRSTHDR(&outmessage);
        cmsg->cmsg_level = IPPROTO_SCTP;
        cmsg->cmsg_type = SCTP_SNDRCV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
        sinfo = CMSG_DATA(cmsg);
        memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
        sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid());
        outmessage.msg_controllen = cmsg->cmsg_len;

        ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
        if (ret < 0) {
                log_print("Send first packet to node %d failed: %d",
                          con->nodeid, ret);

                /* Try again later */
                clear_bit(CF_CONNECT_PENDING, &con->flags);
                clear_bit(CF_INIT_PENDING, &con->flags);
        }
        else {
                spin_lock(&con->writequeue_lock);
                e->offset += ret;
                e->len -= ret;

                if (e->len == 0 && e->users == 0) {
                        list_del(&e->list);
                        free_entry(e);
                }
                spin_unlock(&con->writequeue_lock);
        }
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
        int result = -EHOSTUNREACH;
        struct sockaddr_storage saddr, src_addr;
        int addr_len;
        struct socket *sock = NULL;

        if (con->nodeid == 0) {
                log_print("attempt to connect sock 0 foiled");
                return;
        }

        mutex_lock(&con->sock_mutex);
        if (con->retries++ > MAX_CONNECT_RETRIES)
                goto out;

        /* Some odd races can cause double-connects, ignore them */
        if (con->sock) {
                result = 0;
                goto out;
        }

        /* Create a socket to communicate with */
        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &sock);
        if (result < 0)
                goto out_err;

        memset(&saddr, 0, sizeof(saddr));
        if (dlm_nodeid_to_addr(con->nodeid, &saddr))
                goto out_err;

        sock->sk->sk_user_data = con;
        con->rx_action = receive_from_sock;
        con->connect_action = tcp_connect_to_sock;
        add_sock(sock, con);

        /* Bind to our cluster-known address when connecting to avoid
           routing problems */
        memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
        make_sockaddr(&src_addr, 0, &addr_len);
        result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
                                 addr_len);
        if (result < 0) {
                log_print("could not bind for connect: %d", result);
                /* This *may* not indicate a critical error */
        }

        make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

        log_print("connecting to %d", con->nodeid);
        result =
                sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
                                   O_NONBLOCK);
        if (result == -EINPROGRESS)
                result = 0;
        if (result == 0)
                goto out;

out_err:
        if (con->sock) {
                sock_release(con->sock);
                con->sock = NULL;
        } else if (sock) {
                sock_release(sock);
        }
        /*
         * Some errors are fatal and this list might need adjusting. For other
         * errors we try again until the max number of retries is reached.
         */
        if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
            result != -ENETDOWN && result != -EINVAL &&
            result != -EPROTONOSUPPORT) {
                lowcomms_connect_sock(con);
                result = 0;
        }
out:
        mutex_unlock(&con->sock_mutex);
        return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
                                             struct sockaddr_storage *saddr)
{
        struct socket *sock = NULL;
        int result = 0;
        int one = 1;
        int addr_len;

        if (dlm_local_addr[0]->ss_family == AF_INET)
                addr_len = sizeof(struct sockaddr_in);
        else
                addr_len = sizeof(struct sockaddr_in6);

        /* Create a socket to communicate with */
        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
                                  IPPROTO_TCP, &sock);
        if (result < 0) {
                log_print("Can't create listening comms socket");
                goto create_out;
        }

        result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                                   (char *)&one, sizeof(one));

        if (result < 0) {
                log_print("Failed to set SO_REUSEADDR on socket: %d", result);
        }
        sock->sk->sk_user_data = con;
        con->rx_action = tcp_accept_from_sock;
        con->connect_action = tcp_connect_to_sock;
        con->sock = sock;

        /* Bind to our port */
        make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
        result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
        if (result < 0) {
                log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
                sock_release(sock);
                sock = NULL;
                con->sock = NULL;
                goto create_out;
        }
        result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
                                   (char *)&one, sizeof(one));
        if (result < 0) {
                log_print("Set keepalive failed: %d", result);
        }

        result = sock->ops->listen(sock, 5);
        if (result < 0) {
                log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
                sock_release(sock);
                sock = NULL;
                goto create_out;
        }

create_out:
        return sock;
}

/* Get local addresses */
static void init_local(void)
{
        struct sockaddr_storage sas, *addr;
        int i;

        dlm_local_count = 0;
        for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
                if (dlm_our_addr(&sas, i))
                        break;

                addr = kmalloc(sizeof(*addr), GFP_KERNEL);
                if (!addr)
                        break;
                memcpy(addr, &sas, sizeof(*addr));
                dlm_local_addr[dlm_local_count++] = addr;
        }
}
/* Bind to an IP address. SCTP allows multiple addresses so it can do
   multi-homing */
static int add_sctp_bind_addr(struct connection *sctp_con,
                              struct sockaddr_storage *addr,
                              int addr_len, int num)
{
        int result = 0;

        if (num == 1)
                result = kernel_bind(sctp_con->sock,
                                     (struct sockaddr *) addr,
                                     addr_len);
        else
                result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
                                           SCTP_SOCKOPT_BINDX_ADD,
                                           (char *)addr, addr_len);

        if (result < 0)
                log_print("Can't bind to port %d addr number %d",
                          dlm_config.ci_tcp_port, num);

        return result;
}

/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
        struct socket *sock = NULL;
        struct sockaddr_storage localaddr;
        struct sctp_event_subscribe subscribe;
        int result = -EINVAL, num = 1, i, addr_len;
        struct connection *con = nodeid2con(0, GFP_KERNEL);
        int bufsize = NEEDED_RMEM;

        if (!con)
                return -ENOMEM;

        log_print("Using SCTP for communications");

        result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
                                  IPPROTO_SCTP, &sock);
        if (result < 0) {
                log_print("Can't create comms socket, check SCTP is loaded");
                goto out;
        }

        /* Listen for events */
        memset(&subscribe, 0, sizeof(subscribe));
        subscribe.sctp_data_io_event = 1;
        subscribe.sctp_association_event = 1;
        subscribe.sctp_send_failure_event = 1;
        subscribe.sctp_shutdown_event = 1;
        subscribe.sctp_partial_delivery_event = 1;

        result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
                                   (char *)&bufsize, sizeof(bufsize));
        if (result)
                log_print("Error increasing buffer space on socket %d", result);

        result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
                                   (char *)&subscribe, sizeof(subscribe));
        if (result < 0) {
                log_print("Failed to set SCTP_EVENTS on socket: result=%d",
                          result);
                goto create_delsock;
        }

        /* Init con struct */
        sock->sk->sk_user_data = con;
        con->sock = sock;
        con->sock->sk->sk_data_ready = lowcomms_data_ready;
        con->rx_action = receive_from_sock;
        con->connect_action = sctp_init_assoc;

        /* Bind to all interfaces. */
        for (i = 0; i < dlm_local_count; i++) {
                memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
                make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);

                result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
                if (result)
                        goto create_delsock;
                ++num;
        }

        result = sock->ops->listen(sock, 5);
        if (result < 0) {
                log_print("Can't set socket listening");
                goto create_delsock;
        }

        return 0;

create_delsock:
        sock_release(sock);
        con->sock = NULL;
out:
        return result;
}

static int tcp_listen_for_all(void)
{
        struct socket *sock = NULL;
        struct connection *con = nodeid2con(0, GFP_KERNEL);
        int result = -EINVAL;

        if (!con)
                return -ENOMEM;

        /* We don't support multi-homed hosts */
        if (dlm_local_addr[1] != NULL) {
                log_print("TCP protocol can't handle multi-homed hosts, "
                          "try SCTP");
                return -EINVAL;
        }

        log_print("Using TCP for communications");

        sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
        if (sock) {
                add_sock(sock, con);
                result = 0;
        }
        else {
                result = -EADDRINUSE;
        }

        return result;
}


static struct writequeue_entry *new_writequeue_entry(struct connection *con,
                                                     gfp_t allocation)
{
        struct writequeue_entry *entry;

        entry = kmalloc(sizeof(struct writequeue_entry), allocation);
        if (!entry)
                return NULL;

        entry->page = alloc_page(allocation);
        if (!entry->page) {
                kfree(entry);
                return NULL;
        }

        entry->offset = 0;
        entry->len = 0;
        entry->end = 0;
        entry->users = 0;
        entry->con = con;

        return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
        struct connection *con;
        struct writequeue_entry *e;
        int offset = 0;
        int users = 0;

        con = nodeid2con(nodeid, allocation);
        if (!con)
                return NULL;

        spin_lock(&con->writequeue_lock);
        e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
        if ((&e->list == &con->writequeue) ||
            (PAGE_CACHE_SIZE - e->end < len)) {
                e = NULL;
        } else {
                offset = e->end;
                e->end += len;
                users = e->users++;
        }
        spin_unlock(&con->writequeue_lock);

        if (e) {
        got_one:
                *ppc = page_address(e->page) + offset;
                return e;
        }

        e = new_writequeue_entry(con, allocation);
        if (e) {
                spin_lock(&con->writequeue_lock);
                offset = e->end;
                e->end += len;
                users = e->users++;
                list_add_tail(&e->list, &con->writequeue);
                spin_unlock(&con->writequeue_lock);
                goto got_one;
        }
        return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
        struct writequeue_entry *e = (struct writequeue_entry *)mh;
        struct connection *con = e->con;
        int users;

        spin_lock(&con->writequeue_lock);
        users = --e->users;
        if (users)
                goto out;
        e->len = e->end - e->offset;
        spin_unlock(&con->writequeue_lock);

        if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
                queue_work(send_workqueue, &con->swork);
        }
        return;

out:
        spin_unlock(&con->writequeue_lock);
        return;
}
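/*
 * Illustrative sketch (not called from this file) of how the mid-level
 * layer is expected to use the two functions above to queue a message
 * for a node; 'nodeid', 'len' and 'msg' stand in for the caller's data:
 *
 *      char *p;
 *      void *mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
 *      if (!mh)
 *              return -ENOMEM;
 *      memcpy(p, msg, len);            <-- fill the reserved space
 *      dlm_lowcomms_commit_buffer(mh); <-- publish it to the send worker
 *
 * get_buffer reserves 'len' bytes in the tail page of the write queue
 * (allocating a new page if the request won't fit), and commit_buffer
 * makes the reservation visible to send_to_sock() below and kicks the
 * send workqueue.
 */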
/* Send a message */
static void send_to_sock(struct connection *con)
{
        int ret = 0;
        const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        struct writequeue_entry *e;
        int len, offset;

        mutex_lock(&con->sock_mutex);
        if (con->sock == NULL)
                goto out_connect;

        spin_lock(&con->writequeue_lock);
        for (;;) {
                e = list_entry(con->writequeue.next, struct writequeue_entry,
                               list);
                if ((struct list_head *) e == &con->writequeue)
                        break;

                len = e->len;
                offset = e->offset;
                BUG_ON(len == 0 && e->users == 0);
                spin_unlock(&con->writequeue_lock);

                ret = 0;
                if (len) {
                        ret = kernel_sendpage(con->sock, e->page, offset, len,
                                              msg_flags);
                        if (ret == -EAGAIN || ret == 0) {
                                cond_resched();
                                goto out;
                        }
                        if (ret <= 0)
                                goto send_error;
                }
                /* Don't starve people filling buffers */
                cond_resched();

                spin_lock(&con->writequeue_lock);
                e->offset += ret;
                e->len -= ret;

                if (e->len == 0 && e->users == 0) {
                        list_del(&e->list);
                        free_entry(e);
                        continue;
                }
        }
        spin_unlock(&con->writequeue_lock);
out:
        mutex_unlock(&con->sock_mutex);
        return;

send_error:
        mutex_unlock(&con->sock_mutex);
        close_connection(con, false);
        lowcomms_connect_sock(con);
        return;

out_connect:
        mutex_unlock(&con->sock_mutex);
        if (!test_bit(CF_INIT_PENDING, &con->flags))
                lowcomms_connect_sock(con);
        return;
}

static void clean_one_writequeue(struct connection *con)
{
        struct writequeue_entry *e, *safe;

        spin_lock(&con->writequeue_lock);
        list_for_each_entry_safe(e, safe, &con->writequeue, list) {
                list_del(&e->list);
                free_entry(e);
        }
        spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
        struct connection *con;

        log_print("closing connection to node %d", nodeid);
        con = nodeid2con(nodeid, 0);
        if (con) {
                clear_bit(CF_CONNECT_PENDING, &con->flags);
                clear_bit(CF_WRITE_PENDING, &con->flags);
                set_bit(CF_CLOSE, &con->flags);
                if (cancel_work_sync(&con->swork))
                        log_print("canceled swork for node %d", nodeid);
                if (cancel_work_sync(&con->rwork))
                        log_print("canceled rwork for node %d", nodeid);
                clean_one_writequeue(con);
                close_connection(con, true);
        }
        return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, rwork);
        int err;

        clear_bit(CF_READ_PENDING, &con->flags);
        do {
                err = con->rx_action(con);
        } while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
        struct connection *con = container_of(work, struct connection, swork);

        if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
                con->connect_action(con);
                set_bit(CF_WRITE_PENDING, &con->flags);
        }
        if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
                send_to_sock(con);
}


/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
        foreach_conn(clean_one_writequeue);
}

static void work_stop(void)
{
        destroy_workqueue(recv_workqueue);
        destroy_workqueue(send_workqueue);
}
static int work_start(void)
{
        /* create_workqueue() returns NULL on failure, so check for that
           rather than using IS_ERR() */
        recv_workqueue = create_workqueue("dlm_recv");
        if (!recv_workqueue) {
                log_print("can't start dlm_recv");
                return -ENOMEM;
        }

        send_workqueue = create_singlethread_workqueue("dlm_send");
        if (!send_workqueue) {
                log_print("can't start dlm_send");
                destroy_workqueue(recv_workqueue);
                return -ENOMEM;
        }

        return 0;
}

static void stop_conn(struct connection *con)
{
        con->flags |= 0x0F;
        if (con->sock && con->sock->sk)
                con->sock->sk->sk_user_data = NULL;
}

static void free_conn(struct connection *con)
{
        close_connection(con, true);
        if (con->othercon)
                kmem_cache_free(con_cache, con->othercon);
        hlist_del(&con->list);
        kmem_cache_free(con_cache, con);
}

void dlm_lowcomms_stop(void)
{
        /* Set all the flags to prevent any
           socket activity.
        */
        mutex_lock(&connections_lock);
        foreach_conn(stop_conn);
        mutex_unlock(&connections_lock);

        work_stop();

        mutex_lock(&connections_lock);
        clean_writequeues();

        foreach_conn(free_conn);

        mutex_unlock(&connections_lock);
        kmem_cache_destroy(con_cache);
}

int dlm_lowcomms_start(void)
{
        int error = -EINVAL;
        struct connection *con;
        int i;

        for (i = 0; i < CONN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&connection_hash[i]);

        init_local();
        if (!dlm_local_count) {
                error = -ENOTCONN;
                log_print("no local IP address has been set");
                goto out;
        }

        error = -ENOMEM;
        con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
                                      __alignof__(struct connection), 0,
                                      NULL);
        if (!con_cache)
                goto out;

        /* Start listening */
        if (dlm_config.ci_protocol == 0)
                error = tcp_listen_for_all();
        else
                error = sctp_listen_for_all();
        if (error)
                goto fail_unlisten;

        error = work_start();
        if (error)
                goto fail_unlisten;

        return 0;

fail_unlisten:
        con = nodeid2con(0,0);
        if (con) {
                close_connection(con, false);
                kmem_cache_free(con_cache, con);
        }
        kmem_cache_destroy(con_cache);

out:
        return error;
}