// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */
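/*
 * Illustrative only (editor's note, not part of this file): with the dlm
 * configfs interface mounted, cluster tooling typically selects the
 * transport along the lines of
 *
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol	(SCTP)
 *
 * The exact path and mechanism belong to the userland cluster stack;
 * this file only reads the result via dlm_config.ci_protocol.
 */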
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size - 1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len  -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
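/*
 * A worked example of the circular buffer above (editor's note): with
 * PAGE_SIZE 4096, cbuf_init() sets mask = 4095. If base = 4000 and
 * len = 200, the data occupies bytes 4000..4095 plus 0..103, and
 * cbuf_data() returns (4000 + 200) & 4095 = 104, the next free offset.
 * receive_from_sock() relies on this wraparound to build its two-part
 * iovec, so 'size' must be a power of two.
 *
 * Note also that the CF_* flags in struct connection below are bit
 * numbers for set_bit()/test_bit()/clear_bit(), not bit masks; bits 0
 * and 3 are simply unused.
 */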
struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;
	struct connection *othercon;
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_allow_conn;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}

static struct connection *__find_con(int nodeid)
{
	int r;
	struct connection *con;

	r = nodeid_hash(nodeid);

	hlist_for_each_entry(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}
	return NULL;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = nodeid_hash(nodeid);
	hlist_add_head(&con->list, &connection_hash[r]);

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	return con;
}

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i;
	struct hlist_node *n;
	struct connection *con;

	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
			conn_func(con);
	}
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	mutex_lock(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return con;
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}
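/*
 * Usage sketch for dlm_lowcomms_addr() (editor's illustration; the real
 * caller lives in the dlm configfs code, not in this file): each address
 * a node owns is registered once during setup, e.g.
 *
 *	struct sockaddr_storage ss;
 *	int error;
 *
 *	// fill ss with one of the node's IPv4/IPv6 addresses, port 0
 *	error = dlm_lowcomms_addr(nodeid, &ss, sizeof(ss));
 *
 * Up to DLM_MAX_ADDR_COUNT addresses may be stored per node. SCTP can
 * exploit the extras for multi-homing: nodeid_to_addr() rotates
 * curr_addr_index when called with try_new_addr, so each reconnect
 * attempt tries the next registered address.
 */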
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void lowcomms_state_change(struct sock *sk)
{
	/* SCTP layer is not calling sk_data_ready when the connection
	 * is done, so we catch the signal through here. Also, it
	 * doesn't switch socket state when entering shutdown, so we
	 * skip the write in that case.
	 */
	if (sk->sk_shutdown) {
		if (sk->sk_shutdown == RCV_SHUTDOWN)
			lowcomms_data_ready(sk);
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		lowcomms_write_space(sk);
	}
}
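/*
 * lowcomms_data_ready(), lowcomms_write_space() and
 * lowcomms_state_change() above (and lowcomms_error_report() below) are
 * installed as socket callbacks by add_sock() and so run in softirq
 * context; hence the *_bh locking on sk_callback_lock and the strict
 * "mark a flag, queue work" pattern. All real socket I/O happens later
 * in process context on recv_workqueue/send_workqueue.
 */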
int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con;
	struct sockaddr_storage saddr;
	void (*orig_report)(struct sock *) = NULL;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con == NULL)
		goto out;

	orig_report = listen_sock.sk_error_report;
	if (con->sock == NULL ||
	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, dlm_config.ci_tcp_port,
				   sk->sk_err, sk->sk_err_soft);
	} else if (saddr.ss_family == AF_INET) {
		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin4->sin_addr.s_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %u.%u.%u.%u, "
				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
				   sin6->sin6_addr.s6_addr32[1],
				   sin6->sin6_addr.s6_addr32[2],
				   sin6->sin6_addr.s6_addr32[3],
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (orig_report)
		orig_report(sk);
}
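/*
 * Error reports are chained to the listening socket's saved
 * sk_error_report only after sk_callback_lock is dropped, to avoid
 * re-entering the callback machinery under the lock. The save/restore
 * helpers below capture the kernel's default callbacks once, from the
 * listening socket, and put them back whenever a per-connection socket
 * is torn down in close_connection().
 */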
/* Note: sk_callback_lock must be locked before calling this function. */
static void save_listen_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	listen_sock.sk_data_ready = sk->sk_data_ready;
	listen_sock.sk_state_change = sk->sk_state_change;
	listen_sock.sk_write_space = sk->sk_write_space;
	listen_sock.sk_error_report = sk->sk_error_report;
}

static void restore_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	con->sock = sock;

	sk->sk_user_data = con;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_error_report = lowcomms_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}
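/*
 * Besides filling in the port, make_sockaddr() zeroes everything past
 * the family-specific part of the sockaddr_storage. The accept paths
 * below call it with port == 0 on a freshly fetched peer address before
 * handing it to addr_to_nodeid(); since addr_compare() also checks the
 * port field, this normalisation is what lets a peer's ephemeral-port
 * address match the addresses registered via dlm_lowcomms_addr(), which
 * must therefore carry a zero port.
 */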
/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other,
			     bool tx, bool rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);

	if (tx && !closing && cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (rx && !closing && cancel_work_sync(&con->rwork)) {
		log_print("canceled rwork for node %d", con->nodeid);
		clear_bit(CF_READ_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	if (con->sock) {
		restore_callbacks(con->sock);
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, true, true);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}
	if (con->nodeid == 0) {
		ret = -EINVAL;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_SIZE);
	}

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;
	iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);

	r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;
	else if (ret == len)
		call_again_soon = 1;

	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
			  page_address(con->rx_page), con->cb.base,
			  con->cb.len, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, true, true, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}
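/*
 * Note on the tail of receive_from_sock(): sock_recvmsg() returning 0
 * means the peer performed an orderly shutdown. That is deliberately
 * not reported as success; it is mapped to -EAGAIN after the connection
 * has been closed, so process_recv_sockets() stops its rx loop and a
 * reconnect happens only when there is something to send.
 */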
/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	if (!con->sock) {
		mutex_unlock(&con->sock_mutex);
		return -ENOTCONN;
	}

	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
	if (len < 0) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b = (unsigned char *)&peeraddr;

		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_LIST_HEAD(&othercon->writequeue);
			spin_lock_init(&othercon->writequeue_lock);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}
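/*
 * sctp_accept_from_sock() below mirrors the TCP accept path almost line
 * for line; the main difference is how the peer is identified: the new
 * association's primary address is read with
 * getsockopt(SCTP_PRIMARY_ADDR) instead of getname() on the accepted
 * socket.
 */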
static int sctp_accept_from_sock(struct connection *con)
{
	/* Check that the new node is in the lockspace */
	struct sctp_prim prim;
	int nodeid;
	int prim_len, ret;
	int addr_len;
	struct connection *newcon;
	struct connection *addcon;
	struct socket *newsock;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (ret < 0)
		goto accept_err;

	memset(&prim, 0, sizeof(struct sctp_prim));
	prim_len = sizeof(struct sctp_prim);

	ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
				(char *)&prim, &prim_len);
	if (ret < 0) {
		log_print("getsockopt/sctp_primary_addr failed: %d", ret);
		goto accept_err;
	}

	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
	ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
	if (ret) {
		unsigned char *b = (unsigned char *)&prim.ssp_addr;

		log_print("reject connect from unknown addr");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		goto accept_err;
	}

	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		ret = -ENOMEM;
		goto accept_err;
	}

	mutex_lock_nested(&newcon->sock_mutex, 1);

	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				ret = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_LIST_HEAD(&othercon->writequeue);
			spin_lock_init(&othercon->writequeue_lock);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n", nodeid);
			ret = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	log_print("connected to %d", nodeid);

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);
	if (ret != -EAGAIN)
		log_print("error accepting connection from node: %d", ret);

	return ret;
}
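/*
 * Write queue entry lifecycle: entries are created by
 * dlm_lowcomms_get_buffer(), filled in place by the caller, published
 * with dlm_lowcomms_commit_buffer(), drained by send_to_sock(), and
 * finally released by free_entry() below once both len and users reach
 * zero.
 */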
static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct connection *con, uint16_t port)
{
	struct sockaddr_storage localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(con->sock,
					     (struct sockaddr *)&localaddr,
					     addr_len);
		else
			result = kernel_setsockopt(con->sock, SOL_SCTP,
						   SCTP_SOCKOPT_BINDX_ADD,
						   (char *)&localaddr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}
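/*
 * Only the first local address goes through kernel_bind(); the rest are
 * attached with the SCTP_SOCKOPT_BINDX_ADD socket option, the
 * kernel-side analogue of sctp_bindx(3). This is what gives the
 * association its multi-homed local endpoint.
 */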
/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   socket for this association, so we create one, bind it to all our
   local addresses, and connect to the primary IP address of the remote
   node.
 */
static void sctp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage daddr;
	int one = 1;
	int result;
	int addr_len;
	struct socket *sock;
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);

	/* Some odd races can cause double-connects, ignore them */
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	if (con->sock) {
		log_print("node %d already connected.", con->nodeid);
		goto out;
	}

	memset(&daddr, 0, sizeof(daddr));
	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0)
		goto socket_err;

	con->rx_action = receive_from_sock;
	con->connect_action = sctp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, 0))
		goto bind_err;

	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
			  sizeof(one));

	/*
	 * Make sock->ops->connect() function return in specified time,
	 * since O_NONBLOCK argument in connect() function does not work here,
	 * then, we should restore the default value of this attribute.
	 */
	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
			  sizeof(tv));
	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
				    0);
	memset(&tv, 0, sizeof(tv));
	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
			  sizeof(tv));

	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

bind_err:
	con->sock = NULL;
	sock_release(sock);

socket_err:
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}

out:
	mutex_unlock(&con->sock_mutex);
}
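/*
 * Retry behaviour, both above and in tcp_connect_to_sock() below: errors
 * not on the explicit "fatal" errno list end with a one second msleep()
 * and the connection work being requeued, so another attempt runs
 * shortly; con->retries, checked on entry, caps this at
 * MAX_CONNECT_RETRIES. The fatal list is acknowledged in the code as
 * possibly needing adjustment.
 */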
/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock = NULL;
	int one = 1;
	int result;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock)
		goto out;

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out_err;
	}

	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to our cluster-known address when connecting to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	} else if (sock) {
		sock_release(sock);
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));

	if (result < 0) {
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
	}
	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->rx_action = tcp_accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Set keepalive failed: %d", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
		if (!addr)
			break;
		dlm_local_addr[dlm_local_count++] = addr;
	}
}
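/*
 * init_local() fills dlm_local_addr[] from dlm_our_addr(), i.e. from the
 * local addresses the cluster configuration supplied. Entry 0 sets the
 * address family used for every socket this layer creates; TCP uses
 * only entry 0, while SCTP binds them all (see sctp_bind_addrs()).
 */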
/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	int result = -EINVAL;
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int bufsize = NEEDED_RMEM;
	int one = 1;

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
				   (char *)&bufsize, sizeof(bufsize));
	if (result)
		log_print("Error increasing buffer space on socket %d", result);

	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
				   sizeof(one));
	if (result < 0)
		log_print("Could not set SCTP NODELAY error %d\n", result);

	write_lock_bh(&sock->sk->sk_callback_lock);
	/* Init con struct */
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = sctp_accept_from_sock;
	con->connect_action = sctp_connect_to_sock;

	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
		goto create_delsock;

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	} else {
		result = -EADDRINUSE;
	}

	return result;
}



static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}
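/*
 * Typical caller pattern for the pair below (editor's illustration; the
 * real caller is the midcomms layer, and 'msg'/'len' here are
 * hypothetical):
 *
 *	char *p;
 *	void *mh;
 *
 *	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
 *	if (!mh)
 *		return -ENOMEM;
 *	memcpy(p, msg, len);			// build message in place
 *	dlm_lowcomms_commit_buffer(mh);		// queue it for sending
 *
 * get_buffer reserves len bytes in the tail page of the node's write
 * queue (adding a new page if needed) and bumps e->users; commit drops
 * users and, once the last writer commits, exposes the bytes to
 * send_to_sock() by setting e->len.
 */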
void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	spin_unlock(&con->writequeue_lock);

	queue_work(send_workqueue, &con->swork);
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}
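/*
 * Invariant checked by the BUG_ON() in send_to_sock() below: an entry
 * still on the queue may have len == 0 while users != 0 (space reserved
 * but not yet committed), but never len == 0 && users == 0, since
 * writequeue_entry_complete() frees an entry as soon as both hit zero.
 */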
/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;
	int count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = kernel_sendpage(con->sock, e->page, offset, len,
					      msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				if (ret == -EAGAIN &&
				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
					/* Notify TCP that we're limited by the
					 * application window size.
					 */
					set_bit(SOCK_NOSPACE, &con->sock->flags);
					con->sock->sk->sk_write_pending++;
				}
				cond_resched();
				goto out;
			} else if (ret < 0)
				goto send_error;
		}

		/* Don't starve people filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, true, false, true);
	/* Requeue the send work. When the work daemon runs again, it will try
	   a new connection, then call this function again. */
	queue_work(send_workqueue, &con->swork);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void clean_one_writequeue(struct connection *con)
{
	struct writequeue_entry *e, *safe;

	spin_lock(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	struct dlm_node_addr *na;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		set_bit(CF_CLOSE, &con->flags);
		close_connection(con, true, true, true);
		clean_one_writequeue(con);
	}

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);

	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	clear_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock == NULL) /* not mutex protected so check it inside too */
		con->connect_action(con);
	if (!list_empty(&con->writequeue))
		send_to_sock(con);
}


/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	foreach_conn(clean_one_writequeue);
}

static void work_stop(void)
{
	if (recv_workqueue)
		destroy_workqueue(recv_workqueue);
	if (send_workqueue)
		destroy_workqueue(send_workqueue);
}

static int work_start(void)
{
	recv_workqueue = alloc_workqueue("dlm_recv",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = alloc_workqueue("dlm_send",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

static void _stop_conn(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);
	set_bit(CF_CLOSE, &con->flags);
	set_bit(CF_READ_PENDING, &con->flags);
	set_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock && con->sock->sk) {
		write_lock_bh(&con->sock->sk->sk_callback_lock);
		con->sock->sk->sk_user_data = NULL;
		write_unlock_bh(&con->sock->sk->sk_callback_lock);
	}
	if (con->othercon && and_other)
		_stop_conn(con->othercon, false);
	mutex_unlock(&con->sock_mutex);
}

static void stop_conn(struct connection *con)
{
	_stop_conn(con, true);
}
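/*
 * Why _stop_conn() sets CF_READ_PENDING/CF_WRITE_PENDING: work_flush()
 * below uses those bits as quiescence markers. The work handlers clear
 * them on entry, so if a flag is found cleared after the flush, some
 * work item ran after stop_conn() and the stop/flush cycle is repeated
 * until a pass completes with every flag still set.
 */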
static void free_conn(struct connection *con)
{
	close_connection(con, true, true, true);
	if (con->othercon)
		kmem_cache_free(con_cache, con->othercon);
	hlist_del(&con->list);
	kmem_cache_free(con_cache, con);
}

static void work_flush(void)
{
	int ok;
	int i;
	struct hlist_node *n;
	struct connection *con;

	if (recv_workqueue)
		flush_workqueue(recv_workqueue);
	if (send_workqueue)
		flush_workqueue(send_workqueue);
	do {
		ok = 1;
		foreach_conn(stop_conn);
		if (recv_workqueue)
			flush_workqueue(recv_workqueue);
		if (send_workqueue)
			flush_workqueue(send_workqueue);
		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
			hlist_for_each_entry_safe(con, n,
						  &connection_hash[i], list) {
				ok &= test_bit(CF_READ_PENDING, &con->flags);
				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
				if (con->othercon) {
					ok &= test_bit(CF_READ_PENDING,
						       &con->othercon->flags);
					ok &= test_bit(CF_WRITE_PENDING,
						       &con->othercon->flags);
				}
			}
		}
	} while (!ok);
}

void dlm_lowcomms_stop(void)
{
	/* Set all the flags to prevent any
	   socket activity.
	 */
	mutex_lock(&connections_lock);
	dlm_allow_conn = 0;
	mutex_unlock(&connections_lock);
	work_flush();
	clean_writequeues();
	foreach_conn(free_conn);
	work_stop();

	kmem_cache_destroy(con_cache);
}

int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto fail;

	error = work_start();
	if (error)
		goto fail_destroy;

	dlm_allow_conn = 1;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	dlm_allow_conn = 0;
	con = nodeid2con(0, 0);
	if (con) {
		close_connection(con, false, true, true);
		kmem_cache_free(con_cache, con);
	}
fail_destroy:
	kmem_cache_destroy(con_cache);
fail:
	return error;
}

void dlm_lowcomms_exit(void)
{
	struct dlm_node_addr *na, *safe;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);
}