xref: /linux/fs/dlm/lowcomms.c (revision e60e1ee60630cafef5e430c2ae364877e061d980)
1 /******************************************************************************
2 *******************************************************************************
3 **
4 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
5 **  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
6 **
7 **  This copyrighted material is made available to anyone wishing to use,
8 **  modify, copy, or redistribute it subject to the terms and conditions
9 **  of the GNU General Public License v.2.
10 **
11 *******************************************************************************
12 ******************************************************************************/
13 
14 /*
15  * lowcomms.c
16  *
17  * This is the "low-level" comms layer.
18  *
19  * It is responsible for sending/receiving messages
20  * from other nodes in the cluster.
21  *
22  * Cluster nodes are referred to by their nodeids. nodeids are
23  * simply 32-bit numbers to the locking module - if they need to
24  * be expanded for the cluster infrastructure then that is its
25  * responsibility. It is this layer's responsibility to resolve
26  * these nodeids into IP addresses or whatever else it needs for
27  * inter-node communication.
28  *
29  * The comms level consists of two workqueues: a receive workqueue that
30  * deals mainly with receiving messages from other nodes and passing them
31  * up to the mid-level comms layer (which understands the
32  * message format) for execution by the locking core, and
33  * a send workqueue which does all the setting up of connections
34  * to remote nodes and the sending of data. Other threads are not allowed
35  * to send their own data because it may cause them to wait in times
36  * of high load. Also, this way, the send side can collect together
37  * messages bound for one node and send them in one block.
38  *
39  * lowcomms will choose to use either TCP or SCTP as its transport layer
40  * depending on the configuration variable 'protocol'. This should be set
41  * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
42  * cluster-wide mechanism as it must be the same on all nodes of the cluster
43  * for the DLM to function.
44  *
45  */
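
/*
 * As a rough illustration (the configfs paths are an assumption based on
 * fs/dlm/config.c and the usual configfs mount point, not something this
 * file depends on), a cluster manager would typically select the transport
 * and port identically on every node before any lockspace is created:
 *
 *   mount -t configfs none /sys/kernel/config
 *   echo 1     > /sys/kernel/config/dlm/cluster/protocol    # 0 = TCP, 1 = SCTP
 *   echo 21064 > /sys/kernel/config/dlm/cluster/tcp_port
 *
 * Those values reach this file as dlm_config.ci_protocol (checked in
 * dlm_lowcomms_start()) and dlm_config.ci_tcp_port (used wherever sockets
 * are bound or connected).
 */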
46 
47 #include <asm/ioctls.h>
48 #include <net/sock.h>
49 #include <net/tcp.h>
50 #include <linux/pagemap.h>
51 #include <linux/file.h>
52 #include <linux/mutex.h>
53 #include <linux/sctp.h>
54 #include <linux/slab.h>
55 #include <net/sctp/sctp.h>
56 #include <net/ipv6.h>
57 
58 #include "dlm_internal.h"
59 #include "lowcomms.h"
60 #include "midcomms.h"
61 #include "config.h"
62 
63 #define NEEDED_RMEM (4*1024*1024)
64 #define CONN_HASH_SIZE 32
65 
66 /* Number of messages to send before rescheduling */
67 #define MAX_SEND_MSG_COUNT 25
68 
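/*
 * struct cbuf is a small circular-buffer descriptor for the per-connection
 * receive page (con->rx_page): 'base' is the offset of the first unconsumed
 * byte, 'len' is how many unconsumed bytes follow it, and 'mask' is the
 * (power-of-two) buffer size minus one. cbuf_add() accounts for newly
 * received bytes, cbuf_data() gives the offset where the next receive
 * should land (wrapping via the mask), and cbuf_eat() consumes bytes that
 * dlm_process_incoming_buffer() has dealt with.
 */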
69 struct cbuf {
70 	unsigned int base;
71 	unsigned int len;
72 	unsigned int mask;
73 };
74 
75 static void cbuf_add(struct cbuf *cb, int n)
76 {
77 	cb->len += n;
78 }
79 
80 static int cbuf_data(struct cbuf *cb)
81 {
82 	return ((cb->base + cb->len) & cb->mask);
83 }
84 
85 static void cbuf_init(struct cbuf *cb, int size)
86 {
87 	cb->base = cb->len = 0;
88 	cb->mask = size-1;
89 }
90 
91 static void cbuf_eat(struct cbuf *cb, int n)
92 {
93 	cb->len  -= n;
94 	cb->base += n;
95 	cb->base &= cb->mask;
96 }
97 
98 static bool cbuf_empty(struct cbuf *cb)
99 {
100 	return cb->len == 0;
101 }
102 
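/*
 * One struct connection exists per peer nodeid, hashed by nodeid into
 * connection_hash. The connection with nodeid 0 represents the local
 * listening socket; its rx_action/connect_action are used as the template
 * for newly created connections in __nodeid2con(). The CF_* values below
 * are bit numbers used with set_bit()/test_bit() on 'flags'. 'othercon'
 * holds a second, incoming connection for the case where two nodes connect
 * to each other at the same time and the connections cross on the wire
 * (see the accept functions below).
 */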
103 struct connection {
104 	struct socket *sock;	/* NULL if not connected */
105 	uint32_t nodeid;	/* So we know who we are in the list */
106 	struct mutex sock_mutex;
107 	unsigned long flags;
108 #define CF_READ_PENDING 1
109 #define CF_WRITE_PENDING 2
110 #define CF_INIT_PENDING 4
111 #define CF_IS_OTHERCON 5
112 #define CF_CLOSE 6
113 #define CF_APP_LIMITED 7
114 #define CF_CLOSING 8
115 	struct list_head writequeue;  /* List of outgoing writequeue_entries */
116 	spinlock_t writequeue_lock;
117 	int (*rx_action) (struct connection *);	/* What to do when active */
118 	void (*connect_action) (struct connection *);	/* What to do to connect */
119 	struct page *rx_page;
120 	struct cbuf cb;
121 	int retries;
122 #define MAX_CONNECT_RETRIES 3
123 	struct hlist_node list;
124 	struct connection *othercon;
125 	struct work_struct rwork; /* Receive workqueue */
126 	struct work_struct swork; /* Send workqueue */
127 };
128 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
129 
130 /* An entry waiting to be sent */
131 struct writequeue_entry {
132 	struct list_head list;
133 	struct page *page;
134 	int offset;
135 	int len;
136 	int end;
137 	int users;
138 	struct connection *con;
139 };
140 
141 struct dlm_node_addr {
142 	struct list_head list;
143 	int nodeid;
144 	int addr_count;
145 	int curr_addr_index;
146 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
147 };
148 
149 static struct listen_sock_callbacks {
150 	void (*sk_error_report)(struct sock *);
151 	void (*sk_data_ready)(struct sock *);
152 	void (*sk_state_change)(struct sock *);
153 	void (*sk_write_space)(struct sock *);
154 } listen_sock;
155 
156 static LIST_HEAD(dlm_node_addrs);
157 static DEFINE_SPINLOCK(dlm_node_addrs_spin);
158 
159 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
160 static int dlm_local_count;
161 static int dlm_allow_conn;
162 
163 /* Work queues */
164 static struct workqueue_struct *recv_workqueue;
165 static struct workqueue_struct *send_workqueue;
166 
167 static struct hlist_head connection_hash[CONN_HASH_SIZE];
168 static DEFINE_MUTEX(connections_lock);
169 static struct kmem_cache *con_cache;
170 
171 static void process_recv_sockets(struct work_struct *work);
172 static void process_send_sockets(struct work_struct *work);
173 
174 
175 /* This is deliberately very simple because most clusters have simple
176    sequential nodeids, so we should be able to go straight to a connection
177    struct in the array */
178 static inline int nodeid_hash(int nodeid)
179 {
180 	return nodeid & (CONN_HASH_SIZE-1);
181 }
182 
183 static struct connection *__find_con(int nodeid)
184 {
185 	int r;
186 	struct connection *con;
187 
188 	r = nodeid_hash(nodeid);
189 
190 	hlist_for_each_entry(con, &connection_hash[r], list) {
191 		if (con->nodeid == nodeid)
192 			return con;
193 	}
194 	return NULL;
195 }
196 
197 /*
198  * If 'alloc' is zero then we don't attempt to create a new
199  * connection structure for this node.
200  */
201 static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
202 {
203 	struct connection *con = NULL;
204 	int r;
205 
206 	con = __find_con(nodeid);
207 	if (con || !alloc)
208 		return con;
209 
210 	con = kmem_cache_zalloc(con_cache, alloc);
211 	if (!con)
212 		return NULL;
213 
214 	r = nodeid_hash(nodeid);
215 	hlist_add_head(&con->list, &connection_hash[r]);
216 
217 	con->nodeid = nodeid;
218 	mutex_init(&con->sock_mutex);
219 	INIT_LIST_HEAD(&con->writequeue);
220 	spin_lock_init(&con->writequeue_lock);
221 	INIT_WORK(&con->swork, process_send_sockets);
222 	INIT_WORK(&con->rwork, process_recv_sockets);
223 
224 	/* Setup action pointers for child sockets */
225 	if (con->nodeid) {
226 		struct connection *zerocon = __find_con(0);
227 
228 		con->connect_action = zerocon->connect_action;
229 		if (!con->rx_action)
230 			con->rx_action = zerocon->rx_action;
231 	}
232 
233 	return con;
234 }
235 
236 /* Loop round all connections */
237 static void foreach_conn(void (*conn_func)(struct connection *c))
238 {
239 	int i;
240 	struct hlist_node *n;
241 	struct connection *con;
242 
243 	for (i = 0; i < CONN_HASH_SIZE; i++) {
244 		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
245 			conn_func(con);
246 	}
247 }
248 
249 static struct connection *nodeid2con(int nodeid, gfp_t allocation)
250 {
251 	struct connection *con;
252 
253 	mutex_lock(&connections_lock);
254 	con = __nodeid2con(nodeid, allocation);
255 	mutex_unlock(&connections_lock);
256 
257 	return con;
258 }
259 
260 static struct dlm_node_addr *find_node_addr(int nodeid)
261 {
262 	struct dlm_node_addr *na;
263 
264 	list_for_each_entry(na, &dlm_node_addrs, list) {
265 		if (na->nodeid == nodeid)
266 			return na;
267 	}
268 	return NULL;
269 }
270 
271 static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
272 {
273 	switch (x->ss_family) {
274 	case AF_INET: {
275 		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
276 		struct sockaddr_in *siny = (struct sockaddr_in *)y;
277 		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
278 			return 0;
279 		if (sinx->sin_port != siny->sin_port)
280 			return 0;
281 		break;
282 	}
283 	case AF_INET6: {
284 		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
285 		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
286 		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
287 			return 0;
288 		if (sinx->sin6_port != siny->sin6_port)
289 			return 0;
290 		break;
291 	}
292 	default:
293 		return 0;
294 	}
295 	return 1;
296 }
297 
298 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
299 			  struct sockaddr *sa_out, bool try_new_addr)
300 {
301 	struct sockaddr_storage sas;
302 	struct dlm_node_addr *na;
303 
304 	if (!dlm_local_count)
305 		return -1;
306 
307 	spin_lock(&dlm_node_addrs_spin);
308 	na = find_node_addr(nodeid);
309 	if (na && na->addr_count) {
310 		memcpy(&sas, na->addr[na->curr_addr_index],
311 		       sizeof(struct sockaddr_storage));
312 
313 		if (try_new_addr) {
314 			na->curr_addr_index++;
315 			if (na->curr_addr_index == na->addr_count)
316 				na->curr_addr_index = 0;
317 		}
318 	}
319 	spin_unlock(&dlm_node_addrs_spin);
320 
321 	if (!na)
322 		return -EEXIST;
323 
324 	if (!na->addr_count)
325 		return -ENOENT;
326 
327 	if (sas_out)
328 		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
329 
330 	if (!sa_out)
331 		return 0;
332 
333 	if (dlm_local_addr[0]->ss_family == AF_INET) {
334 		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
335 		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
336 		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
337 	} else {
338 		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
339 		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
340 		ret6->sin6_addr = in6->sin6_addr;
341 	}
342 
343 	return 0;
344 }
345 
346 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
347 {
348 	struct dlm_node_addr *na;
349 	int rv = -EEXIST;
350 	int addr_i;
351 
352 	spin_lock(&dlm_node_addrs_spin);
353 	list_for_each_entry(na, &dlm_node_addrs, list) {
354 		if (!na->addr_count)
355 			continue;
356 
357 		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
358 			if (addr_compare(na->addr[addr_i], addr)) {
359 				*nodeid = na->nodeid;
360 				rv = 0;
361 				goto unlock;
362 			}
363 		}
364 	}
365 unlock:
366 	spin_unlock(&dlm_node_addrs_spin);
367 	return rv;
368 }
369 
370 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
371 {
372 	struct sockaddr_storage *new_addr;
373 	struct dlm_node_addr *new_node, *na;
374 
375 	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
376 	if (!new_node)
377 		return -ENOMEM;
378 
379 	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
380 	if (!new_addr) {
381 		kfree(new_node);
382 		return -ENOMEM;
383 	}
384 
385 	memcpy(new_addr, addr, len);
386 
387 	spin_lock(&dlm_node_addrs_spin);
388 	na = find_node_addr(nodeid);
389 	if (!na) {
390 		new_node->nodeid = nodeid;
391 		new_node->addr[0] = new_addr;
392 		new_node->addr_count = 1;
393 		list_add(&new_node->list, &dlm_node_addrs);
394 		spin_unlock(&dlm_node_addrs_spin);
395 		return 0;
396 	}
397 
398 	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
399 		spin_unlock(&dlm_node_addrs_spin);
400 		kfree(new_addr);
401 		kfree(new_node);
402 		return -ENOSPC;
403 	}
404 
405 	na->addr[na->addr_count++] = new_addr;
406 	spin_unlock(&dlm_node_addrs_spin);
407 	kfree(new_node);
408 	return 0;
409 }
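
/*
 * dlm_lowcomms_addr() is how the rest of dlm (normally the configfs code in
 * config.c) tells this layer about a peer's addresses before any traffic
 * flows. A minimal sketch of registering two addresses for node 3, with the
 * sockaddrs assumed to be filled in by the caller:
 *
 *	struct sockaddr_storage ss1, ss2;	(node 3's two addresses)
 *
 *	dlm_lowcomms_addr(3, &ss1, sizeof(ss1));
 *	dlm_lowcomms_addr(3, &ss2, sizeof(ss2));
 *
 * Repeated calls for the same nodeid append to na->addr[] (up to
 * DLM_MAX_ADDR_COUNT entries); nodeid_to_addr() cycles through them when
 * called with try_new_addr set, which is what SCTP multi-homing relies on.
 */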
410 
411 /* Data is available on the socket, or the listening socket has received a connect */
412 static void lowcomms_data_ready(struct sock *sk)
413 {
414 	struct connection *con;
415 
416 	read_lock_bh(&sk->sk_callback_lock);
417 	con = sock2con(sk);
418 	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
419 		queue_work(recv_workqueue, &con->rwork);
420 	read_unlock_bh(&sk->sk_callback_lock);
421 }
422 
423 static void lowcomms_write_space(struct sock *sk)
424 {
425 	struct connection *con;
426 
427 	read_lock_bh(&sk->sk_callback_lock);
428 	con = sock2con(sk);
429 	if (!con)
430 		goto out;
431 
432 	clear_bit(SOCK_NOSPACE, &con->sock->flags);
433 
434 	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
435 		con->sock->sk->sk_write_pending--;
436 		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
437 	}
438 
439 	queue_work(send_workqueue, &con->swork);
440 out:
441 	read_unlock_bh(&sk->sk_callback_lock);
442 }
443 
444 static inline void lowcomms_connect_sock(struct connection *con)
445 {
446 	if (test_bit(CF_CLOSE, &con->flags))
447 		return;
448 	queue_work(send_workqueue, &con->swork);
449 	cond_resched();
450 }
451 
452 static void lowcomms_state_change(struct sock *sk)
453 {
454 	/* SCTP layer is not calling sk_data_ready when the connection
455 	 * is done, so we catch the signal through here. Also, it
456 	 * doesn't switch socket state when entering shutdown, so we
457 	 * skip the write in that case.
458 	 */
459 	if (sk->sk_shutdown) {
460 		if (sk->sk_shutdown == RCV_SHUTDOWN)
461 			lowcomms_data_ready(sk);
462 	} else if (sk->sk_state == TCP_ESTABLISHED) {
463 		lowcomms_write_space(sk);
464 	}
465 }
466 
467 int dlm_lowcomms_connect_node(int nodeid)
468 {
469 	struct connection *con;
470 
471 	if (nodeid == dlm_our_nodeid())
472 		return 0;
473 
474 	con = nodeid2con(nodeid, GFP_NOFS);
475 	if (!con)
476 		return -ENOMEM;
477 	lowcomms_connect_sock(con);
478 	return 0;
479 }
480 
481 static void lowcomms_error_report(struct sock *sk)
482 {
483 	struct connection *con;
484 	struct sockaddr_storage saddr;
485 	int buflen;
486 	void (*orig_report)(struct sock *) = NULL;
487 
488 	read_lock_bh(&sk->sk_callback_lock);
489 	con = sock2con(sk);
490 	if (con == NULL)
491 		goto out;
492 
493 	orig_report = listen_sock.sk_error_report;
494 	if (con->sock == NULL ||
495 	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr, &buflen)) {
496 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
497 				   "sending to node %d, port %d, "
498 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
499 				   con->nodeid, dlm_config.ci_tcp_port,
500 				   sk->sk_err, sk->sk_err_soft);
501 	} else if (saddr.ss_family == AF_INET) {
502 		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
503 
504 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
505 				   "sending to node %d at %pI4, port %d, "
506 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
507 				   con->nodeid, &sin4->sin_addr.s_addr,
508 				   dlm_config.ci_tcp_port, sk->sk_err,
509 				   sk->sk_err_soft);
510 	} else {
511 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
512 
513 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
514 				   "sending to node %d at %pI6c, "
515 				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
516 				   con->nodeid, &sin6->sin6_addr,
517 				   dlm_config.ci_tcp_port, sk->sk_err,
518 				   sk->sk_err_soft);
519 	}
523 out:
524 	read_unlock_bh(&sk->sk_callback_lock);
525 	if (orig_report)
526 		orig_report(sk);
527 }
528 
529 /* Note: sk_callback_lock must be locked before calling this function. */
530 static void save_listen_callbacks(struct socket *sock)
531 {
532 	struct sock *sk = sock->sk;
533 
534 	listen_sock.sk_data_ready = sk->sk_data_ready;
535 	listen_sock.sk_state_change = sk->sk_state_change;
536 	listen_sock.sk_write_space = sk->sk_write_space;
537 	listen_sock.sk_error_report = sk->sk_error_report;
538 }
539 
540 static void restore_callbacks(struct socket *sock)
541 {
542 	struct sock *sk = sock->sk;
543 
544 	write_lock_bh(&sk->sk_callback_lock);
545 	sk->sk_user_data = NULL;
546 	sk->sk_data_ready = listen_sock.sk_data_ready;
547 	sk->sk_state_change = listen_sock.sk_state_change;
548 	sk->sk_write_space = listen_sock.sk_write_space;
549 	sk->sk_error_report = listen_sock.sk_error_report;
550 	write_unlock_bh(&sk->sk_callback_lock);
551 }
552 
553 /* Make a socket active */
554 static void add_sock(struct socket *sock, struct connection *con)
555 {
556 	struct sock *sk = sock->sk;
557 
558 	write_lock_bh(&sk->sk_callback_lock);
559 	con->sock = sock;
560 
561 	sk->sk_user_data = con;
562 	/* Install a data_ready callback */
563 	sk->sk_data_ready = lowcomms_data_ready;
564 	sk->sk_write_space = lowcomms_write_space;
565 	sk->sk_state_change = lowcomms_state_change;
566 	sk->sk_allocation = GFP_NOFS;
567 	sk->sk_error_report = lowcomms_error_report;
568 	write_unlock_bh(&sk->sk_callback_lock);
569 }
570 
571 /* Add the port number to an IPv4 or IPv6 sockaddr and store the
572    address length in *addr_len */
573 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
574 			  int *addr_len)
575 {
576 	saddr->ss_family =  dlm_local_addr[0]->ss_family;
577 	if (saddr->ss_family == AF_INET) {
578 		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
579 		in4_addr->sin_port = cpu_to_be16(port);
580 		*addr_len = sizeof(struct sockaddr_in);
581 		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
582 	} else {
583 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
584 		in6_addr->sin6_port = cpu_to_be16(port);
585 		*addr_len = sizeof(struct sockaddr_in6);
586 	}
587 	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
588 }
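
/*
 * For example, with IPv4 local addresses, make_sockaddr(&ss, 21064, &len)
 * leaves 'ss' holding a struct sockaddr_in with sin_port = htons(21064),
 * sets len = sizeof(struct sockaddr_in) and zeroes the rest of the storage
 * (21064 is only an illustrative port; callers pass dlm_config.ci_tcp_port,
 * or 0 when the port is irrelevant).
 */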
589 
590 /* Close a remote connection and tidy up */
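/*
 * 'tx' and 'rx' say whether it is safe to cancel the send/receive work
 * synchronously. Callers running from the receive path pass rx = false and
 * callers on the send path pass tx = false, so close_connection() never
 * does cancel_work_sync() on the work item it is itself running from
 * (which would deadlock); everyone else passes both as true.
 */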
591 static void close_connection(struct connection *con, bool and_other,
592 			     bool tx, bool rx)
593 {
594 	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
595 
596 	if (tx && !closing && cancel_work_sync(&con->swork)) {
597 		log_print("canceled swork for node %d", con->nodeid);
598 		clear_bit(CF_WRITE_PENDING, &con->flags);
599 	}
600 	if (rx && !closing && cancel_work_sync(&con->rwork)) {
601 		log_print("canceled rwork for node %d", con->nodeid);
602 		clear_bit(CF_READ_PENDING, &con->flags);
603 	}
604 
605 	mutex_lock(&con->sock_mutex);
606 	if (con->sock) {
607 		restore_callbacks(con->sock);
608 		sock_release(con->sock);
609 		con->sock = NULL;
610 	}
611 	if (con->othercon && and_other) {
612 		/* Will only re-enter once. */
613 		close_connection(con->othercon, false, true, true);
614 	}
615 	if (con->rx_page) {
616 		__free_page(con->rx_page);
617 		con->rx_page = NULL;
618 	}
619 
620 	con->retries = 0;
621 	mutex_unlock(&con->sock_mutex);
622 	clear_bit(CF_CLOSING, &con->flags);
623 }
624 
625 /* Data received from remote end */
626 static int receive_from_sock(struct connection *con)
627 {
628 	int ret = 0;
629 	struct msghdr msg = {};
630 	struct kvec iov[2];
631 	unsigned len;
632 	int r;
633 	int call_again_soon = 0;
634 	int nvec;
635 
636 	mutex_lock(&con->sock_mutex);
637 
638 	if (con->sock == NULL) {
639 		ret = -EAGAIN;
640 		goto out_close;
641 	}
642 	if (con->nodeid == 0) {
643 		ret = -EINVAL;
644 		goto out_close;
645 	}
646 
647 	if (con->rx_page == NULL) {
648 		/*
649 		 * This doesn't need to be atomic, but I think it should
650 		 * improve performance if it is.
651 		 */
652 		con->rx_page = alloc_page(GFP_ATOMIC);
653 		if (con->rx_page == NULL)
654 			goto out_resched;
655 		cbuf_init(&con->cb, PAGE_SIZE);
656 	}
657 
658 	/*
659 	 * iov[0] is the bit of the circular buffer between the current end
660 	 * point (cb.base + cb.len) and the end of the buffer.
661 	 */
662 	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
663 	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
664 	iov[1].iov_len = 0;
665 	nvec = 1;
666 
667 	/*
668 	 * iov[1] is the bit of the circular buffer between the start of the
669 	 * buffer and the start of the currently used section (cb.base)
670 	 */
671 	if (cbuf_data(&con->cb) >= con->cb.base) {
672 		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
673 		iov[1].iov_len = con->cb.base;
674 		iov[1].iov_base = page_address(con->rx_page);
675 		nvec = 2;
676 	}
677 	len = iov[0].iov_len + iov[1].iov_len;
678 
679 	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
680 			       MSG_DONTWAIT | MSG_NOSIGNAL);
681 	if (ret <= 0)
682 		goto out_close;
683 	else if (ret == len)
684 		call_again_soon = 1;
685 
686 	cbuf_add(&con->cb, ret);
687 	ret = dlm_process_incoming_buffer(con->nodeid,
688 					  page_address(con->rx_page),
689 					  con->cb.base, con->cb.len,
690 					  PAGE_SIZE);
691 	if (ret == -EBADMSG) {
692 		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
693 			  page_address(con->rx_page), con->cb.base,
694 			  con->cb.len, r);
695 	}
696 	if (ret < 0)
697 		goto out_close;
698 	cbuf_eat(&con->cb, ret);
699 
700 	if (cbuf_empty(&con->cb) && !call_again_soon) {
701 		__free_page(con->rx_page);
702 		con->rx_page = NULL;
703 	}
704 
705 	if (call_again_soon)
706 		goto out_resched;
707 	mutex_unlock(&con->sock_mutex);
708 	return 0;
709 
710 out_resched:
711 	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
712 		queue_work(recv_workqueue, &con->rwork);
713 	mutex_unlock(&con->sock_mutex);
714 	return -EAGAIN;
715 
716 out_close:
717 	mutex_unlock(&con->sock_mutex);
718 	if (ret != -EAGAIN) {
719 		close_connection(con, true, true, false);
720 		/* Reconnect when there is something to send */
721 	}
722 	/* Don't return success if we really got EOF */
723 	if (ret == 0)
724 		ret = -EAGAIN;
725 
726 	return ret;
727 }
728 
729 /* The listening socket has an incoming connection waiting; accept it */
730 static int tcp_accept_from_sock(struct connection *con)
731 {
732 	int result;
733 	struct sockaddr_storage peeraddr;
734 	struct socket *newsock = NULL;
735 	int len;
736 	int nodeid;
737 	struct connection *newcon;
738 	struct connection *addcon;
739 
740 	mutex_lock(&connections_lock);
741 	if (!dlm_allow_conn) {
742 		mutex_unlock(&connections_lock);
743 		return -1;
744 	}
745 	mutex_unlock(&connections_lock);
746 
747 	mutex_lock_nested(&con->sock_mutex, 0);
748 
749 	if (!con->sock) {
750 		mutex_unlock(&con->sock_mutex);
751 		return -ENOTCONN;
752 	}
753 
754 	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
755 	if (result < 0)
756 		goto accept_err;
757 
758 	/* Get the connected socket's peer */
759 	memset(&peeraddr, 0, sizeof(peeraddr));
760 	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
761 				  &len, 2)) {
762 		result = -ECONNABORTED;
763 		goto accept_err;
764 	}
765 
766 	/* Get the new node's NODEID */
767 	make_sockaddr(&peeraddr, 0, &len);
768 	if (addr_to_nodeid(&peeraddr, &nodeid)) {
769 		unsigned char *b=(unsigned char *)&peeraddr;
770 		log_print("connect from non cluster node");
771 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
772 				     b, sizeof(struct sockaddr_storage));
773 		sock_release(newsock);
774 		mutex_unlock(&con->sock_mutex);
775 		return -1;
776 	}
777 
778 	log_print("got connection from %d", nodeid);
779 
780 	/*  Check to see if we already have a connection to this node. This
781 	 *  could happen if the two nodes initiate a connection at roughly
782 	 *  the same time and the connections cross on the wire.
783 	 *  In this case we store the incoming one in "othercon"
784 	 */
785 	newcon = nodeid2con(nodeid, GFP_NOFS);
786 	if (!newcon) {
787 		result = -ENOMEM;
788 		goto accept_err;
789 	}
790 	mutex_lock_nested(&newcon->sock_mutex, 1);
791 	if (newcon->sock) {
792 		struct connection *othercon = newcon->othercon;
793 
794 		if (!othercon) {
795 			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
796 			if (!othercon) {
797 				log_print("failed to allocate incoming socket");
798 				mutex_unlock(&newcon->sock_mutex);
799 				result = -ENOMEM;
800 				goto accept_err;
801 			}
802 			othercon->nodeid = nodeid;
803 			othercon->rx_action = receive_from_sock;
804 			mutex_init(&othercon->sock_mutex);
805 			INIT_LIST_HEAD(&othercon->writequeue);
806 			spin_lock_init(&othercon->writequeue_lock);
807 			INIT_WORK(&othercon->swork, process_send_sockets);
808 			INIT_WORK(&othercon->rwork, process_recv_sockets);
809 			set_bit(CF_IS_OTHERCON, &othercon->flags);
810 		}
811 		mutex_lock_nested(&othercon->sock_mutex, 2);
812 		if (!othercon->sock) {
813 			newcon->othercon = othercon;
814 			add_sock(newsock, othercon);
815 			addcon = othercon;
816 			mutex_unlock(&othercon->sock_mutex);
817 		}
818 		else {
819 			log_print("Extra connection from node %d attempted", nodeid);
820 			result = -EAGAIN;
821 			mutex_unlock(&othercon->sock_mutex);
822 			mutex_unlock(&newcon->sock_mutex);
823 			goto accept_err;
824 		}
825 	}
826 	else {
827 		newcon->rx_action = receive_from_sock;
828 		/* accept copies the sk after we've saved the callbacks, so we
829 		   don't want to save them a second time or comm errors will
830 		   result in calling sk_error_report recursively. */
831 		add_sock(newsock, newcon);
832 		addcon = newcon;
833 	}
834 
835 	mutex_unlock(&newcon->sock_mutex);
836 
837 	/*
838 	 * Queue up receive work in case data arrived between
839 	 * processing the accept and installing the socket
840 	 * callbacks
841 	 */
842 	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
843 		queue_work(recv_workqueue, &addcon->rwork);
844 	mutex_unlock(&con->sock_mutex);
845 
846 	return 0;
847 
848 accept_err:
849 	mutex_unlock(&con->sock_mutex);
850 	if (newsock)
851 		sock_release(newsock);
852 
853 	if (result != -EAGAIN)
854 		log_print("error accepting connection from node: %d", result);
855 	return result;
856 }
857 
858 static int sctp_accept_from_sock(struct connection *con)
859 {
860 	/* Check that the new node is in the lockspace */
861 	struct sctp_prim prim;
862 	int nodeid;
863 	int prim_len, ret;
864 	int addr_len;
865 	struct connection *newcon;
866 	struct connection *addcon;
867 	struct socket *newsock = NULL;
868 
869 	mutex_lock(&connections_lock);
870 	if (!dlm_allow_conn) {
871 		mutex_unlock(&connections_lock);
872 		return -1;
873 	}
874 	mutex_unlock(&connections_lock);
875 
876 	mutex_lock_nested(&con->sock_mutex, 0);
877 
878 	ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
879 	if (ret < 0)
880 		goto accept_err;
881 
882 	memset(&prim, 0, sizeof(struct sctp_prim));
883 	prim_len = sizeof(struct sctp_prim);
884 
885 	ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
886 				(char *)&prim, &prim_len);
887 	if (ret < 0) {
888 		log_print("getsockopt/sctp_primary_addr failed: %d", ret);
889 		goto accept_err;
890 	}
891 
892 	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
893 	ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
894 	if (ret) {
895 		unsigned char *b = (unsigned char *)&prim.ssp_addr;
896 
897 		log_print("reject connect from unknown addr");
898 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
899 				     b, sizeof(struct sockaddr_storage));
900 		goto accept_err;
901 	}
902 
903 	newcon = nodeid2con(nodeid, GFP_NOFS);
904 	if (!newcon) {
905 		ret = -ENOMEM;
906 		goto accept_err;
907 	}
908 
909 	mutex_lock_nested(&newcon->sock_mutex, 1);
910 
911 	if (newcon->sock) {
912 		struct connection *othercon = newcon->othercon;
913 
914 		if (!othercon) {
915 			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
916 			if (!othercon) {
917 				log_print("failed to allocate incoming socket");
918 				mutex_unlock(&newcon->sock_mutex);
919 				ret = -ENOMEM;
920 				goto accept_err;
921 			}
922 			othercon->nodeid = nodeid;
923 			othercon->rx_action = receive_from_sock;
924 			mutex_init(&othercon->sock_mutex);
925 			INIT_LIST_HEAD(&othercon->writequeue);
926 			spin_lock_init(&othercon->writequeue_lock);
927 			INIT_WORK(&othercon->swork, process_send_sockets);
928 			INIT_WORK(&othercon->rwork, process_recv_sockets);
929 			set_bit(CF_IS_OTHERCON, &othercon->flags);
930 		}
931 		mutex_lock_nested(&othercon->sock_mutex, 2);
932 		if (!othercon->sock) {
933 			newcon->othercon = othercon;
934 			add_sock(newsock, othercon);
935 			addcon = othercon;
936 			mutex_unlock(&othercon->sock_mutex);
937 		} else {
938 			log_print("Extra connection from node %d attempted", nodeid);
939 			ret = -EAGAIN;
940 			mutex_unlock(&othercon->sock_mutex);
941 			mutex_unlock(&newcon->sock_mutex);
942 			goto accept_err;
943 		}
944 	} else {
945 		newcon->rx_action = receive_from_sock;
946 		add_sock(newsock, newcon);
947 		addcon = newcon;
948 	}
949 
950 	log_print("connected to %d", nodeid);
951 
952 	mutex_unlock(&newcon->sock_mutex);
953 
954 	/*
955 	 * Queue up receive work in case data arrived between
956 	 * processing the accept and installing the socket
957 	 * callbacks
958 	 */
959 	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
960 		queue_work(recv_workqueue, &addcon->rwork);
961 	mutex_unlock(&con->sock_mutex);
962 
963 	return 0;
964 
965 accept_err:
966 	mutex_unlock(&con->sock_mutex);
967 	if (newsock)
968 		sock_release(newsock);
969 	if (ret != -EAGAIN)
970 		log_print("error accepting connection from node: %d", ret);
971 
972 	return ret;
973 }
974 
975 static void free_entry(struct writequeue_entry *e)
976 {
977 	__free_page(e->page);
978 	kfree(e);
979 }
980 
981 /*
982  * writequeue_entry_complete - try to delete and free write queue entry
983  * @e: write queue entry to try to delete
984  * @completed: bytes completed
985  *
986  * writequeue_lock must be held.
987  */
988 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
989 {
990 	e->offset += completed;
991 	e->len -= completed;
992 
993 	if (e->len == 0 && e->users == 0) {
994 		list_del(&e->list);
995 		free_entry(e);
996 	}
997 }
998 
999 /*
1000  * sctp_bind_addrs - bind a SCTP socket to all our addresses
1001  */
1002 static int sctp_bind_addrs(struct connection *con, uint16_t port)
1003 {
1004 	struct sockaddr_storage localaddr;
1005 	int i, addr_len, result = 0;
1006 
1007 	for (i = 0; i < dlm_local_count; i++) {
1008 		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1009 		make_sockaddr(&localaddr, port, &addr_len);
1010 
1011 		if (!i)
1012 			result = kernel_bind(con->sock,
1013 					     (struct sockaddr *)&localaddr,
1014 					     addr_len);
1015 		else
1016 			result = kernel_setsockopt(con->sock, SOL_SCTP,
1017 						   SCTP_SOCKOPT_BINDX_ADD,
1018 						   (char *)&localaddr, addr_len);
1019 
1020 		if (result < 0) {
1021 			log_print("Can't bind to port %d addr number %d, %d",
1022 				  port, i + 1, result);
1023 			break;
1024 		}
1025 	}
1026 	return result;
1027 }
1028 
1029 /* Initiate an SCTP association.
1030    Each association gets its own socket here: we create a new SOCK_STREAM
1031    SCTP socket, bind it to all of our local addresses and connect it to
1032    the remote node's address.
1033  */
1034 static void sctp_connect_to_sock(struct connection *con)
1035 {
1036 	struct sockaddr_storage daddr;
1037 	int one = 1;
1038 	int result;
1039 	int addr_len;
1040 	struct socket *sock;
1041 
1042 	if (con->nodeid == 0) {
1043 		log_print("attempt to connect sock 0 foiled");
1044 		return;
1045 	}
1046 
1047 	mutex_lock(&con->sock_mutex);
1048 
1049 	/* Some odd races can cause double-connects, ignore them */
1050 	if (con->retries++ > MAX_CONNECT_RETRIES)
1051 		goto out;
1052 
1053 	if (con->sock) {
1054 		log_print("node %d already connected.", con->nodeid);
1055 		goto out;
1056 	}
1057 
1058 	memset(&daddr, 0, sizeof(daddr));
1059 	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
1060 	if (result < 0) {
1061 		log_print("no address for nodeid %d", con->nodeid);
1062 		goto out;
1063 	}
1064 
1065 	/* Create a socket to communicate with */
1066 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1067 				  SOCK_STREAM, IPPROTO_SCTP, &sock);
1068 	if (result < 0)
1069 		goto socket_err;
1070 
1071 	con->rx_action = receive_from_sock;
1072 	con->connect_action = sctp_connect_to_sock;
1073 	add_sock(sock, con);
1074 
1075 	/* Bind to all addresses. */
1076 	if (sctp_bind_addrs(con, 0))
1077 		goto bind_err;
1078 
1079 	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
1080 
1081 	log_print("connecting to %d", con->nodeid);
1082 
1083 	/* Turn off Nagle's algorithm */
1084 	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1085 			  sizeof(one));
1086 
1087 	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
1088 				   O_NONBLOCK);
1089 	if (result == -EINPROGRESS)
1090 		result = 0;
1091 	if (result == 0)
1092 		goto out;
1093 
1094 bind_err:
1095 	con->sock = NULL;
1096 	sock_release(sock);
1097 
1098 socket_err:
1099 	/*
1100 	 * Some errors are fatal and this list might need adjusting. For other
1101 	 * errors we try again until the max number of retries is reached.
1102 	 */
1103 	if (result != -EHOSTUNREACH &&
1104 	    result != -ENETUNREACH &&
1105 	    result != -ENETDOWN &&
1106 	    result != -EINVAL &&
1107 	    result != -EPROTONOSUPPORT) {
1108 		log_print("connect %d try %d error %d", con->nodeid,
1109 			  con->retries, result);
1110 		mutex_unlock(&con->sock_mutex);
1111 		msleep(1000);
1112 		lowcomms_connect_sock(con);
1113 		return;
1114 	}
1115 
1116 out:
1117 	mutex_unlock(&con->sock_mutex);
1118 }
1119 
1120 /* Connect a new socket to its peer */
1121 static void tcp_connect_to_sock(struct connection *con)
1122 {
1123 	struct sockaddr_storage saddr, src_addr;
1124 	int addr_len;
1125 	struct socket *sock = NULL;
1126 	int one = 1;
1127 	int result;
1128 
1129 	if (con->nodeid == 0) {
1130 		log_print("attempt to connect sock 0 foiled");
1131 		return;
1132 	}
1133 
1134 	mutex_lock(&con->sock_mutex);
1135 	if (con->retries++ > MAX_CONNECT_RETRIES)
1136 		goto out;
1137 
1138 	/* Some odd races can cause double-connects, ignore them */
1139 	if (con->sock)
1140 		goto out;
1141 
1142 	/* Create a socket to communicate with */
1143 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1144 				  SOCK_STREAM, IPPROTO_TCP, &sock);
1145 	if (result < 0)
1146 		goto out_err;
1147 
1148 	memset(&saddr, 0, sizeof(saddr));
1149 	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
1150 	if (result < 0) {
1151 		log_print("no address for nodeid %d", con->nodeid);
1152 		goto out_err;
1153 	}
1154 
1155 	con->rx_action = receive_from_sock;
1156 	con->connect_action = tcp_connect_to_sock;
1157 	add_sock(sock, con);
1158 
1159 	/* Bind to our cluster-known address when connecting, to avoid
1160 	   routing problems */
1161 	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
1162 	make_sockaddr(&src_addr, 0, &addr_len);
1163 	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
1164 				 addr_len);
1165 	if (result < 0) {
1166 		log_print("could not bind for connect: %d", result);
1167 		/* This *may* not indicate a critical error */
1168 	}
1169 
1170 	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
1171 
1172 	log_print("connecting to %d", con->nodeid);
1173 
1174 	/* Turn off Nagle's algorithm */
1175 	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1176 			  sizeof(one));
1177 
1178 	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
1179 				   O_NONBLOCK);
1180 	if (result == -EINPROGRESS)
1181 		result = 0;
1182 	if (result == 0)
1183 		goto out;
1184 
1185 out_err:
1186 	if (con->sock) {
1187 		sock_release(con->sock);
1188 		con->sock = NULL;
1189 	} else if (sock) {
1190 		sock_release(sock);
1191 	}
1192 	/*
1193 	 * Some errors are fatal and this list might need adjusting. For other
1194 	 * errors we try again until the max number of retries is reached.
1195 	 */
1196 	if (result != -EHOSTUNREACH &&
1197 	    result != -ENETUNREACH &&
1198 	    result != -ENETDOWN &&
1199 	    result != -EINVAL &&
1200 	    result != -EPROTONOSUPPORT) {
1201 		log_print("connect %d try %d error %d", con->nodeid,
1202 			  con->retries, result);
1203 		mutex_unlock(&con->sock_mutex);
1204 		msleep(1000);
1205 		lowcomms_connect_sock(con);
1206 		return;
1207 	}
1208 out:
1209 	mutex_unlock(&con->sock_mutex);
1210 	return;
1211 }
1212 
1213 static struct socket *tcp_create_listen_sock(struct connection *con,
1214 					     struct sockaddr_storage *saddr)
1215 {
1216 	struct socket *sock = NULL;
1217 	int result = 0;
1218 	int one = 1;
1219 	int addr_len;
1220 
1221 	if (dlm_local_addr[0]->ss_family == AF_INET)
1222 		addr_len = sizeof(struct sockaddr_in);
1223 	else
1224 		addr_len = sizeof(struct sockaddr_in6);
1225 
1226 	/* Create a socket to communicate with */
1227 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1228 				  SOCK_STREAM, IPPROTO_TCP, &sock);
1229 	if (result < 0) {
1230 		log_print("Can't create listening comms socket");
1231 		goto create_out;
1232 	}
1233 
1234 	/* Turn off Nagle's algorithm */
1235 	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1236 			  sizeof(one));
1237 
1238 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
1239 				   (char *)&one, sizeof(one));
1240 
1241 	if (result < 0) {
1242 		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
1243 	}
1244 	write_lock_bh(&sock->sk->sk_callback_lock);
1245 	sock->sk->sk_user_data = con;
1246 	save_listen_callbacks(sock);
1247 	con->rx_action = tcp_accept_from_sock;
1248 	con->connect_action = tcp_connect_to_sock;
1249 	write_unlock_bh(&sock->sk->sk_callback_lock);
1250 
1251 	/* Bind to our port */
1252 	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
1253 	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
1254 	if (result < 0) {
1255 		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
1256 		sock_release(sock);
1257 		sock = NULL;
1258 		con->sock = NULL;
1259 		goto create_out;
1260 	}
1261 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
1262 				 (char *)&one, sizeof(one));
1263 	if (result < 0) {
1264 		log_print("Set keepalive failed: %d", result);
1265 	}
1266 
1267 	result = sock->ops->listen(sock, 5);
1268 	if (result < 0) {
1269 		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
1270 		sock_release(sock);
1271 		sock = NULL;
1272 		goto create_out;
1273 	}
1274 
1275 create_out:
1276 	return sock;
1277 }
1278 
1279 /* Get local addresses */
1280 static void init_local(void)
1281 {
1282 	struct sockaddr_storage sas, *addr;
1283 	int i;
1284 
1285 	dlm_local_count = 0;
1286 	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1287 		if (dlm_our_addr(&sas, i))
1288 			break;
1289 
1290 		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
1291 		if (!addr)
1292 			break;
1293 		dlm_local_addr[dlm_local_count++] = addr;
1294 	}
1295 }
1296 
1297 /* Initialise SCTP socket and bind to all interfaces */
1298 static int sctp_listen_for_all(void)
1299 {
1300 	struct socket *sock = NULL;
1301 	int result = -EINVAL;
1302 	struct connection *con = nodeid2con(0, GFP_NOFS);
1303 	int bufsize = NEEDED_RMEM;
1304 	int one = 1;
1305 
1306 	if (!con)
1307 		return -ENOMEM;
1308 
1309 	log_print("Using SCTP for communications");
1310 
1311 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1312 				  SOCK_STREAM, IPPROTO_SCTP, &sock);
1313 	if (result < 0) {
1314 		log_print("Can't create comms socket, check SCTP is loaded");
1315 		goto out;
1316 	}
1317 
1318 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
1319 				 (char *)&bufsize, sizeof(bufsize));
1320 	if (result)
1321 		log_print("Error increasing buffer space on socket %d", result);
1322 
1323 	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
1324 				   sizeof(one));
1325 	if (result < 0)
1326 		log_print("Could not set SCTP NODELAY error %d", result);
1327 
1328 	write_lock_bh(&sock->sk->sk_callback_lock);
1329 	/* Init con struct */
1330 	sock->sk->sk_user_data = con;
1331 	save_listen_callbacks(sock);
1332 	con->sock = sock;
1333 	con->sock->sk->sk_data_ready = lowcomms_data_ready;
1334 	con->rx_action = sctp_accept_from_sock;
1335 	con->connect_action = sctp_connect_to_sock;
1336 
1337 	write_unlock_bh(&sock->sk->sk_callback_lock);
1338 
1339 	/* Bind to all addresses. */
1340 	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
1341 		goto create_delsock;
1342 
1343 	result = sock->ops->listen(sock, 5);
1344 	if (result < 0) {
1345 		log_print("Can't set socket listening");
1346 		goto create_delsock;
1347 	}
1348 
1349 	return 0;
1350 
1351 create_delsock:
1352 	sock_release(sock);
1353 	con->sock = NULL;
1354 out:
1355 	return result;
1356 }
1357 
1358 static int tcp_listen_for_all(void)
1359 {
1360 	struct socket *sock = NULL;
1361 	struct connection *con = nodeid2con(0, GFP_NOFS);
1362 	int result = -EINVAL;
1363 
1364 	if (!con)
1365 		return -ENOMEM;
1366 
1367 	/* We don't support multi-homed hosts */
1368 	if (dlm_local_addr[1] != NULL) {
1369 		log_print("TCP protocol can't handle multi-homed hosts, "
1370 			  "try SCTP");
1371 		return -EINVAL;
1372 	}
1373 
1374 	log_print("Using TCP for communications");
1375 
1376 	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1377 	if (sock) {
1378 		add_sock(sock, con);
1379 		result = 0;
1380 	}
1381 	else {
1382 		result = -EADDRINUSE;
1383 	}
1384 
1385 	return result;
1386 }
1387 
1388 
1389 
1390 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1391 						     gfp_t allocation)
1392 {
1393 	struct writequeue_entry *entry;
1394 
1395 	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
1396 	if (!entry)
1397 		return NULL;
1398 
1399 	entry->page = alloc_page(allocation);
1400 	if (!entry->page) {
1401 		kfree(entry);
1402 		return NULL;
1403 	}
1404 
1405 	entry->offset = 0;
1406 	entry->len = 0;
1407 	entry->end = 0;
1408 	entry->users = 0;
1409 	entry->con = con;
1410 
1411 	return entry;
1412 }
1413 
1414 void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
1415 {
1416 	struct connection *con;
1417 	struct writequeue_entry *e;
1418 	int offset = 0;
1419 
1420 	con = nodeid2con(nodeid, allocation);
1421 	if (!con)
1422 		return NULL;
1423 
1424 	spin_lock(&con->writequeue_lock);
1425 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1426 	if ((&e->list == &con->writequeue) ||
1427 	    (PAGE_SIZE - e->end < len)) {
1428 		e = NULL;
1429 	} else {
1430 		offset = e->end;
1431 		e->end += len;
1432 		e->users++;
1433 	}
1434 	spin_unlock(&con->writequeue_lock);
1435 
1436 	if (e) {
1437 	got_one:
1438 		*ppc = page_address(e->page) + offset;
1439 		return e;
1440 	}
1441 
1442 	e = new_writequeue_entry(con, allocation);
1443 	if (e) {
1444 		spin_lock(&con->writequeue_lock);
1445 		offset = e->end;
1446 		e->end += len;
1447 		e->users++;
1448 		list_add_tail(&e->list, &con->writequeue);
1449 		spin_unlock(&con->writequeue_lock);
1450 		goto got_one;
1451 	}
1452 	return NULL;
1453 }
1454 
1455 void dlm_lowcomms_commit_buffer(void *mh)
1456 {
1457 	struct writequeue_entry *e = (struct writequeue_entry *)mh;
1458 	struct connection *con = e->con;
1459 	int users;
1460 
1461 	spin_lock(&con->writequeue_lock);
1462 	users = --e->users;
1463 	if (users)
1464 		goto out;
1465 	e->len = e->end - e->offset;
1466 	spin_unlock(&con->writequeue_lock);
1467 
1468 	queue_work(send_workqueue, &con->swork);
1469 	return;
1470 
1471 out:
1472 	spin_unlock(&con->writequeue_lock);
1473 	return;
1474 }
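
/*
 * A minimal sketch of how the two calls above are used by the rest of dlm
 * to queue a message for another node ('to_nodeid' and 'len' are the
 * caller's; 'len' is assumed to fit within one page):
 *
 *	void *mh;
 *	char *p;
 *
 *	mh = dlm_lowcomms_get_buffer(to_nodeid, len, GFP_NOFS, &p);
 *	if (!mh)
 *		return -ENOBUFS;
 *	memset(p, 0, len);		(the caller builds its message at p)
 *	dlm_lowcomms_commit_buffer(mh);
 *
 * get_buffer reserves 'len' bytes at the tail of the connection's
 * writequeue (extending the last page or allocating a new one) and bumps
 * e->users; commit_buffer drops that reference and, when the last user
 * commits, queues the send work that ends up in send_to_sock().
 */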
1475 
1476 /* Send a message */
1477 static void send_to_sock(struct connection *con)
1478 {
1479 	int ret = 0;
1480 	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1481 	struct writequeue_entry *e;
1482 	int len, offset;
1483 	int count = 0;
1484 
1485 	mutex_lock(&con->sock_mutex);
1486 	if (con->sock == NULL)
1487 		goto out_connect;
1488 
1489 	spin_lock(&con->writequeue_lock);
1490 	for (;;) {
1491 		e = list_entry(con->writequeue.next, struct writequeue_entry,
1492 			       list);
1493 		if ((struct list_head *) e == &con->writequeue)
1494 			break;
1495 
1496 		len = e->len;
1497 		offset = e->offset;
1498 		BUG_ON(len == 0 && e->users == 0);
1499 		spin_unlock(&con->writequeue_lock);
1500 
1501 		ret = 0;
1502 		if (len) {
1503 			ret = kernel_sendpage(con->sock, e->page, offset, len,
1504 					      msg_flags);
1505 			if (ret == -EAGAIN || ret == 0) {
1506 				if (ret == -EAGAIN &&
1507 				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1508 				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1509 					/* Notify TCP that we're limited by the
1510 					 * application window size.
1511 					 */
1512 					set_bit(SOCK_NOSPACE, &con->sock->flags);
1513 					con->sock->sk->sk_write_pending++;
1514 				}
1515 				cond_resched();
1516 				goto out;
1517 			} else if (ret < 0)
1518 				goto send_error;
1519 		}
1520 
1521 		/* Don't starve people filling buffers */
1522 		if (++count >= MAX_SEND_MSG_COUNT) {
1523 			cond_resched();
1524 			count = 0;
1525 		}
1526 
1527 		spin_lock(&con->writequeue_lock);
1528 		writequeue_entry_complete(e, ret);
1529 	}
1530 	spin_unlock(&con->writequeue_lock);
1531 out:
1532 	mutex_unlock(&con->sock_mutex);
1533 	return;
1534 
1535 send_error:
1536 	mutex_unlock(&con->sock_mutex);
1537 	close_connection(con, true, false, true);
1538 	/* Requeue the send work. When the work daemon runs again, it will try
1539 	   a new connection, then call this function again. */
1540 	queue_work(send_workqueue, &con->swork);
1541 	return;
1542 
1543 out_connect:
1544 	mutex_unlock(&con->sock_mutex);
1545 	queue_work(send_workqueue, &con->swork);
1546 	cond_resched();
1547 }
1548 
1549 static void clean_one_writequeue(struct connection *con)
1550 {
1551 	struct writequeue_entry *e, *safe;
1552 
1553 	spin_lock(&con->writequeue_lock);
1554 	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1555 		list_del(&e->list);
1556 		free_entry(e);
1557 	}
1558 	spin_unlock(&con->writequeue_lock);
1559 }
1560 
1561 /* Called from recovery when it knows that a node has
1562    left the cluster */
1563 int dlm_lowcomms_close(int nodeid)
1564 {
1565 	struct connection *con;
1566 	struct dlm_node_addr *na;
1567 
1568 	log_print("closing connection to node %d", nodeid);
1569 	con = nodeid2con(nodeid, 0);
1570 	if (con) {
1571 		set_bit(CF_CLOSE, &con->flags);
1572 		close_connection(con, true, true, true);
1573 		clean_one_writequeue(con);
1574 	}
1575 
1576 	spin_lock(&dlm_node_addrs_spin);
1577 	na = find_node_addr(nodeid);
1578 	if (na) {
1579 		list_del(&na->list);
1580 		while (na->addr_count--)
1581 			kfree(na->addr[na->addr_count]);
1582 		kfree(na);
1583 	}
1584 	spin_unlock(&dlm_node_addrs_spin);
1585 
1586 	return 0;
1587 }
1588 
1589 /* Receive workqueue function */
1590 static void process_recv_sockets(struct work_struct *work)
1591 {
1592 	struct connection *con = container_of(work, struct connection, rwork);
1593 	int err;
1594 
1595 	clear_bit(CF_READ_PENDING, &con->flags);
1596 	do {
1597 		err = con->rx_action(con);
1598 	} while (!err);
1599 }
1600 
1601 /* Send workqueue function */
1602 static void process_send_sockets(struct work_struct *work)
1603 {
1604 	struct connection *con = container_of(work, struct connection, swork);
1605 
1606 	clear_bit(CF_WRITE_PENDING, &con->flags);
1607 	if (con->sock == NULL) /* not mutex protected so check it inside too */
1608 		con->connect_action(con);
1609 	if (!list_empty(&con->writequeue))
1610 		send_to_sock(con);
1611 }
1612 
1613 
1614 /* Discard all entries on the write queues */
1615 static void clean_writequeues(void)
1616 {
1617 	foreach_conn(clean_one_writequeue);
1618 }
1619 
1620 static void work_stop(void)
1621 {
1622 	destroy_workqueue(recv_workqueue);
1623 	destroy_workqueue(send_workqueue);
1624 }
1625 
1626 static int work_start(void)
1627 {
1628 	recv_workqueue = alloc_workqueue("dlm_recv",
1629 					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1630 	if (!recv_workqueue) {
1631 		log_print("can't start dlm_recv");
1632 		return -ENOMEM;
1633 	}
1634 
1635 	send_workqueue = alloc_workqueue("dlm_send",
1636 					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1637 	if (!send_workqueue) {
1638 		log_print("can't start dlm_send");
1639 		destroy_workqueue(recv_workqueue);
1640 		return -ENOMEM;
1641 	}
1642 
1643 	return 0;
1644 }
1645 
1646 static void _stop_conn(struct connection *con, bool and_other)
1647 {
1648 	mutex_lock(&con->sock_mutex);
1649 	set_bit(CF_CLOSE, &con->flags);
1650 	set_bit(CF_READ_PENDING, &con->flags);
1651 	set_bit(CF_WRITE_PENDING, &con->flags);
1652 	if (con->sock && con->sock->sk) {
1653 		write_lock_bh(&con->sock->sk->sk_callback_lock);
1654 		con->sock->sk->sk_user_data = NULL;
1655 		write_unlock_bh(&con->sock->sk->sk_callback_lock);
1656 	}
1657 	if (con->othercon && and_other)
1658 		_stop_conn(con->othercon, false);
1659 	mutex_unlock(&con->sock_mutex);
1660 }
1661 
1662 static void stop_conn(struct connection *con)
1663 {
1664 	_stop_conn(con, true);
1665 }
1666 
1667 static void free_conn(struct connection *con)
1668 {
1669 	close_connection(con, true, true, true);
1670 	if (con->othercon)
1671 		kmem_cache_free(con_cache, con->othercon);
1672 	hlist_del(&con->list);
1673 	kmem_cache_free(con_cache, con);
1674 }
1675 
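/*
 * Drain all connection work before the connections are torn down. Each
 * pass runs stop_conn() on every connection (setting the pending bits and
 * detaching sk_user_data so the socket callbacks stop queueing new work),
 * flushes both workqueues, and then checks that CF_READ_PENDING and
 * CF_WRITE_PENDING are still set everywhere; if a work item ran in the
 * meantime and cleared a bit, the loop goes round again.
 */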
1676 static void work_flush(void)
1677 {
1678 	int ok;
1679 	int i;
1680 	struct hlist_node *n;
1681 	struct connection *con;
1682 
1683 	flush_workqueue(recv_workqueue);
1684 	flush_workqueue(send_workqueue);
1685 	do {
1686 		ok = 1;
1687 		foreach_conn(stop_conn);
1688 		flush_workqueue(recv_workqueue);
1689 		flush_workqueue(send_workqueue);
1690 		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1691 			hlist_for_each_entry_safe(con, n,
1692 						  &connection_hash[i], list) {
1693 				ok &= test_bit(CF_READ_PENDING, &con->flags);
1694 				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1695 				if (con->othercon) {
1696 					ok &= test_bit(CF_READ_PENDING,
1697 						       &con->othercon->flags);
1698 					ok &= test_bit(CF_WRITE_PENDING,
1699 						       &con->othercon->flags);
1700 				}
1701 			}
1702 		}
1703 	} while (!ok);
1704 }
1705 
1706 void dlm_lowcomms_stop(void)
1707 {
1708 	/* Set all the flags to prevent any
1709 	   socket activity.
1710 	*/
1711 	mutex_lock(&connections_lock);
1712 	dlm_allow_conn = 0;
1713 	mutex_unlock(&connections_lock);
1714 	work_flush();
1715 	clean_writequeues();
1716 	foreach_conn(free_conn);
1717 	work_stop();
1718 
1719 	kmem_cache_destroy(con_cache);
1720 }
1721 
1722 int dlm_lowcomms_start(void)
1723 {
1724 	int error = -EINVAL;
1725 	struct connection *con;
1726 	int i;
1727 
1728 	for (i = 0; i < CONN_HASH_SIZE; i++)
1729 		INIT_HLIST_HEAD(&connection_hash[i]);
1730 
1731 	init_local();
1732 	if (!dlm_local_count) {
1733 		error = -ENOTCONN;
1734 		log_print("no local IP address has been set");
1735 		goto fail;
1736 	}
1737 
1738 	error = -ENOMEM;
1739 	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1740 				      __alignof__(struct connection), 0,
1741 				      NULL);
1742 	if (!con_cache)
1743 		goto fail;
1744 
1745 	error = work_start();
1746 	if (error)
1747 		goto fail_destroy;
1748 
1749 	dlm_allow_conn = 1;
1750 
1751 	/* Start listening */
1752 	if (dlm_config.ci_protocol == 0)
1753 		error = tcp_listen_for_all();
1754 	else
1755 		error = sctp_listen_for_all();
1756 	if (error)
1757 		goto fail_unlisten;
1758 
1759 	return 0;
1760 
1761 fail_unlisten:
1762 	dlm_allow_conn = 0;
1763 	con = nodeid2con(0,0);
1764 	if (con) {
1765 		close_connection(con, false, true, true);
1766 		kmem_cache_free(con_cache, con);
1767 	}
1768 fail_destroy:
1769 	kmem_cache_destroy(con_cache);
1770 fail:
1771 	return error;
1772 }
1773 
1774 void dlm_lowcomms_exit(void)
1775 {
1776 	struct dlm_node_addr *na, *safe;
1777 
1778 	spin_lock(&dlm_node_addrs_spin);
1779 	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
1780 		list_del(&na->list);
1781 		while (na->addr_count--)
1782 			kfree(na->addr[na->addr_count]);
1783 		kfree(na);
1784 	}
1785 	spin_unlock(&dlm_node_addrs_spin);
1786 }
1787