xref: /linux/fs/ocfs2/cluster/tcp.c (revision 64fc2a947a9873700929ec0ef02b4654a04e0476)
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  *
3  * vim: noexpandtab sw=8 ts=8 sts=0:
4  *
5  * Copyright (C) 2004 Oracle.  All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public
18  * License along with this program; if not, write to the
19  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20  * Boston, MA 02111-1307, USA.
21  *
22  * ----
23  *
24  * Callers for this were originally written against a very simple synchronous
25  * API.  This implementation reflects those simple callers.  Some day I'm sure
26  * we'll need to move to a more robust posting/callback mechanism.
27  *
28  * Transmit calls pass in kernel virtual addresses and block while copying them
29  * into the socket's tx buffers via a normal blocking sendmsg.  They'll block
30  * waiting for a failed socket to time out.  TX callers can also pass in a pointer
31  * to an 'int' which gets filled with an errno off the wire in response to the
32  * message they send.
33  *
34  * Handlers for unsolicited messages are registered.  Each socket has a page
35  * that incoming data is copied into.  First the header, then the data.
36  * Handlers are called from only one thread with a reference to this per-socket
37  * page.  This page is destroyed after the handler call, so it can't be
38  * referenced beyond the call.  Handlers may block but are discouraged from
39  * doing so.
40  *
41  * Any framing errors (bad magic, large payload lengths) close a connection.
42  *
43  * Our sock_container holds the state we associate with a socket.  Its current
44  * framing state is held there, as well as the refcounting that determines
45  * when it is safe to tear down the socket.  The socket is only finally torn down from
46  * the container when the container loses all of its references -- so as long
47  * as you hold a ref on the container you can trust that the socket is valid
48  * for use with kernel socket APIs.
49  *
50  * Connections are initiated between a pair of nodes when the node with the
51  * higher node number gets a heartbeat callback which indicates that the lower
52  * numbered node has started heartbeating.  The lower numbered node is passive
53  * and only accepts the connection if the higher numbered node is heartbeating.
54  */
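
/*
 * A minimal sketch of the refcounting rule described above, for
 * illustration only (sc_get/sc_put are the static helpers defined later
 * in this file):
 *
 *	sc_get(sc);
 *	kernel_sendmsg(sc->sc_sock, &msg, &vec, 1, len);
 *	sc_put(sc);
 *
 * The socket is safe to use across the kernel_sendmsg() because a
 * container ref is held; the final put is what actually tears the
 * socket down.
 */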
55 
56 #include <linux/kernel.h>
57 #include <linux/jiffies.h>
58 #include <linux/slab.h>
59 #include <linux/idr.h>
60 #include <linux/kref.h>
61 #include <linux/net.h>
62 #include <linux/export.h>
63 #include <net/tcp.h>
64 
65 #include <linux/uaccess.h>
66 
67 #include "heartbeat.h"
68 #include "tcp.h"
69 #include "nodemanager.h"
70 #define MLOG_MASK_PREFIX ML_TCP
71 #include "masklog.h"
72 #include "quorum.h"
73 
74 #include "tcp_internal.h"
75 
76 #define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
77 #define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num,	\
78 			  &sc->sc_node->nd_ipv4_address,		\
79 			  ntohs(sc->sc_node->nd_ipv4_port)
80 
81 /*
82  * In the following two log macros, the whitespace after the ',' just
83  * before ##args is intentional. Otherwise, gcc 2.95 will eat the
84  * previous token if args expands to nothing.
85  */
86 #define msglog(hdr, fmt, args...) do {					\
87 	typeof(hdr) __hdr = (hdr);					\
88 	mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d "	\
89 	     "key %08x num %u] " fmt,					\
90 	     be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), 	\
91 	     be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status),	\
92 	     be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key),	\
93 	     be32_to_cpu(__hdr->msg_num) ,  ##args);			\
94 } while (0)
95 
96 #define sclog(sc, fmt, args...) do {					\
97 	typeof(sc) __sc = (sc);						\
98 	mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p "	\
99 	     "pg_off %zu] " fmt, __sc,					\
100 	     atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock,	\
101 	    __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off ,	\
102 	    ##args);							\
103 } while (0)
104 
105 static DEFINE_RWLOCK(o2net_handler_lock);
106 static struct rb_root o2net_handler_tree = RB_ROOT;
107 
108 static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
109 
110 /* XXX someday we'll need better accounting */
111 static struct socket *o2net_listen_sock;
112 
113 /*
114  * listen work is only queued by the listening socket callbacks on the
115  * o2net_wq.  teardown detaches the callbacks before destroying the workqueue.
116  * quorum work is queued as sock containers are shut down.  stop_listening
117  * tears down all the node's sock containers, preventing future shutdowns
118  * and queued quorum work, before canceling delayed quorum work and
119  * destroying the work queue.
120  */
121 static struct workqueue_struct *o2net_wq;
122 static struct work_struct o2net_listen_work;
123 
124 static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
125 #define O2NET_HB_PRI 0x1
126 
127 static struct o2net_handshake *o2net_hand;
128 static struct o2net_msg *o2net_keep_req, *o2net_keep_resp;
129 
130 static int o2net_sys_err_translations[O2NET_ERR_MAX] =
131 		{[O2NET_ERR_NONE]	= 0,
132 		 [O2NET_ERR_NO_HNDLR]	= -ENOPROTOOPT,
133 		 [O2NET_ERR_OVERFLOW]	= -EOVERFLOW,
134 		 [O2NET_ERR_DIED]	= -EHOSTDOWN,};
135 
136 /* can't quite avoid *all* internal declarations :/ */
137 static void o2net_sc_connect_completed(struct work_struct *work);
138 static void o2net_rx_until_empty(struct work_struct *work);
139 static void o2net_shutdown_sc(struct work_struct *work);
140 static void o2net_listen_data_ready(struct sock *sk);
141 static void o2net_sc_send_keep_req(struct work_struct *work);
142 static void o2net_idle_timer(unsigned long data);
143 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
144 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
145 
146 #ifdef CONFIG_DEBUG_FS
147 static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
148 			   u32 msgkey, struct task_struct *task, u8 node)
149 {
150 	INIT_LIST_HEAD(&nst->st_net_debug_item);
151 	nst->st_task = task;
152 	nst->st_msg_type = msgtype;
153 	nst->st_msg_key = msgkey;
154 	nst->st_node = node;
155 }
156 
157 static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
158 {
159 	nst->st_sock_time = ktime_get();
160 }
161 
162 static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
163 {
164 	nst->st_send_time = ktime_get();
165 }
166 
167 static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
168 {
169 	nst->st_status_time = ktime_get();
170 }
171 
172 static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
173 						struct o2net_sock_container *sc)
174 {
175 	nst->st_sc = sc;
176 }
177 
178 static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
179 					u32 msg_id)
180 {
181 	nst->st_id = msg_id;
182 }
183 
184 static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
185 {
186 	sc->sc_tv_timer = ktime_get();
187 }
188 
189 static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
190 {
191 	sc->sc_tv_data_ready = ktime_get();
192 }
193 
194 static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
195 {
196 	sc->sc_tv_advance_start = ktime_get();
197 }
198 
199 static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
200 {
201 	sc->sc_tv_advance_stop = ktime_get();
202 }
203 
204 static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
205 {
206 	sc->sc_tv_func_start = ktime_get();
207 }
208 
209 static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
210 {
211 	sc->sc_tv_func_stop = ktime_get();
212 }
213 
214 #else  /* CONFIG_DEBUG_FS */
215 # define o2net_init_nst(a, b, c, d, e)
216 # define o2net_set_nst_sock_time(a)
217 # define o2net_set_nst_send_time(a)
218 # define o2net_set_nst_status_time(a)
219 # define o2net_set_nst_sock_container(a, b)
220 # define o2net_set_nst_msg_id(a, b)
221 # define o2net_set_sock_timer(a)
222 # define o2net_set_data_ready_time(a)
223 # define o2net_set_advance_start_time(a)
224 # define o2net_set_advance_stop_time(a)
225 # define o2net_set_func_start_time(a)
226 # define o2net_set_func_stop_time(a)
227 #endif /* CONFIG_DEBUG_FS */
228 
229 #ifdef CONFIG_OCFS2_FS_STATS
230 static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
231 {
232 	return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
233 }
234 
235 static void o2net_update_send_stats(struct o2net_send_tracking *nst,
236 				    struct o2net_sock_container *sc)
237 {
238 	sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
239 					   ktime_sub(ktime_get(),
240 						     nst->st_status_time));
241 	sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
242 					 ktime_sub(nst->st_status_time,
243 						   nst->st_send_time));
244 	sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
245 					    ktime_sub(nst->st_send_time,
246 						      nst->st_sock_time));
247 	sc->sc_send_count++;
248 }
249 
250 static void o2net_update_recv_stats(struct o2net_sock_container *sc)
251 {
252 	sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
253 					    o2net_get_func_run_time(sc));
254 	sc->sc_recv_count++;
255 }
256 
257 #else
258 
259 # define o2net_update_send_stats(a, b)
260 
261 # define o2net_update_recv_stats(sc)
262 
263 #endif /* CONFIG_OCFS2_FS_STATS */
264 
265 static inline unsigned int o2net_reconnect_delay(void)
266 {
267 	return o2nm_single_cluster->cl_reconnect_delay_ms;
268 }
269 
270 static inline unsigned int o2net_keepalive_delay(void)
271 {
272 	return o2nm_single_cluster->cl_keepalive_delay_ms;
273 }
274 
275 static inline unsigned int o2net_idle_timeout(void)
276 {
277 	return o2nm_single_cluster->cl_idle_timeout_ms;
278 }
279 
280 static inline int o2net_sys_err_to_errno(enum o2net_system_error err)
281 {
282 	int trans;
283 	BUG_ON(err >= O2NET_ERR_MAX);
284 	trans = o2net_sys_err_translations[err];
285 
286 	/* Just in case we mess up the translation table above */
287 	BUG_ON(err != O2NET_ERR_NONE && trans == 0);
288 	return trans;
289 }
290 
291 static struct o2net_node * o2net_nn_from_num(u8 node_num)
292 {
293 	BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes));
294 	return &o2net_nodes[node_num];
295 }
296 
297 static u8 o2net_num_from_nn(struct o2net_node *nn)
298 {
299 	BUG_ON(nn == NULL);
300 	return nn - o2net_nodes;
301 }
302 
303 /* ------------------------------------------------------------ */
304 
305 static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
306 {
307 	int ret;
308 
309 	spin_lock(&nn->nn_lock);
310 	ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
311 	if (ret >= 0) {
312 		nsw->ns_id = ret;
313 		list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
314 	}
315 	spin_unlock(&nn->nn_lock);
316 	if (ret < 0)
317 		return ret;
318 
319 	init_waitqueue_head(&nsw->ns_wq);
320 	nsw->ns_sys_status = O2NET_ERR_NONE;
321 	nsw->ns_status = 0;
322 	return 0;
323 }
324 
325 static void o2net_complete_nsw_locked(struct o2net_node *nn,
326 				      struct o2net_status_wait *nsw,
327 				      enum o2net_system_error sys_status,
328 				      s32 status)
329 {
330 	assert_spin_locked(&nn->nn_lock);
331 
332 	if (!list_empty(&nsw->ns_node_item)) {
333 		list_del_init(&nsw->ns_node_item);
334 		nsw->ns_sys_status = sys_status;
335 		nsw->ns_status = status;
336 		idr_remove(&nn->nn_status_idr, nsw->ns_id);
337 		wake_up(&nsw->ns_wq);
338 	}
339 }
340 
341 static void o2net_complete_nsw(struct o2net_node *nn,
342 			       struct o2net_status_wait *nsw,
343 			       u64 id, enum o2net_system_error sys_status,
344 			       s32 status)
345 {
346 	spin_lock(&nn->nn_lock);
347 	if (nsw == NULL) {
348 		if (id > INT_MAX)
349 			goto out;
350 
351 		nsw = idr_find(&nn->nn_status_idr, id);
352 		if (nsw == NULL)
353 			goto out;
354 	}
355 
356 	o2net_complete_nsw_locked(nn, nsw, sys_status, status);
357 
358 out:
359 	spin_unlock(&nn->nn_lock);
360 	return;
361 }
362 
363 static void o2net_complete_nodes_nsw(struct o2net_node *nn)
364 {
365 	struct o2net_status_wait *nsw, *tmp;
366 	unsigned int num_kills = 0;
367 
368 	assert_spin_locked(&nn->nn_lock);
369 
370 	list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
371 		o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
372 		num_kills++;
373 	}
374 
375 	mlog(0, "completed %d messages for node %u\n", num_kills,
376 	     o2net_num_from_nn(nn));
377 }
378 
379 static int o2net_nsw_completed(struct o2net_node *nn,
380 			       struct o2net_status_wait *nsw)
381 {
382 	int completed;
383 	spin_lock(&nn->nn_lock);
384 	completed = list_empty(&nsw->ns_node_item);
385 	spin_unlock(&nn->nn_lock);
386 	return completed;
387 }
388 
389 /* ------------------------------------------------------------ */
390 
391 static void sc_kref_release(struct kref *kref)
392 {
393 	struct o2net_sock_container *sc = container_of(kref,
394 					struct o2net_sock_container, sc_kref);
395 	BUG_ON(timer_pending(&sc->sc_idle_timeout));
396 
397 	sclog(sc, "releasing\n");
398 
399 	if (sc->sc_sock) {
400 		sock_release(sc->sc_sock);
401 		sc->sc_sock = NULL;
402 	}
403 
404 	o2nm_undepend_item(&sc->sc_node->nd_item);
405 	o2nm_node_put(sc->sc_node);
406 	sc->sc_node = NULL;
407 
408 	o2net_debug_del_sc(sc);
409 
410 	if (sc->sc_page)
411 		__free_page(sc->sc_page);
412 	kfree(sc);
413 }
414 
415 static void sc_put(struct o2net_sock_container *sc)
416 {
417 	sclog(sc, "put\n");
418 	kref_put(&sc->sc_kref, sc_kref_release);
419 }
420 static void sc_get(struct o2net_sock_container *sc)
421 {
422 	sclog(sc, "get\n");
423 	kref_get(&sc->sc_kref);
424 }
425 static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
426 {
427 	struct o2net_sock_container *sc, *ret = NULL;
428 	struct page *page = NULL;
429 	int status = 0;
430 
431 	page = alloc_page(GFP_NOFS);
432 	sc = kzalloc(sizeof(*sc), GFP_NOFS);
433 	if (sc == NULL || page == NULL)
434 		goto out;
435 
436 	kref_init(&sc->sc_kref);
437 	o2nm_node_get(node);
438 	sc->sc_node = node;
439 
440 	/* pin the node item of the remote node */
441 	status = o2nm_depend_item(&node->nd_item);
442 	if (status) {
443 		mlog_errno(status);
444 		o2nm_node_put(node);
445 		goto out;
446 	}
447 	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
448 	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
449 	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
450 	INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
451 
452 	init_timer(&sc->sc_idle_timeout);
453 	sc->sc_idle_timeout.function = o2net_idle_timer;
454 	sc->sc_idle_timeout.data = (unsigned long)sc;
455 
456 	sclog(sc, "alloced\n");
457 
458 	ret = sc;
459 	sc->sc_page = page;
460 	o2net_debug_add_sc(sc);
461 	sc = NULL;
462 	page = NULL;
463 
464 out:
465 	if (page)
466 		__free_page(page);
467 	kfree(sc);
468 
469 	return ret;
470 }
471 
472 /* ------------------------------------------------------------ */
473 
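/*
 * Work queued against a sock container holds a container ref for the
 * lifetime of the work item: the helpers below take a ref before
 * queueing and drop it again if the item was already queued, or when a
 * pending item is successfully cancelled.
 */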
474 static void o2net_sc_queue_work(struct o2net_sock_container *sc,
475 				struct work_struct *work)
476 {
477 	sc_get(sc);
478 	if (!queue_work(o2net_wq, work))
479 		sc_put(sc);
480 }
481 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
482 					struct delayed_work *work,
483 					int delay)
484 {
485 	sc_get(sc);
486 	if (!queue_delayed_work(o2net_wq, work, delay))
487 		sc_put(sc);
488 }
489 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
490 					 struct delayed_work *work)
491 {
492 	if (cancel_delayed_work(work))
493 		sc_put(sc);
494 }
495 
496 static atomic_t o2net_connected_peers = ATOMIC_INIT(0);
497 
498 int o2net_num_connected_peers(void)
499 {
500 	return atomic_read(&o2net_connected_peers);
501 }
502 
503 static void o2net_set_nn_state(struct o2net_node *nn,
504 			       struct o2net_sock_container *sc,
505 			       unsigned valid, int err)
506 {
507 	int was_valid = nn->nn_sc_valid;
508 	int was_err = nn->nn_persistent_error;
509 	struct o2net_sock_container *old_sc = nn->nn_sc;
510 
511 	assert_spin_locked(&nn->nn_lock);
512 
513 	if (old_sc && !sc)
514 		atomic_dec(&o2net_connected_peers);
515 	else if (!old_sc && sc)
516 		atomic_inc(&o2net_connected_peers);
517 
518 	/* the node num comparison and single connect/accept path should stop
519 	 * a non-null sc from being overwritten with another */
520 	BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
521 	mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
522 	mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
523 
524 	if (was_valid && !valid && err == 0)
525 		err = -ENOTCONN;
526 
527 	mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
528 	     o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
529 	     nn->nn_persistent_error, err);
530 
531 	nn->nn_sc = sc;
532 	nn->nn_sc_valid = valid ? 1 : 0;
533 	nn->nn_persistent_error = err;
534 
535 	/* mirrors o2net_tx_can_proceed() */
536 	if (nn->nn_persistent_error || nn->nn_sc_valid)
537 		wake_up(&nn->nn_sc_wq);
538 
539 	if (was_valid && !was_err && nn->nn_persistent_error) {
540 		o2quo_conn_err(o2net_num_from_nn(nn));
541 		queue_delayed_work(o2net_wq, &nn->nn_still_up,
542 				   msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
543 	}
544 
545 	if (was_valid && !valid) {
546 		if (old_sc)
547 			printk(KERN_NOTICE "o2net: No longer connected to "
548 				SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
549 		o2net_complete_nodes_nsw(nn);
550 	}
551 
552 	if (!was_valid && valid) {
553 		o2quo_conn_up(o2net_num_from_nn(nn));
554 		cancel_delayed_work(&nn->nn_connect_expired);
555 		printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
556 		       o2nm_this_node() > sc->sc_node->nd_num ?
557 		       "Connected to" : "Accepted connection from",
558 		       SC_NODEF_ARGS(sc));
559 	}
560 
561 	/* trigger the connecting worker func as long as we're not valid,
562 	 * it will back off if it shouldn't connect.  This can be called
563 	 * from node config teardown and so needs to be careful about
564 	 * the work queue actually being up. */
565 	if (!valid && o2net_wq) {
566 		unsigned long delay;
567 		/* delay if we're within a RECONNECT_DELAY of the
568 		 * last attempt */
569 		delay = (nn->nn_last_connect_attempt +
570 			 msecs_to_jiffies(o2net_reconnect_delay()))
571 			- jiffies;
572 		if (delay > msecs_to_jiffies(o2net_reconnect_delay()))
573 			delay = 0;
574 		mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
575 		queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
576 
577 		/*
578 		 * Delay the expired work after idle timeout.
579 		 *
580 		 * We might have lots of failed connection attempts that run
581 		 * through here but we only cancel the connect_expired work when
582 		 * a connection attempt succeeds.  So only the first enqueue of
583 		 * the connect_expired work will do anything.  The rest will see
584 		 * that it's already queued and do nothing.
585 		 */
586 		delay += msecs_to_jiffies(o2net_idle_timeout());
587 		queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay);
588 	}
589 
590 	/* keep track of the nn's sc ref for the caller */
591 	if ((old_sc == NULL) && sc)
592 		sc_get(sc);
593 	if (old_sc && (old_sc != sc)) {
594 		o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
595 		sc_put(old_sc);
596 	}
597 }
598 
599 /* see o2net_register_callbacks() */
600 static void o2net_data_ready(struct sock *sk)
601 {
602 	void (*ready)(struct sock *sk);
603 	struct o2net_sock_container *sc;
604 
605 	read_lock_bh(&sk->sk_callback_lock);
606 	sc = sk->sk_user_data;
607 	if (sc) {
608 		sclog(sc, "data_ready hit\n");
609 		o2net_set_data_ready_time(sc);
610 		o2net_sc_queue_work(sc, &sc->sc_rx_work);
611 		ready = sc->sc_data_ready;
612 	} else {
613 		ready = sk->sk_data_ready;
614 	}
615 	read_unlock_bh(&sk->sk_callback_lock);
616 
617 	ready(sk);
618 }
619 
620 /* see o2net_register_callbacks() */
621 static void o2net_state_change(struct sock *sk)
622 {
623 	void (*state_change)(struct sock *sk);
624 	struct o2net_sock_container *sc;
625 
626 	read_lock_bh(&sk->sk_callback_lock);
627 	sc = sk->sk_user_data;
628 	if (sc == NULL) {
629 		state_change = sk->sk_state_change;
630 		goto out;
631 	}
632 
633 	sclog(sc, "state_change to %d\n", sk->sk_state);
634 
635 	state_change = sc->sc_state_change;
636 
637 	switch (sk->sk_state) {
638 	/* ignore connecting sockets as they make progress */
639 	case TCP_SYN_SENT:
640 	case TCP_SYN_RECV:
641 		break;
642 	case TCP_ESTABLISHED:
643 		o2net_sc_queue_work(sc, &sc->sc_connect_work);
644 		break;
645 	default:
646 		printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
647 			" shutdown, state %d\n",
648 			SC_NODEF_ARGS(sc), sk->sk_state);
649 		o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
650 		break;
651 	}
652 out:
653 	read_unlock_bh(&sk->sk_callback_lock);
654 	state_change(sk);
655 }
656 
657 /*
658  * we register callbacks so we can queue work on events before calling
659  * the original callbacks.  our callbacks are careful to test user_data
660  * to discover when they've raced with o2net_unregister_callbacks().
661  */
662 static void o2net_register_callbacks(struct sock *sk,
663 				     struct o2net_sock_container *sc)
664 {
665 	write_lock_bh(&sk->sk_callback_lock);
666 
667 	/* accepted sockets inherit the old listen socket data ready */
668 	if (sk->sk_data_ready == o2net_listen_data_ready) {
669 		sk->sk_data_ready = sk->sk_user_data;
670 		sk->sk_user_data = NULL;
671 	}
672 
673 	BUG_ON(sk->sk_user_data != NULL);
674 	sk->sk_user_data = sc;
675 	sc_get(sc);
676 
677 	sc->sc_data_ready = sk->sk_data_ready;
678 	sc->sc_state_change = sk->sk_state_change;
679 	sk->sk_data_ready = o2net_data_ready;
680 	sk->sk_state_change = o2net_state_change;
681 
682 	mutex_init(&sc->sc_send_lock);
683 
684 	write_unlock_bh(&sk->sk_callback_lock);
685 }
686 
687 static int o2net_unregister_callbacks(struct sock *sk,
688 			           struct o2net_sock_container *sc)
689 {
690 	int ret = 0;
691 
692 	write_lock_bh(&sk->sk_callback_lock);
693 	if (sk->sk_user_data == sc) {
694 		ret = 1;
695 		sk->sk_user_data = NULL;
696 		sk->sk_data_ready = sc->sc_data_ready;
697 		sk->sk_state_change = sc->sc_state_change;
698 	}
699 	write_unlock_bh(&sk->sk_callback_lock);
700 
701 	return ret;
702 }
703 
704 /*
705  * this is a little helper that is called by callers who have seen a problem
706  * with an sc and want to detach it from the nn if someone hasn't already
707  * beaten them to it.  if an error is given then the shutdown will be persistent
708  * and pending transmits will be canceled.
709  */
710 static void o2net_ensure_shutdown(struct o2net_node *nn,
711 			           struct o2net_sock_container *sc,
712 				   int err)
713 {
714 	spin_lock(&nn->nn_lock);
715 	if (nn->nn_sc == sc)
716 		o2net_set_nn_state(nn, NULL, 0, err);
717 	spin_unlock(&nn->nn_lock);
718 }
719 
720 /*
721  * This work queue function performs the blocking parts of socket shutdown.  A
722  * few paths lead here.  set_nn_state will trigger this callback if it sees an
723  * sc detached from the nn.  state_change will also trigger this callback
724  * directly when it sees errors.  In that case we need to call set_nn_state
725  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
726  * itself.
727  */
728 static void o2net_shutdown_sc(struct work_struct *work)
729 {
730 	struct o2net_sock_container *sc =
731 		container_of(work, struct o2net_sock_container,
732 			     sc_shutdown_work);
733 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
734 
735 	sclog(sc, "shutting down\n");
736 
737 	/* drop the callbacks ref and call shutdown only once */
738 	if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
739 		/* we shouldn't flush as we're in the thread, the
740 		 * races with pending sc work structs are harmless */
741 		del_timer_sync(&sc->sc_idle_timeout);
742 		o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
743 		sc_put(sc);
744 		kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
745 	}
746 
747 	/* not fatal so failed connects before the other guy has our
748 	 * heartbeat can be retried */
749 	o2net_ensure_shutdown(nn, sc, 0);
750 	sc_put(sc);
751 }
752 
753 /* ------------------------------------------------------------ */
754 
755 static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type,
756 			     u32 key)
757 {
758 	int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
759 
760 	if (ret == 0)
761 		ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
762 
763 	return ret;
764 }
765 
766 static struct o2net_msg_handler *
767 o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
768 			  struct rb_node **ret_parent)
769 {
770 	struct rb_node **p = &o2net_handler_tree.rb_node;
771 	struct rb_node *parent = NULL;
772 	struct o2net_msg_handler *nmh, *ret = NULL;
773 	int cmp;
774 
775 	while (*p) {
776 		parent = *p;
777 		nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
778 		cmp = o2net_handler_cmp(nmh, msg_type, key);
779 
780 		if (cmp < 0)
781 			p = &(*p)->rb_left;
782 		else if (cmp > 0)
783 			p = &(*p)->rb_right;
784 		else {
785 			ret = nmh;
786 			break;
787 		}
788 	}
789 
790 	if (ret_p != NULL)
791 		*ret_p = p;
792 	if (ret_parent != NULL)
793 		*ret_parent = parent;
794 
795 	return ret;
796 }
797 
798 static void o2net_handler_kref_release(struct kref *kref)
799 {
800 	struct o2net_msg_handler *nmh;
801 	nmh = container_of(kref, struct o2net_msg_handler, nh_kref);
802 
803 	kfree(nmh);
804 }
805 
806 static void o2net_handler_put(struct o2net_msg_handler *nmh)
807 {
808 	kref_put(&nmh->nh_kref, o2net_handler_kref_release);
809 }
810 
811 /* max_len is protection for the handler func.  incoming messages won't
812  * be given to the handler if their payload is longer than the max. */
813 int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
814 			   o2net_msg_handler_func *func, void *data,
815 			   o2net_post_msg_handler_func *post_func,
816 			   struct list_head *unreg_list)
817 {
818 	struct o2net_msg_handler *nmh = NULL;
819 	struct rb_node **p, *parent;
820 	int ret = 0;
821 
822 	if (max_len > O2NET_MAX_PAYLOAD_BYTES) {
823 		mlog(0, "max_len for message handler out of range: %u\n",
824 			max_len);
825 		ret = -EINVAL;
826 		goto out;
827 	}
828 
829 	if (!msg_type) {
830 		mlog(0, "no message type provided: %u, %p\n", msg_type, func);
831 		ret = -EINVAL;
832 		goto out;
833 
834 	}
835 	if (!func) {
836 		mlog(0, "no message handler provided: %u, %p\n",
837 		       msg_type, func);
838 		ret = -EINVAL;
839 		goto out;
840 	}
841 
842 	nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS);
843 	if (nmh == NULL) {
844 		ret = -ENOMEM;
845 		goto out;
846 	}
847 
848 	nmh->nh_func = func;
849 	nmh->nh_func_data = data;
850 	nmh->nh_post_func = post_func;
851 	nmh->nh_msg_type = msg_type;
852 	nmh->nh_max_len = max_len;
853 	nmh->nh_key = key;
854 	/* the tree and list get this ref.. they're both removed in
855 	 * unregister when this ref is dropped */
856 	kref_init(&nmh->nh_kref);
857 	INIT_LIST_HEAD(&nmh->nh_unregister_item);
858 
859 	write_lock(&o2net_handler_lock);
860 	if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
861 		ret = -EEXIST;
862 	else {
863 		rb_link_node(&nmh->nh_node, parent, p);
864 		rb_insert_color(&nmh->nh_node, &o2net_handler_tree);
865 		list_add_tail(&nmh->nh_unregister_item, unreg_list);
866 
867 		mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
868 		     func, msg_type, key);
869 		/* we've had some trouble with handlers seemingly vanishing. */
870 		mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
871 							  &parent) == NULL,
872 			        "couldn't find handler we *just* registered "
873 				"for type %u key %08x\n", msg_type, key);
874 	}
875 	write_unlock(&o2net_handler_lock);
878 
879 out:
880 	if (ret)
881 		kfree(nmh);
882 
883 	return ret;
884 }
885 EXPORT_SYMBOL_GPL(o2net_register_handler);
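
/*
 * Hypothetical registration sketch, for illustration only (MY_MSG_TYPE,
 * MY_KEY, MY_MAX_LEN and my_handler are made-up names; the handler
 * signature is o2net_msg_handler_func, as invoked from
 * o2net_process_message() below):
 *
 *	static LIST_HEAD(my_unreg_list);
 *
 *	static int my_handler(struct o2net_msg *msg, u32 len, void *data,
 *			      void **ret_data)
 *	{
 *		return 0;
 *	}
 *
 *	ret = o2net_register_handler(MY_MSG_TYPE, MY_KEY, MY_MAX_LEN,
 *				     my_handler, NULL, NULL, &my_unreg_list);
 *
 * The handler's return value travels back to the sender as the message
 * status, and o2net_unregister_handler_list(&my_unreg_list) tears the
 * registration down again.
 */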
886 
887 void o2net_unregister_handler_list(struct list_head *list)
888 {
889 	struct o2net_msg_handler *nmh, *n;
890 
891 	write_lock(&o2net_handler_lock);
892 	list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
893 		mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
894 		     nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
895 		rb_erase(&nmh->nh_node, &o2net_handler_tree);
896 		list_del_init(&nmh->nh_unregister_item);
897 		kref_put(&nmh->nh_kref, o2net_handler_kref_release);
898 	}
899 	write_unlock(&o2net_handler_lock);
900 }
901 EXPORT_SYMBOL_GPL(o2net_unregister_handler_list);
902 
903 static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
904 {
905 	struct o2net_msg_handler *nmh;
906 
907 	read_lock(&o2net_handler_lock);
908 	nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL);
909 	if (nmh)
910 		kref_get(&nmh->nh_kref);
911 	read_unlock(&o2net_handler_lock);
912 
913 	return nmh;
914 }
915 
916 /* ------------------------------------------------------------ */
917 
918 static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
919 {
920 	struct kvec vec = { .iov_len = len, .iov_base = data, };
921 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
922 	return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
923 }
924 
925 static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
926 			      size_t veclen, size_t total)
927 {
928 	int ret;
929 	struct msghdr msg = {.msg_flags = 0,};
930 
931 	if (sock == NULL) {
932 		ret = -EINVAL;
933 		goto out;
934 	}
935 
936 	ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
937 	if (likely(ret == total))
938 		return 0;
939 	mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, total);
940 	if (ret >= 0)
941 		ret = -EPIPE; /* should be smarter, I bet */
942 out:
943 	mlog(0, "returning error: %d\n", ret);
944 	return ret;
945 }
946 
947 static void o2net_sendpage(struct o2net_sock_container *sc,
948 			   void *kmalloced_virt,
949 			   size_t size)
950 {
951 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
952 	ssize_t ret;
953 
954 	while (1) {
955 		mutex_lock(&sc->sc_send_lock);
956 		ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
957 						 virt_to_page(kmalloced_virt),
958 						 (long)kmalloced_virt & ~PAGE_MASK,
959 						 size, MSG_DONTWAIT);
960 		mutex_unlock(&sc->sc_send_lock);
961 		if (ret == size)
962 			break;
963 		if (ret == (ssize_t)-EAGAIN) {
964 			mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
965 			     " returned EAGAIN\n", size, SC_NODEF_ARGS(sc));
966 			cond_resched();
967 			continue;
968 		}
969 		mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
970 		     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
971 		o2net_ensure_shutdown(nn, sc, 0);
972 		break;
973 	}
974 }
975 
976 static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key)
977 {
978 	memset(msg, 0, sizeof(struct o2net_msg));
979 	msg->magic = cpu_to_be16(O2NET_MSG_MAGIC);
980 	msg->data_len = cpu_to_be16(data_len);
981 	msg->msg_type = cpu_to_be16(msg_type);
982 	msg->sys_status = cpu_to_be32(O2NET_ERR_NONE);
983 	msg->status = 0;
984 	msg->key = cpu_to_be32(key);
985 }
986 
987 static int o2net_tx_can_proceed(struct o2net_node *nn,
988 			        struct o2net_sock_container **sc_ret,
989 				int *error)
990 {
991 	int ret = 0;
992 
993 	spin_lock(&nn->nn_lock);
994 	if (nn->nn_persistent_error) {
995 		ret = 1;
996 		*sc_ret = NULL;
997 		*error = nn->nn_persistent_error;
998 	} else if (nn->nn_sc_valid) {
999 		kref_get(&nn->nn_sc->sc_kref);
1000 
1001 		ret = 1;
1002 		*sc_ret = nn->nn_sc;
1003 		*error = 0;
1004 	}
1005 	spin_unlock(&nn->nn_lock);
1006 
1007 	return ret;
1008 }
1009 
1010 /* Get a map of all nodes to which this node is currently connected */
1011 void o2net_fill_node_map(unsigned long *map, unsigned bytes)
1012 {
1013 	struct o2net_sock_container *sc;
1014 	int node, ret;
1015 
1016 	BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
1017 
1018 	memset(map, 0, bytes);
1019 	for (node = 0; node < O2NM_MAX_NODES; ++node) {
1020 		if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
1021 			continue;
1022 		if (!ret) {
1023 			set_bit(node, map);
1024 			sc_put(sc);
1025 		}
1026 	}
1027 }
1028 EXPORT_SYMBOL_GPL(o2net_fill_node_map);
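
/*
 * Usage sketch, for illustration only; the map sizing mirrors the BUG_ON
 * above:
 *
 *	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *	o2net_fill_node_map(map, sizeof(map));
 *
 * A set bit in 'map' then means we currently hold a usable connection
 * to that node number.
 */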
1029 
1030 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
1031 			   size_t caller_veclen, u8 target_node, int *status)
1032 {
1033 	int ret = 0;
1034 	struct o2net_msg *msg = NULL;
1035 	size_t veclen, caller_bytes = 0;
1036 	struct kvec *vec = NULL;
1037 	struct o2net_sock_container *sc = NULL;
1038 	struct o2net_node *nn = o2net_nn_from_num(target_node);
1039 	struct o2net_status_wait nsw = {
1040 		.ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
1041 	};
1042 	struct o2net_send_tracking nst;
1043 
1044 	o2net_init_nst(&nst, msg_type, key, current, target_node);
1045 
1046 	if (o2net_wq == NULL) {
1047 		mlog(0, "attempt to tx without o2netd running\n");
1048 		ret = -ESRCH;
1049 		goto out;
1050 	}
1051 
1052 	if (caller_veclen == 0) {
1053 		mlog(0, "bad kvec array length\n");
1054 		ret = -EINVAL;
1055 		goto out;
1056 	}
1057 
1058 	caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
1059 	if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) {
1060 		mlog(0, "total payload len %zu too large\n", caller_bytes);
1061 		ret = -EINVAL;
1062 		goto out;
1063 	}
1064 
1065 	if (target_node == o2nm_this_node()) {
1066 		ret = -ELOOP;
1067 		goto out;
1068 	}
1069 
1070 	o2net_debug_add_nst(&nst);
1071 
1072 	o2net_set_nst_sock_time(&nst);
1073 
1074 	wait_event(nn->nn_sc_wq, o2net_tx_can_proceed(nn, &sc, &ret));
1075 	if (ret)
1076 		goto out;
1077 
1078 	o2net_set_nst_sock_container(&nst, sc);
1079 
1080 	veclen = caller_veclen + 1;
1081 	vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
1082 	if (vec == NULL) {
1083 		mlog(0, "failed to allocate %zu element kvec!\n", veclen);
1084 		ret = -ENOMEM;
1085 		goto out;
1086 	}
1087 
1088 	msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
1089 	if (!msg) {
1090 		mlog(0, "failed to allocate an o2net_msg!\n");
1091 		ret = -ENOMEM;
1092 		goto out;
1093 	}
1094 
1095 	o2net_init_msg(msg, caller_bytes, msg_type, key);
1096 
1097 	vec[0].iov_len = sizeof(struct o2net_msg);
1098 	vec[0].iov_base = msg;
1099 	memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
1100 
1101 	ret = o2net_prep_nsw(nn, &nsw);
1102 	if (ret)
1103 		goto out;
1104 
1105 	msg->msg_num = cpu_to_be32(nsw.ns_id);
1106 	o2net_set_nst_msg_id(&nst, nsw.ns_id);
1107 
1108 	o2net_set_nst_send_time(&nst);
1109 
1110 	/* finally, convert the message header to network byte-order
1111 	 * and send */
1112 	mutex_lock(&sc->sc_send_lock);
1113 	ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
1114 				 sizeof(struct o2net_msg) + caller_bytes);
1115 	mutex_unlock(&sc->sc_send_lock);
1116 	msglog(msg, "sending returned %d\n", ret);
1117 	if (ret < 0) {
1118 		mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
1119 		goto out;
1120 	}
1121 
1122 	/* wait on other node's handler */
1123 	o2net_set_nst_status_time(&nst);
1124 	wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
1125 
1126 	o2net_update_send_stats(&nst, sc);
1127 
1128 	/* Note that we avoid overwriting the caller's status return
1129 	 * variable if a system error was reported on the other
1130 	 * side. Callers beware. */
1131 	ret = o2net_sys_err_to_errno(nsw.ns_sys_status);
1132 	if (status && !ret)
1133 		*status = nsw.ns_status;
1134 
1135 	mlog(0, "woken, returning system status %d, user status %d\n",
1136 	     ret, nsw.ns_status);
1137 out:
1138 	o2net_debug_del_nst(&nst); /* must be before dropping sc and node */
1139 	if (sc)
1140 		sc_put(sc);
1141 	kfree(vec);
1142 	kfree(msg);
1143 	o2net_complete_nsw(nn, &nsw, 0, 0, 0);
1144 	return ret;
1145 }
1146 EXPORT_SYMBOL_GPL(o2net_send_message_vec);
1147 
1148 int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
1149 		       u8 target_node, int *status)
1150 {
1151 	struct kvec vec = {
1152 		.iov_base = data,
1153 		.iov_len = len,
1154 	};
1155 	return o2net_send_message_vec(msg_type, key, &vec, 1,
1156 				      target_node, status);
1157 }
1158 EXPORT_SYMBOL_GPL(o2net_send_message);
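
/*
 * Minimal send sketch, for illustration only (MY_MSG_TYPE, MY_KEY, the
 * payload struct and target_node are made-up names):
 *
 *	struct my_payload buf = { ... };
 *	int status = 0, ret;
 *
 *	ret = o2net_send_message(MY_MSG_TYPE, MY_KEY, &buf, sizeof(buf),
 *				 target_node, &status);
 *
 * ret < 0 reports local, transport or system errors; on success 'status'
 * holds the s32 that the remote handler returned.
 */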
1159 
1160 static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr,
1161 				   enum o2net_system_error syserr, int err)
1162 {
1163 	struct kvec vec = {
1164 		.iov_base = hdr,
1165 		.iov_len = sizeof(struct o2net_msg),
1166 	};
1167 
1168 	BUG_ON(syserr >= O2NET_ERR_MAX);
1169 
1170 	/* leave other fields intact from the incoming message, msg_num
1171 	 * in particular */
1172 	hdr->sys_status = cpu_to_be32(syserr);
1173 	hdr->status = cpu_to_be32(err);
1174 	hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC);  // twiddle the magic
1175 	hdr->data_len = 0;
1176 
1177 	msglog(hdr, "about to send status magic %d\n", err);
1178 	/* hdr has been in host byteorder this whole time */
1179 	return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg));
1180 }
1181 
1182 /* this returns -errno if the header was unknown or too large, etc.
1183  * after this is called the buffer is reused for the next message */
1184 static int o2net_process_message(struct o2net_sock_container *sc,
1185 				 struct o2net_msg *hdr)
1186 {
1187 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1188 	int ret = 0, handler_status;
1189 	enum  o2net_system_error syserr;
1190 	struct o2net_msg_handler *nmh = NULL;
1191 	void *ret_data = NULL;
1192 
1193 	msglog(hdr, "processing message\n");
1194 
1195 	o2net_sc_postpone_idle(sc);
1196 
1197 	switch (be16_to_cpu(hdr->magic)) {
1198 		case O2NET_MSG_STATUS_MAGIC:
1199 			/* special type for returning message status */
1200 			o2net_complete_nsw(nn, NULL,
1201 					   be32_to_cpu(hdr->msg_num),
1202 					   be32_to_cpu(hdr->sys_status),
1203 					   be32_to_cpu(hdr->status));
1204 			goto out;
1205 		case O2NET_MSG_KEEP_REQ_MAGIC:
1206 			o2net_sendpage(sc, o2net_keep_resp,
1207 				       sizeof(*o2net_keep_resp));
1208 			goto out;
1209 		case O2NET_MSG_KEEP_RESP_MAGIC:
1210 			goto out;
1211 		case O2NET_MSG_MAGIC:
1212 			break;
1213 		default:
1214 			msglog(hdr, "bad magic\n");
1215 			ret = -EINVAL;
1216 			goto out;
1218 	}
1219 
1220 	/* find a handler for it */
1221 	handler_status = 0;
1222 	nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type),
1223 				be32_to_cpu(hdr->key));
1224 	if (!nmh) {
1225 		mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
1226 		     be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
1227 		syserr = O2NET_ERR_NO_HNDLR;
1228 		goto out_respond;
1229 	}
1230 
1231 	syserr = O2NET_ERR_NONE;
1232 
1233 	if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
1234 		syserr = O2NET_ERR_OVERFLOW;
1235 
1236 	if (syserr != O2NET_ERR_NONE)
1237 		goto out_respond;
1238 
1239 	o2net_set_func_start_time(sc);
1240 	sc->sc_msg_key = be32_to_cpu(hdr->key);
1241 	sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
1242 	handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
1243 					     be16_to_cpu(hdr->data_len),
1244 					nmh->nh_func_data, &ret_data);
1245 	o2net_set_func_stop_time(sc);
1246 
1247 	o2net_update_recv_stats(sc);
1248 
1249 out_respond:
1250 	/* this destroys the hdr, so don't use it after this */
1251 	mutex_lock(&sc->sc_send_lock);
1252 	ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
1253 				      handler_status);
1254 	mutex_unlock(&sc->sc_send_lock);
1255 	hdr = NULL;
1256 	mlog(0, "sending handler status %d, syserr %d returned %d\n",
1257 	     handler_status, syserr, ret);
1258 
1259 	if (nmh) {
1260 		BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
1261 		if (nmh->nh_post_func)
1262 			(nmh->nh_post_func)(handler_status, nmh->nh_func_data,
1263 					    ret_data);
1264 	}
1265 
1266 out:
1267 	if (nmh)
1268 		o2net_handler_put(nmh);
1269 	return ret;
1270 }
1271 
1272 static int o2net_check_handshake(struct o2net_sock_container *sc)
1273 {
1274 	struct o2net_handshake *hand = page_address(sc->sc_page);
1275 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1276 
1277 	if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
1278 		printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
1279 		       "protocol version %llu but %llu is required. "
1280 		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
1281 		       (unsigned long long)be64_to_cpu(hand->protocol_version),
1282 		       O2NET_PROTOCOL_VERSION);
1283 
1284 		/* don't bother reconnecting if it's the wrong version. */
1285 		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1286 		return -1;
1287 	}
1288 
1289 	/*
1290 	 * Ensure timeouts are consistent with other nodes, otherwise
1291 	 * we can end up with one node thinking that the other must be down
1292 	 * when it isn't.  This can ultimately cause corruption.
1293 	 */
1294 	if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
1295 				o2net_idle_timeout()) {
1296 		printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
1297 		       "idle timeout of %u ms, but we use %u ms locally. "
1298 		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
1299 		       be32_to_cpu(hand->o2net_idle_timeout_ms),
1300 		       o2net_idle_timeout());
1301 		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1302 		return -1;
1303 	}
1304 
1305 	if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
1306 			o2net_keepalive_delay()) {
1307 		printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
1308 		       "delay of %u ms, but we use %u ms locally. "
1309 		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
1310 		       be32_to_cpu(hand->o2net_keepalive_delay_ms),
1311 		       o2net_keepalive_delay());
1312 		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1313 		return -1;
1314 	}
1315 
1316 	if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
1317 			O2HB_MAX_WRITE_TIMEOUT_MS) {
1318 		printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
1319 		       "timeout of %u ms, but we use %u ms locally. "
1320 		       "Disconnecting.\n", SC_NODEF_ARGS(sc),
1321 		       be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
1322 		       O2HB_MAX_WRITE_TIMEOUT_MS);
1323 		o2net_ensure_shutdown(nn, sc, -ENOTCONN);
1324 		return -1;
1325 	}
1326 
1327 	sc->sc_handshake_ok = 1;
1328 
1329 	spin_lock(&nn->nn_lock);
1330 	/* set valid and queue the idle timers only if it hasn't been
1331 	 * shut down already */
1332 	if (nn->nn_sc == sc) {
1333 		o2net_sc_reset_idle_timer(sc);
1334 		atomic_set(&nn->nn_timeout, 0);
1335 		o2net_set_nn_state(nn, sc, 1, 0);
1336 	}
1337 	spin_unlock(&nn->nn_lock);
1338 
1339 	/* shift everything up as though it wasn't there */
1340 	sc->sc_page_off -= sizeof(struct o2net_handshake);
1341 	if (sc->sc_page_off)
1342 		memmove(hand, hand + 1, sc->sc_page_off);
1343 
1344 	return 0;
1345 }
1346 
1347 /* this demuxes the queued rx bytes into header or payload bits and calls
1348  * handlers as each full message is read off the socket.  it returns -error,
1349  * 0 on eof, or > 0 for progress made. */
1350 static int o2net_advance_rx(struct o2net_sock_container *sc)
1351 {
1352 	struct o2net_msg *hdr;
1353 	int ret = 0;
1354 	void *data;
1355 	size_t datalen;
1356 
1357 	sclog(sc, "receiving\n");
1358 	o2net_set_advance_start_time(sc);
1359 
1360 	if (unlikely(sc->sc_handshake_ok == 0)) {
1361 		if (sc->sc_page_off < sizeof(struct o2net_handshake)) {
1362 			data = page_address(sc->sc_page) + sc->sc_page_off;
1363 			datalen = sizeof(struct o2net_handshake) - sc->sc_page_off;
1364 			ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1365 			if (ret > 0)
1366 				sc->sc_page_off += ret;
1367 		}
1368 
1369 		if (sc->sc_page_off == sizeof(struct o2net_handshake)) {
1370 			o2net_check_handshake(sc);
1371 			if (unlikely(sc->sc_handshake_ok == 0))
1372 				ret = -EPROTO;
1373 		}
1374 		goto out;
1375 	}
1376 
1377 	/* do we need more header? */
1378 	if (sc->sc_page_off < sizeof(struct o2net_msg)) {
1379 		data = page_address(sc->sc_page) + sc->sc_page_off;
1380 		datalen = sizeof(struct o2net_msg) - sc->sc_page_off;
1381 		ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1382 		if (ret > 0) {
1383 			sc->sc_page_off += ret;
1384 			/* only swab incoming here.. we can
1385 			 * only get here once as we cross from
1386 			 * being under to over */
1387 			if (sc->sc_page_off == sizeof(struct o2net_msg)) {
1388 				hdr = page_address(sc->sc_page);
1389 				if (be16_to_cpu(hdr->data_len) >
1390 				    O2NET_MAX_PAYLOAD_BYTES)
1391 					ret = -EOVERFLOW;
1392 			}
1393 		}
1394 		if (ret <= 0)
1395 			goto out;
1396 	}
1397 
1398 	if (sc->sc_page_off < sizeof(struct o2net_msg)) {
1399 		/* oof, still don't have a header */
1400 		goto out;
1401 	}
1402 
1403 	/* this was swabbed above when we first read it */
1404 	hdr = page_address(sc->sc_page);
1405 
1406 	msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
1407 
1408 	/* do we need more payload? */
1409 	if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) {
1410 		/* need more payload */
1411 		data = page_address(sc->sc_page) + sc->sc_page_off;
1412 		datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) -
1413 			  sc->sc_page_off;
1414 		ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
1415 		if (ret > 0)
1416 			sc->sc_page_off += ret;
1417 		if (ret <= 0)
1418 			goto out;
1419 	}
1420 
1421 	if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) {
1422 		/* we can only get here once, the first time we read
1423 		 * the payload.. so set ret to progress if the handler
1424 		 * works out. after calling this the message is toast */
1425 		ret = o2net_process_message(sc, hdr);
1426 		if (ret == 0)
1427 			ret = 1;
1428 		sc->sc_page_off = 0;
1429 	}
1430 
1431 out:
1432 	sclog(sc, "ret = %d\n", ret);
1433 	o2net_set_advance_stop_time(sc);
1434 	return ret;
1435 }
1436 
1437 /* this work func is triggered by data ready.  it reads until it can read no
1438  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
1439  * our work the work struct will be marked and we'll be called again. */
1440 static void o2net_rx_until_empty(struct work_struct *work)
1441 {
1442 	struct o2net_sock_container *sc =
1443 		container_of(work, struct o2net_sock_container, sc_rx_work);
1444 	int ret;
1445 
1446 	do {
1447 		ret = o2net_advance_rx(sc);
1448 	} while (ret > 0);
1449 
1450 	if (ret <= 0 && ret != -EAGAIN) {
1451 		struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1452 		sclog(sc, "saw error %d, closing\n", ret);
1453 		/* not permanent so read failed handshake can retry */
1454 		o2net_ensure_shutdown(nn, sc, 0);
1455 	}
1456 
1457 	sc_put(sc);
1458 }
1459 
1460 static int o2net_set_nodelay(struct socket *sock)
1461 {
1462 	int ret, val = 1;
1463 	mm_segment_t oldfs;
1464 
1465 	oldfs = get_fs();
1466 	set_fs(KERNEL_DS);
1467 
1468 	/*
1469 	 * Dear unsuspecting programmer,
1470 	 *
1471 	 * Don't use sock_setsockopt() for SOL_TCP.  It doesn't check its level
1472 	 * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
1473 	 * silently turn into SO_DEBUG.
1474 	 *
1475 	 * Yours,
1476 	 * Keeper of hilariously fragile interfaces.
1477 	 */
1478 	ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
1479 				    (char __user *)&val, sizeof(val));
1480 
1481 	set_fs(oldfs);
1482 	return ret;
1483 }
1484 
1485 static int o2net_set_usertimeout(struct socket *sock)
1486 {
1487 	int user_timeout = O2NET_TCP_USER_TIMEOUT;
1488 
1489 	return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
1490 				(char *)&user_timeout, sizeof(user_timeout));
1491 }
1492 
1493 static void o2net_initialize_handshake(void)
1494 {
1495 	o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(
1496 		O2HB_MAX_WRITE_TIMEOUT_MS);
1497 	o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout());
1498 	o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32(
1499 		o2net_keepalive_delay());
1500 	o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32(
1501 		o2net_reconnect_delay());
1502 }
1503 
1504 /* ------------------------------------------------------------ */
1505 
1506 /* called when a connect completes and after a sock is accepted.  the
1507  * rx path will see the response and mark the sc valid */
1508 static void o2net_sc_connect_completed(struct work_struct *work)
1509 {
1510 	struct o2net_sock_container *sc =
1511 		container_of(work, struct o2net_sock_container,
1512 			     sc_connect_work);
1513 
1514 	mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
1515               (unsigned long long)O2NET_PROTOCOL_VERSION,
1516 	      (unsigned long long)be64_to_cpu(o2net_hand->connector_id));
1517 
1518 	o2net_initialize_handshake();
1519 	o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1520 	sc_put(sc);
1521 }
1522 
1523 /* this is called as a work_struct func. */
1524 static void o2net_sc_send_keep_req(struct work_struct *work)
1525 {
1526 	struct o2net_sock_container *sc =
1527 		container_of(work, struct o2net_sock_container,
1528 			     sc_keepalive_work.work);
1529 
1530 	o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
1531 	sc_put(sc);
1532 }
1533 
1534 /* socket shutdown does a del_timer_sync against this as it tears down.
1535  * we can't start this timer until we've got to the point in sc buildup
1536  * where shutdown is going to be involved */
1537 static void o2net_idle_timer(unsigned long data)
1538 {
1539 	struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
1540 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1541 #ifdef CONFIG_DEBUG_FS
1542 	unsigned long msecs = ktime_to_ms(ktime_get()) -
1543 		ktime_to_ms(sc->sc_tv_timer);
1544 #else
1545 	unsigned long msecs = o2net_idle_timeout();
1546 #endif
1547 
1548 	printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
1549 	       "idle for %lu.%03lu secs.\n",
1550 	       SC_NODEF_ARGS(sc), msecs / 1000, msecs % 1000);
1551 
1552 	/* idle timeout happened; don't shut down the connection, but
1553 	 * make the fence decision.  Maybe the connection can recover before
1554 	 * the decision is made.
1555 	 */
1556 	atomic_set(&nn->nn_timeout, 1);
1557 	o2quo_conn_err(o2net_num_from_nn(nn));
1558 	queue_delayed_work(o2net_wq, &nn->nn_still_up,
1559 			msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
1560 
1561 	o2net_sc_reset_idle_timer(sc);
1562 
1563 }
1564 
1565 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
1566 {
1567 	o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
1568 	o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
1569 		      msecs_to_jiffies(o2net_keepalive_delay()));
1570 	o2net_set_sock_timer(sc);
1571 	mod_timer(&sc->sc_idle_timeout,
1572 	       jiffies + msecs_to_jiffies(o2net_idle_timeout()));
1573 }
1574 
1575 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
1576 {
1577 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1578 
1579 	/* clear the fence decision since the connection recovered from timeout */
1580 	if (atomic_read(&nn->nn_timeout)) {
1581 		o2quo_conn_up(o2net_num_from_nn(nn));
1582 		cancel_delayed_work(&nn->nn_still_up);
1583 		atomic_set(&nn->nn_timeout, 0);
1584 	}
1585 
1586 	/* Only push out an existing timer */
1587 	if (timer_pending(&sc->sc_idle_timeout))
1588 		o2net_sc_reset_idle_timer(sc);
1589 }
1590 
1591 /* this work func is kicked whenever a path sets the nn state which doesn't
1592  * have valid set.  This includes seeing hb come up, losing a connection,
1593  * having a connect attempt fail, etc. This centralizes the logic which decides
1594  * if a connect attempt should be made or if we should give up and all future
1595  * transmit attempts should fail */
1596 static void o2net_start_connect(struct work_struct *work)
1597 {
1598 	struct o2net_node *nn =
1599 		container_of(work, struct o2net_node, nn_connect_work.work);
1600 	struct o2net_sock_container *sc = NULL;
1601 	struct o2nm_node *node = NULL, *mynode = NULL;
1602 	struct socket *sock = NULL;
1603 	struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
1604 	int ret = 0, stop;
1605 	unsigned int timeout;
1606 	unsigned int noio_flag;
1607 
1608 	/*
1609 	 * sock_create allocates the sock with GFP_KERNEL. We must set
1610 	 * per-process flag PF_MEMALLOC_NOIO so that all allocations done
1611 	 * by this process are done as if GFP_NOIO was specified. So we
1612 	 * are not reentering filesystem while doing memory reclaim.
1613 	 */
1614 	noio_flag = memalloc_noio_save();
1615 	/* if we're greater we initiate tx, otherwise we accept */
1616 	if (o2nm_this_node() <= o2net_num_from_nn(nn))
1617 		goto out;
1618 
1619 	/* watch for racing with tearing a node down */
1620 	node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
1621 	if (node == NULL)
1622 		goto out;
1623 
1624 	mynode = o2nm_get_node_by_num(o2nm_this_node());
1625 	if (mynode == NULL)
1626 		goto out;
1627 
1628 	spin_lock(&nn->nn_lock);
1629 	/*
1630 	 * see if we already have one pending or have given up.
1631 	 * For nn_timeout, it is set when we close the connection
1632 	 * because of the idle time out. So it means that we have
1633 	 * at least connected to that node successfully once,
1634 	 * now try to connect to it again.
1635 	 */
1636 	timeout = atomic_read(&nn->nn_timeout);
1637 	stop = (nn->nn_sc ||
1638 		(nn->nn_persistent_error &&
1639 		(nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
1640 	spin_unlock(&nn->nn_lock);
1641 	if (stop)
1642 		goto out;
1643 
1644 	nn->nn_last_connect_attempt = jiffies;
1645 
1646 	sc = sc_alloc(node);
1647 	if (sc == NULL) {
1648 		mlog(0, "couldn't allocate sc\n");
1649 		ret = -ENOMEM;
1650 		goto out;
1651 	}
1652 
1653 	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1654 	if (ret < 0) {
1655 		mlog(0, "can't create socket: %d\n", ret);
1656 		goto out;
1657 	}
1658 	sc->sc_sock = sock; /* freed by sc_kref_release */
1659 
1660 	sock->sk->sk_allocation = GFP_ATOMIC;
1661 
1662 	myaddr.sin_family = AF_INET;
1663 	myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
1664 	myaddr.sin_port = htons(0); /* any port */
1665 
1666 	ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
1667 			      sizeof(myaddr));
1668 	if (ret) {
1669 		mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
1670 		     ret, &mynode->nd_ipv4_address);
1671 		goto out;
1672 	}
1673 
1674 	ret = o2net_set_nodelay(sc->sc_sock);
1675 	if (ret) {
1676 		mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
1677 		goto out;
1678 	}
1679 
1680 	ret = o2net_set_usertimeout(sock);
1681 	if (ret) {
1682 		mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret);
1683 		goto out;
1684 	}
1685 
1686 	o2net_register_callbacks(sc->sc_sock->sk, sc);
1687 
1688 	spin_lock(&nn->nn_lock);
1689 	/* handshake completion will set nn->nn_sc_valid */
1690 	o2net_set_nn_state(nn, sc, 0, 0);
1691 	spin_unlock(&nn->nn_lock);
1692 
1693 	remoteaddr.sin_family = AF_INET;
1694 	remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
1695 	remoteaddr.sin_port = node->nd_ipv4_port;
1696 
1697 	ret = sc->sc_sock->ops->connect(sc->sc_sock,
1698 					(struct sockaddr *)&remoteaddr,
1699 					sizeof(remoteaddr),
1700 					O_NONBLOCK);
1701 	if (ret == -EINPROGRESS)
1702 		ret = 0;
1703 
1704 out:
1705 	if (ret && sc) {
1706 		printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
1707 		       " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
1708 		/* 0 err so that another will be queued and attempted
1709 		 * from set_nn_state */
1710 		o2net_ensure_shutdown(nn, sc, 0);
1711 	}
1712 	if (sc)
1713 		sc_put(sc);
1714 	if (node)
1715 		o2nm_node_put(node);
1716 	if (mynode)
1717 		o2nm_node_put(mynode);
1718 
1719 	memalloc_noio_restore(noio_flag);
1720 	return;
1721 }
1722 
1723 static void o2net_connect_expired(struct work_struct *work)
1724 {
1725 	struct o2net_node *nn =
1726 		container_of(work, struct o2net_node, nn_connect_expired.work);
1727 
1728 	spin_lock(&nn->nn_lock);
1729 	if (!nn->nn_sc_valid) {
1730 		printk(KERN_NOTICE "o2net: No connection established with "
1731 		       "node %u after %u.%03u seconds, check network and"
1732 		       " cluster configuration.\n",
1733 		     o2net_num_from_nn(nn),
1734 		     o2net_idle_timeout() / 1000,
1735 		     o2net_idle_timeout() % 1000);
1736 
1737 		o2net_set_nn_state(nn, NULL, 0, 0);
1738 	}
1739 	spin_unlock(&nn->nn_lock);
1740 }
1741 
1742 static void o2net_still_up(struct work_struct *work)
1743 {
1744 	struct o2net_node *nn =
1745 		container_of(work, struct o2net_node, nn_still_up.work);
1746 
1747 	o2quo_hb_still_up(o2net_num_from_nn(nn));
1748 }
1749 
1750 /* ------------------------------------------------------------ */
1751 
1752 void o2net_disconnect_node(struct o2nm_node *node)
1753 {
1754 	struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
1755 
1756 	/* don't reconnect until it's heartbeating again */
1757 	spin_lock(&nn->nn_lock);
1758 	atomic_set(&nn->nn_timeout, 0);
1759 	o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
1760 	spin_unlock(&nn->nn_lock);
1761 
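	/* cancel any queued connect/expire/keepalive work and wait for
	 * anything already running to complete */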
1762 	if (o2net_wq) {
1763 		cancel_delayed_work(&nn->nn_connect_expired);
1764 		cancel_delayed_work(&nn->nn_connect_work);
1765 		cancel_delayed_work(&nn->nn_still_up);
1766 		flush_workqueue(o2net_wq);
1767 	}
1768 }
1769 
1770 static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
1771 				  void *data)
1772 {
1773 	o2quo_hb_down(node_num);
1774 
1775 	if (!node)
1776 		return;
1777 
1778 	if (node_num != o2nm_this_node())
1779 		o2net_disconnect_node(node);
1780 
1781 	BUG_ON(atomic_read(&o2net_connected_peers) < 0);
1782 }
1783 
1784 static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
1785 				void *data)
1786 {
1787 	struct o2net_node *nn = o2net_nn_from_num(node_num);
1788 
1789 	o2quo_hb_up(node_num);
1790 
1791 	BUG_ON(!node);
1792 
1793 	/* ensure an immediate connect attempt */
1794 	nn->nn_last_connect_attempt = jiffies -
1795 		(msecs_to_jiffies(o2net_reconnect_delay()) + 1);
1796 
1797 	if (node_num != o2nm_this_node()) {
1798 		/* believe it or not, accept and node heartbeating testing
1799 		 * can succeed for this node before we get here, so only
1800 		 * use set_nn_state to clear the persistent error if that
1801 		 * hasn't already happened */
1802 		spin_lock(&nn->nn_lock);
1803 		atomic_set(&nn->nn_timeout, 0);
1804 		if (nn->nn_persistent_error)
1805 			o2net_set_nn_state(nn, NULL, 0, 0);
1806 		spin_unlock(&nn->nn_lock);
1807 	}
1808 }
1809 
1810 void o2net_unregister_hb_callbacks(void)
1811 {
1812 	o2hb_unregister_callback(NULL, &o2net_hb_up);
1813 	o2hb_unregister_callback(NULL, &o2net_hb_down);
1814 }
1815 
1816 int o2net_register_hb_callbacks(void)
1817 {
1818 	int ret;
1819 
1820 	o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB,
1821 			    o2net_hb_node_down_cb, NULL, O2NET_HB_PRI);
1822 	o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB,
1823 			    o2net_hb_node_up_cb, NULL, O2NET_HB_PRI);
1824 
1825 	ret = o2hb_register_callback(NULL, &o2net_hb_up);
1826 	if (ret == 0)
1827 		ret = o2hb_register_callback(NULL, &o2net_hb_down);
1828 
1829 	if (ret)
1830 		o2net_unregister_hb_callbacks();
1831 
1832 	return ret;
1833 }
1834 
1835 /* ------------------------------------------------------------ */
1836 
1837 static int o2net_accept_one(struct socket *sock, int *more)
1838 {
1839 	int ret, slen;
1840 	struct sockaddr_in sin;
1841 	struct socket *new_sock = NULL;
1842 	struct o2nm_node *node = NULL;
1843 	struct o2nm_node *local_node = NULL;
1844 	struct o2net_sock_container *sc = NULL;
1845 	struct o2net_node *nn;
1846 	unsigned int noio_flag;
1847 
1848 	/*
1849 	 * sock_create_lite allocates the sock with GFP_KERNEL. We must set
1850 	 * the per-process flag PF_MEMALLOC_NOIO so that all allocations done
1851 	 * by this process are done as if GFP_NOIO was specified, which keeps
1852 	 * us from re-entering the filesystem while doing memory reclaim.
1853 	 */
1854 	noio_flag = memalloc_noio_save();
1855 
1856 	BUG_ON(sock == NULL);
1857 	*more = 0;
1858 	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
1859 			       sock->sk->sk_protocol, &new_sock);
1860 	if (ret)
1861 		goto out;
1862 
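	/* sock_create_lite gives us a bare socket; borrow the listening
	 * socket's type and ops before accepting into it */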
1863 	new_sock->type = sock->type;
1864 	new_sock->ops = sock->ops;
1865 	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
1866 	if (ret < 0)
1867 		goto out;
1868 
1869 	*more = 1;
1870 	new_sock->sk->sk_allocation = GFP_ATOMIC;
1871 
1872 	ret = o2net_set_nodelay(new_sock);
1873 	if (ret) {
1874 		mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
1875 		goto out;
1876 	}
1877 
1878 	ret = o2net_set_usertimeout(new_sock);
1879 	if (ret) {
1880 		mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret);
1881 		goto out;
1882 	}
1883 
1884 	slen = sizeof(sin);
1885 	ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
1886 				       &slen, 1);
1887 	if (ret < 0)
1888 		goto out;
1889 
1890 	node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
1891 	if (node == NULL) {
1892 		printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
1893 		       "node at %pI4:%d\n", &sin.sin_addr.s_addr,
1894 		       ntohs(sin.sin_port));
1895 		ret = -EINVAL;
1896 		goto out;
1897 	}
1898 
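	/* we are the passive side; only a peer with a strictly higher
	 * node number than ours should be initiating a connection */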
1899 	if (o2nm_this_node() >= node->nd_num) {
1900 		local_node = o2nm_get_node_by_num(o2nm_this_node());
1901 		if (local_node)
1902 			printk(KERN_NOTICE "o2net: Unexpected connect attempt "
1903 					"seen at node '%s' (%u, %pI4:%d) from "
1904 					"node '%s' (%u, %pI4:%d)\n",
1905 					local_node->nd_name, local_node->nd_num,
1906 					&(local_node->nd_ipv4_address),
1907 					ntohs(local_node->nd_ipv4_port),
1908 					node->nd_name,
1909 					node->nd_num, &sin.sin_addr.s_addr,
1910 					ntohs(sin.sin_port));
1911 		ret = -EINVAL;
1912 		goto out;
1913 	}
1914 
1915 	/* this happens all the time when the other node sees our heartbeat
1916 	 * and tries to connect before we see their heartbeat */
1917 	if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
1918 		mlog(ML_CONN, "attempt to connect from node '%s' at "
1919 		     "%pI4:%d but it isn't heartbeating\n",
1920 		     node->nd_name, &sin.sin_addr.s_addr,
1921 		     ntohs(sin.sin_port));
1922 		ret = -EINVAL;
1923 		goto out;
1924 	}
1925 
1926 	nn = o2net_nn_from_num(node->nd_num);
1927 
1928 	spin_lock(&nn->nn_lock);
1929 	if (nn->nn_sc)
1930 		ret = -EBUSY;
1931 	else
1932 		ret = 0;
1933 	spin_unlock(&nn->nn_lock);
1934 	if (ret) {
1935 		printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
1936 		       "at %pI4:%d but it already has an open connection\n",
1937 		       node->nd_name, &sin.sin_addr.s_addr,
1938 		       ntohs(sin.sin_port));
1939 		goto out;
1940 	}
1941 
1942 	sc = sc_alloc(node);
1943 	if (sc == NULL) {
1944 		ret = -ENOMEM;
1945 		goto out;
1946 	}
1947 
1948 	sc->sc_sock = new_sock;
1949 	new_sock = NULL;
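	/* sc owns new_sock now; clearing the local pointer keeps the
	 * error path below from releasing it a second time */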
1950 
1951 	spin_lock(&nn->nn_lock);
1952 	atomic_set(&nn->nn_timeout, 0);
1953 	o2net_set_nn_state(nn, sc, 0, 0);
1954 	spin_unlock(&nn->nn_lock);
1955 
1956 	o2net_register_callbacks(sc->sc_sock->sk, sc);
1957 	o2net_sc_queue_work(sc, &sc->sc_rx_work);
1958 
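	/* send our half of the handshake; the connection only becomes
	 * valid once the peer's handshake has been received and checked */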
1959 	o2net_initialize_handshake();
1960 	o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
1961 
1962 out:
1963 	if (new_sock)
1964 		sock_release(new_sock);
1965 	if (node)
1966 		o2nm_node_put(node);
1967 	if (local_node)
1968 		o2nm_node_put(local_node);
1969 	if (sc)
1970 		sc_put(sc);
1971 
1972 	memalloc_noio_restore(noio_flag);
1973 	return ret;
1974 }
1975 
1976 /*
1977  * This function is invoked in response to one or more
1978  * pending accepts at softIRQ level. We must drain the
1979  * entire queue before returning.
1980  */
1981 
1982 static void o2net_accept_many(struct work_struct *work)
1983 {
1984 	struct socket *sock = o2net_listen_sock;
1985 	int	more;
1986 	int	err;
1987 
1988 	/*
1989 	 * It is critical to note that due to interrupt moderation
1990 	 * at the network driver level, we can't assume we will get a
1991 	 * softIRQ for every single connection, since tcp SYN packets
1992 	 * can arrive back-to-back, and therefore many pending
1993 	 * accepts may result in just one softIRQ. If we terminated
1994 	 * the o2net_accept_one() loop upon seeing an error, the rest
1995 	 * of the connections in the queue would be stranded: if no
1996 	 * new SYN arrives for hours, no softIRQ will be delivered,
1997 	 * and those connections will just sit in the queue.
1998 	 */
1999 
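	/* errors from an individual accept are logged and ignored here;
	 * keep draining until nothing is pending */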
2000 	for (;;) {
2001 		err = o2net_accept_one(sock, &more);
2002 		if (!more)
2003 			break;
2004 		cond_resched();
2005 	}
2006 }
2007 
2008 static void o2net_listen_data_ready(struct sock *sk)
2009 {
2010 	void (*ready)(struct sock *sk);
2011 
2012 	read_lock_bh(&sk->sk_callback_lock);
2013 	ready = sk->sk_user_data;
2014 	if (ready == NULL) { /* check for teardown race */
2015 		ready = sk->sk_data_ready;
2016 		goto out;
2017 	}
2018 
2019 	/* This callback may be called twice when a new connection
2020 	 * is being established, as a child socket inherits everything
2021 	 * from its parent LISTEN socket, including the data_ready cb of
2022 	 * the parent. This leads to a hazard: in o2net_accept_one()
2023 	 * we are still initializing the child socket and have not yet
2024 	 * replaced the inherited data_ready callback when data starts
2025 	 * arriving.
2026 	 * We avoid this hazard by checking the socket state. For the
2027 	 * listening socket the state will be TCP_LISTEN; for the new
2028 	 * socket it will be TCP_ESTABLISHED. In the latter case
2029 	 * sk->sk_user_data is not a valid function pointer anyway.
2030 	 */
2031 
2032 	if (sk->sk_state == TCP_LISTEN) {
2033 		queue_work(o2net_wq, &o2net_listen_work);
2034 	} else {
2035 		ready = NULL;
2036 	}
2037 
2038 out:
2039 	read_unlock_bh(&sk->sk_callback_lock);
2040 	if (ready != NULL)
2041 		ready(sk);
2042 }
2043 
2044 static int o2net_open_listening_sock(__be32 addr, __be16 port)
2045 {
2046 	struct socket *sock = NULL;
2047 	int ret;
2048 	struct sockaddr_in sin = {
2049 		.sin_family = AF_INET,
2050 		.sin_addr = { .s_addr = addr },
2051 		.sin_port = port,
2052 	};
2053 
2054 	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
2055 	if (ret < 0) {
2056 		printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
2057 		goto out;
2058 	}
2059 
2060 	sock->sk->sk_allocation = GFP_ATOMIC;
2061 
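	/* stash the default data_ready in sk_user_data so that
	 * o2net_listen_data_ready can chain to it and detect teardown */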
2062 	write_lock_bh(&sock->sk->sk_callback_lock);
2063 	sock->sk->sk_user_data = sock->sk->sk_data_ready;
2064 	sock->sk->sk_data_ready = o2net_listen_data_ready;
2065 	write_unlock_bh(&sock->sk->sk_callback_lock);
2066 
2067 	o2net_listen_sock = sock;
2068 	INIT_WORK(&o2net_listen_work, o2net_accept_many);
2069 
2070 	sock->sk->sk_reuse = SK_CAN_REUSE;
2071 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2072 	if (ret < 0) {
2073 		printk(KERN_ERR "o2net: Error %d while binding socket at "
2074 		       "%pI4:%u\n", ret, &addr, ntohs(port));
2075 		goto out;
2076 	}
2077 
2078 	ret = sock->ops->listen(sock, 64);
2079 	if (ret < 0)
2080 		printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
2081 		       ret, &addr, ntohs(port));
2082 
2083 out:
2084 	if (ret) {
2085 		o2net_listen_sock = NULL;
2086 		if (sock)
2087 			sock_release(sock);
2088 	}
2089 	return ret;
2090 }
2091 
2092 /*
2093  * called from node manager when we should bring up our network listening
2094  * socket.  node manager handles all the serialization to only call this
2095  * once and to match it with o2net_stop_listening().  note,
2096  * o2nm_this_node() doesn't work yet as we're being called while it
2097  * is being set up.
2098  */
2099 int o2net_start_listening(struct o2nm_node *node)
2100 {
2101 	int ret = 0;
2102 
2103 	BUG_ON(o2net_wq != NULL);
2104 	BUG_ON(o2net_listen_sock != NULL);
2105 
2106 	mlog(ML_KTHREAD, "starting o2net thread...\n");
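	/* WQ_MEM_RECLAIM gives the workqueue a rescuer thread so it can
	 * make forward progress even under memory pressure */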
2107 	o2net_wq = alloc_ordered_workqueue("o2net", WQ_MEM_RECLAIM);
2108 	if (o2net_wq == NULL) {
2109 		mlog(ML_ERROR, "unable to launch o2net thread\n");
2110 		return -ENOMEM; /* ? */
2111 	}
2112 
2113 	ret = o2net_open_listening_sock(node->nd_ipv4_address,
2114 					node->nd_ipv4_port);
2115 	if (ret) {
2116 		destroy_workqueue(o2net_wq);
2117 		o2net_wq = NULL;
2118 	} else
2119 		o2quo_conn_up(node->nd_num);
2120 
2121 	return ret;
2122 }
2123 
2124 /* again, o2nm_this_node() doesn't work here as we're involved in
2125  * tearing it down */
2126 void o2net_stop_listening(struct o2nm_node *node)
2127 {
2128 	struct socket *sock = o2net_listen_sock;
2129 	size_t i;
2130 
2131 	BUG_ON(o2net_wq == NULL);
2132 	BUG_ON(o2net_listen_sock == NULL);
2133 
2134 	/* stop the listening socket from generating work */
2135 	write_lock_bh(&sock->sk->sk_callback_lock);
2136 	sock->sk->sk_data_ready = sock->sk->sk_user_data;
2137 	sock->sk->sk_user_data = NULL;
2138 	write_unlock_bh(&sock->sk->sk_callback_lock);
2139 
2140 	for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
2141 		struct o2nm_node *peer = o2nm_get_node_by_num(i);
2142 		if (peer) {
2143 			o2net_disconnect_node(peer);
2144 			o2nm_node_put(peer);
2145 		}
2146 	}
2147 
2148 	/* finish all work and tear down the work queue */
2149 	mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n");
2150 	destroy_workqueue(o2net_wq);
2151 	o2net_wq = NULL;
2152 
2153 	sock_release(o2net_listen_sock);
2154 	o2net_listen_sock = NULL;
2155 
2156 	o2quo_conn_err(node->nd_num);
2157 }
2158 
2159 /* ------------------------------------------------------------ */
2160 
2161 int o2net_init(void)
2162 {
2163 	unsigned long i;
2164 
2165 	o2quo_init();
2166 
2167 	if (o2net_debugfs_init())
2168 		goto out;
2169 
2170 	o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
2171 	o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
2172 	o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
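	/* on partial failure the out: path below frees whichever of these
	 * were allocated; kfree(NULL) is a no-op */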
2173 	if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp)
2174 		goto out;
2175 
2176 	o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
2177 	o2net_hand->connector_id = cpu_to_be64(1);
2178 
2179 	o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC);
2180 	o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC);
2181 
2182 	for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
2183 		struct o2net_node *nn = o2net_nn_from_num(i);
2184 
2185 		atomic_set(&nn->nn_timeout, 0);
2186 		spin_lock_init(&nn->nn_lock);
2187 		INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
2188 		INIT_DELAYED_WORK(&nn->nn_connect_expired,
2189 				  o2net_connect_expired);
2190 		INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
2191 		/* until we see hb from a node we'll return -ENOTCONN */
2192 		nn->nn_persistent_error = -ENOTCONN;
2193 		init_waitqueue_head(&nn->nn_sc_wq);
2194 		idr_init(&nn->nn_status_idr);
2195 		INIT_LIST_HEAD(&nn->nn_status_list);
2196 	}
2197 
2198 	return 0;
2199 
2200 out:
2201 	kfree(o2net_hand);
2202 	kfree(o2net_keep_req);
2203 	kfree(o2net_keep_resp);
2204 	o2net_debugfs_exit();
2205 	o2quo_exit();
2206 	return -ENOMEM;
2207 }
2208 
2209 void o2net_exit(void)
2210 {
2211 	o2quo_exit();
2212 	kfree(o2net_hand);
2213 	kfree(o2net_keep_req);
2214 	kfree(o2net_keep_resp);
2215 	o2net_debugfs_exit();
2216 }
2217