xref: /linux/drivers/block/drbd/drbd_receiver.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 /*
2    drbd_receiver.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14 
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  */
24 
25 
26 #include <linux/module.h>
27 
28 #include <asm/uaccess.h>
29 #include <net/sock.h>
30 
31 #include <linux/drbd.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/in.h>
35 #include <linux/mm.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/mm.h>
46 #include <linux/string.h>
47 #include <linux/scatterlist.h>
48 #include "drbd_int.h"
49 #include "drbd_req.h"
50 
51 #include "drbd_vli.h"
52 
53 struct flush_work {
54 	struct drbd_work w;
55 	struct drbd_epoch *epoch;
56 };
57 
58 enum finish_epoch {
59 	FE_STILL_LIVE,
60 	FE_DESTROYED,
61 	FE_RECYCLED,
62 };
63 
64 static int drbd_do_handshake(struct drbd_conf *mdev);
65 static int drbd_do_auth(struct drbd_conf *mdev);
66 
67 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
68 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
69 
70 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
71 {
72 	struct drbd_epoch *prev;
73 	spin_lock(&mdev->epoch_lock);
74 	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
75 	if (prev == epoch || prev == mdev->current_epoch)
76 		prev = NULL;
77 	spin_unlock(&mdev->epoch_lock);
78 	return prev;
79 }
80 
81 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
82 
83 static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
84 {
85 	struct page *page = NULL;
86 
87 	/* Yes, testing drbd_pp_vacant outside the lock is racy.
88 	 * So what. It saves a spin_lock. */
89 	if (drbd_pp_vacant > 0) {
90 		spin_lock(&drbd_pp_lock);
91 		page = drbd_pp_pool;
92 		if (page) {
93 			drbd_pp_pool = (struct page *)page_private(page);
94 			set_page_private(page, 0); /* just to be polite */
95 			drbd_pp_vacant--;
96 		}
97 		spin_unlock(&drbd_pp_lock);
98 	}
99 	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
100 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
101 	 * which in turn might block on the other node at this very place.  */
102 	if (!page)
103 		page = alloc_page(GFP_TRY);
104 	if (page)
105 		atomic_inc(&mdev->pp_in_use);
106 	return page;
107 }
108 
109 /* Kick the lower level device if we have more than (an arbitrary number of)
110  * references on it, which typically are locally submitted io
111  * requests.  Don't use unacked_cnt, so we speed up proto A and B, too. */
112 static void maybe_kick_lo(struct drbd_conf *mdev)
113 {
114 	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
115 		drbd_kick_lo(mdev);
116 }
117 
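/* Collect those net_ee entries whose pages the network layer no longer
 * references and move them onto @to_be_freed.  Caller holds the req_lock. */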
118 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
119 {
120 	struct drbd_epoch_entry *e;
121 	struct list_head *le, *tle;
122 
123 	/* The EEs are always appended to the end of the list. Since
124 	   they are sent in order over the wire, they have to finish
125 	   in order. As soon as we see the first one that has not finished, we
126 	   can stop examining the list... */
127 
128 	list_for_each_safe(le, tle, &mdev->net_ee) {
129 		e = list_entry(le, struct drbd_epoch_entry, w.list);
130 		if (drbd_bio_has_active_page(e->private_bio))
131 			break;
132 		list_move(le, to_be_freed);
133 	}
134 }
135 
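/* Unplug the lower level device if necessary, then free all net_ee
 * entries that the network layer has finished with. */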
136 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
137 {
138 	LIST_HEAD(reclaimed);
139 	struct drbd_epoch_entry *e, *t;
140 
141 	maybe_kick_lo(mdev);
142 	spin_lock_irq(&mdev->req_lock);
143 	reclaim_net_ee(mdev, &reclaimed);
144 	spin_unlock_irq(&mdev->req_lock);
145 
146 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
147 		drbd_free_ee(mdev, e);
148 }
149 
150 /**
151  * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
152  * @mdev:	DRBD device.
153  * @retry:	whether or not to retry allocation forever (or until signalled)
154  *
155  * Tries to allocate a page, first from our own page pool, then from the
156  * kernel, unless this allocation would exceed the max_buffers setting.
157  * If @retry is non-zero, retry until DRBD frees a page somewhere else.
158  */
159 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
160 {
161 	struct page *page = NULL;
162 	DEFINE_WAIT(wait);
163 
164 	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
165 		page = drbd_pp_first_page_or_try_alloc(mdev);
166 		if (page)
167 			return page;
168 	}
169 
170 	for (;;) {
171 		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
172 
173 		drbd_kick_lo_and_reclaim_net(mdev);
174 
175 		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
176 			page = drbd_pp_first_page_or_try_alloc(mdev);
177 			if (page)
178 				break;
179 		}
180 
181 		if (!retry)
182 			break;
183 
184 		if (signal_pending(current)) {
185 			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
186 			break;
187 		}
188 
189 		schedule();
190 	}
191 	finish_wait(&drbd_pp_wait, &wait);
192 
193 	return page;
194 }
195 
196 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
197  * It is also used from inside another spin_lock_irq(&mdev->req_lock) section. */
198 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
199 {
200 	int free_it;
201 
202 	spin_lock(&drbd_pp_lock);
203 	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
204 		free_it = 1;
205 	} else {
206 		set_page_private(page, (unsigned long)drbd_pp_pool);
207 		drbd_pp_pool = page;
208 		drbd_pp_vacant++;
209 		free_it = 0;
210 	}
211 	spin_unlock(&drbd_pp_lock);
212 
213 	atomic_dec(&mdev->pp_in_use);
214 
215 	if (free_it)
216 		__free_page(page);
217 
218 	wake_up(&drbd_pp_wait);
219 }
220 
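/* Give all pages of @bio back to the DRBD page pool; pages beyond the
 * pool's watermark are released to the system.  Adjusts pp_in_use and
 * wakes up waiters in drbd_pp_alloc(). */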
221 static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
222 {
223 	struct page *p_to_be_freed = NULL;
224 	struct page *page;
225 	struct bio_vec *bvec;
226 	int i;
227 
228 	spin_lock(&drbd_pp_lock);
229 	__bio_for_each_segment(bvec, bio, i, 0) {
230 		if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
231 			set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
232 			p_to_be_freed = bvec->bv_page;
233 		} else {
234 			set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
235 			drbd_pp_pool = bvec->bv_page;
236 			drbd_pp_vacant++;
237 		}
238 	}
239 	spin_unlock(&drbd_pp_lock);
240 	atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
241 
242 	while (p_to_be_freed) {
243 		page = p_to_be_freed;
244 		p_to_be_freed = (struct page *)page_private(page);
245 		set_page_private(page, 0); /* just to be polite */
246 		put_page(page);
247 	}
248 
249 	wake_up(&drbd_pp_wait);
250 }
251 
252 /*
253 You need to hold the req_lock:
254  _drbd_wait_ee_list_empty()
255 
256 You must not have the req_lock:
257  drbd_free_ee()
258  drbd_alloc_ee()
259  drbd_init_ee()
260  drbd_release_ee()
261  drbd_ee_fix_bhs()
262  drbd_process_done_ee()
263  drbd_clear_done_ee()
264  drbd_wait_ee_list_empty()
265 */
266 
267 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
268 				     u64 id,
269 				     sector_t sector,
270 				     unsigned int data_size,
271 				     gfp_t gfp_mask) __must_hold(local)
272 {
273 	struct request_queue *q;
274 	struct drbd_epoch_entry *e;
275 	struct page *page;
276 	struct bio *bio;
277 	unsigned int ds;
278 
279 	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
280 		return NULL;
281 
282 	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
283 	if (!e) {
284 		if (!(gfp_mask & __GFP_NOWARN))
285 			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
286 		return NULL;
287 	}
288 
289 	bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
290 	if (!bio) {
291 		if (!(gfp_mask & __GFP_NOWARN))
292 			dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
293 		goto fail1;
294 	}
295 
296 	bio->bi_bdev = mdev->ldev->backing_bdev;
297 	bio->bi_sector = sector;
298 
299 	ds = data_size;
300 	while (ds) {
301 		page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
302 		if (!page) {
303 			if (!(gfp_mask & __GFP_NOWARN))
304 				dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
305 			goto fail2;
306 		}
307 		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
308 			drbd_pp_free(mdev, page);
309 			dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
310 			    "data_size=%u,ds=%u) failed\n",
311 			    (unsigned long long)sector, data_size, ds);
312 
313 			q = bdev_get_queue(bio->bi_bdev);
314 			if (q->merge_bvec_fn) {
315 				struct bvec_merge_data bvm = {
316 					.bi_bdev = bio->bi_bdev,
317 					.bi_sector = bio->bi_sector,
318 					.bi_size = bio->bi_size,
319 					.bi_rw = bio->bi_rw,
320 				};
321 				int l = q->merge_bvec_fn(q, &bvm,
322 						&bio->bi_io_vec[bio->bi_vcnt]);
323 				dev_err(DEV, "merge_bvec_fn() = %d\n", l);
324 			}
325 
326 			/* dump more of the bio. */
327 			dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
328 			dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
329 			dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
330 			dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
331 
332 			goto fail2;
333 			break;
334 		}
335 		ds -= min_t(int, ds, PAGE_SIZE);
336 	}
337 
338 	D_ASSERT(data_size == bio->bi_size);
339 
340 	bio->bi_private = e;
341 	e->mdev = mdev;
342 	e->sector = sector;
343 	e->size = bio->bi_size;
344 
345 	e->private_bio = bio;
346 	e->block_id = id;
347 	INIT_HLIST_NODE(&e->colision);
348 	e->epoch = NULL;
349 	e->flags = 0;
350 
351 	return e;
352 
353  fail2:
354 	drbd_pp_free_bio_pages(mdev, bio);
355 	bio_put(bio);
356  fail1:
357 	mempool_free(e, drbd_ee_mempool);
358 
359 	return NULL;
360 }
361 
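/* Release an epoch entry: return its pages to the pool, drop the bio
 * reference, and give the entry back to the mempool. */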
362 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
363 {
364 	struct bio *bio = e->private_bio;
365 	drbd_pp_free_bio_pages(mdev, bio);
366 	bio_put(bio);
367 	D_ASSERT(hlist_unhashed(&e->colision));
368 	mempool_free(e, drbd_ee_mempool);
369 }
370 
371 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
372 {
373 	LIST_HEAD(work_list);
374 	struct drbd_epoch_entry *e, *t;
375 	int count = 0;
376 
377 	spin_lock_irq(&mdev->req_lock);
378 	list_splice_init(list, &work_list);
379 	spin_unlock_irq(&mdev->req_lock);
380 
381 	list_for_each_entry_safe(e, t, &work_list, w.list) {
382 		drbd_free_ee(mdev, e);
383 		count++;
384 	}
385 	return count;
386 }
387 
388 
389 /*
390  * This function is called from _asender only_
391  * but see also comments in _req_mod(,barrier_acked)
392  * and receive_Barrier.
393  *
394  * Move entries from net_ee to done_ee, if ready.
395  * Grab done_ee, call all callbacks, free the entries.
396  * The callbacks typically send out ACKs.
397  */
398 static int drbd_process_done_ee(struct drbd_conf *mdev)
399 {
400 	LIST_HEAD(work_list);
401 	LIST_HEAD(reclaimed);
402 	struct drbd_epoch_entry *e, *t;
403 	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
404 
405 	spin_lock_irq(&mdev->req_lock);
406 	reclaim_net_ee(mdev, &reclaimed);
407 	list_splice_init(&mdev->done_ee, &work_list);
408 	spin_unlock_irq(&mdev->req_lock);
409 
410 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
411 		drbd_free_ee(mdev, e);
412 
413 	/* possible callbacks here:
414 	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
415 	 * all ignore the last argument.
416 	 */
417 	list_for_each_entry_safe(e, t, &work_list, w.list) {
418 		/* list_del not necessary, next/prev members not touched */
419 		ok = e->w.cb(mdev, &e->w, !ok) && ok;
420 		drbd_free_ee(mdev, e);
421 	}
422 	wake_up(&mdev->ee_wait);
423 
424 	return ok;
425 }
426 
427 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
428 {
429 	DEFINE_WAIT(wait);
430 
431 	/* avoids spin_lock/unlock
432 	 * and calling prepare_to_wait in the fast path */
433 	while (!list_empty(head)) {
434 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
435 		spin_unlock_irq(&mdev->req_lock);
436 		drbd_kick_lo(mdev);
437 		schedule();
438 		finish_wait(&mdev->ee_wait, &wait);
439 		spin_lock_irq(&mdev->req_lock);
440 	}
441 }
442 
443 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
444 {
445 	spin_lock_irq(&mdev->req_lock);
446 	_drbd_wait_ee_list_empty(mdev, head);
447 	spin_unlock_irq(&mdev->req_lock);
448 }
449 
450 /* see also kernel_accept(), which is only present since 2.6.18.
451  * also, we want to log exactly which part of it failed */
452 static int drbd_accept(struct drbd_conf *mdev, const char **what,
453 		struct socket *sock, struct socket **newsock)
454 {
455 	struct sock *sk = sock->sk;
456 	int err = 0;
457 
458 	*what = "listen";
459 	err = sock->ops->listen(sock, 5);
460 	if (err < 0)
461 		goto out;
462 
463 	*what = "sock_create_lite";
464 	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
465 			       newsock);
466 	if (err < 0)
467 		goto out;
468 
469 	*what = "accept";
470 	err = sock->ops->accept(sock, *newsock, 0);
471 	if (err < 0) {
472 		sock_release(*newsock);
473 		*newsock = NULL;
474 		goto out;
475 	}
476 	(*newsock)->ops  = sock->ops;
477 
478 out:
479 	return err;
480 }
481 
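/* Receive up to @size bytes from @sock with a single sock_recvmsg() call;
 * returns the number of bytes received, or a negative error. */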
482 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
483 		    void *buf, size_t size, int flags)
484 {
485 	mm_segment_t oldfs;
486 	struct kvec iov = {
487 		.iov_base = buf,
488 		.iov_len = size,
489 	};
490 	struct msghdr msg = {
491 		.msg_iovlen = 1,
492 		.msg_iov = (struct iovec *)&iov,
493 		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
494 	};
495 	int rv;
496 
497 	oldfs = get_fs();
498 	set_fs(KERNEL_DS);
499 	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
500 	set_fs(oldfs);
501 
502 	return rv;
503 }
504 
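/* Receive exactly @size bytes on the data socket.  Anything short of that
 * (peer closed, signal, error) forces the connection to C_BROKEN_PIPE. */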
505 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
506 {
507 	mm_segment_t oldfs;
508 	struct kvec iov = {
509 		.iov_base = buf,
510 		.iov_len = size,
511 	};
512 	struct msghdr msg = {
513 		.msg_iovlen = 1,
514 		.msg_iov = (struct iovec *)&iov,
515 		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
516 	};
517 	int rv;
518 
519 	oldfs = get_fs();
520 	set_fs(KERNEL_DS);
521 
522 	for (;;) {
523 		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
524 		if (rv == size)
525 			break;
526 
527 		/* Note:
528 		 * ECONNRESET	other side closed the connection
529 		 * ERESTARTSYS	(on  sock) we got a signal
530 		 */
531 
532 		if (rv < 0) {
533 			if (rv == -ECONNRESET)
534 				dev_info(DEV, "sock was reset by peer\n");
535 			else if (rv != -ERESTARTSYS)
536 				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
537 			break;
538 		} else if (rv == 0) {
539 			dev_info(DEV, "sock was shut down by peer\n");
540 			break;
541 		} else	{
542 			/* signal came in, or peer/link went down,
543 			 * after we read a partial message
544 			 */
545 			/* D_ASSERT(signal_pending(current)); */
546 			break;
547 		}
548 	};
549 
550 	set_fs(oldfs);
551 
552 	if (rv != size)
553 		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
554 
555 	return rv;
556 }
557 
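/* Actively establish a TCP connection to the peer, bound to our configured
 * source address.  Returns the connected socket, or NULL on failure. */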
558 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
559 {
560 	const char *what;
561 	struct socket *sock;
562 	struct sockaddr_in6 src_in6;
563 	int err;
564 	int disconnect_on_error = 1;
565 
566 	if (!get_net_conf(mdev))
567 		return NULL;
568 
569 	what = "sock_create_kern";
570 	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
571 		SOCK_STREAM, IPPROTO_TCP, &sock);
572 	if (err < 0) {
573 		sock = NULL;
574 		goto out;
575 	}
576 
577 	sock->sk->sk_rcvtimeo =
578 	sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
579 
580        /* explicitly bind to the configured IP as source IP
581 	*  for the outgoing connections.
582 	*  This is needed for multihomed hosts and to be
583 	*  able to use lo: interfaces for drbd.
584 	* Make sure to use 0 as port number, so linux selects
585 	*  a free one dynamically.
586 	*/
587 	memcpy(&src_in6, mdev->net_conf->my_addr,
588 	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
589 	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
590 		src_in6.sin6_port = 0;
591 	else
592 		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
593 
594 	what = "bind before connect";
595 	err = sock->ops->bind(sock,
596 			      (struct sockaddr *) &src_in6,
597 			      mdev->net_conf->my_addr_len);
598 	if (err < 0)
599 		goto out;
600 
601 	/* connect may fail, peer not yet available.
602 	 * stay C_WF_CONNECTION, don't go Disconnecting! */
603 	disconnect_on_error = 0;
604 	what = "connect";
605 	err = sock->ops->connect(sock,
606 				 (struct sockaddr *)mdev->net_conf->peer_addr,
607 				 mdev->net_conf->peer_addr_len, 0);
608 
609 out:
610 	if (err < 0) {
611 		if (sock) {
612 			sock_release(sock);
613 			sock = NULL;
614 		}
615 		switch (-err) {
616 			/* timeout, busy, signal pending */
617 		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
618 		case EINTR: case ERESTARTSYS:
619 			/* peer not (yet) available, network problem */
620 		case ECONNREFUSED: case ENETUNREACH:
621 		case EHOSTDOWN:    case EHOSTUNREACH:
622 			disconnect_on_error = 0;
623 			break;
624 		default:
625 			dev_err(DEV, "%s failed, err = %d\n", what, err);
626 		}
627 		if (disconnect_on_error)
628 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
629 	}
630 	put_net_conf(mdev);
631 	return sock;
632 }
633 
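/* Passively wait for the peer to connect to us, using a randomly jittered
 * listen/accept timeout.  Returns the accepted socket, or NULL. */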
634 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
635 {
636 	int timeo, err;
637 	struct socket *s_estab = NULL, *s_listen;
638 	const char *what;
639 
640 	if (!get_net_conf(mdev))
641 		return NULL;
642 
643 	what = "sock_create_kern";
644 	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
645 		SOCK_STREAM, IPPROTO_TCP, &s_listen);
646 	if (err) {
647 		s_listen = NULL;
648 		goto out;
649 	}
650 
651 	timeo = mdev->net_conf->try_connect_int * HZ;
652 	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
653 
654 	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
655 	s_listen->sk->sk_rcvtimeo = timeo;
656 	s_listen->sk->sk_sndtimeo = timeo;
657 
658 	what = "bind before listen";
659 	err = s_listen->ops->bind(s_listen,
660 			      (struct sockaddr *) mdev->net_conf->my_addr,
661 			      mdev->net_conf->my_addr_len);
662 	if (err < 0)
663 		goto out;
664 
665 	err = drbd_accept(mdev, &what, s_listen, &s_estab);
666 
667 out:
668 	if (s_listen)
669 		sock_release(s_listen);
670 	if (err < 0) {
671 		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
672 			dev_err(DEV, "%s failed, err = %d\n", what, err);
673 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
674 		}
675 	}
676 	put_net_conf(mdev);
677 
678 	return s_estab;
679 }
680 
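/* Send the "first packet" on a newly established socket, identifying it to
 * the peer as the data (P_HAND_SHAKE_S) or meta (P_HAND_SHAKE_M) socket. */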
681 static int drbd_send_fp(struct drbd_conf *mdev,
682 	struct socket *sock, enum drbd_packets cmd)
683 {
684 	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
685 
686 	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
687 }
688 
689 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
690 {
691 	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
692 	int rr;
693 
694 	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
695 
696 	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
697 		return be16_to_cpu(h->command);
698 
699 	return 0xffff;
700 }
701 
702 /**
703  * drbd_socket_okay() - Free the socket if its connection is not okay
704  * @mdev:	DRBD device.
705  * @sock:	pointer to the pointer to the socket.
706  */
707 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
708 {
709 	int rr;
710 	char tb[4];
711 
712 	if (!*sock)
713 		return FALSE;
714 
715 	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
716 
717 	if (rr > 0 || rr == -EAGAIN) {
718 		return TRUE;
719 	} else {
720 		sock_release(*sock);
721 		*sock = NULL;
722 		return FALSE;
723 	}
724 }
725 
726 /*
727  * return values:
728  *   1 yes, we have a valid connection
729  *   0 oops, did not work out, please try again
730  *  -1 peer talks different language,
731  *     no point in trying again, please go standalone.
732  *  -2 We do not have a network config...
733  */
734 static int drbd_connect(struct drbd_conf *mdev)
735 {
736 	struct socket *s, *sock, *msock;
737 	int try, h, ok;
738 
739 	D_ASSERT(!mdev->data.socket);
740 
741 	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
742 		dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
743 
744 	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
745 		return -2;
746 
747 	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
748 
749 	sock  = NULL;
750 	msock = NULL;
751 
752 	do {
753 		for (try = 0;;) {
754 			/* 3 tries, this should take less than a second! */
755 			s = drbd_try_connect(mdev);
756 			if (s || ++try >= 3)
757 				break;
758 			/* give the other side time to call bind() & listen() */
759 			__set_current_state(TASK_INTERRUPTIBLE);
760 			schedule_timeout(HZ / 10);
761 		}
762 
763 		if (s) {
764 			if (!sock) {
765 				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
766 				sock = s;
767 				s = NULL;
768 			} else if (!msock) {
769 				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
770 				msock = s;
771 				s = NULL;
772 			} else {
773 				dev_err(DEV, "Logic error in drbd_connect()\n");
774 				goto out_release_sockets;
775 			}
776 		}
777 
778 		if (sock && msock) {
779 			__set_current_state(TASK_INTERRUPTIBLE);
780 			schedule_timeout(HZ / 10);
781 			ok = drbd_socket_okay(mdev, &sock);
782 			ok = drbd_socket_okay(mdev, &msock) && ok;
783 			if (ok)
784 				break;
785 		}
786 
787 retry:
788 		s = drbd_wait_for_connect(mdev);
789 		if (s) {
790 			try = drbd_recv_fp(mdev, s);
791 			drbd_socket_okay(mdev, &sock);
792 			drbd_socket_okay(mdev, &msock);
793 			switch (try) {
794 			case P_HAND_SHAKE_S:
795 				if (sock) {
796 					dev_warn(DEV, "initial packet S crossed\n");
797 					sock_release(sock);
798 				}
799 				sock = s;
800 				break;
801 			case P_HAND_SHAKE_M:
802 				if (msock) {
803 					dev_warn(DEV, "initial packet M crossed\n");
804 					sock_release(msock);
805 				}
806 				msock = s;
807 				set_bit(DISCARD_CONCURRENT, &mdev->flags);
808 				break;
809 			default:
810 				dev_warn(DEV, "Error receiving initial packet\n");
811 				sock_release(s);
812 				if (random32() & 1)
813 					goto retry;
814 			}
815 		}
816 
817 		if (mdev->state.conn <= C_DISCONNECTING)
818 			goto out_release_sockets;
819 		if (signal_pending(current)) {
820 			flush_signals(current);
821 			smp_rmb();
822 			if (get_t_state(&mdev->receiver) == Exiting)
823 				goto out_release_sockets;
824 		}
825 
826 		if (sock && msock) {
827 			ok = drbd_socket_okay(mdev, &sock);
828 			ok = drbd_socket_okay(mdev, &msock) && ok;
829 			if (ok)
830 				break;
831 		}
832 	} while (1);
833 
834 	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
835 	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
836 
837 	sock->sk->sk_allocation = GFP_NOIO;
838 	msock->sk->sk_allocation = GFP_NOIO;
839 
840 	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
841 	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
842 
843 	if (mdev->net_conf->sndbuf_size) {
844 		sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
845 		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
846 	}
847 
848 	if (mdev->net_conf->rcvbuf_size) {
849 		sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
850 		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
851 	}
852 
853 	/* NOT YET ...
854 	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
855 	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
856 	 * first set it to the P_HAND_SHAKE timeout,
857 	 * which we set to 4x the configured ping_timeout. */
858 	sock->sk->sk_sndtimeo =
859 	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
860 
861 	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
862 	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
863 
864 	/* we don't want delays.
865 	 * we use TCP_CORK where appropriate, though */
866 	drbd_tcp_nodelay(sock);
867 	drbd_tcp_nodelay(msock);
868 
869 	mdev->data.socket = sock;
870 	mdev->meta.socket = msock;
871 	mdev->last_received = jiffies;
872 
873 	D_ASSERT(mdev->asender.task == NULL);
874 
875 	h = drbd_do_handshake(mdev);
876 	if (h <= 0)
877 		return h;
878 
879 	if (mdev->cram_hmac_tfm) {
880 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
881 		switch (drbd_do_auth(mdev)) {
882 		case -1:
883 			dev_err(DEV, "Authentication of peer failed\n");
884 			return -1;
885 		case 0:
886 			dev_err(DEV, "Authentication of peer failed, trying again.\n");
887 			return 0;
888 		}
889 	}
890 
891 	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
892 		return 0;
893 
894 	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
895 	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
896 
897 	atomic_set(&mdev->packet_seq, 0);
898 	mdev->peer_seq = 0;
899 
900 	drbd_thread_start(&mdev->asender);
901 
902 	if (!drbd_send_protocol(mdev))
903 		return -1;
904 	drbd_send_sync_param(mdev, &mdev->sync_conf);
905 	drbd_send_sizes(mdev, 0);
906 	drbd_send_uuids(mdev);
907 	drbd_send_state(mdev);
908 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
909 	clear_bit(RESIZE_PENDING, &mdev->flags);
910 
911 	return 1;
912 
913 out_release_sockets:
914 	if (sock)
915 		sock_release(sock);
916 	if (msock)
917 		sock_release(msock);
918 	return -1;
919 }
920 
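/* Receive a packet header, convert command and length to host byte order,
 * and verify the magic.  Returns TRUE on success. */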
921 static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
922 {
923 	int r;
924 
925 	r = drbd_recv(mdev, h, sizeof(*h));
926 
927 	if (unlikely(r != sizeof(*h))) {
928 		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
929 		return FALSE;
930 	};
931 	h->command = be16_to_cpu(h->command);
932 	h->length  = be16_to_cpu(h->length);
933 	if (unlikely(h->magic != BE_DRBD_MAGIC)) {
934 		dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
935 		    (long)be32_to_cpu(h->magic),
936 		    h->command, h->length);
937 		return FALSE;
938 	}
939 	mdev->last_received = jiffies;
940 
941 	return TRUE;
942 }
943 
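/* Flush the backing device if the current write ordering policy asks for
 * it, falling back to WO_drain_io on failure, then report EV_BARRIER_DONE
 * for @epoch. */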
944 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
945 {
946 	int rv;
947 
948 	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
949 		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
950 		if (rv) {
951 			dev_err(DEV, "local disk flush failed with status %d\n", rv);
952 			/* would rather check on EOPNOTSUPP, but that is not reliable.
953 			 * don't try again for ANY return value != 0
954 			 * if (rv == -EOPNOTSUPP) */
955 			drbd_bump_write_ordering(mdev, WO_drain_io);
956 		}
957 		put_ldev(mdev);
958 	}
959 
960 	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
961 }
962 
963 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
964 {
965 	struct flush_work *fw = (struct flush_work *)w;
966 	struct drbd_epoch *epoch = fw->epoch;
967 
968 	kfree(w);
969 
970 	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
971 		drbd_flush_after_epoch(mdev, epoch);
972 
973 	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
974 			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
975 
976 	return 1;
977 }
978 
979 /**
980  * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
981  * @mdev:	DRBD device.
982  * @epoch:	Epoch object.
983  * @ev:		Epoch event.
984  */
985 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
986 					       struct drbd_epoch *epoch,
987 					       enum epoch_event ev)
988 {
989 	int finish, epoch_size;
990 	struct drbd_epoch *next_epoch;
991 	int schedule_flush = 0;
992 	enum finish_epoch rv = FE_STILL_LIVE;
993 
994 	spin_lock(&mdev->epoch_lock);
995 	do {
996 		next_epoch = NULL;
997 		finish = 0;
998 
999 		epoch_size = atomic_read(&epoch->epoch_size);
1000 
1001 		switch (ev & ~EV_CLEANUP) {
1002 		case EV_PUT:
1003 			atomic_dec(&epoch->active);
1004 			break;
1005 		case EV_GOT_BARRIER_NR:
1006 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1007 
1008 			/* Special case: If we just switched from WO_bio_barrier to
1009 			   WO_bdev_flush we should not finish the current epoch */
1010 			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1011 			    mdev->write_ordering != WO_bio_barrier &&
1012 			    epoch == mdev->current_epoch)
1013 				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1014 			break;
1015 		case EV_BARRIER_DONE:
1016 			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1017 			break;
1018 		case EV_BECAME_LAST:
1019 			/* nothing to do*/
1020 			break;
1021 		}
1022 
1023 		if (epoch_size != 0 &&
1024 		    atomic_read(&epoch->active) == 0 &&
1025 		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1026 		    epoch->list.prev == &mdev->current_epoch->list &&
1027 		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1028 			/* Nearly all conditions are met to finish that epoch... */
1029 			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1030 			    mdev->write_ordering == WO_none ||
1031 			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1032 			    ev & EV_CLEANUP) {
1033 				finish = 1;
1034 				set_bit(DE_IS_FINISHING, &epoch->flags);
1035 			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1036 				 mdev->write_ordering == WO_bio_barrier) {
1037 				atomic_inc(&epoch->active);
1038 				schedule_flush = 1;
1039 			}
1040 		}
1041 		if (finish) {
1042 			if (!(ev & EV_CLEANUP)) {
1043 				spin_unlock(&mdev->epoch_lock);
1044 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1045 				spin_lock(&mdev->epoch_lock);
1046 			}
1047 			dec_unacked(mdev);
1048 
1049 			if (mdev->current_epoch != epoch) {
1050 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1051 				list_del(&epoch->list);
1052 				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1053 				mdev->epochs--;
1054 				kfree(epoch);
1055 
1056 				if (rv == FE_STILL_LIVE)
1057 					rv = FE_DESTROYED;
1058 			} else {
1059 				epoch->flags = 0;
1060 				atomic_set(&epoch->epoch_size, 0);
1061 				/* atomic_set(&epoch->active, 0); is already zero */
1062 				if (rv == FE_STILL_LIVE)
1063 					rv = FE_RECYCLED;
1064 			}
1065 		}
1066 
1067 		if (!next_epoch)
1068 			break;
1069 
1070 		epoch = next_epoch;
1071 	} while (1);
1072 
1073 	spin_unlock(&mdev->epoch_lock);
1074 
1075 	if (schedule_flush) {
1076 		struct flush_work *fw;
1077 		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1078 		if (fw) {
1079 			fw->w.cb = w_flush;
1080 			fw->epoch = epoch;
1081 			drbd_queue_work(&mdev->data.work, &fw->w);
1082 		} else {
1083 			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1084 			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1085 			/* That is not a recursion, only one level */
1086 			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1087 			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1088 		}
1089 	}
1090 
1091 	return rv;
1092 }
1093 
1094 /**
1095  * drbd_bump_write_ordering() - Fall back to another write ordering method
1096  * @mdev:	DRBD device.
1097  * @wo:		Write ordering method to try.
1098  */
1099 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1100 {
1101 	enum write_ordering_e pwo;
1102 	static char *write_ordering_str[] = {
1103 		[WO_none] = "none",
1104 		[WO_drain_io] = "drain",
1105 		[WO_bdev_flush] = "flush",
1106 		[WO_bio_barrier] = "barrier",
1107 	};
1108 
1109 	pwo = mdev->write_ordering;
1110 	wo = min(pwo, wo);
1111 	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1112 		wo = WO_bdev_flush;
1113 	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1114 		wo = WO_drain_io;
1115 	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1116 		wo = WO_none;
1117 	mdev->write_ordering = wo;
1118 	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1119 		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1120 }
1121 
1122 /**
1123  * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
1124  * @mdev:	DRBD device.
1125  * @w:		work object.
1126  * @cancel:	The connection will be closed anyway (unused in this callback)
1127  */
1128 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1129 {
1130 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1131 	struct bio *bio = e->private_bio;
1132 
1133 	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1134 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1135 	   so that we can finish that epoch in drbd_may_finish_epoch().
1136 	   That is necessary if we already have a long chain of Epochs, before
1137 	   we realize that BIO_RW_BARRIER is actually not supported */
1138 
1139 	/* As long as the -ENOTSUPP on the barrier is reported immediately
1140 	   that will never trigger. If it is reported late, we will just
1141 	   print that warning and continue correctly for all future requests
1142 	   with WO_bdev_flush */
1143 	if (previous_epoch(mdev, e->epoch))
1144 		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1145 
1146 	/* prepare bio for re-submit,
1147 	 * re-init volatile members */
1148 	/* we still have a local reference,
1149 	 * get_ldev was done in receive_Data. */
1150 	bio->bi_bdev = mdev->ldev->backing_bdev;
1151 	bio->bi_sector = e->sector;
1152 	bio->bi_size = e->size;
1153 	bio->bi_idx = 0;
1154 
1155 	bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1156 	bio->bi_flags |= 1 << BIO_UPTODATE;
1157 
1158 	/* don't know whether this is necessary: */
1159 	bio->bi_phys_segments = 0;
1160 	bio->bi_next = NULL;
1161 
1162 	/* these should be unchanged: */
1163 	/* bio->bi_end_io = drbd_endio_write_sec; */
1164 	/* bio->bi_vcnt = whatever; */
1165 
1166 	e->w.cb = e_end_block;
1167 
1168 	/* This is no longer a barrier request. */
1169 	bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
1170 
1171 	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
1172 
1173 	return 1;
1174 }
1175 
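/* Handle P_BARRIER: record the barrier number of the current epoch, finish
 * it according to the active write ordering policy, and install a fresh
 * epoch object for the writes that follow. */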
1176 static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1177 {
1178 	int rv, issue_flush;
1179 	struct p_barrier *p = (struct p_barrier *)h;
1180 	struct drbd_epoch *epoch;
1181 
1182 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1183 
1184 	rv = drbd_recv(mdev, h->payload, h->length);
1185 	ERR_IF(rv != h->length) return FALSE;
1186 
1187 	inc_unacked(mdev);
1188 
1189 	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1190 		drbd_kick_lo(mdev);
1191 
1192 	mdev->current_epoch->barrier_nr = p->barrier;
1193 	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1194 
1195 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1196 	 * the activity log, which means it would not be resynced in case the
1197 	 * R_PRIMARY crashes now.
1198 	 * Therefore we must send the barrier_ack after the barrier request was
1199 	 * completed. */
1200 	switch (mdev->write_ordering) {
1201 	case WO_bio_barrier:
1202 	case WO_none:
1203 		if (rv == FE_RECYCLED)
1204 			return TRUE;
1205 		break;
1206 
1207 	case WO_bdev_flush:
1208 	case WO_drain_io:
1209 		if (rv == FE_STILL_LIVE) {
1210 			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1211 			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1212 			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1213 		}
1214 		if (rv == FE_RECYCLED)
1215 			return TRUE;
1216 
1217 		/* The asender will send all the ACKs and barrier ACKs out, since
1218 		   all EEs moved from the active_ee to the done_ee. We need to
1219 		   provide a new epoch object for the EEs that come in soon */
1220 		break;
1221 	}
1222 
1223 	/* receiver context, in the writeout path of the other node.
1224 	 * avoid potential distributed deadlock */
1225 	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1226 	if (!epoch) {
1227 		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1228 		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1229 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1230 		if (issue_flush) {
1231 			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1232 			if (rv == FE_RECYCLED)
1233 				return TRUE;
1234 		}
1235 
1236 		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1237 
1238 		return TRUE;
1239 	}
1240 
1241 	epoch->flags = 0;
1242 	atomic_set(&epoch->epoch_size, 0);
1243 	atomic_set(&epoch->active, 0);
1244 
1245 	spin_lock(&mdev->epoch_lock);
1246 	if (atomic_read(&mdev->current_epoch->epoch_size)) {
1247 		list_add(&epoch->list, &mdev->current_epoch->list);
1248 		mdev->current_epoch = epoch;
1249 		mdev->epochs++;
1250 	} else {
1251 		/* The current_epoch got recycled while we allocated this one... */
1252 		kfree(epoch);
1253 	}
1254 	spin_unlock(&mdev->epoch_lock);
1255 
1256 	return TRUE;
1257 }
1258 
1259 /* used from receive_RSDataReply (recv_resync_read)
1260  * and from receive_Data */
1261 static struct drbd_epoch_entry *
1262 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1263 {
1264 	struct drbd_epoch_entry *e;
1265 	struct bio_vec *bvec;
1266 	struct page *page;
1267 	struct bio *bio;
1268 	int dgs, ds, i, rr;
1269 	void *dig_in = mdev->int_dig_in;
1270 	void *dig_vv = mdev->int_dig_vv;
1271 
1272 	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1273 		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1274 
1275 	if (dgs) {
1276 		rr = drbd_recv(mdev, dig_in, dgs);
1277 		if (rr != dgs) {
1278 			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1279 			     rr, dgs);
1280 			return NULL;
1281 		}
1282 	}
1283 
1284 	data_size -= dgs;
1285 
1286 	ERR_IF(data_size &  0x1ff) return NULL;
1287 	ERR_IF(data_size >  DRBD_MAX_SEGMENT_SIZE) return NULL;
1288 
1289 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1290 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1291 	 * which in turn might block on the other node at this very place.  */
1292 	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1293 	if (!e)
1294 		return NULL;
1295 	bio = e->private_bio;
1296 	ds = data_size;
1297 	bio_for_each_segment(bvec, bio, i) {
1298 		page = bvec->bv_page;
1299 		rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
1300 		kunmap(page);
1301 		if (rr != min_t(int, ds, PAGE_SIZE)) {
1302 			drbd_free_ee(mdev, e);
1303 			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1304 			     rr, min_t(int, ds, PAGE_SIZE));
1305 			return NULL;
1306 		}
1307 		ds -= rr;
1308 	}
1309 
1310 	if (dgs) {
1311 		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1312 		if (memcmp(dig_in, dig_vv, dgs)) {
1313 			dev_err(DEV, "Digest integrity check FAILED.\n");
1314 			drbd_bcast_ee(mdev, "digest failed",
1315 					dgs, dig_in, dig_vv, e);
1316 			drbd_free_ee(mdev, e);
1317 			return NULL;
1318 		}
1319 	}
1320 	mdev->recv_cnt += data_size>>9;
1321 	return e;
1322 }
1323 
1324 /* drbd_drain_block() just takes a data block
1325  * out of the socket input buffer, and discards it.
1326  */
1327 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1328 {
1329 	struct page *page;
1330 	int rr, rv = 1;
1331 	void *data;
1332 
1333 	page = drbd_pp_alloc(mdev, 1);
1334 
1335 	data = kmap(page);
1336 	while (data_size) {
1337 		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1338 		if (rr != min_t(int, data_size, PAGE_SIZE)) {
1339 			rv = 0;
1340 			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1341 			     rr, min_t(int, data_size, PAGE_SIZE));
1342 			break;
1343 		}
1344 		data_size -= rr;
1345 	}
1346 	kunmap(page);
1347 	drbd_pp_free(mdev, page);
1348 	return rv;
1349 }
1350 
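/* Fill the original read request's bio with the data received in a
 * P_DATA_REPLY, verifying the data digest if one is configured.
 * Returns 1 on success, 0 on short read or digest mismatch. */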
1351 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1352 			   sector_t sector, int data_size)
1353 {
1354 	struct bio_vec *bvec;
1355 	struct bio *bio;
1356 	int dgs, rr, i, expect;
1357 	void *dig_in = mdev->int_dig_in;
1358 	void *dig_vv = mdev->int_dig_vv;
1359 
1360 	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1361 		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1362 
1363 	if (dgs) {
1364 		rr = drbd_recv(mdev, dig_in, dgs);
1365 		if (rr != dgs) {
1366 			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1367 			     rr, dgs);
1368 			return 0;
1369 		}
1370 	}
1371 
1372 	data_size -= dgs;
1373 
1374 	/* optimistically update recv_cnt.  if receiving fails below,
1375 	 * we disconnect anyways, and counters will be reset. */
1376 	mdev->recv_cnt += data_size>>9;
1377 
1378 	bio = req->master_bio;
1379 	D_ASSERT(sector == bio->bi_sector);
1380 
1381 	bio_for_each_segment(bvec, bio, i) {
1382 		expect = min_t(int, data_size, bvec->bv_len);
1383 		rr = drbd_recv(mdev,
1384 			     kmap(bvec->bv_page)+bvec->bv_offset,
1385 			     expect);
1386 		kunmap(bvec->bv_page);
1387 		if (rr != expect) {
1388 			dev_warn(DEV, "short read receiving data reply: "
1389 			     "read %d expected %d\n",
1390 			     rr, expect);
1391 			return 0;
1392 		}
1393 		data_size -= rr;
1394 	}
1395 
1396 	if (dgs) {
1397 		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1398 		if (memcmp(dig_in, dig_vv, dgs)) {
1399 			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1400 			return 0;
1401 		}
1402 	}
1403 
1404 	D_ASSERT(data_size == 0);
1405 	return 1;
1406 }
1407 
1408 /* e_end_resync_block() is called via
1409  * drbd_process_done_ee() by asender only */
1410 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1411 {
1412 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1413 	sector_t sector = e->sector;
1414 	int ok;
1415 
1416 	D_ASSERT(hlist_unhashed(&e->colision));
1417 
1418 	if (likely(drbd_bio_uptodate(e->private_bio))) {
1419 		drbd_set_in_sync(mdev, sector, e->size);
1420 		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1421 	} else {
1422 		/* Record failure to sync */
1423 		drbd_rs_failed_io(mdev, sector, e->size);
1424 
1425 		ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1426 	}
1427 	dec_unacked(mdev);
1428 
1429 	return ok;
1430 }
1431 
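/* Read one resync data block off the wire into a fresh epoch entry and
 * submit it to the local disk.  The ACK is sent later from
 * e_end_resync_block(). */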
1432 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1433 {
1434 	struct drbd_epoch_entry *e;
1435 
1436 	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1437 	if (!e) {
1438 		put_ldev(mdev);
1439 		return FALSE;
1440 	}
1441 
1442 	dec_rs_pending(mdev);
1443 
1444 	e->private_bio->bi_end_io = drbd_endio_write_sec;
1445 	e->private_bio->bi_rw = WRITE;
1446 	e->w.cb = e_end_resync_block;
1447 
1448 	inc_unacked(mdev);
1449 	/* corresponding dec_unacked() in e_end_resync_block()
1450 	 * respective _drbd_clear_done_ee */
1451 
1452 	spin_lock_irq(&mdev->req_lock);
1453 	list_add(&e->w.list, &mdev->sync_ee);
1454 	spin_unlock_irq(&mdev->req_lock);
1455 
1456 	drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
1457 	/* accounting done in endio */
1458 
1459 	maybe_kick_lo(mdev);
1460 	return TRUE;
1461 }
1462 
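/* Handle P_DATA_REPLY: the answer to one of our read requests.  Look up
 * the original request by block_id/sector and fill its bio with the
 * received data. */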
1463 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1464 {
1465 	struct drbd_request *req;
1466 	sector_t sector;
1467 	unsigned int header_size, data_size;
1468 	int ok;
1469 	struct p_data *p = (struct p_data *)h;
1470 
1471 	header_size = sizeof(*p) - sizeof(*h);
1472 	data_size   = h->length  - header_size;
1473 
1474 	ERR_IF(data_size == 0) return FALSE;
1475 
1476 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
1477 		return FALSE;
1478 
1479 	sector = be64_to_cpu(p->sector);
1480 
1481 	spin_lock_irq(&mdev->req_lock);
1482 	req = _ar_id_to_req(mdev, p->block_id, sector);
1483 	spin_unlock_irq(&mdev->req_lock);
1484 	if (unlikely(!req)) {
1485 		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1486 		return FALSE;
1487 	}
1488 
1489 	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1490 	 * special casing it there for the various failure cases.
1491 	 * still no race with drbd_fail_pending_reads */
1492 	ok = recv_dless_read(mdev, req, sector, data_size);
1493 
1494 	if (ok)
1495 		req_mod(req, data_received);
1496 	/* else: nothing. handled from drbd_disconnect...
1497 	 * I don't think we may complete this just yet
1498 	 * in case we are "on-disconnect: freeze" */
1499 
1500 	return ok;
1501 }
1502 
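/* Handle P_RS_DATA_REPLY: resync data we asked for.  Write it to the local
 * disk if we still have one, otherwise drain the payload and send a
 * negative ACK. */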
1503 static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1504 {
1505 	sector_t sector;
1506 	unsigned int header_size, data_size;
1507 	int ok;
1508 	struct p_data *p = (struct p_data *)h;
1509 
1510 	header_size = sizeof(*p) - sizeof(*h);
1511 	data_size   = h->length  - header_size;
1512 
1513 	ERR_IF(data_size == 0) return FALSE;
1514 
1515 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
1516 		return FALSE;
1517 
1518 	sector = be64_to_cpu(p->sector);
1519 	D_ASSERT(p->block_id == ID_SYNCER);
1520 
1521 	if (get_ldev(mdev)) {
1522 		/* data is submitted to disk within recv_resync_read.
1523 		 * corresponding put_ldev done below on error,
1524 		 * or in drbd_endio_write_sec. */
1525 		ok = recv_resync_read(mdev, sector, data_size);
1526 	} else {
1527 		if (__ratelimit(&drbd_ratelimit_state))
1528 			dev_err(DEV, "Can not write resync data to local disk.\n");
1529 
1530 		ok = drbd_drain_block(mdev, data_size);
1531 
1532 		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1533 	}
1534 
1535 	return ok;
1536 }
1537 
1538 /* e_end_block() is called via drbd_process_done_ee().
1539  * this means this function only runs in the asender thread
1540  */
1541 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1542 {
1543 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1544 	sector_t sector = e->sector;
1545 	struct drbd_epoch *epoch;
1546 	int ok = 1, pcmd;
1547 
1548 	if (e->flags & EE_IS_BARRIER) {
1549 		epoch = previous_epoch(mdev, e->epoch);
1550 		if (epoch)
1551 			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1552 	}
1553 
1554 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1555 		if (likely(drbd_bio_uptodate(e->private_bio))) {
1556 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1557 				mdev->state.conn <= C_PAUSED_SYNC_T &&
1558 				e->flags & EE_MAY_SET_IN_SYNC) ?
1559 				P_RS_WRITE_ACK : P_WRITE_ACK;
1560 			ok &= drbd_send_ack(mdev, pcmd, e);
1561 			if (pcmd == P_RS_WRITE_ACK)
1562 				drbd_set_in_sync(mdev, sector, e->size);
1563 		} else {
1564 			ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1565 			/* we expect it to be marked out of sync anyway...
1566 			 * maybe assert this?  */
1567 		}
1568 		dec_unacked(mdev);
1569 	}
1570 	/* we delete from the conflict detection hash _after_ we sent out the
1571 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1572 	if (mdev->net_conf->two_primaries) {
1573 		spin_lock_irq(&mdev->req_lock);
1574 		D_ASSERT(!hlist_unhashed(&e->colision));
1575 		hlist_del_init(&e->colision);
1576 		spin_unlock_irq(&mdev->req_lock);
1577 	} else {
1578 		D_ASSERT(hlist_unhashed(&e->colision));
1579 	}
1580 
1581 	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1582 
1583 	return ok;
1584 }
1585 
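/* Asender callback for a peer write discarded during two-primaries
 * conflict handling: sends P_DISCARD_ACK instead of applying the data. */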
1586 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1587 {
1588 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1589 	int ok = 1;
1590 
1591 	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1592 	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1593 
1594 	spin_lock_irq(&mdev->req_lock);
1595 	D_ASSERT(!hlist_unhashed(&e->colision));
1596 	hlist_del_init(&e->colision);
1597 	spin_unlock_irq(&mdev->req_lock);
1598 
1599 	dec_unacked(mdev);
1600 
1601 	return ok;
1602 }
1603 
1604 /* Called from receive_Data.
1605  * Synchronize packets on sock with packets on msock.
1606  *
1607  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1608  * packet traveling on msock, they are still processed in the order they have
1609  * been sent.
1610  *
1611  * Note: we don't care for Ack packets overtaking P_DATA packets.
1612  *
1613  * In case packet_seq is larger than mdev->peer_seq number, there are
1614  * outstanding packets on the msock. We wait for them to arrive.
1615  * In case we are the logically next packet, we update mdev->peer_seq
1616  * ourselves. Correctly handles 32bit wrap around.
1617  *
1618  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1619  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1620  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1621  * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1622  *
1623  * returns 0 if we may process the packet,
1624  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1625 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1626 {
1627 	DEFINE_WAIT(wait);
1628 	unsigned int p_seq;
1629 	long timeout;
1630 	int ret = 0;
1631 	spin_lock(&mdev->peer_seq_lock);
1632 	for (;;) {
1633 		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1634 		if (seq_le(packet_seq, mdev->peer_seq+1))
1635 			break;
1636 		if (signal_pending(current)) {
1637 			ret = -ERESTARTSYS;
1638 			break;
1639 		}
1640 		p_seq = mdev->peer_seq;
1641 		spin_unlock(&mdev->peer_seq_lock);
1642 		timeout = schedule_timeout(30*HZ);
1643 		spin_lock(&mdev->peer_seq_lock);
1644 		if (timeout == 0 && p_seq == mdev->peer_seq) {
1645 			ret = -ETIMEDOUT;
1646 			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1647 			break;
1648 		}
1649 	}
1650 	finish_wait(&mdev->seq_wait, &wait);
1651 	if (mdev->peer_seq+1 == packet_seq)
1652 		mdev->peer_seq++;
1653 	spin_unlock(&mdev->peer_seq_lock);
1654 	return ret;
1655 }
1656 
1657 /* mirrored write */
1658 static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1659 {
1660 	sector_t sector;
1661 	struct drbd_epoch_entry *e;
1662 	struct p_data *p = (struct p_data *)h;
1663 	int header_size, data_size;
1664 	int rw = WRITE;
1665 	u32 dp_flags;
1666 
1667 	header_size = sizeof(*p) - sizeof(*h);
1668 	data_size   = h->length  - header_size;
1669 
1670 	ERR_IF(data_size == 0) return FALSE;
1671 
1672 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
1673 		return FALSE;
1674 
1675 	if (!get_ldev(mdev)) {
1676 		if (__ratelimit(&drbd_ratelimit_state))
1677 			dev_err(DEV, "Can not write mirrored data block "
1678 			    "to local disk.\n");
1679 		spin_lock(&mdev->peer_seq_lock);
1680 		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1681 			mdev->peer_seq++;
1682 		spin_unlock(&mdev->peer_seq_lock);
1683 
1684 		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1685 		atomic_inc(&mdev->current_epoch->epoch_size);
1686 		return drbd_drain_block(mdev, data_size);
1687 	}
1688 
1689 	/* get_ldev(mdev) successful.
1690 	 * Corresponding put_ldev done either below (on various errors),
1691 	 * or in drbd_endio_write_sec, if we successfully submit the data at
1692 	 * the end of this function. */
1693 
1694 	sector = be64_to_cpu(p->sector);
1695 	e = read_in_block(mdev, p->block_id, sector, data_size);
1696 	if (!e) {
1697 		put_ldev(mdev);
1698 		return FALSE;
1699 	}
1700 
1701 	e->private_bio->bi_end_io = drbd_endio_write_sec;
1702 	e->w.cb = e_end_block;
1703 
1704 	spin_lock(&mdev->epoch_lock);
1705 	e->epoch = mdev->current_epoch;
1706 	atomic_inc(&e->epoch->epoch_size);
1707 	atomic_inc(&e->epoch->active);
1708 
1709 	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1710 		struct drbd_epoch *epoch;
1711 		/* Issue a barrier if we start a new epoch, and the previous epoch
1712 		   was not an epoch containing a single request which already was
1713 		   a Barrier. */
1714 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1715 		if (epoch == e->epoch) {
1716 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1717 			rw |= (1<<BIO_RW_BARRIER);
1718 			e->flags |= EE_IS_BARRIER;
1719 		} else {
1720 			if (atomic_read(&epoch->epoch_size) > 1 ||
1721 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1722 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1723 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1724 				rw |= (1<<BIO_RW_BARRIER);
1725 				e->flags |= EE_IS_BARRIER;
1726 			}
1727 		}
1728 	}
1729 	spin_unlock(&mdev->epoch_lock);
1730 
1731 	dp_flags = be32_to_cpu(p->dp_flags);
1732 	if (dp_flags & DP_HARDBARRIER) {
1733 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
1734 		/* rw |= (1<<BIO_RW_BARRIER); */
1735 	}
1736 	if (dp_flags & DP_RW_SYNC)
1737 		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
1738 	if (dp_flags & DP_MAY_SET_IN_SYNC)
1739 		e->flags |= EE_MAY_SET_IN_SYNC;
1740 
1741 	/* I'm the receiver, I do hold a net_cnt reference. */
1742 	if (!mdev->net_conf->two_primaries) {
1743 		spin_lock_irq(&mdev->req_lock);
1744 	} else {
1745 		/* don't get the req_lock yet,
1746 		 * we may sleep in drbd_wait_peer_seq */
1747 		const int size = e->size;
1748 		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1749 		DEFINE_WAIT(wait);
1750 		struct drbd_request *i;
1751 		struct hlist_node *n;
1752 		struct hlist_head *slot;
1753 		int first;
1754 
1755 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1756 		BUG_ON(mdev->ee_hash == NULL);
1757 		BUG_ON(mdev->tl_hash == NULL);
1758 
1759 		/* conflict detection and handling:
1760 		 * 1. wait on the sequence number,
1761 		 *    in case this data packet overtook ACK packets.
1762 		 * 2. check our hash tables for conflicting requests.
1763 		 *    we only need to walk the tl_hash, since an ee can not
1764 		 *    have a conflict with an other ee: on the submitting
1765 		 *    node, the corresponding req had already been conflicting,
1766 		 *    and a conflicting req is never sent.
1767 		 *
1768 		 * Note: for two_primaries, we are protocol C,
1769 		 * so there cannot be any request that is DONE
1770 		 * but still on the transfer log.
1771 		 *
1772 		 * unconditionally add to the ee_hash.
1773 		 *
1774 		 * if no conflicting request is found:
1775 		 *    submit.
1776 		 *
1777 		 * if any conflicting request is found
1778 		 * that has not yet been acked,
1779 		 * AND I have the "discard concurrent writes" flag:
1780 		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
1781 		 *
1782 		 * if any conflicting request is found:
1783 		 *	 block the receiver, waiting on misc_wait
1784 		 *	 until no more conflicting requests are there,
1785 		 *	 or we get interrupted (disconnect).
1786 		 *
1787 		 *	 we do not just write after local io completion of those
1788 		 *	 requests, but only after req is done completely, i.e.
1789 		 *	 we wait for the P_DISCARD_ACK to arrive!
1790 		 *
1791 		 *	 then proceed normally, i.e. submit.
1792 		 */
1793 		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1794 			goto out_interrupted;
1795 
1796 		spin_lock_irq(&mdev->req_lock);
1797 
1798 		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1799 
1800 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1801 		slot = tl_hash_slot(mdev, sector);
1802 		first = 1;
1803 		for (;;) {
1804 			int have_unacked = 0;
1805 			int have_conflict = 0;
1806 			prepare_to_wait(&mdev->misc_wait, &wait,
1807 				TASK_INTERRUPTIBLE);
1808 			hlist_for_each_entry(i, n, slot, colision) {
1809 				if (OVERLAPS) {
1810 					/* only ALERT on first iteration,
1811 					 * we may be woken up early... */
1812 					if (first)
1813 						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1814 						      "	new: %llus +%u; pending: %llus +%u\n",
1815 						      current->comm, current->pid,
1816 						      (unsigned long long)sector, size,
1817 						      (unsigned long long)i->sector, i->size);
1818 					if (i->rq_state & RQ_NET_PENDING)
1819 						++have_unacked;
1820 					++have_conflict;
1821 				}
1822 			}
1823 #undef OVERLAPS
1824 			if (!have_conflict)
1825 				break;
1826 
1827 			/* Discard Ack only for the _first_ iteration */
1828 			if (first && discard && have_unacked) {
1829 				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1830 				     (unsigned long long)sector);
1831 				inc_unacked(mdev);
1832 				e->w.cb = e_send_discard_ack;
1833 				list_add_tail(&e->w.list, &mdev->done_ee);
1834 
1835 				spin_unlock_irq(&mdev->req_lock);
1836 
1837 				/* we could probably send that P_DISCARD_ACK ourselves,
1838 				 * but I don't like the receiver using the msock */
1839 
1840 				put_ldev(mdev);
1841 				wake_asender(mdev);
1842 				finish_wait(&mdev->misc_wait, &wait);
1843 				return TRUE;
1844 			}
1845 
1846 			if (signal_pending(current)) {
1847 				hlist_del_init(&e->colision);
1848 
1849 				spin_unlock_irq(&mdev->req_lock);
1850 
1851 				finish_wait(&mdev->misc_wait, &wait);
1852 				goto out_interrupted;
1853 			}
1854 
1855 			spin_unlock_irq(&mdev->req_lock);
1856 			if (first) {
1857 				first = 0;
1858 				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1859 				     "sec=%llus\n", (unsigned long long)sector);
1860 			} else if (discard) {
1861 				/* we had none on the first iteration.
1862 				 * there must be none now. */
1863 				D_ASSERT(have_unacked == 0);
1864 			}
1865 			schedule();
1866 			spin_lock_irq(&mdev->req_lock);
1867 		}
1868 		finish_wait(&mdev->misc_wait, &wait);
1869 	}
1870 
1871 	list_add(&e->w.list, &mdev->active_ee);
1872 	spin_unlock_irq(&mdev->req_lock);
1873 
1874 	switch (mdev->net_conf->wire_protocol) {
1875 	case DRBD_PROT_C:
1876 		inc_unacked(mdev);
1877 		/* corresponding dec_unacked() in e_end_block()
1878 		 * respective _drbd_clear_done_ee */
1879 		break;
1880 	case DRBD_PROT_B:
1881 		/* I really don't like it that the receiver thread
1882 		 * sends on the msock, but anyways */
1883 		drbd_send_ack(mdev, P_RECV_ACK, e);
1884 		break;
1885 	case DRBD_PROT_A:
1886 		/* nothing to do */
1887 		break;
1888 	}
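	/* Ack discipline per wire protocol, as implemented above:
	 * A: nothing is sent back for this write;
	 * B: receipt is acknowledged immediately with P_RECV_ACK;
	 * C: the ack is deferred until the local write completed
	 *    (see the dec_unacked() note for e_end_block() above). */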
1889 
1890 	if (mdev->state.pdsk == D_DISKLESS) {
1891 		/* In case we have the only disk of the cluster, */
1892 		drbd_set_out_of_sync(mdev, e->sector, e->size);
1893 		e->flags |= EE_CALL_AL_COMPLETE_IO;
1894 		drbd_al_begin_io(mdev, e->sector);
1895 	}
1896 
1897 	e->private_bio->bi_rw = rw;
1898 	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
1899 	/* accounting done in endio */
1900 
1901 	maybe_kick_lo(mdev);
1902 	return TRUE;
1903 
1904 out_interrupted:
1905 	/* yes, the epoch_size now is imbalanced.
1906 	 * but we drop the connection anyways, so we don't have a chance to
1907 	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1908 	put_ldev(mdev);
1909 	drbd_free_ee(mdev, e);
1910 	return FALSE;
1911 }
1912 
1913 static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
1914 {
1915 	sector_t sector;
1916 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1917 	struct drbd_epoch_entry *e;
1918 	struct digest_info *di = NULL;
1919 	int size, digest_size;
1920 	unsigned int fault_type;
1921 	struct p_block_req *p =
1922 		(struct p_block_req *)h;
1923 	const int brps = sizeof(*p)-sizeof(*h);
1924 
1925 	if (drbd_recv(mdev, h->payload, brps) != brps)
1926 		return FALSE;
1927 
1928 	sector = be64_to_cpu(p->sector);
1929 	size   = be32_to_cpu(p->blksize);
1930 
1931 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
1932 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1933 				(unsigned long long)sector, size);
1934 		return FALSE;
1935 	}
1936 	if (sector + (size>>9) > capacity) {
1937 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1938 				(unsigned long long)sector, size);
1939 		return FALSE;
1940 	}
1941 
1942 	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
1943 		if (__ratelimit(&drbd_ratelimit_state))
1944 			dev_err(DEV, "Can not satisfy peer's read request, "
1945 			    "no local data.\n");
1946 		drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
1947 				 P_NEG_RS_DREPLY , p);
1948 		return TRUE;
1949 	}
1950 
1951 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1952 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1953 	 * which in turn might block on the other node at this very place.  */
1954 	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1955 	if (!e) {
1956 		put_ldev(mdev);
1957 		return FALSE;
1958 	}
1959 
1960 	e->private_bio->bi_rw = READ;
1961 	e->private_bio->bi_end_io = drbd_endio_read_sec;
1962 
1963 	switch (h->command) {
1964 	case P_DATA_REQUEST:
1965 		e->w.cb = w_e_end_data_req;
1966 		fault_type = DRBD_FAULT_DT_RD;
1967 		break;
1968 	case P_RS_DATA_REQUEST:
1969 		e->w.cb = w_e_end_rsdata_req;
1970 		fault_type = DRBD_FAULT_RS_RD;
1971 		/* Eventually this should become asynchronous. Currently it
1972 		 * blocks the whole receiver just to delay the reading of a
1973 		 * resync data block.
1974 		 * the drbd_work_queue mechanism is made for this...
1975 		 */
1976 		if (!drbd_rs_begin_io(mdev, sector)) {
1977 			/* we have been interrupted,
1978 			 * probably connection lost! */
1979 			D_ASSERT(signal_pending(current));
1980 			goto out_free_e;
1981 		}
1982 		break;
1983 
1984 	case P_OV_REPLY:
1985 	case P_CSUM_RS_REQUEST:
1986 		fault_type = DRBD_FAULT_RS_RD;
1987 		digest_size = h->length - brps;
1988 		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
1989 		if (!di)
1990 			goto out_free_e;
1991 
1992 		di->digest_size = digest_size;
1993 		di->digest = (((char *)di)+sizeof(struct digest_info));
1994 
1995 		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
1996 			goto out_free_e;
1997 
1998 		e->block_id = (u64)(unsigned long)di;
1999 		if (h->command == P_CSUM_RS_REQUEST) {
2000 			D_ASSERT(mdev->agreed_pro_version >= 89);
2001 			e->w.cb = w_e_end_csum_rs_req;
2002 		} else if (h->command == P_OV_REPLY) {
2003 			e->w.cb = w_e_end_ov_reply;
2004 			dec_rs_pending(mdev);
2005 			break;
2006 		}
2007 
2008 		if (!drbd_rs_begin_io(mdev, sector)) {
2009 			/* we have been interrupted, probably connection lost! */
2010 			D_ASSERT(signal_pending(current));
2011 			goto out_free_e;
2012 		}
2013 		break;
2014 
2015 	case P_OV_REQUEST:
2016 		if (mdev->state.conn >= C_CONNECTED &&
2017 		    mdev->state.conn != C_VERIFY_T)
2018 			dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2019 				drbd_conn_str(mdev->state.conn));
2020 		if (mdev->ov_start_sector == ~(sector_t)0 &&
2021 		    mdev->agreed_pro_version >= 90) {
2022 			mdev->ov_start_sector = sector;
2023 			mdev->ov_position = sector;
2024 			mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2025 			dev_info(DEV, "Online Verify start sector: %llu\n",
2026 					(unsigned long long)sector);
2027 		}
2028 		e->w.cb = w_e_end_ov_req;
2029 		fault_type = DRBD_FAULT_RS_RD;
2030 		/* Eventually this should become asynchronous. Currently it
2031 		 * blocks the whole receiver just to delay the reading of a
2032 		 * resync data block.
2033 		 * the drbd_work_queue mechanism is made for this...
2034 		 */
2035 		if (!drbd_rs_begin_io(mdev, sector)) {
2036 			/* we have been interrupted,
2037 			 * probably connection lost! */
2038 			D_ASSERT(signal_pending(current));
2039 			goto out_free_e;
2040 		}
2041 		break;
2042 
2043 
2044 	default:
2045 		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2046 		    cmdname(h->command));
2047 		fault_type = DRBD_FAULT_MAX;
2048 	}
2049 
2050 	spin_lock_irq(&mdev->req_lock);
2051 	list_add(&e->w.list, &mdev->read_ee);
2052 	spin_unlock_irq(&mdev->req_lock);
2053 
2054 	inc_unacked(mdev);
2055 
2056 	drbd_generic_make_request(mdev, fault_type, e->private_bio);
2057 	maybe_kick_lo(mdev);
2058 
2059 	return TRUE;
2060 
2061 out_free_e:
2062 	kfree(di);
2063 	put_ldev(mdev);
2064 	drbd_free_ee(mdev, e);
2065 	return FALSE;
2066 }
2067 
2068 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2069 {
2070 	int self, peer, rv = -100;
2071 	unsigned long ch_self, ch_peer;
2072 
2073 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2074 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2075 
2076 	ch_peer = mdev->p_uuid[UI_SIZE];
2077 	ch_self = mdev->comm_bm_set;
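	/* ch_self / ch_peer: how many blocks each side considers changed.
	 * The peer's count is assumed to be transported in the UI_SIZE slot
	 * of its UUID packet; both feed the "zero changes" / "least changes"
	 * policies below. */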
2078 
2079 	switch (mdev->net_conf->after_sb_0p) {
2080 	case ASB_CONSENSUS:
2081 	case ASB_DISCARD_SECONDARY:
2082 	case ASB_CALL_HELPER:
2083 		dev_err(DEV, "Configuration error.\n");
2084 		break;
2085 	case ASB_DISCONNECT:
2086 		break;
2087 	case ASB_DISCARD_YOUNGER_PRI:
2088 		if (self == 0 && peer == 1) {
2089 			rv = -1;
2090 			break;
2091 		}
2092 		if (self == 1 && peer == 0) {
2093 			rv =  1;
2094 			break;
2095 		}
2096 		/* Else fall through to one of the other strategies... */
2097 	case ASB_DISCARD_OLDER_PRI:
2098 		if (self == 0 && peer == 1) {
2099 			rv = 1;
2100 			break;
2101 		}
2102 		if (self == 1 && peer == 0) {
2103 			rv = -1;
2104 			break;
2105 		}
2106 		/* Else fall through to one of the other strategies... */
2107 		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2108 		     "Using discard-least-changes instead\n");
2109 	case ASB_DISCARD_ZERO_CHG:
2110 		if (ch_peer == 0 && ch_self == 0) {
2111 			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2112 				? -1 : 1;
2113 			break;
2114 		} else {
2115 			if (ch_peer == 0) { rv =  1; break; }
2116 			if (ch_self == 0) { rv = -1; break; }
2117 		}
2118 		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2119 			break;
2120 	case ASB_DISCARD_LEAST_CHG:
2121 		if	(ch_self < ch_peer)
2122 			rv = -1;
2123 		else if (ch_self > ch_peer)
2124 			rv =  1;
2125 		else /* ( ch_self == ch_peer ) */
2126 		     /* Well, then use something else. */
2127 			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2128 				? -1 : 1;
2129 		break;
2130 	case ASB_DISCARD_LOCAL:
2131 		rv = -1;
2132 		break;
2133 	case ASB_DISCARD_REMOTE:
2134 		rv =  1;
2135 	}
2136 
2137 	return rv;
2138 }
2139 
2140 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2141 {
2142 	int self, peer, hg, rv = -100;
2143 
2144 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2145 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2146 
2147 	switch (mdev->net_conf->after_sb_1p) {
2148 	case ASB_DISCARD_YOUNGER_PRI:
2149 	case ASB_DISCARD_OLDER_PRI:
2150 	case ASB_DISCARD_LEAST_CHG:
2151 	case ASB_DISCARD_LOCAL:
2152 	case ASB_DISCARD_REMOTE:
2153 		dev_err(DEV, "Configuration error.\n");
2154 		break;
2155 	case ASB_DISCONNECT:
2156 		break;
2157 	case ASB_CONSENSUS:
2158 		hg = drbd_asb_recover_0p(mdev);
2159 		if (hg == -1 && mdev->state.role == R_SECONDARY)
2160 			rv = hg;
2161 		if (hg == 1  && mdev->state.role == R_PRIMARY)
2162 			rv = hg;
2163 		break;
2164 	case ASB_VIOLENTLY:
2165 		rv = drbd_asb_recover_0p(mdev);
2166 		break;
2167 	case ASB_DISCARD_SECONDARY:
2168 		return mdev->state.role == R_PRIMARY ? 1 : -1;
2169 	case ASB_CALL_HELPER:
2170 		hg = drbd_asb_recover_0p(mdev);
2171 		if (hg == -1 && mdev->state.role == R_PRIMARY) {
2172 			self = drbd_set_role(mdev, R_SECONDARY, 0);
2173 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2174 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
2175 			  * we do not need to wait for the after state change work either. */
2176 			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2177 			if (self != SS_SUCCESS) {
2178 				drbd_khelper(mdev, "pri-lost-after-sb");
2179 			} else {
2180 				dev_warn(DEV, "Successfully gave up primary role.\n");
2181 				rv = hg;
2182 			}
2183 		} else
2184 			rv = hg;
2185 	}
2186 
2187 	return rv;
2188 }
2189 
2190 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2191 {
2192 	int self, peer, hg, rv = -100;
2193 
2194 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2195 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2196 
2197 	switch (mdev->net_conf->after_sb_2p) {
2198 	case ASB_DISCARD_YOUNGER_PRI:
2199 	case ASB_DISCARD_OLDER_PRI:
2200 	case ASB_DISCARD_LEAST_CHG:
2201 	case ASB_DISCARD_LOCAL:
2202 	case ASB_DISCARD_REMOTE:
2203 	case ASB_CONSENSUS:
2204 	case ASB_DISCARD_SECONDARY:
2205 		dev_err(DEV, "Configuration error.\n");
2206 		break;
2207 	case ASB_VIOLENTLY:
2208 		rv = drbd_asb_recover_0p(mdev);
2209 		break;
2210 	case ASB_DISCONNECT:
2211 		break;
2212 	case ASB_CALL_HELPER:
2213 		hg = drbd_asb_recover_0p(mdev);
2214 		if (hg == -1) {
2215 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2216 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
2217 			  * we do not need to wait for the after state change work either. */
2218 			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2219 			if (self != SS_SUCCESS) {
2220 				drbd_khelper(mdev, "pri-lost-after-sb");
2221 			} else {
2222 				dev_warn(DEV, "Successfully gave up primary role.\n");
2223 				rv = hg;
2224 			}
2225 		} else
2226 			rv = hg;
2227 	}
2228 
2229 	return rv;
2230 }
2231 
2232 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2233 			   u64 bits, u64 flags)
2234 {
2235 	if (!uuid) {
2236 		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2237 		return;
2238 	}
2239 	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2240 	     text,
2241 	     (unsigned long long)uuid[UI_CURRENT],
2242 	     (unsigned long long)uuid[UI_BITMAP],
2243 	     (unsigned long long)uuid[UI_HISTORY_START],
2244 	     (unsigned long long)uuid[UI_HISTORY_END],
2245 	     (unsigned long long)bits,
2246 	     (unsigned long long)flags);
2247 }
2248 
2249 /*
2250   100	after split brain try auto recover
2251     2	C_SYNC_SOURCE set BitMap
2252     1	C_SYNC_SOURCE use BitMap
2253     0	no Sync
2254    -1	C_SYNC_TARGET use BitMap
2255    -2	C_SYNC_TARGET set BitMap
2256  -100	after split brain, disconnect
2257 -1000	unrelated data
2258  */
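/* Reading example for the table above: -2 means we become C_SYNC_TARGET and
 * set our whole bitmap (full resync from the peer), while +1 means we become
 * C_SYNC_SOURCE and resync only the blocks already marked in our bitmap. */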
2259 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2260 {
2261 	u64 self, peer;
2262 	int i, j;
2263 
2264 	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2265 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2266 
2267 	*rule_nr = 10;
2268 	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2269 		return 0;
2270 
2271 	*rule_nr = 20;
2272 	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2273 	     peer != UUID_JUST_CREATED)
2274 		return -2;
2275 
2276 	*rule_nr = 30;
2277 	if (self != UUID_JUST_CREATED &&
2278 	    (peer == UUID_JUST_CREATED || peer == (u64)0))
2279 		return 2;
2280 
2281 	if (self == peer) {
2282 		int rct, dc; /* roles at crash time */
2283 
2284 		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2285 
2286 			if (mdev->agreed_pro_version < 91)
2287 				return -1001;
2288 
2289 			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2290 			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2291 				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2292 				drbd_uuid_set_bm(mdev, 0UL);
2293 
2294 				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2295 					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2296 				*rule_nr = 34;
2297 			} else {
2298 				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2299 				*rule_nr = 36;
2300 			}
2301 
2302 			return 1;
2303 		}
2304 
2305 		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2306 
2307 			if (mdev->agreed_pro_version < 91)
2308 				return -1001;
2309 
2310 			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2311 			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2312 				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2313 
2314 				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2315 				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2316 				mdev->p_uuid[UI_BITMAP] = 0UL;
2317 
2318 				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2319 				*rule_nr = 35;
2320 			} else {
2321 				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2322 				*rule_nr = 37;
2323 			}
2324 
2325 			return -1;
2326 		}
2327 
2328 		/* Common power [off|failure] */
2329 		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2330 			(mdev->p_uuid[UI_FLAGS] & 2);
2331 		/* lowest bit is set when we were primary,
2332 		 * next bit (weight 2) is set when peer was primary */
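		/* e.g. rct == 3: both sides were Primary at crash time; the
		 * DISCARD_CONCURRENT bit (assumed to be set on exactly one of
		 * the two nodes during the handshake) then breaks the tie. */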
2333 		*rule_nr = 40;
2334 
2335 		switch (rct) {
2336 		case 0: /* !self_pri && !peer_pri */ return 0;
2337 		case 1: /*  self_pri && !peer_pri */ return 1;
2338 		case 2: /* !self_pri &&  peer_pri */ return -1;
2339 		case 3: /*  self_pri &&  peer_pri */
2340 			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2341 			return dc ? -1 : 1;
2342 		}
2343 	}
2344 
2345 	*rule_nr = 50;
2346 	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2347 	if (self == peer)
2348 		return -1;
2349 
2350 	*rule_nr = 51;
2351 	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2352 	if (self == peer) {
2353 		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2354 		peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2355 		if (self == peer) {
2356 			/* The last P_SYNC_UUID did not get through. Undo the modifications
2357 			   the peer made to its UUIDs when it last started a resync as sync source. */
2358 
2359 			if (mdev->agreed_pro_version < 91)
2360 				return -1001;
2361 
2362 			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2363 			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2364 			return -1;
2365 		}
2366 	}
2367 
2368 	*rule_nr = 60;
2369 	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2370 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2371 		peer = mdev->p_uuid[i] & ~((u64)1);
2372 		if (self == peer)
2373 			return -2;
2374 	}
2375 
2376 	*rule_nr = 70;
2377 	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2378 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2379 	if (self == peer)
2380 		return 1;
2381 
2382 	*rule_nr = 71;
2383 	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2384 	if (self == peer) {
2385 		self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2386 		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2387 		if (self == peer) {
2388 			/* The last P_SYNC_UUID did not get through. Undo the modifications
2389 			   we made to our UUIDs when we last started a resync as sync source. */
2390 
2391 			if (mdev->agreed_pro_version < 91)
2392 				return -1001;
2393 
2394 			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2395 			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2396 
2397 			dev_info(DEV, "Undid last start of resync:\n");
2398 
2399 			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2400 				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2401 
2402 			return 1;
2403 		}
2404 	}
2405 
2406 
2407 	*rule_nr = 80;
2408 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2409 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2410 		self = mdev->ldev->md.uuid[i] & ~((u64)1);
2411 		if (self == peer)
2412 			return 2;
2413 	}
2414 
2415 	*rule_nr = 90;
2416 	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2417 	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2418 	if (self == peer && self != ((u64)0))
2419 		return 100;
2420 
2421 	*rule_nr = 100;
2422 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2423 		self = mdev->ldev->md.uuid[i] & ~((u64)1);
2424 		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2425 			peer = mdev->p_uuid[j] & ~((u64)1);
2426 			if (self == peer)
2427 				return -100;
2428 		}
2429 	}
2430 
2431 	return -1000;
2432 }
2433 
2434 /* drbd_sync_handshake() returns the new conn state on success, or
2435    CONN_MASK (-1) on failure.
2436  */
2437 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2438 					   enum drbd_disk_state peer_disk) __must_hold(local)
2439 {
2440 	int hg, rule_nr;
2441 	enum drbd_conns rv = C_MASK;
2442 	enum drbd_disk_state mydisk;
2443 
2444 	mydisk = mdev->state.disk;
2445 	if (mydisk == D_NEGOTIATING)
2446 		mydisk = mdev->new_state_tmp.disk;
2447 
2448 	dev_info(DEV, "drbd_sync_handshake:\n");
2449 	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2450 	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2451 		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2452 
2453 	hg = drbd_uuid_compare(mdev, &rule_nr);
2454 
2455 	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2456 
2457 	if (hg == -1000) {
2458 		dev_alert(DEV, "Unrelated data, aborting!\n");
2459 		return C_MASK;
2460 	}
2461 	if (hg == -1001) {
2462 		dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
2463 		return C_MASK;
2464 	}
2465 
2466 	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2467 	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2468 		int f = (hg == -100) || abs(hg) == 2;
2469 		hg = mydisk > D_INCONSISTENT ? 1 : -1;
2470 		if (f)
2471 			hg = hg*2;
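		/* doubling hg maps "use bitmap" (+/-1) onto "set bitmap" (+/-2),
		 * i.e. a full sync; see the table above drbd_uuid_compare(). */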
2472 		dev_info(DEV, "Becoming sync %s due to disk states.\n",
2473 		     hg > 0 ? "source" : "target");
2474 	}
2475 
2476 	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2477 		int pcount = (mdev->state.role == R_PRIMARY)
2478 			   + (peer_role == R_PRIMARY);
2479 		int forced = (hg == -100);
2480 
2481 		switch (pcount) {
2482 		case 0:
2483 			hg = drbd_asb_recover_0p(mdev);
2484 			break;
2485 		case 1:
2486 			hg = drbd_asb_recover_1p(mdev);
2487 			break;
2488 		case 2:
2489 			hg = drbd_asb_recover_2p(mdev);
2490 			break;
2491 		}
2492 		if (abs(hg) < 100) {
2493 			dev_warn(DEV, "Split-Brain detected, %d primaries, "
2494 			     "automatically solved. Sync from %s node\n",
2495 			     pcount, (hg < 0) ? "peer" : "this");
2496 			if (forced) {
2497 				dev_warn(DEV, "Doing a full sync, since"
2498 				     " UUIDs were ambiguous.\n");
2499 				hg = hg*2;
2500 			}
2501 		}
2502 	}
2503 
2504 	if (hg == -100) {
2505 		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2506 			hg = -1;
2507 		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2508 			hg = 1;
2509 
2510 		if (abs(hg) < 100)
2511 			dev_warn(DEV, "Split-Brain detected, manually solved. "
2512 			     "Sync from %s node\n",
2513 			     (hg < 0) ? "peer" : "this");
2514 	}
2515 
2516 	if (hg == -100) {
2517 		/* FIXME this log message is not correct if we end up here
2518 		 * after an attempted attach on a diskless node.
2519 		 * We just refuse to attach -- well, we drop the "connection"
2520 		 * to that disk, in a way... */
2521 		dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
2522 		drbd_khelper(mdev, "split-brain");
2523 		return C_MASK;
2524 	}
2525 
2526 	if (hg > 0 && mydisk <= D_INCONSISTENT) {
2527 		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2528 		return C_MASK;
2529 	}
2530 
2531 	if (hg < 0 && /* by intention we do not use mydisk here. */
2532 	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2533 		switch (mdev->net_conf->rr_conflict) {
2534 		case ASB_CALL_HELPER:
2535 			drbd_khelper(mdev, "pri-lost");
2536 			/* fall through */
2537 		case ASB_DISCONNECT:
2538 			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2539 			return C_MASK;
2540 		case ASB_VIOLENTLY:
2541 			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2542 			     " assumption\n");
2543 		}
2544 	}
2545 
2546 	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2547 		if (hg == 0)
2548 			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2549 		else
2550 			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2551 				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2552 				 abs(hg) >= 2 ? "full" : "bit-map based");
2553 		return C_MASK;
2554 	}
2555 
2556 	if (abs(hg) >= 2) {
2557 		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2558 		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2559 			return C_MASK;
2560 	}
2561 
2562 	if (hg > 0) { /* become sync source. */
2563 		rv = C_WF_BITMAP_S;
2564 	} else if (hg < 0) { /* become sync target */
2565 		rv = C_WF_BITMAP_T;
2566 	} else {
2567 		rv = C_CONNECTED;
2568 		if (drbd_bm_total_weight(mdev)) {
2569 			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2570 			     drbd_bm_total_weight(mdev));
2571 		}
2572 	}
2573 
2574 	return rv;
2575 }
2576 
2577 /* returns 1 if invalid */
2578 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2579 {
2580 	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2581 	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2582 	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2583 		return 0;
2584 
2585 	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2586 	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2587 	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2588 		return 1;
2589 
2590 	/* everything else is valid if they are equal on both sides. */
2591 	if (peer == self)
2592 		return 0;
2593 
2594 	/* everything else is invalid. */
2595 	return 1;
2596 }
2597 
2598 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2599 {
2600 	struct p_protocol *p = (struct p_protocol *)h;
2601 	int header_size, data_size;
2602 	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2603 	int p_want_lose, p_two_primaries, cf;
2604 	char p_integrity_alg[SHARED_SECRET_MAX] = "";
2605 
2606 	header_size = sizeof(*p) - sizeof(*h);
2607 	data_size   = h->length  - header_size;
2608 
2609 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
2610 		return FALSE;
2611 
2612 	p_proto		= be32_to_cpu(p->protocol);
2613 	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
2614 	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
2615 	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
2616 	p_two_primaries = be32_to_cpu(p->two_primaries);
2617 	cf		= be32_to_cpu(p->conn_flags);
2618 	p_want_lose = cf & CF_WANT_LOSE;
2619 
2620 	clear_bit(CONN_DRY_RUN, &mdev->flags);
2621 
2622 	if (cf & CF_DRY_RUN)
2623 		set_bit(CONN_DRY_RUN, &mdev->flags);
2624 
2625 	if (p_proto != mdev->net_conf->wire_protocol) {
2626 		dev_err(DEV, "incompatible communication protocols\n");
2627 		goto disconnect;
2628 	}
2629 
2630 	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2631 		dev_err(DEV, "incompatible after-sb-0pri settings\n");
2632 		goto disconnect;
2633 	}
2634 
2635 	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2636 		dev_err(DEV, "incompatible after-sb-1pri settings\n");
2637 		goto disconnect;
2638 	}
2639 
2640 	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2641 		dev_err(DEV, "incompatible after-sb-2pri settings\n");
2642 		goto disconnect;
2643 	}
2644 
2645 	if (p_want_lose && mdev->net_conf->want_lose) {
2646 		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2647 		goto disconnect;
2648 	}
2649 
2650 	if (p_two_primaries != mdev->net_conf->two_primaries) {
2651 		dev_err(DEV, "incompatible setting of the two-primaries options\n");
2652 		goto disconnect;
2653 	}
2654 
2655 	if (mdev->agreed_pro_version >= 87) {
2656 		unsigned char *my_alg = mdev->net_conf->integrity_alg;
2657 
2658 		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2659 			return FALSE;
2660 
2661 		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2662 		if (strcmp(p_integrity_alg, my_alg)) {
2663 			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2664 			goto disconnect;
2665 		}
2666 		dev_info(DEV, "data-integrity-alg: %s\n",
2667 		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2668 	}
2669 
2670 	return TRUE;
2671 
2672 disconnect:
2673 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2674 	return FALSE;
2675 }
2676 
2677 /* helper function
2678  * input: alg name, feature name
2679  * return: NULL (alg name was "")
2680  *         ERR_PTR(error) if something goes wrong
2681  *         or the crypto hash ptr, if it worked out ok. */
2682 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2683 		const char *alg, const char *name)
2684 {
2685 	struct crypto_hash *tfm;
2686 
2687 	if (!alg[0])
2688 		return NULL;
2689 
2690 	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2691 	if (IS_ERR(tfm)) {
2692 		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2693 			alg, name, PTR_ERR(tfm));
2694 		return tfm;
2695 	}
2696 	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2697 		crypto_free_hash(tfm);
2698 		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2699 		return ERR_PTR(-EINVAL);
2700 	}
2701 	return tfm;
2702 }
2703 
2704 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2705 {
2706 	int ok = TRUE;
2707 	struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
2708 	unsigned int header_size, data_size, exp_max_sz;
2709 	struct crypto_hash *verify_tfm = NULL;
2710 	struct crypto_hash *csums_tfm = NULL;
2711 	const int apv = mdev->agreed_pro_version;
2712 
2713 	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
2714 		    : apv == 88 ? sizeof(struct p_rs_param)
2715 					+ SHARED_SECRET_MAX
2716 		    : /* 89 */    sizeof(struct p_rs_param_89);
2717 
2718 	if (h->length > exp_max_sz) {
2719 		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2720 		    h->length, exp_max_sz);
2721 		return FALSE;
2722 	}
2723 
2724 	if (apv <= 88) {
2725 		header_size = sizeof(struct p_rs_param) - sizeof(*h);
2726 		data_size   = h->length  - header_size;
2727 	} else /* apv >= 89 */ {
2728 		header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2729 		data_size   = h->length  - header_size;
2730 		D_ASSERT(data_size == 0);
2731 	}
2732 
2733 	/* initialize verify_alg and csums_alg */
2734 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2735 
2736 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
2737 		return FALSE;
2738 
2739 	mdev->sync_conf.rate	  = be32_to_cpu(p->rate);
2740 
2741 	if (apv >= 88) {
2742 		if (apv == 88) {
2743 			if (data_size > SHARED_SECRET_MAX) {
2744 				dev_err(DEV, "verify-alg too long, "
2745 				    "peer wants %u, accepting only %u bytes\n",
2746 						data_size, SHARED_SECRET_MAX);
2747 				return FALSE;
2748 			}
2749 
2750 			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2751 				return FALSE;
2752 
2753 			/* we expect NUL terminated string */
2754 			/* but just in case someone tries to be evil */
2755 			D_ASSERT(p->verify_alg[data_size-1] == 0);
2756 			p->verify_alg[data_size-1] = 0;
2757 
2758 		} else /* apv >= 89 */ {
2759 			/* we still expect NUL terminated strings */
2760 			/* but just in case someone tries to be evil */
2761 			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2762 			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2763 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2764 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2765 		}
2766 
2767 		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2768 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2769 				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2770 				    mdev->sync_conf.verify_alg, p->verify_alg);
2771 				goto disconnect;
2772 			}
2773 			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2774 					p->verify_alg, "verify-alg");
2775 			if (IS_ERR(verify_tfm)) {
2776 				verify_tfm = NULL;
2777 				goto disconnect;
2778 			}
2779 		}
2780 
2781 		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2782 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2783 				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2784 				    mdev->sync_conf.csums_alg, p->csums_alg);
2785 				goto disconnect;
2786 			}
2787 			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2788 					p->csums_alg, "csums-alg");
2789 			if (IS_ERR(csums_tfm)) {
2790 				csums_tfm = NULL;
2791 				goto disconnect;
2792 			}
2793 		}
2794 
2795 
2796 		spin_lock(&mdev->peer_seq_lock);
2797 		/* lock against drbd_nl_syncer_conf() */
2798 		if (verify_tfm) {
2799 			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2800 			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2801 			crypto_free_hash(mdev->verify_tfm);
2802 			mdev->verify_tfm = verify_tfm;
2803 			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2804 		}
2805 		if (csums_tfm) {
2806 			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2807 			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2808 			crypto_free_hash(mdev->csums_tfm);
2809 			mdev->csums_tfm = csums_tfm;
2810 			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2811 		}
2812 		spin_unlock(&mdev->peer_seq_lock);
2813 	}
2814 
2815 	return ok;
2816 disconnect:
2817 	/* just for completeness: actually not needed,
2818 	 * as this is not reached if csums_tfm was ok. */
2819 	crypto_free_hash(csums_tfm);
2820 	/* but free the verify_tfm again, if csums_tfm did not work out */
2821 	crypto_free_hash(verify_tfm);
2822 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2823 	return FALSE;
2824 }
2825 
2826 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2827 {
2828 	/* sorry, we currently have no working implementation
2829 	 * of distributed TCQ */
2830 }
2831 
2832 /* warn if the arguments differ by more than 12.5% */
2833 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2834 	const char *s, sector_t a, sector_t b)
2835 {
2836 	sector_t d;
2837 	if (a == 0 || b == 0)
2838 		return;
2839 	d = (a > b) ? (a - b) : (b - a);
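	/* x>>3 is x/8, i.e. the 12.5% threshold from the comment above */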
2840 	if (d > (a>>3) || d > (b>>3))
2841 		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2842 		     (unsigned long long)a, (unsigned long long)b);
2843 }
2844 
2845 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2846 {
2847 	struct p_sizes *p = (struct p_sizes *)h;
2848 	enum determine_dev_size dd = unchanged;
2849 	unsigned int max_seg_s;
2850 	sector_t p_size, p_usize, my_usize;
2851 	int ldsc = 0; /* local disk size changed */
2852 	enum drbd_conns nconn;
2853 
2854 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2855 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
2856 		return FALSE;
2857 
2858 	p_size = be64_to_cpu(p->d_size);
2859 	p_usize = be64_to_cpu(p->u_size);
2860 
2861 	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2862 		dev_err(DEV, "some backing storage is needed\n");
2863 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2864 		return FALSE;
2865 	}
2866 
2867 	/* just store the peer's disk size for now.
2868 	 * we still need to figure out whether we accept that. */
2869 	mdev->p_size = p_size;
2870 
2871 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
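	/* e.g. min_not_zero(0, 5) == 5, min_not_zero(3, 0) == 3,
	 * min_not_zero(3, 5) == 3: a zero means "no explicit size set",
	 * so it never wins over a real value. */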
2872 	if (get_ldev(mdev)) {
2873 		warn_if_differ_considerably(mdev, "lower level device sizes",
2874 			   p_size, drbd_get_max_capacity(mdev->ldev));
2875 		warn_if_differ_considerably(mdev, "user requested size",
2876 					    p_usize, mdev->ldev->dc.disk_size);
2877 
2878 		/* if this is the first connect, or an otherwise expected
2879 		 * param exchange, choose the minimum */
2880 		if (mdev->state.conn == C_WF_REPORT_PARAMS)
2881 			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2882 					     p_usize);
2883 
2884 		my_usize = mdev->ldev->dc.disk_size;
2885 
2886 		if (mdev->ldev->dc.disk_size != p_usize) {
2887 			mdev->ldev->dc.disk_size = p_usize;
2888 			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2889 			     (unsigned long)mdev->ldev->dc.disk_size);
2890 		}
2891 
2892 		/* Never shrink a device with usable data during connect.
2893 		   But allow online shrinking if we are connected. */
2894 		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2895 		   drbd_get_capacity(mdev->this_bdev) &&
2896 		   mdev->state.disk >= D_OUTDATED &&
2897 		   mdev->state.conn < C_CONNECTED) {
2898 			dev_err(DEV, "The peer's disk size is too small!\n");
2899 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2900 			mdev->ldev->dc.disk_size = my_usize;
2901 			put_ldev(mdev);
2902 			return FALSE;
2903 		}
2904 		put_ldev(mdev);
2905 	}
2906 #undef min_not_zero
2907 
2908 	if (get_ldev(mdev)) {
2909 		dd = drbd_determin_dev_size(mdev, 0);
2910 		put_ldev(mdev);
2911 		if (dd == dev_size_error)
2912 			return FALSE;
2913 		drbd_md_sync(mdev);
2914 	} else {
2915 		/* I am diskless, need to accept the peer's size. */
2916 		drbd_set_my_capacity(mdev, p_size);
2917 	}
2918 
2919 	if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
2920 		nconn = drbd_sync_handshake(mdev,
2921 				mdev->state.peer, mdev->state.pdsk);
2922 		put_ldev(mdev);
2923 
2924 		if (nconn == C_MASK) {
2925 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2926 			return FALSE;
2927 		}
2928 
2929 		if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
2930 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2931 			return FALSE;
2932 		}
2933 	}
2934 
2935 	if (get_ldev(mdev)) {
2936 		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2937 			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2938 			ldsc = 1;
2939 		}
2940 
2941 		max_seg_s = be32_to_cpu(p->max_segment_size);
2942 		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
2943 			drbd_setup_queue_param(mdev, max_seg_s);
2944 
2945 		drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
2946 		put_ldev(mdev);
2947 	}
2948 
2949 	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
2950 		if (be64_to_cpu(p->c_size) !=
2951 		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
2952 			/* we have different sizes, probably peer
2953 			 * needs to know my new size... */
2954 			drbd_send_sizes(mdev, 0);
2955 		}
2956 		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
2957 		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
2958 			if (mdev->state.pdsk >= D_INCONSISTENT &&
2959 			    mdev->state.disk >= D_INCONSISTENT)
2960 				resync_after_online_grow(mdev);
2961 			else
2962 				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
2963 		}
2964 	}
2965 
2966 	return TRUE;
2967 }
2968 
2969 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
2970 {
2971 	struct p_uuids *p = (struct p_uuids *)h;
2972 	u64 *p_uuid;
2973 	int i;
2974 
2975 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2976 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
2977 		return FALSE;
2978 
2979 	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid)
		return FALSE;
2980 
2981 	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
2982 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
2983 
2984 	kfree(mdev->p_uuid);
2985 	mdev->p_uuid = p_uuid;
2986 
2987 	if (mdev->state.conn < C_CONNECTED &&
2988 	    mdev->state.disk < D_INCONSISTENT &&
2989 	    mdev->state.role == R_PRIMARY &&
2990 	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
2991 		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
2992 		    (unsigned long long)mdev->ed_uuid);
2993 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2994 		return FALSE;
2995 	}
2996 
2997 	if (get_ldev(mdev)) {
2998 		int skip_initial_sync =
2999 			mdev->state.conn == C_CONNECTED &&
3000 			mdev->agreed_pro_version >= 90 &&
3001 			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3002 			(p_uuid[UI_FLAGS] & 8);
3003 		if (skip_initial_sync) {
3004 			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3005 			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3006 					"clear_n_write from receive_uuids");
3007 			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3008 			_drbd_uuid_set(mdev, UI_BITMAP, 0);
3009 			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3010 					CS_VERBOSE, NULL);
3011 			drbd_md_sync(mdev);
3012 		}
3013 		put_ldev(mdev);
3014 	}
3015 
3016 	/* Before we test for the disk state, we should wait until a possibly
3017 	   ongoing cluster-wide state change is finished. That is important if
3018 	   we are primary and are detaching from our disk. We need to see the
3019 	   new disk state... */
3020 	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3021 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3022 		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3023 
3024 	return TRUE;
3025 }
3026 
3027 /**
3028  * convert_state() - Converts the peer's view of the cluster state to our point of view
3029  * @ps:		The state as seen by the peer.
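 *
 * Roughly: the peer's (role, peer, disk, pdsk) becomes our (peer, role,
 * pdsk, disk), and asymmetric connection states are mirrored via c_tab,
 * e.g. C_STARTING_SYNC_S on the peer means C_STARTING_SYNC_T for us.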
3030  */
3031 static union drbd_state convert_state(union drbd_state ps)
3032 {
3033 	union drbd_state ms;
3034 
3035 	static enum drbd_conns c_tab[] = {
3036 		[C_CONNECTED] = C_CONNECTED,
3037 
3038 		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3039 		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3040 		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3041 		[C_VERIFY_S]       = C_VERIFY_T,
3042 		[C_MASK]   = C_MASK,
3043 	};
3044 
3045 	ms.i = ps.i;
3046 
3047 	ms.conn = c_tab[ps.conn];
3048 	ms.peer = ps.role;
3049 	ms.role = ps.peer;
3050 	ms.pdsk = ps.disk;
3051 	ms.disk = ps.pdsk;
3052 	ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3053 
3054 	return ms;
3055 }
3056 
3057 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3058 {
3059 	struct p_req_state *p = (struct p_req_state *)h;
3060 	union drbd_state mask, val;
3061 	int rv;
3062 
3063 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3064 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
3065 		return FALSE;
3066 
3067 	mask.i = be32_to_cpu(p->mask);
3068 	val.i = be32_to_cpu(p->val);
3069 
3070 	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3071 	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3072 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3073 		return TRUE;
3074 	}
3075 
3076 	mask = convert_state(mask);
3077 	val = convert_state(val);
3078 
3079 	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3080 
3081 	drbd_send_sr_reply(mdev, rv);
3082 	drbd_md_sync(mdev);
3083 
3084 	return TRUE;
3085 }
3086 
3087 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3088 {
3089 	struct p_state *p = (struct p_state *)h;
3090 	enum drbd_conns nconn, oconn;
3091 	union drbd_state ns, peer_state;
3092 	enum drbd_disk_state real_peer_disk;
3093 	int rv;
3094 
3095 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3096 		return FALSE;
3097 
3098 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
3099 		return FALSE;
3100 
3101 	peer_state.i = be32_to_cpu(p->state);
3102 
3103 	real_peer_disk = peer_state.disk;
3104 	if (peer_state.disk == D_NEGOTIATING) {
3105 		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3106 		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3107 	}
3108 
3109 	spin_lock_irq(&mdev->req_lock);
3110  retry:
3111 	oconn = nconn = mdev->state.conn;
3112 	spin_unlock_irq(&mdev->req_lock);
3113 
3114 	if (nconn == C_WF_REPORT_PARAMS)
3115 		nconn = C_CONNECTED;
3116 
3117 	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3118 	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
3119 		int cr; /* consider resync */
3120 
3121 		/* if we established a new connection */
3122 		cr  = (oconn < C_CONNECTED);
3123 		/* if we had an established connection
3124 		 * and one of the nodes newly attaches a disk */
3125 		cr |= (oconn == C_CONNECTED &&
3126 		       (peer_state.disk == D_NEGOTIATING ||
3127 			mdev->state.disk == D_NEGOTIATING));
3128 		/* if we have both been inconsistent, and the peer has been
3129 		 * forced to be UpToDate with --overwrite-data */
3130 		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3131 		/* if we had been plain connected, and the admin requested to
3132 		 * start a sync by "invalidate" or "invalidate-remote" */
3133 		cr |= (oconn == C_CONNECTED &&
3134 				(peer_state.conn >= C_STARTING_SYNC_S &&
3135 				 peer_state.conn <= C_WF_BITMAP_T));
3136 
3137 		if (cr)
3138 			nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3139 
3140 		put_ldev(mdev);
3141 		if (nconn == C_MASK) {
3142 			nconn = C_CONNECTED;
3143 			if (mdev->state.disk == D_NEGOTIATING) {
3144 				drbd_force_state(mdev, NS(disk, D_DISKLESS));
3145 			} else if (peer_state.disk == D_NEGOTIATING) {
3146 				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3147 				peer_state.disk = D_DISKLESS;
3148 				real_peer_disk = D_DISKLESS;
3149 			} else {
3150 				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3151 					return FALSE;
3152 				D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3153 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3154 				return FALSE;
3155 			}
3156 		}
3157 	}
3158 
3159 	spin_lock_irq(&mdev->req_lock);
3160 	if (mdev->state.conn != oconn)
3161 		goto retry;
3162 	clear_bit(CONSIDER_RESYNC, &mdev->flags);
3163 	ns.i = mdev->state.i;
3164 	ns.conn = nconn;
3165 	ns.peer = peer_state.role;
3166 	ns.pdsk = real_peer_disk;
3167 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3168 	if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3169 		ns.disk = mdev->new_state_tmp.disk;
3170 
3171 	rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
3172 	ns = mdev->state;
3173 	spin_unlock_irq(&mdev->req_lock);
3174 
3175 	if (rv < SS_SUCCESS) {
3176 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3177 		return FALSE;
3178 	}
3179 
3180 	if (oconn > C_WF_REPORT_PARAMS) {
3181 		if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3182 		    peer_state.disk != D_NEGOTIATING ) {
3183 			/* we want resync, peer has not yet decided to sync... */
3184 			/* Nowadays this is only used when forcing a node into the primary role
3185 			   and setting its disk to UpToDate at the same time. */
3186 			drbd_send_uuids(mdev);
3187 			drbd_send_state(mdev);
3188 		}
3189 	}
3190 
3191 	mdev->net_conf->want_lose = 0;
3192 
3193 	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3194 
3195 	return TRUE;
3196 }
3197 
3198 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3199 {
3200 	struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3201 
3202 	wait_event(mdev->misc_wait,
3203 		   mdev->state.conn == C_WF_SYNC_UUID ||
3204 		   mdev->state.conn < C_CONNECTED ||
3205 		   mdev->state.disk < D_NEGOTIATING);
3206 
3207 	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3208 
3209 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3210 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
3211 		return FALSE;
3212 
3213 	/* Here the _drbd_uuid_ functions are right, current should
3214 	   _not_ be rotated into the history */
3215 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3216 		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3217 		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3218 
3219 		drbd_start_resync(mdev, C_SYNC_TARGET);
3220 
3221 		put_ldev(mdev);
3222 	} else
3223 		dev_err(DEV, "Ignoring SyncUUID packet!\n");
3224 
3225 	return TRUE;
3226 }
3227 
3228 enum receive_bitmap_ret { OK, DONE, FAILED };
3229 
3230 static enum receive_bitmap_ret
3231 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3232 	unsigned long *buffer, struct bm_xfer_ctx *c)
3233 {
3234 	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3235 	unsigned want = num_words * sizeof(long);
3236 
3237 	if (want != h->length) {
3238 		dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3239 		return FAILED;
3240 	}
3241 	if (want == 0)
3242 		return DONE;
3243 	if (drbd_recv(mdev, buffer, want) != want)
3244 		return FAILED;
3245 
3246 	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3247 
3248 	c->word_offset += num_words;
3249 	c->bit_offset = c->word_offset * BITS_PER_LONG;
3250 	if (c->bit_offset > c->bm_bits)
3251 		c->bit_offset = c->bm_bits;
3252 
3253 	return OK;
3254 }
3255 
3256 static enum receive_bitmap_ret
3257 recv_bm_rle_bits(struct drbd_conf *mdev,
3258 		struct p_compressed_bm *p,
3259 		struct bm_xfer_ctx *c)
3260 {
3261 	struct bitstream bs;
3262 	u64 look_ahead;
3263 	u64 rl;
3264 	u64 tmp;
3265 	unsigned long s = c->bit_offset;
3266 	unsigned long e;
3267 	int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3268 	int toggle = DCBP_get_start(p);
3269 	int have;
3270 	int bits;
3271 
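	/* The payload is assumed to be a bit stream of variable-length-encoded
	 * run lengths; runs alternate between clear and set bits, and
	 * DCBP_get_start() says whether the first run describes set bits.
	 * Only the runs of set bits are applied to the bitmap below. */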
3272 	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3273 
3274 	bits = bitstream_get_bits(&bs, &look_ahead, 64);
3275 	if (bits < 0)
3276 		return FAILED;
3277 
3278 	for (have = bits; have > 0; s += rl, toggle = !toggle) {
3279 		bits = vli_decode_bits(&rl, look_ahead);
3280 		if (bits <= 0)
3281 			return FAILED;
3282 
3283 		if (toggle) {
3284 			e = s + rl -1;
3285 			if (e >= c->bm_bits) {
3286 				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3287 				return FAILED;
3288 			}
3289 			_drbd_bm_set_bits(mdev, s, e);
3290 		}
3291 
3292 		if (have < bits) {
3293 			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3294 				have, bits, look_ahead,
3295 				(unsigned int)(bs.cur.b - p->code),
3296 				(unsigned int)bs.buf_len);
3297 			return FAILED;
3298 		}
3299 		look_ahead >>= bits;
3300 		have -= bits;
3301 
3302 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3303 		if (bits < 0)
3304 			return FAILED;
3305 		look_ahead |= tmp << have;
3306 		have += bits;
3307 	}
3308 
3309 	c->bit_offset = s;
3310 	bm_xfer_ctx_bit_to_word_offset(c);
3311 
3312 	return (s == c->bm_bits) ? DONE : OK;
3313 }
3314 
3315 static enum receive_bitmap_ret
3316 decode_bitmap_c(struct drbd_conf *mdev,
3317 		struct p_compressed_bm *p,
3318 		struct bm_xfer_ctx *c)
3319 {
3320 	if (DCBP_get_code(p) == RLE_VLI_Bits)
3321 		return recv_bm_rle_bits(mdev, p, c);
3322 
3323 	/* other variants had been implemented for evaluation,
3324 	 * but have been dropped as this one turned out to be "best"
3325 	 * during all our tests. */
3326 
3327 	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3328 	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3329 	return FAILED;
3330 }
3331 
3332 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3333 		const char *direction, struct bm_xfer_ctx *c)
3334 {
3335 	/* what would it take to transfer it "plaintext" */
3336 	unsigned plain = sizeof(struct p_header) *
3337 		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3338 		+ c->bm_words * sizeof(long);
3339 	unsigned total = c->bytes[0] + c->bytes[1];
3340 	unsigned r;
3341 
3342 	/* total can not be zero. but just in case: */
3343 	if (total == 0)
3344 		return;
3345 
3346 	/* don't report if not compressed */
3347 	if (total >= plain)
3348 		return;
3349 
3350 	/* total < plain. check for overflow, still */
3351 	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3352 		                    : (1000 * total / plain);
3353 
3354 	if (r > 1000)
3355 		r = 1000;
3356 
3357 	r = 1000 - r;
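	/* worked example: plain = 1200, total = 300
	 * => r = 1000*300/1200 = 250, then r = 1000 - 250 = 750,
	 * reported below as "compression: 75.0%" */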
3358 	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3359 	     "total %u; compression: %u.%u%%\n",
3360 			direction,
3361 			c->bytes[1], c->packets[1],
3362 			c->bytes[0], c->packets[0],
3363 			total, r/10, r % 10);
3364 }
3365 
3366 /* Since we are processing the bitfield from lower addresses to higher,
3367    it does not matter whether we process it in 32 bit or 64 bit
3368    chunks, as long as it is little endian. (Understand it as a byte stream,
3369    beginning with the lowest byte...) If we used big endian,
3370    we would need to process it from the highest address to the lowest
3371    in order to be agnostic to the 32 vs. 64 bit issue.
3372 
3373    returns 0 on failure, 1 if we successfully received it. */
3374 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3375 {
3376 	struct bm_xfer_ctx c;
3377 	void *buffer;
3378 	enum receive_bitmap_ret ret;
3379 	int ok = FALSE;
3380 
3381 	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3382 
3383 	drbd_bm_lock(mdev, "receive bitmap");
3384 
3385 	/* maybe we should use some per thread scratch page,
3386 	 * and allocate that during initial device creation? */
3387 	buffer	 = (unsigned long *) __get_free_page(GFP_NOIO);
3388 	if (!buffer) {
3389 		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3390 		goto out;
3391 	}
3392 
3393 	c = (struct bm_xfer_ctx) {
3394 		.bm_bits = drbd_bm_bits(mdev),
3395 		.bm_words = drbd_bm_words(mdev),
3396 	};
3397 
3398 	do {
3399 		if (h->command == P_BITMAP) {
3400 			ret = receive_bitmap_plain(mdev, h, buffer, &c);
3401 		} else if (h->command == P_COMPRESSED_BITMAP) {
3402 			/* MAYBE: sanity check that we speak proto >= 90,
3403 			 * and the feature is enabled! */
3404 			struct p_compressed_bm *p;
3405 
3406 			if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3407 				dev_err(DEV, "ReportCBitmap packet too large\n");
3408 				goto out;
3409 			}
3410 			/* use the page buff */
3411 			p = buffer;
3412 			memcpy(p, h, sizeof(*h));
3413 			if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3414 				goto out;
3415 			if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3416 				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3417 				goto out;
3418 			}
3419 			ret = decode_bitmap_c(mdev, p, &c);
3420 		} else {
3421 			dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
3422 			goto out;
3423 		}
3424 
3425 		c.packets[h->command == P_BITMAP]++;
3426 		c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3427 
3428 		if (ret != OK)
3429 			break;
3430 
3431 		if (!drbd_recv_header(mdev, h))
3432 			goto out;
3433 	} while (ret == OK);
3434 	if (ret == FAILED)
3435 		goto out;
3436 
3437 	INFO_bm_xfer_stats(mdev, "receive", &c);
3438 
3439 	if (mdev->state.conn == C_WF_BITMAP_T) {
3440 		ok = !drbd_send_bitmap(mdev);
3441 		if (!ok)
3442 			goto out;
3443 		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3444 		ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3445 		D_ASSERT(ok == SS_SUCCESS);
3446 	} else if (mdev->state.conn != C_WF_BITMAP_S) {
3447 		/* admin may have requested C_DISCONNECTING,
3448 		 * other threads may have noticed network errors */
3449 		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3450 		    drbd_conn_str(mdev->state.conn));
3451 	}
3452 
3453 	ok = TRUE;
3454  out:
3455 	drbd_bm_unlock(mdev);
3456 	if (ok && mdev->state.conn == C_WF_BITMAP_S)
3457 		drbd_start_resync(mdev, C_SYNC_SOURCE);
3458 	free_page((unsigned long) buffer);
3459 	return ok;
3460 }
3461 
3462 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3463 {
3464 	/* TODO zero copy sink :) */
3465 	static char sink[128];
3466 	int size, want, r;
3467 
3468 	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3469 	     h->command, h->length);
3470 
3471 	size = h->length;
3472 	while (size > 0) {
3473 		want = min_t(int, size, sizeof(sink));
3474 		r = drbd_recv(mdev, sink, want);
3475 		ERR_IF(r <= 0) break;
3476 		size -= r;
3477 	}
3478 	return size == 0;
3479 }
3480 
3481 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3482 {
3483 	if (mdev->state.disk >= D_INCONSISTENT)
3484 		drbd_kick_lo(mdev);
3485 
3486 	/* Make sure we've acked all the TCP data associated
3487 	 * with the data requests being unplugged */
3488 	drbd_tcp_quickack(mdev->data.socket);
3489 
3490 	return TRUE;
3491 }
3492 
3493 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3494 
3495 static drbd_cmd_handler_f drbd_default_handler[] = {
3496 	[P_DATA]	    = receive_Data,
3497 	[P_DATA_REPLY]	    = receive_DataReply,
3498 	[P_RS_DATA_REPLY]   = receive_RSDataReply,
3499 	[P_BARRIER]	    = receive_Barrier,
3500 	[P_BITMAP]	    = receive_bitmap,
3501 	[P_COMPRESSED_BITMAP]    = receive_bitmap,
3502 	[P_UNPLUG_REMOTE]   = receive_UnplugRemote,
3503 	[P_DATA_REQUEST]    = receive_DataRequest,
3504 	[P_RS_DATA_REQUEST] = receive_DataRequest,
3505 	[P_SYNC_PARAM]	    = receive_SyncParam,
3506 	[P_SYNC_PARAM89]	   = receive_SyncParam,
3507 	[P_PROTOCOL]        = receive_protocol,
3508 	[P_UUIDS]	    = receive_uuids,
3509 	[P_SIZES]	    = receive_sizes,
3510 	[P_STATE]	    = receive_state,
3511 	[P_STATE_CHG_REQ]   = receive_req_state,
3512 	[P_SYNC_UUID]       = receive_sync_uuid,
3513 	[P_OV_REQUEST]      = receive_DataRequest,
3514 	[P_OV_REPLY]        = receive_DataRequest,
3515 	[P_CSUM_RS_REQUEST]    = receive_DataRequest,
3516 	/* anything missing from this table is in
3517 	 * the asender_tbl, see get_asender_cmd */
3518 	[P_MAX_CMD]	    = NULL,
3519 };
3520 
3521 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3522 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
3523 
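/* Main loop of the receiver thread: read one packet header at a time,
 * dispatch it to the matching handler from the tables above, and force
 * the connection into C_PROTOCOL_ERROR on any receive or handler failure. */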
3524 static void drbdd(struct drbd_conf *mdev)
3525 {
3526 	drbd_cmd_handler_f handler;
3527 	struct p_header *header = &mdev->data.rbuf.header;
3528 
3529 	while (get_t_state(&mdev->receiver) == Running) {
3530 		drbd_thread_current_set_cpu(mdev);
3531 		if (!drbd_recv_header(mdev, header)) {
3532 			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3533 			break;
3534 		}
3535 
3536 		if (header->command < P_MAX_CMD)
3537 			handler = drbd_cmd_handler[header->command];
3538 		else if (drbd_opt_cmd_handler && P_MAY_IGNORE < header->command
3539 		     && header->command < P_MAX_OPT_CMD)
3540 			handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3541 		else if (header->command > P_MAX_OPT_CMD)
3542 			handler = receive_skip;
3543 		else
3544 			handler = NULL;
3545 
3546 		if (unlikely(!handler)) {
3547 			dev_err(DEV, "unknown packet type %d, l: %d!\n",
3548 			    header->command, header->length);
3549 			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3550 			break;
3551 		}
3552 		if (unlikely(!handler(mdev, header))) {
3553 			dev_err(DEV, "error receiving %s, l: %d!\n",
3554 			    cmdname(header->command), header->length);
3555 			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3556 			break;
3557 		}
3558 	}
3559 }
3560 
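/* Fail all application READ requests still hashed in app_reads_hash:
 * they are completed as connection_lost_while_pending, and the hash is
 * reset afterwards. */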
3561 static void drbd_fail_pending_reads(struct drbd_conf *mdev)
3562 {
3563 	struct hlist_head *slot;
3564 	struct hlist_node *pos;
3565 	struct hlist_node *tmp;
3566 	struct drbd_request *req;
3567 	int i;
3568 
3569 	/*
3570 	 * Application READ requests
3571 	 */
3572 	spin_lock_irq(&mdev->req_lock);
3573 	for (i = 0; i < APP_R_HSIZE; i++) {
3574 		slot = mdev->app_reads_hash+i;
3575 		hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
3576 			/* it may (but should not any longer!)
3577 			 * be on the work queue; if that assert triggers,
3578 			 * we need to also grab the
3579 			 * spin_lock_irq(&mdev->data.work.q_lock);
3580 			 * and list_del_init here. */
3581 			D_ASSERT(list_empty(&req->w.list));
3582 			/* It would be nice to complete outside of spinlock.
3583 			 * But this is easier for now. */
3584 			_req_mod(req, connection_lost_while_pending);
3585 		}
3586 	}
3587 	for (i = 0; i < APP_R_HSIZE; i++)
3588 		if (!hlist_empty(mdev->app_reads_hash+i))
3589 			dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
3590 				"%p, should be NULL\n", i, mdev->app_reads_hash[i].first);
3591 
3592 	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
3593 	spin_unlock_irq(&mdev->req_lock);
3594 }
3595 
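/* Queue a barrier work item and wait for its completion, so that
 * everything queued on the data work queue before this call has run. */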
3596 void drbd_flush_workqueue(struct drbd_conf *mdev)
3597 {
3598 	struct drbd_wq_barrier barr;
3599 
3600 	barr.w.cb = w_prev_work_done;
3601 	init_completion(&barr.done);
3602 	drbd_queue_work(&mdev->data.work, &barr.w);
3603 	wait_for_completion(&barr.done);
3604 }
3605 
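/* Tear down an established connection: stop the asender, wait for the
 * active/sync/read ee lists to drain, cancel resync bookkeeping, flush the
 * worker queue, and move the connection state towards C_UNCONNECTED (or
 * C_STANDALONE when the admin requested C_DISCONNECTING). */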
3606 static void drbd_disconnect(struct drbd_conf *mdev)
3607 {
3608 	enum drbd_fencing_p fp;
3609 	union drbd_state os, ns;
3610 	int rv = SS_UNKNOWN_ERROR;
3611 	unsigned int i;
3612 
3613 	if (mdev->state.conn == C_STANDALONE)
3614 		return;
3615 	if (mdev->state.conn >= C_WF_CONNECTION)
3616 		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3617 				drbd_conn_str(mdev->state.conn));
3618 
3619 	/* asender does not clean up anything. it must not interfere, either */
3620 	drbd_thread_stop(&mdev->asender);
3621 	drbd_free_sock(mdev);
3622 
3623 	spin_lock_irq(&mdev->req_lock);
3624 	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3625 	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3626 	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3627 	spin_unlock_irq(&mdev->req_lock);
3628 
3629 	/* We do not have data structures that would allow us to
3630 	 * get the rs_pending_cnt down to 0 again.
3631 	 *  * On C_SYNC_TARGET we do not have any data structures describing
3632 	 *    the pending RSDataRequest's we have sent.
3633 	 *  * On C_SYNC_SOURCE there is no data structure that tracks
3634 	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3635 	 *  And no, it is not the sum of the reference counts in the
3636 	 *  resync_LRU. The resync_LRU tracks the whole operation including
3637 	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
3638 	 *  on the fly. */
3639 	drbd_rs_cancel_all(mdev);
3640 	mdev->rs_total = 0;
3641 	mdev->rs_failed = 0;
3642 	atomic_set(&mdev->rs_pending_cnt, 0);
3643 	wake_up(&mdev->misc_wait);
3644 
3645 	/* make sure syncer is stopped and w_resume_next_sg queued */
3646 	del_timer_sync(&mdev->resync_timer);
3647 	set_bit(STOP_SYNC_TIMER, &mdev->flags);
3648 	resync_timer_fn((unsigned long)mdev);
3649 
3650 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3651 	 * w_make_resync_request etc. which may still be on the worker queue
3652 	 * to be "canceled" */
3653 	drbd_flush_workqueue(mdev);
3654 
3655 	/* This also does reclaim_net_ee().  If we do this too early, we might
3656 	 * miss some resync ee and pages.*/
3657 	drbd_process_done_ee(mdev);
3658 
3659 	kfree(mdev->p_uuid);
3660 	mdev->p_uuid = NULL;
3661 
3662 	if (!mdev->state.susp)
3663 		tl_clear(mdev);
3664 
3665 	drbd_fail_pending_reads(mdev);
3666 
3667 	dev_info(DEV, "Connection closed\n");
3668 
3669 	drbd_md_sync(mdev);
3670 
3671 	fp = FP_DONT_CARE;
3672 	if (get_ldev(mdev)) {
3673 		fp = mdev->ldev->dc.fencing;
3674 		put_ldev(mdev);
3675 	}
3676 
3677 	if (mdev->state.role == R_PRIMARY) {
3678 		if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
3679 			enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3680 			drbd_request_state(mdev, NS(pdsk, nps));
3681 		}
3682 	}
3683 
3684 	spin_lock_irq(&mdev->req_lock);
3685 	os = mdev->state;
3686 	if (os.conn >= C_UNCONNECTED) {
3687 		/* Do not restart in case we are C_DISCONNECTING */
3688 		ns = os;
3689 		ns.conn = C_UNCONNECTED;
3690 		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3691 	}
3692 	spin_unlock_irq(&mdev->req_lock);
3693 
3694 	if (os.conn == C_DISCONNECTING) {
3695 		struct hlist_head *h;
3696 		wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3697 
3698 		/* we must not free the tl_hash
3699 		 * while application io is still on the fly */
3700 		wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);
3701 
3702 		spin_lock_irq(&mdev->req_lock);
3703 		/* paranoia code */
3704 		for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3705 			if (h->first)
3706 				dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3707 						(int)(h - mdev->ee_hash), h->first);
3708 		kfree(mdev->ee_hash);
3709 		mdev->ee_hash = NULL;
3710 		mdev->ee_hash_s = 0;
3711 
3712 		/* paranoia code */
3713 		for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3714 			if (h->first)
3715 				dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3716 						(int)(h - mdev->tl_hash), h->first);
3717 		kfree(mdev->tl_hash);
3718 		mdev->tl_hash = NULL;
3719 		mdev->tl_hash_s = 0;
3720 		spin_unlock_irq(&mdev->req_lock);
3721 
3722 		crypto_free_hash(mdev->cram_hmac_tfm);
3723 		mdev->cram_hmac_tfm = NULL;
3724 
3725 		kfree(mdev->net_conf);
3726 		mdev->net_conf = NULL;
3727 		drbd_request_state(mdev, NS(conn, C_STANDALONE));
3728 	}
3729 
3730 	/* tcp_close and release of sendpage pages can be deferred.  I don't
3731 	 * want to use SO_LINGER, because apparently it can be deferred for
3732 	 * more than 20 seconds (longest time I checked).
3733 	 *
3734 	 * Actually we don't care exactly when the network stack does its
3735 	 * put_page(); we just release our reference on these pages right here.
3736 	 */
3737 	i = drbd_release_ee(mdev, &mdev->net_ee);
3738 	if (i)
3739 		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3740 	i = atomic_read(&mdev->pp_in_use);
3741 	if (i)
3742 		dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
3743 
3744 	D_ASSERT(list_empty(&mdev->read_ee));
3745 	D_ASSERT(list_empty(&mdev->active_ee));
3746 	D_ASSERT(list_empty(&mdev->sync_ee));
3747 	D_ASSERT(list_empty(&mdev->done_ee));
3748 
3749 	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3750 	atomic_set(&mdev->current_epoch->epoch_size, 0);
3751 	D_ASSERT(list_empty(&mdev->current_epoch->list));
3752 }
3753 
3754 /*
3755  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3756  * we can agree on is stored in agreed_pro_version.
3757  *
3758  * feature flags and the reserved array should be enough room for future
3759  * enhancements of the handshake protocol, and possible plugins...
3760  *
3761  * For now they are expected to be zero, but are ignored in any case.
3762  */
3763 static int drbd_send_handshake(struct drbd_conf *mdev)
3764 {
3765 	/* ASSERT current == mdev->receiver ... */
3766 	struct p_handshake *p = &mdev->data.sbuf.handshake;
3767 	int ok;
3768 
3769 	if (mutex_lock_interruptible(&mdev->data.mutex)) {
3770 		dev_err(DEV, "interrupted during initial handshake\n");
3771 		return 0; /* interrupted. not ok. */
3772 	}
3773 
3774 	if (mdev->data.socket == NULL) {
3775 		mutex_unlock(&mdev->data.mutex);
3776 		return 0;
3777 	}
3778 
3779 	memset(p, 0, sizeof(*p));
3780 	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3781 	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3782 	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
3783 			    (struct p_header *)p, sizeof(*p), 0);
3784 	mutex_unlock(&mdev->data.mutex);
3785 	return ok;
3786 }
3787 
3788 /*
3789  * return values:
3790  *   1 yes, we have a valid connection
3791  *   0 oops, did not work out, please try again
3792  *  -1 peer talks different language,
3793  *     no point in trying again, please go standalone.
3794  */
3795 static int drbd_do_handshake(struct drbd_conf *mdev)
3796 {
3797 	/* ASSERT current == mdev->receiver ... */
3798 	struct p_handshake *p = &mdev->data.rbuf.handshake;
3799 	const int expect = sizeof(struct p_handshake)
3800 			  -sizeof(struct p_header);
3801 	int rv;
3802 
3803 	rv = drbd_send_handshake(mdev);
3804 	if (!rv)
3805 		return 0;
3806 
3807 	rv = drbd_recv_header(mdev, &p->head);
3808 	if (!rv)
3809 		return 0;
3810 
3811 	if (p->head.command != P_HAND_SHAKE) {
3812 		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3813 		     cmdname(p->head.command), p->head.command);
3814 		return -1;
3815 	}
3816 
3817 	if (p->head.length != expect) {
3818 		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3819 		     expect, p->head.length);
3820 		return -1;
3821 	}
3822 
3823 	rv = drbd_recv(mdev, &p->head.payload, expect);
3824 
3825 	if (rv != expect) {
3826 		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3827 		return 0;
3828 	}
3829 
3830 	p->protocol_min = be32_to_cpu(p->protocol_min);
3831 	p->protocol_max = be32_to_cpu(p->protocol_max);
3832 	if (p->protocol_max == 0)
3833 		p->protocol_max = p->protocol_min;
3834 
3835 	if (PRO_VERSION_MAX < p->protocol_min ||
3836 	    PRO_VERSION_MIN > p->protocol_max)
3837 		goto incompat;
3838 
3839 	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3840 
3841 	dev_info(DEV, "Handshake successful: "
3842 	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3843 
3844 	return 1;
3845 
3846  incompat:
3847 	dev_err(DEV, "incompatible DRBD dialects: "
3848 	    "I support %d-%d, peer supports %d-%d\n",
3849 	    PRO_VERSION_MIN, PRO_VERSION_MAX,
3850 	    p->protocol_min, p->protocol_max);
3851 	return -1;
3852 }
3853 
3854 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3855 static int drbd_do_auth(struct drbd_conf *mdev)
3856 {
3857 	dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3858 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3859 	return -1;
3860 }
3861 #else
3862 #define CHALLENGE_LEN 64
3863 
3864 /* Return value:
3865 	1 - auth succeeded,
3866 	0 - failed, try again (network error),
3867 	-1 - auth failed, don't try again.
3868 */
3869 
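/* Challenge-response authentication over the data socket: both nodes
 * exchange random challenges, HMAC them with the shared secret
 * (cram-hmac-alg), and each side verifies the peer's response against
 * its own computation of the expected digest. */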
3870 static int drbd_do_auth(struct drbd_conf *mdev)
3871 {
3872 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
3873 	struct scatterlist sg;
3874 	char *response = NULL;
3875 	char *right_response = NULL;
3876 	char *peers_ch = NULL;
3877 	struct p_header p;
3878 	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3879 	unsigned int resp_size;
3880 	struct hash_desc desc;
3881 	int rv;
3882 
3883 	desc.tfm = mdev->cram_hmac_tfm;
3884 	desc.flags = 0;
3885 
3886 	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3887 				(u8 *)mdev->net_conf->shared_secret, key_len);
3888 	if (rv) {
3889 		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3890 		rv = -1;
3891 		goto fail;
3892 	}
3893 
3894 	get_random_bytes(my_challenge, CHALLENGE_LEN);
3895 
3896 	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
3897 	if (!rv)
3898 		goto fail;
3899 
3900 	rv = drbd_recv_header(mdev, &p);
3901 	if (!rv)
3902 		goto fail;
3903 
3904 	if (p.command != P_AUTH_CHALLENGE) {
3905 		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
3906 		    cmdname(p.command), p.command);
3907 		rv = 0;
3908 		goto fail;
3909 	}
3910 
3911 	if (p.length > CHALLENGE_LEN*2) {
3912 		dev_err(DEV, "expected AuthChallenge payload too big.\n");
3913 		rv = -1;
3914 		goto fail;
3915 	}
3916 
3917 	peers_ch = kmalloc(p.length, GFP_NOIO);
3918 	if (peers_ch == NULL) {
3919 		dev_err(DEV, "kmalloc of peers_ch failed\n");
3920 		rv = -1;
3921 		goto fail;
3922 	}
3923 
3924 	rv = drbd_recv(mdev, peers_ch, p.length);
3925 
3926 	if (rv != p.length) {
3927 		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
3928 		rv = 0;
3929 		goto fail;
3930 	}
3931 
3932 	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
3933 	response = kmalloc(resp_size, GFP_NOIO);
3934 	if (response == NULL) {
3935 		dev_err(DEV, "kmalloc of response failed\n");
3936 		rv = -1;
3937 		goto fail;
3938 	}
3939 
3940 	sg_init_table(&sg, 1);
3941 	sg_set_buf(&sg, peers_ch, p.length);
3942 
3943 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
3944 	if (rv) {
3945 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3946 		rv = -1;
3947 		goto fail;
3948 	}
3949 
3950 	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
3951 	if (!rv)
3952 		goto fail;
3953 
3954 	rv = drbd_recv_header(mdev, &p);
3955 	if (!rv)
3956 		goto fail;
3957 
3958 	if (p.command != P_AUTH_RESPONSE) {
3959 		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
3960 		    cmdname(p.command), p.command);
3961 		rv = 0;
3962 		goto fail;
3963 	}
3964 
3965 	if (p.length != resp_size) {
3966 		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
3967 		rv = 0;
3968 		goto fail;
3969 	}
3970 
3971 	rv = drbd_recv(mdev, response, resp_size);
3972 
3973 	if (rv != resp_size) {
3974 		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
3975 		rv = 0;
3976 		goto fail;
3977 	}
3978 
3979 	right_response = kmalloc(resp_size, GFP_NOIO);
3980 	if (right_response == NULL) {
3981 		dev_err(DEV, "kmalloc of right_response failed\n");
3982 		rv = -1;
3983 		goto fail;
3984 	}
3985 
3986 	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
3987 
3988 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
3989 	if (rv) {
3990 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3991 		rv = -1;
3992 		goto fail;
3993 	}
3994 
3995 	rv = !memcmp(response, right_response, resp_size);
3996 
3997 	if (rv)
3998 		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
3999 		     resp_size, mdev->net_conf->cram_hmac_alg);
4000 	else
4001 		rv = -1;
4002 
4003  fail:
4004 	kfree(peers_ch);
4005 	kfree(response);
4006 	kfree(right_response);
4007 
4008 	return rv;
4009 }
4010 #endif
4011 
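/* Entry point of the receiver thread: retry drbd_connect() until a
 * connection is established or fails permanently (handshake mismatch),
 * then run the main receive loop; drbd_disconnect() cleans up on exit. */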
4012 int drbdd_init(struct drbd_thread *thi)
4013 {
4014 	struct drbd_conf *mdev = thi->mdev;
4015 	unsigned int minor = mdev_to_minor(mdev);
4016 	int h;
4017 
4018 	sprintf(current->comm, "drbd%d_receiver", minor);
4019 
4020 	dev_info(DEV, "receiver (re)started\n");
4021 
4022 	do {
4023 		h = drbd_connect(mdev);
4024 		if (h == 0) {
4025 			drbd_disconnect(mdev);
4026 			__set_current_state(TASK_INTERRUPTIBLE);
4027 			schedule_timeout(HZ);
4028 		}
4029 		if (h == -1) {
4030 			dev_warn(DEV, "Discarding network configuration.\n");
4031 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4032 		}
4033 	} while (h == 0);
4034 
4035 	if (h > 0) {
4036 		if (get_net_conf(mdev)) {
4037 			drbdd(mdev);
4038 			put_net_conf(mdev);
4039 		}
4040 	}
4041 
4042 	drbd_disconnect(mdev);
4043 
4044 	dev_info(DEV, "receiver terminated\n");
4045 	return 0;
4046 }
4047 
4048 /* ********* acknowledge sender ******** */
4049 
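/* Peer's reply to a state change request we sent: record success or
 * failure in the device flags and wake anyone waiting in state_wait. */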
4050 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4051 {
4052 	struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4053 
4054 	int retcode = be32_to_cpu(p->retcode);
4055 
4056 	if (retcode >= SS_SUCCESS) {
4057 		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4058 	} else {
4059 		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4060 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4061 		    drbd_set_st_err_str(retcode), retcode);
4062 	}
4063 	wake_up(&mdev->state_wait);
4064 
4065 	return TRUE;
4066 }
4067 
4068 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4069 {
4070 	return drbd_send_ping_ack(mdev);
4071 
4072 }
4073 
4074 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4075 {
4076 	/* restore idle timeout */
4077 	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4078 	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4079 		wake_up(&mdev->misc_wait);
4080 
4081 	return TRUE;
4082 }
4083 
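/* The peer tells us a block is already in sync (checksum based resync,
 * protocol >= 89): mark it in sync locally without transferring data. */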
4084 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4085 {
4086 	struct p_block_ack *p = (struct p_block_ack *)h;
4087 	sector_t sector = be64_to_cpu(p->sector);
4088 	int blksize = be32_to_cpu(p->blksize);
4089 
4090 	D_ASSERT(mdev->agreed_pro_version >= 89);
4091 
4092 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4093 
4094 	drbd_rs_complete_io(mdev, sector);
4095 	drbd_set_in_sync(mdev, sector, blksize);
4096 	/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4097 	mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4098 	dec_rs_pending(mdev);
4099 
4100 	return TRUE;
4101 }
4102 
4103 /* when we receive the ACK for a write request,
4104  * verify that we actually know about it */
4105 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4106 	u64 id, sector_t sector)
4107 {
4108 	struct hlist_head *slot = tl_hash_slot(mdev, sector);
4109 	struct hlist_node *n;
4110 	struct drbd_request *req;
4111 
4112 	hlist_for_each_entry(req, n, slot, colision) {
4113 		if ((unsigned long)req == (unsigned long)id) {
4114 			if (req->sector != sector) {
4115 				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4116 				    "wrong sector (%llus versus %llus)\n", req,
4117 				    (unsigned long long)req->sector,
4118 				    (unsigned long long)sector);
4119 				break;
4120 			}
4121 			return req;
4122 		}
4123 	}
4124 	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4125 		(void *)(unsigned long)id, (unsigned long long)sector);
4126 	return NULL;
4127 }
4128 
4129 typedef struct drbd_request *(req_validator_fn)
4130 	(struct drbd_conf *mdev, u64 id, sector_t sector);
4131 
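/* Look up the request for this (block_id, sector) pair under req_lock,
 * apply the given state transition via __req_mod(), and complete the
 * master bio outside the lock if that transition finished it. */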
4132 static int validate_req_change_req_state(struct drbd_conf *mdev,
4133 	u64 id, sector_t sector, req_validator_fn validator,
4134 	const char *func, enum drbd_req_event what)
4135 {
4136 	struct drbd_request *req;
4137 	struct bio_and_error m;
4138 
4139 	spin_lock_irq(&mdev->req_lock);
4140 	req = validator(mdev, id, sector);
4141 	if (unlikely(!req)) {
4142 		spin_unlock_irq(&mdev->req_lock);
4143 		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4144 		return FALSE;
4145 	}
4146 	__req_mod(req, what, &m);
4147 	spin_unlock_irq(&mdev->req_lock);
4148 
4149 	if (m.bio)
4150 		complete_master_bio(mdev, &m);
4151 	return TRUE;
4152 }
4153 
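/* Positive ACK for a write we sent: syncer blocks are simply marked in
 * sync; for application writes the ack type is translated into the
 * matching request event. */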
4154 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4155 {
4156 	struct p_block_ack *p = (struct p_block_ack *)h;
4157 	sector_t sector = be64_to_cpu(p->sector);
4158 	int blksize = be32_to_cpu(p->blksize);
4159 	enum drbd_req_event what;
4160 
4161 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4162 
4163 	if (is_syncer_block_id(p->block_id)) {
4164 		drbd_set_in_sync(mdev, sector, blksize);
4165 		dec_rs_pending(mdev);
4166 		return TRUE;
4167 	}
4168 	switch (be16_to_cpu(h->command)) {
4169 	case P_RS_WRITE_ACK:
4170 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4171 		what = write_acked_by_peer_and_sis;
4172 		break;
4173 	case P_WRITE_ACK:
4174 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4175 		what = write_acked_by_peer;
4176 		break;
4177 	case P_RECV_ACK:
4178 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4179 		what = recv_acked_by_peer;
4180 		break;
4181 	case P_DISCARD_ACK:
4182 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4183 		what = conflict_discarded_by_peer;
4184 		break;
4185 	default:
4186 		D_ASSERT(0);
4187 		return FALSE;
4188 	}
4189 
4190 	return validate_req_change_req_state(mdev, p->block_id, sector,
4191 		_ack_id_to_req, __func__ , what);
4192 }
4193 
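/* Negative ACK from the peer: account failed I/O for resync blocks, or
 * transition the corresponding application request with neg_acked. */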
4194 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4195 {
4196 	struct p_block_ack *p = (struct p_block_ack *)h;
4197 	sector_t sector = be64_to_cpu(p->sector);
4198 
4199 	if (__ratelimit(&drbd_ratelimit_state))
4200 		dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
4201 
4202 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4203 
4204 	if (is_syncer_block_id(p->block_id)) {
4205 		int size = be32_to_cpu(p->blksize);
4206 		dec_rs_pending(mdev);
4207 		drbd_rs_failed_io(mdev, sector, size);
4208 		return TRUE;
4209 	}
4210 	return validate_req_change_req_state(mdev, p->block_id, sector,
4211 		_ack_id_to_req, __func__ , neg_acked);
4212 }
4213 
4214 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4215 {
4216 	struct p_block_ack *p = (struct p_block_ack *)h;
4217 	sector_t sector = be64_to_cpu(p->sector);
4218 
4219 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4220 	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4221 	    (unsigned long long)sector, be32_to_cpu(p->blksize));
4222 
4223 	return validate_req_change_req_state(mdev, p->block_id, sector,
4224 		_ar_id_to_req, __func__ , neg_acked);
4225 }
4226 
4227 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4228 {
4229 	sector_t sector;
4230 	int size;
4231 	struct p_block_ack *p = (struct p_block_ack *)h;
4232 
4233 	sector = be64_to_cpu(p->sector);
4234 	size = be32_to_cpu(p->blksize);
4235 	D_ASSERT(p->block_id == ID_SYNCER);
4236 
4237 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4238 
4239 	dec_rs_pending(mdev);
4240 
4241 	if (get_ldev_if_state(mdev, D_FAILED)) {
4242 		drbd_rs_complete_io(mdev, sector);
4243 		drbd_rs_failed_io(mdev, sector, size);
4244 		put_ldev(mdev);
4245 	}
4246 
4247 	return TRUE;
4248 }
4249 
4250 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4251 {
4252 	struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4253 
4254 	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4255 
4256 	return TRUE;
4257 }
4258 
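/* Result of a single online-verify request: record out-of-sync blocks,
 * and once the last pending reply arrived (ov_left reaches zero) queue
 * the verify-finished work. */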
4259 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4260 {
4261 	struct p_block_ack *p = (struct p_block_ack *)h;
4262 	struct drbd_work *w;
4263 	sector_t sector;
4264 	int size;
4265 
4266 	sector = be64_to_cpu(p->sector);
4267 	size = be32_to_cpu(p->blksize);
4268 
4269 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4270 
4271 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4272 		drbd_ov_oos_found(mdev, sector, size);
4273 	else
4274 		ov_oos_print(mdev);
4275 
4276 	drbd_rs_complete_io(mdev, sector);
4277 	dec_rs_pending(mdev);
4278 
4279 	if (--mdev->ov_left == 0) {
4280 		w = kmalloc(sizeof(*w), GFP_NOIO);
4281 		if (w) {
4282 			w->cb = w_ov_finished;
4283 			drbd_queue_work_front(&mdev->data.work, w);
4284 		} else {
4285 			dev_err(DEV, "kmalloc(w) failed.");
4286 			ov_oos_print(mdev);
4287 			drbd_resync_finished(mdev);
4288 		}
4289 	}
4290 	return TRUE;
4291 }
4292 
4293 struct asender_cmd {
4294 	size_t pkt_size;
4295 	int (*process)(struct drbd_conf *mdev, struct p_header *h);
4296 };
4297 
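/* Map a meta-socket command to its expected packet size and handler;
 * returns NULL for commands the asender does not handle. */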
4298 static struct asender_cmd *get_asender_cmd(int cmd)
4299 {
4300 	static struct asender_cmd asender_tbl[] = {
4301 		/* anything missing from this table is in
4302 		 * the drbd_cmd_handler (drbd_default_handler) table,
4303 		 * see the beginning of drbdd() */
4304 	[P_PING]	    = { sizeof(struct p_header), got_Ping },
4305 	[P_PING_ACK]	    = { sizeof(struct p_header), got_PingAck },
4306 	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4307 	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4308 	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
4309 	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4310 	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
4311 	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
4312 	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply},
4313 	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
4314 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
4315 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4316 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
4317 	[P_MAX_CMD]	    = { 0, NULL },
4318 	};
4319 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4320 		return NULL;
4321 	return &asender_tbl[cmd];
4322 }
4323 
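/* The acknowledge sender thread: sends pings and the ACKs for completed
 * epoch entries over the meta socket, and processes the small control
 * packets (pings, block acks, barrier acks, state replies) it receives
 * in return.  Runs as a SCHED_RR realtime task. */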
4324 int drbd_asender(struct drbd_thread *thi)
4325 {
4326 	struct drbd_conf *mdev = thi->mdev;
4327 	struct p_header *h = &mdev->meta.rbuf.header;
4328 	struct asender_cmd *cmd = NULL;
4329 
4330 	int rv, len;
4331 	void *buf    = h;
4332 	int received = 0;
4333 	int expect   = sizeof(struct p_header);
4334 	int empty;
4335 
4336 	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4337 
4338 	current->policy = SCHED_RR;  /* Make this a realtime task! */
4339 	current->rt_priority = 2;    /* more important than all other tasks */
4340 
4341 	while (get_t_state(thi) == Running) {
4342 		drbd_thread_current_set_cpu(mdev);
4343 		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4344 			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4345 			mdev->meta.socket->sk->sk_rcvtimeo =
4346 				mdev->net_conf->ping_timeo*HZ/10;
4347 		}
4348 
4349 		/* conditionally cork;
4350 		 * it may hurt latency if we cork without much to send */
4351 		if (!mdev->net_conf->no_cork &&
4352 			3 < atomic_read(&mdev->unacked_cnt))
4353 			drbd_tcp_cork(mdev->meta.socket);
4354 		while (1) {
4355 			clear_bit(SIGNAL_ASENDER, &mdev->flags);
4356 			flush_signals(current);
4357 			if (!drbd_process_done_ee(mdev)) {
4358 				dev_err(DEV, "process_done_ee() = NOT_OK\n");
4359 				goto reconnect;
4360 			}
4361 			/* to avoid race with newly queued ACKs */
4362 			set_bit(SIGNAL_ASENDER, &mdev->flags);
4363 			spin_lock_irq(&mdev->req_lock);
4364 			empty = list_empty(&mdev->done_ee);
4365 			spin_unlock_irq(&mdev->req_lock);
4366 			/* new ack may have been queued right here,
4367 			 * but then there is also a signal pending,
4368 			 * and we start over... */
4369 			if (empty)
4370 				break;
4371 		}
4372 		/* but unconditionally uncork unless disabled */
4373 		if (!mdev->net_conf->no_cork)
4374 			drbd_tcp_uncork(mdev->meta.socket);
4375 
4376 		/* short circuit, recv_msg would return EINTR anyways. */
4377 		if (signal_pending(current))
4378 			continue;
4379 
4380 		rv = drbd_recv_short(mdev, mdev->meta.socket,
4381 				     buf, expect-received, 0);
4382 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
4383 
4384 		flush_signals(current);
4385 
4386 		/* Note:
4387 		 * -EINTR	 (on meta) we got a signal
4388 		 * -EAGAIN	 (on meta) rcvtimeo expired
4389 		 * -ECONNRESET	 other side closed the connection
4390 		 * -ERESTARTSYS  (on data) we got a signal
4391 		 * rv <  0	 other than above: unexpected error!
4392 		 * rv == expected: full header or command
4393 		 * rv <  expected: "woken" by signal during receive
4394 		 * rv == 0	 : "connection shut down by peer"
4395 		 */
4396 		if (likely(rv > 0)) {
4397 			received += rv;
4398 			buf	 += rv;
4399 		} else if (rv == 0) {
4400 			dev_err(DEV, "meta connection shut down by peer.\n");
4401 			goto reconnect;
4402 		} else if (rv == -EAGAIN) {
4403 			if (mdev->meta.socket->sk->sk_rcvtimeo ==
4404 			    mdev->net_conf->ping_timeo*HZ/10) {
4405 				dev_err(DEV, "PingAck did not arrive in time.\n");
4406 				goto reconnect;
4407 			}
4408 			set_bit(SEND_PING, &mdev->flags);
4409 			continue;
4410 		} else if (rv == -EINTR) {
4411 			continue;
4412 		} else {
4413 			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4414 			goto reconnect;
4415 		}
4416 
4417 		if (received == expect && cmd == NULL) {
4418 			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4419 				dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
4420 				    (long)be32_to_cpu(h->magic),
4421 				    h->command, h->length);
4422 				goto reconnect;
4423 			}
4424 			cmd = get_asender_cmd(be16_to_cpu(h->command));
4425 			len = be16_to_cpu(h->length);
4426 			if (unlikely(cmd == NULL)) {
4427 				dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
4428 				    (long)be32_to_cpu(h->magic),
4429 				    h->command, h->length);
4430 				goto disconnect;
4431 			}
4432 			expect = cmd->pkt_size;
4433 			ERR_IF(len != expect-sizeof(struct p_header))
4434 				goto reconnect;
4435 		}
4436 		if (received == expect) {
4437 			D_ASSERT(cmd != NULL);
4438 			if (!cmd->process(mdev, h))
4439 				goto reconnect;
4440 
4441 			buf	 = h;
4442 			received = 0;
4443 			expect	 = sizeof(struct p_header);
4444 			cmd	 = NULL;
4445 		}
4446 	}
4447 
4448 	if (0) {
4449 reconnect:
4450 		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4451 	}
4452 	if (0) {
4453 disconnect:
4454 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4455 	}
4456 	clear_bit(SIGNAL_ASENDER, &mdev->flags);
4457 
4458 	D_ASSERT(mdev->state.conn < C_CONNECTED);
4459 	dev_info(DEV, "asender terminated\n");
4460 
4461 	return 0;
4462 }
4463