xref: /linux/drivers/block/drbd/drbd_main.c (revision 4e0ae876f77bc01a7e77724dea57b4b82bd53244)
1 /*
2    drbd.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12 
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17 
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22 
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 
27  */
28 
29 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
30 
31 #include <linux/module.h>
32 #include <linux/jiffies.h>
33 #include <linux/drbd.h>
34 #include <linux/uaccess.h>
35 #include <asm/types.h>
36 #include <net/sock.h>
37 #include <linux/ctype.h>
38 #include <linux/mutex.h>
39 #include <linux/fs.h>
40 #include <linux/file.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/mm.h>
44 #include <linux/memcontrol.h>
45 #include <linux/mm_inline.h>
46 #include <linux/slab.h>
47 #include <linux/random.h>
48 #include <linux/reboot.h>
49 #include <linux/notifier.h>
50 #include <linux/kthread.h>
51 #include <linux/workqueue.h>
52 #define __KERNEL_SYSCALLS__
53 #include <linux/unistd.h>
54 #include <linux/vmalloc.h>
55 #include <linux/sched/signal.h>
56 
57 #include <linux/drbd_limits.h>
58 #include "drbd_int.h"
59 #include "drbd_protocol.h"
60 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
61 #include "drbd_vli.h"
62 #include "drbd_debugfs.h"
63 
64 static DEFINE_MUTEX(drbd_main_mutex);
65 static int drbd_open(struct block_device *bdev, fmode_t mode);
66 static void drbd_release(struct gendisk *gd, fmode_t mode);
67 static void md_sync_timer_fn(struct timer_list *t);
68 static int w_bitmap_io(struct drbd_work *w, int unused);
69 
70 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
71 	      "Lars Ellenberg <lars@linbit.com>");
72 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
73 MODULE_VERSION(REL_VERSION);
74 MODULE_LICENSE("GPL");
75 MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
76 		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
77 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
78 
79 #include <linux/moduleparam.h>
80 /* thanks to these macros, if compiled into the kernel (not as a module),
81  * these become boot parameters (e.g., drbd.minor_count) */
82 
83 #ifdef CONFIG_DRBD_FAULT_INJECTION
84 int drbd_enable_faults;
85 int drbd_fault_rate;
86 static int drbd_fault_count;
87 static int drbd_fault_devs;
88 /* bitmap of enabled faults */
89 module_param_named(enable_faults, drbd_enable_faults, int, 0664);
90 /* fault rate % value - applies to all enabled faults */
91 module_param_named(fault_rate, drbd_fault_rate, int, 0664);
92 /* count of faults inserted */
93 module_param_named(fault_count, drbd_fault_count, int, 0664);
94 /* bitmap of devices to insert faults on */
95 module_param_named(fault_devs, drbd_fault_devs, int, 0644);
96 #endif
97 
98 /* module parameters we can keep static */
99 static bool drbd_allow_oos; /* allow_open_on_secondary */
100 static bool drbd_disable_sendpage;
101 MODULE_PARM_DESC(allow_oos, "DONT USE!");
102 module_param_named(allow_oos, drbd_allow_oos, bool, 0);
103 module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
104 
105 /* module parameters we share */
106 int drbd_proc_details; /* Detail level in /proc/drbd */
107 module_param_named(proc_details, drbd_proc_details, int, 0644);
108 /* module parameters shared with defaults */
109 unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
110 /* Module parameter for setting the user mode helper program
111  * to run. Default is /sbin/drbdadm */
112 char drbd_usermode_helper[80] = "/sbin/drbdadm";
113 module_param_named(minor_count, drbd_minor_count, uint, 0444);
114 module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
115 
116 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
117  * as member "struct gendisk *vdisk;"
118  */
119 struct idr drbd_devices;
120 struct list_head drbd_resources;
121 struct mutex resources_mutex;
122 
123 struct kmem_cache *drbd_request_cache;
124 struct kmem_cache *drbd_ee_cache;	/* peer requests */
125 struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
126 struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
127 mempool_t drbd_request_mempool;
128 mempool_t drbd_ee_mempool;
129 mempool_t drbd_md_io_page_pool;
130 struct bio_set drbd_md_io_bio_set;
131 struct bio_set drbd_io_bio_set;
132 
133 /* I do not use a standard mempool, because:
134    1) I want to hand out the pre-allocated objects first.
135    2) I want to be able to interrupt sleeping allocation with a signal.
136    Note: This is a singly linked list; the next pointer is the private
137 	 member of struct page.
138  */
139 struct page *drbd_pp_pool;
140 spinlock_t   drbd_pp_lock;
141 int          drbd_pp_vacant;
142 wait_queue_head_t drbd_pp_wait;
143 
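/* rate limit for noisy log messages: at most 5 messages per 5 seconds
 * (DEFINE_RATELIMIT_STATE(name, interval, burst) with interval = 5*HZ, burst = 5) */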
144 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
145 
146 static const struct block_device_operations drbd_ops = {
147 	.owner =   THIS_MODULE,
148 	.open =    drbd_open,
149 	.release = drbd_release,
150 };
151 
152 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
153 {
154 	struct bio *bio;
155 
156 	if (!bioset_initialized(&drbd_md_io_bio_set))
157 		return bio_alloc(gfp_mask, 1);
158 
159 	bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
160 	if (!bio)
161 		return NULL;
162 	return bio;
163 }
164 
165 #ifdef __CHECKER__
166 /* When checking with sparse, if this is an inline function, sparse will
167    give tons of false positives. When this is a real function, sparse works.
168  */
169 int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
170 {
171 	int io_allowed;
172 
173 	atomic_inc(&device->local_cnt);
174 	io_allowed = (device->state.disk >= mins);
175 	if (!io_allowed) {
176 		if (atomic_dec_and_test(&device->local_cnt))
177 			wake_up(&device->misc_wait);
178 	}
179 	return io_allowed;
180 }
181 
182 #endif
183 
184 /**
185  * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
186  * @connection:	DRBD connection.
187  * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
188  * @set_size:	Expected number of requests before that barrier.
189  *
190  * In case the passed barrier_nr or set_size does not match the oldest
191  * epoch of not yet barrier-acked requests, this function will cause a
192  * termination of the connection.
193  */
194 void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
195 		unsigned int set_size)
196 {
197 	struct drbd_request *r;
198 	struct drbd_request *req = NULL;
199 	int expect_epoch = 0;
200 	int expect_size = 0;
201 
202 	spin_lock_irq(&connection->resource->req_lock);
203 
204 	/* find oldest not yet barrier-acked write request,
205 	 * count writes in its epoch. */
206 	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
207 		const unsigned s = r->rq_state;
208 		if (!req) {
209 			if (!(s & RQ_WRITE))
210 				continue;
211 			if (!(s & RQ_NET_MASK))
212 				continue;
213 			if (s & RQ_NET_DONE)
214 				continue;
215 			req = r;
216 			expect_epoch = req->epoch;
217 			expect_size++;
218 		} else {
219 			if (r->epoch != expect_epoch)
220 				break;
221 			if (!(s & RQ_WRITE))
222 				continue;
223 			/* if (s & RQ_DONE): not expected */
224 			/* if (!(s & RQ_NET_MASK)): not expected */
225 			expect_size++;
226 		}
227 	}
228 
229 	/* first some paranoia code */
230 	if (req == NULL) {
231 		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
232 			 barrier_nr);
233 		goto bail;
234 	}
235 	if (expect_epoch != barrier_nr) {
236 		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
237 			 barrier_nr, expect_epoch);
238 		goto bail;
239 	}
240 
241 	if (expect_size != set_size) {
242 		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
243 			 barrier_nr, set_size, expect_size);
244 		goto bail;
245 	}
246 
247 	/* Clean up list of requests processed during current epoch. */
248 	/* this extra list walk restart is paranoia,
249 	 * to catch requests being barrier-acked "unexpectedly".
250 	 * It usually should find the same req again, or some READ preceding it. */
251 	list_for_each_entry(req, &connection->transfer_log, tl_requests)
252 		if (req->epoch == expect_epoch)
253 			break;
254 	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
255 		if (req->epoch != expect_epoch)
256 			break;
257 		_req_mod(req, BARRIER_ACKED);
258 	}
259 	spin_unlock_irq(&connection->resource->req_lock);
260 
261 	return;
262 
263 bail:
264 	spin_unlock_irq(&connection->resource->req_lock);
265 	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
266 }
267 
268 
269 /**
270  * _tl_restart() - Walks the transfer log, and applies an action to all requests
271  * @connection:	DRBD connection to operate on.
272  * @what:       The action/event to perform with all request objects
273  *
274  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
275  * RESTART_FROZEN_DISK_IO.
276  */
277 /* must hold resource->req_lock */
278 void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
279 {
280 	struct drbd_request *req, *r;
281 
282 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
283 		_req_mod(req, what);
284 }
285 
286 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
287 {
288 	spin_lock_irq(&connection->resource->req_lock);
289 	_tl_restart(connection, what);
290 	spin_unlock_irq(&connection->resource->req_lock);
291 }
292 
293 /**
294  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
295  * @connection:	DRBD connection.
296  *
297  * This is called after the connection to the peer was lost. The storage covered
298  * by the requests on the transfer log gets marked as out of sync. Called from the
299  * receiver thread and the worker thread.
300  */
301 void tl_clear(struct drbd_connection *connection)
302 {
303 	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
304 }
305 
306 /**
307  * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
308  * @device:	DRBD device.
309  */
310 void tl_abort_disk_io(struct drbd_device *device)
311 {
312 	struct drbd_connection *connection = first_peer_device(device)->connection;
313 	struct drbd_request *req, *r;
314 
315 	spin_lock_irq(&connection->resource->req_lock);
316 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
317 		if (!(req->rq_state & RQ_LOCAL_PENDING))
318 			continue;
319 		if (req->device != device)
320 			continue;
321 		_req_mod(req, ABORT_DISK_IO);
322 	}
323 	spin_unlock_irq(&connection->resource->req_lock);
324 }
325 
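/* Common kthread entry point for all DRBD threads: runs thi->function(),
 * restarts it if the thread was moved to RESTARTING while exiting, and on
 * termination drops the connection/resource/module references taken in
 * drbd_thread_start(). */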
326 static int drbd_thread_setup(void *arg)
327 {
328 	struct drbd_thread *thi = (struct drbd_thread *) arg;
329 	struct drbd_resource *resource = thi->resource;
330 	unsigned long flags;
331 	int retval;
332 
333 	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
334 		 thi->name[0],
335 		 resource->name);
336 
337 restart:
338 	retval = thi->function(thi);
339 
340 	spin_lock_irqsave(&thi->t_lock, flags);
341 
342 	/* if the receiver has been "EXITING", the last thing it did
343 	 * was set the conn state to "StandAlone",
344 	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
345 	 * and receiver thread will be "started".
346 	 * drbd_thread_start needs to set "RESTARTING" in that case.
347 	 * t_state check and assignment needs to be within the same spinlock,
348 	 * so either thread_start sees EXITING, and can remap to RESTARTING,
349  * or thread_start sees NONE, and can proceed as normal.
350 	 */
351 
352 	if (thi->t_state == RESTARTING) {
353 		drbd_info(resource, "Restarting %s thread\n", thi->name);
354 		thi->t_state = RUNNING;
355 		spin_unlock_irqrestore(&thi->t_lock, flags);
356 		goto restart;
357 	}
358 
359 	thi->task = NULL;
360 	thi->t_state = NONE;
361 	smp_mb();
362 	complete_all(&thi->stop);
363 	spin_unlock_irqrestore(&thi->t_lock, flags);
364 
365 	drbd_info(resource, "Terminating %s\n", current->comm);
366 
367 	/* Release mod reference taken when thread was started */
368 
369 	if (thi->connection)
370 		kref_put(&thi->connection->kref, drbd_destroy_connection);
371 	kref_put(&resource->kref, drbd_destroy_resource);
372 	module_put(THIS_MODULE);
373 	return retval;
374 }
375 
376 static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
377 			     int (*func) (struct drbd_thread *), const char *name)
378 {
379 	spin_lock_init(&thi->t_lock);
380 	thi->task    = NULL;
381 	thi->t_state = NONE;
382 	thi->function = func;
383 	thi->resource = resource;
384 	thi->connection = NULL;
385 	thi->name = name;
386 }
387 
388 int drbd_thread_start(struct drbd_thread *thi)
389 {
390 	struct drbd_resource *resource = thi->resource;
391 	struct task_struct *nt;
392 	unsigned long flags;
393 
394 	/* is used from state engine doing drbd_thread_stop_nowait,
395 	 * while holding the req lock irqsave */
396 	spin_lock_irqsave(&thi->t_lock, flags);
397 
398 	switch (thi->t_state) {
399 	case NONE:
400 		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
401 			 thi->name, current->comm, current->pid);
402 
403 		/* Get ref on module for thread - this is released when thread exits */
404 		if (!try_module_get(THIS_MODULE)) {
405 			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
406 			spin_unlock_irqrestore(&thi->t_lock, flags);
407 			return false;
408 		}
409 
410 		kref_get(&resource->kref);
411 		if (thi->connection)
412 			kref_get(&thi->connection->kref);
413 
414 		init_completion(&thi->stop);
415 		thi->reset_cpu_mask = 1;
416 		thi->t_state = RUNNING;
417 		spin_unlock_irqrestore(&thi->t_lock, flags);
418 		flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
419 
420 		nt = kthread_create(drbd_thread_setup, (void *) thi,
421 				    "drbd_%c_%s", thi->name[0], thi->resource->name);
422 
423 		if (IS_ERR(nt)) {
424 			drbd_err(resource, "Couldn't start thread\n");
425 
426 			if (thi->connection)
427 				kref_put(&thi->connection->kref, drbd_destroy_connection);
428 			kref_put(&resource->kref, drbd_destroy_resource);
429 			module_put(THIS_MODULE);
430 			return false;
431 		}
432 		spin_lock_irqsave(&thi->t_lock, flags);
433 		thi->task = nt;
434 		thi->t_state = RUNNING;
435 		spin_unlock_irqrestore(&thi->t_lock, flags);
436 		wake_up_process(nt);
437 		break;
438 	case EXITING:
439 		thi->t_state = RESTARTING;
440 		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
441 				thi->name, current->comm, current->pid);
442 		/* fall through */
443 	case RUNNING:
444 	case RESTARTING:
445 	default:
446 		spin_unlock_irqrestore(&thi->t_lock, flags);
447 		break;
448 	}
449 
450 	return true;
451 }
452 
453 
454 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
455 {
456 	unsigned long flags;
457 
458 	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
459 
460 	/* may be called from state engine, holding the req lock irqsave */
461 	spin_lock_irqsave(&thi->t_lock, flags);
462 
463 	if (thi->t_state == NONE) {
464 		spin_unlock_irqrestore(&thi->t_lock, flags);
465 		if (restart)
466 			drbd_thread_start(thi);
467 		return;
468 	}
469 
470 	if (thi->t_state != ns) {
471 		if (thi->task == NULL) {
472 			spin_unlock_irqrestore(&thi->t_lock, flags);
473 			return;
474 		}
475 
476 		thi->t_state = ns;
477 		smp_mb();
478 		init_completion(&thi->stop);
479 		if (thi->task != current)
480 			force_sig(DRBD_SIGKILL, thi->task);
481 	}
482 
483 	spin_unlock_irqrestore(&thi->t_lock, flags);
484 
485 	if (wait)
486 		wait_for_completion(&thi->stop);
487 }
488 
489 int conn_lowest_minor(struct drbd_connection *connection)
490 {
491 	struct drbd_peer_device *peer_device;
492 	int vnr = 0, minor = -1;
493 
494 	rcu_read_lock();
495 	peer_device = idr_get_next(&connection->peer_devices, &vnr);
496 	if (peer_device)
497 		minor = device_to_minor(peer_device->device);
498 	rcu_read_unlock();
499 
500 	return minor;
501 }
502 
503 #ifdef CONFIG_SMP
504 /**
505  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
506  *
507  * Forces all threads of a resource onto the least-loaded online CPU. This is
508  * beneficial for DRBD's performance. May be overridden by the user's configuration.
509  */
510 static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
511 {
512 	unsigned int *resources_per_cpu, min_index = ~0;
513 
514 	resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
515 				    GFP_KERNEL);
516 	if (resources_per_cpu) {
517 		struct drbd_resource *resource;
518 		unsigned int cpu, min = ~0;
519 
520 		rcu_read_lock();
521 		for_each_resource_rcu(resource, &drbd_resources) {
522 			for_each_cpu(cpu, resource->cpu_mask)
523 				resources_per_cpu[cpu]++;
524 		}
525 		rcu_read_unlock();
526 		for_each_online_cpu(cpu) {
527 			if (resources_per_cpu[cpu] < min) {
528 				min = resources_per_cpu[cpu];
529 				min_index = cpu;
530 			}
531 		}
532 		kfree(resources_per_cpu);
533 	}
534 	if (min_index == ~0) {
535 		cpumask_setall(*cpu_mask);
536 		return;
537 	}
538 	cpumask_set_cpu(min_index, *cpu_mask);
539 }
540 
541 /**
542  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
544  * @thi:	drbd_thread object
545  *
546  * Call this in the "main loop" of _all_ threads; no mutex is needed, since current
547  * won't die prematurely.
548  */
549 void drbd_thread_current_set_cpu(struct drbd_thread *thi)
550 {
551 	struct drbd_resource *resource = thi->resource;
552 	struct task_struct *p = current;
553 
554 	if (!thi->reset_cpu_mask)
555 		return;
556 	thi->reset_cpu_mask = 0;
557 	set_cpus_allowed_ptr(p, resource->cpu_mask);
558 }
559 #else
560 #define drbd_calc_cpu_mask(A) ({})
561 #endif
562 
563 /**
564  * drbd_header_size  -  size of a packet header
565  *
566  * The header size is a multiple of 8, so any payload following the header is
567  * word aligned on 64-bit architectures.  (The bitmap send and receive code
568  * relies on this.)
569  */
570 unsigned int drbd_header_size(struct drbd_connection *connection)
571 {
572 	if (connection->agreed_pro_version >= 100) {
573 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
574 		return sizeof(struct p_header100);
575 	} else {
576 		BUILD_BUG_ON(sizeof(struct p_header80) !=
577 			     sizeof(struct p_header95));
578 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
579 		return sizeof(struct p_header80);
580 	}
581 }
582 
583 static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
584 {
585 	h->magic   = cpu_to_be32(DRBD_MAGIC);
586 	h->command = cpu_to_be16(cmd);
587 	h->length  = cpu_to_be16(size);
588 	return sizeof(struct p_header80);
589 }
590 
591 static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
592 {
593 	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
594 	h->command = cpu_to_be16(cmd);
595 	h->length = cpu_to_be32(size);
596 	return sizeof(struct p_header95);
597 }
598 
599 static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
600 				      int size, int vnr)
601 {
602 	h->magic = cpu_to_be32(DRBD_MAGIC_100);
603 	h->volume = cpu_to_be16(vnr);
604 	h->command = cpu_to_be16(cmd);
605 	h->length = cpu_to_be32(size);
606 	h->pad = 0;
607 	return sizeof(struct p_header100);
608 }
609 
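/* Pick the on-the-wire header format: protocol >= 100 always uses the h100
 * header (which carries the volume number); protocols 95..99 use the "big"
 * h95 header (32-bit length) only for payloads larger than
 * DRBD_MAX_SIZE_H80_PACKET; everything else uses the original h80 header
 * (16-bit length). */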
610 static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
611 				   void *buffer, enum drbd_packet cmd, int size)
612 {
613 	if (connection->agreed_pro_version >= 100)
614 		return prepare_header100(buffer, cmd, size, vnr);
615 	else if (connection->agreed_pro_version >= 95 &&
616 		 size > DRBD_MAX_SIZE_H80_PACKET)
617 		return prepare_header95(buffer, cmd, size);
618 	else
619 		return prepare_header80(buffer, cmd, size);
620 }
621 
622 static void *__conn_prepare_command(struct drbd_connection *connection,
623 				    struct drbd_socket *sock)
624 {
625 	if (!sock->socket)
626 		return NULL;
627 	return sock->sbuf + drbd_header_size(connection);
628 }
629 
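/* Returns a pointer into the send buffer just past the header, with
 * sock->mutex held; the mutex is released again by conn_send_command() or
 * drbd_send_command().  A NULL return means the socket is gone and the
 * mutex has already been dropped. */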
630 void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
631 {
632 	void *p;
633 
634 	mutex_lock(&sock->mutex);
635 	p = __conn_prepare_command(connection, sock);
636 	if (!p)
637 		mutex_unlock(&sock->mutex);
638 
639 	return p;
640 }
641 
642 void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
643 {
644 	return conn_prepare_command(peer_device->connection, sock);
645 }
646 
647 static int __send_command(struct drbd_connection *connection, int vnr,
648 			  struct drbd_socket *sock, enum drbd_packet cmd,
649 			  unsigned int header_size, void *data,
650 			  unsigned int size)
651 {
652 	int msg_flags;
653 	int err;
654 
655 	/*
656 	 * Called with @data == NULL and the size of the data blocks in @size
657 	 * for commands that send data blocks.  For those commands, omit the
658 	 * MSG_MORE flag: this will increase the likelihood that data blocks
659 	 * which are page aligned on the sender will end up page aligned on the
660 	 * receiver.
661 	 */
662 	msg_flags = data ? MSG_MORE : 0;
663 
664 	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
665 				      header_size + size);
666 	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
667 			    msg_flags);
668 	if (data && !err)
669 		err = drbd_send_all(connection, sock->socket, data, size, 0);
670 	/* DRBD protocol "pings" are latency critical.
671 	 * This is supposed to trigger tcp_push_pending_frames() */
672 	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
673 		drbd_tcp_nodelay(sock->socket);
674 
675 	return err;
676 }
677 
678 static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
679 			       enum drbd_packet cmd, unsigned int header_size,
680 			       void *data, unsigned int size)
681 {
682 	return __send_command(connection, 0, sock, cmd, header_size, data, size);
683 }
684 
685 int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
686 		      enum drbd_packet cmd, unsigned int header_size,
687 		      void *data, unsigned int size)
688 {
689 	int err;
690 
691 	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
692 	mutex_unlock(&sock->mutex);
693 	return err;
694 }
695 
696 int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
697 		      enum drbd_packet cmd, unsigned int header_size,
698 		      void *data, unsigned int size)
699 {
700 	int err;
701 
702 	err = __send_command(peer_device->connection, peer_device->device->vnr,
703 			     sock, cmd, header_size, data, size);
704 	mutex_unlock(&sock->mutex);
705 	return err;
706 }
707 
708 int drbd_send_ping(struct drbd_connection *connection)
709 {
710 	struct drbd_socket *sock;
711 
712 	sock = &connection->meta;
713 	if (!conn_prepare_command(connection, sock))
714 		return -EIO;
715 	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
716 }
717 
718 int drbd_send_ping_ack(struct drbd_connection *connection)
719 {
720 	struct drbd_socket *sock;
721 
722 	sock = &connection->meta;
723 	if (!conn_prepare_command(connection, sock))
724 		return -EIO;
725 	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
726 }
727 
728 int drbd_send_sync_param(struct drbd_peer_device *peer_device)
729 {
730 	struct drbd_socket *sock;
731 	struct p_rs_param_95 *p;
732 	int size;
733 	const int apv = peer_device->connection->agreed_pro_version;
734 	enum drbd_packet cmd;
735 	struct net_conf *nc;
736 	struct disk_conf *dc;
737 
738 	sock = &peer_device->connection->data;
739 	p = drbd_prepare_command(peer_device, sock);
740 	if (!p)
741 		return -EIO;
742 
743 	rcu_read_lock();
744 	nc = rcu_dereference(peer_device->connection->net_conf);
745 
746 	size = apv <= 87 ? sizeof(struct p_rs_param)
747 		: apv == 88 ? sizeof(struct p_rs_param)
748 			+ strlen(nc->verify_alg) + 1
749 		: apv <= 94 ? sizeof(struct p_rs_param_89)
750 		: /* apv >= 95 */ sizeof(struct p_rs_param_95);
751 
752 	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
753 
754 	/* initialize verify_alg and csums_alg */
755 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
756 
757 	if (get_ldev(peer_device->device)) {
758 		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
759 		p->resync_rate = cpu_to_be32(dc->resync_rate);
760 		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
761 		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
762 		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
763 		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
764 		put_ldev(peer_device->device);
765 	} else {
766 		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
767 		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
768 		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
769 		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
770 		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
771 	}
772 
773 	if (apv >= 88)
774 		strcpy(p->verify_alg, nc->verify_alg);
775 	if (apv >= 89)
776 		strcpy(p->csums_alg, nc->csums_alg);
777 	rcu_read_unlock();
778 
779 	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
780 }
781 
782 int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
783 {
784 	struct drbd_socket *sock;
785 	struct p_protocol *p;
786 	struct net_conf *nc;
787 	int size, cf;
788 
789 	sock = &connection->data;
790 	p = __conn_prepare_command(connection, sock);
791 	if (!p)
792 		return -EIO;
793 
794 	rcu_read_lock();
795 	nc = rcu_dereference(connection->net_conf);
796 
797 	if (nc->tentative && connection->agreed_pro_version < 92) {
798 		rcu_read_unlock();
799 		mutex_unlock(&sock->mutex);
800 		drbd_err(connection, "--dry-run is not supported by peer");
801 		return -EOPNOTSUPP;
802 	}
803 
804 	size = sizeof(*p);
805 	if (connection->agreed_pro_version >= 87)
806 		size += strlen(nc->integrity_alg) + 1;
807 
808 	p->protocol      = cpu_to_be32(nc->wire_protocol);
809 	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
810 	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
811 	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
812 	p->two_primaries = cpu_to_be32(nc->two_primaries);
813 	cf = 0;
814 	if (nc->discard_my_data)
815 		cf |= CF_DISCARD_MY_DATA;
816 	if (nc->tentative)
817 		cf |= CF_DRY_RUN;
818 	p->conn_flags    = cpu_to_be32(cf);
819 
820 	if (connection->agreed_pro_version >= 87)
821 		strcpy(p->integrity_alg, nc->integrity_alg);
822 	rcu_read_unlock();
823 
824 	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
825 }
826 
827 int drbd_send_protocol(struct drbd_connection *connection)
828 {
829 	int err;
830 
831 	mutex_lock(&connection->data.mutex);
832 	err = __drbd_send_protocol(connection, P_PROTOCOL);
833 	mutex_unlock(&connection->data.mutex);
834 
835 	return err;
836 }
837 
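/* uuid_flags sent in p->uuid[UI_FLAGS]: bit 0 = our net_conf->discard_my_data
 * is set, bit 1 = we were a crashed primary, bit 2 = the disk state we are
 * changing to (new_state_tmp) is D_INCONSISTENT, bit 3 = skip initial sync
 * (see drbd_send_uuids_skip_initial_sync()). */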
838 static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
839 {
840 	struct drbd_device *device = peer_device->device;
841 	struct drbd_socket *sock;
842 	struct p_uuids *p;
843 	int i;
844 
845 	if (!get_ldev_if_state(device, D_NEGOTIATING))
846 		return 0;
847 
848 	sock = &peer_device->connection->data;
849 	p = drbd_prepare_command(peer_device, sock);
850 	if (!p) {
851 		put_ldev(device);
852 		return -EIO;
853 	}
854 	spin_lock_irq(&device->ldev->md.uuid_lock);
855 	for (i = UI_CURRENT; i < UI_SIZE; i++)
856 		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
857 	spin_unlock_irq(&device->ldev->md.uuid_lock);
858 
859 	device->comm_bm_set = drbd_bm_total_weight(device);
860 	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
861 	rcu_read_lock();
862 	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
863 	rcu_read_unlock();
864 	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
865 	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
866 	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
867 
868 	put_ldev(device);
869 	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
870 }
871 
872 int drbd_send_uuids(struct drbd_peer_device *peer_device)
873 {
874 	return _drbd_send_uuids(peer_device, 0);
875 }
876 
877 int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
878 {
879 	return _drbd_send_uuids(peer_device, 8);
880 }
881 
882 void drbd_print_uuids(struct drbd_device *device, const char *text)
883 {
884 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
885 		u64 *uuid = device->ldev->md.uuid;
886 		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
887 		     text,
888 		     (unsigned long long)uuid[UI_CURRENT],
889 		     (unsigned long long)uuid[UI_BITMAP],
890 		     (unsigned long long)uuid[UI_HISTORY_START],
891 		     (unsigned long long)uuid[UI_HISTORY_END]);
892 		put_ldev(device);
893 	} else {
894 		drbd_info(device, "%s effective data uuid: %016llX\n",
895 				text,
896 				(unsigned long long)device->ed_uuid);
897 	}
898 }
899 
900 void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
901 {
902 	struct drbd_device *device = peer_device->device;
903 	struct drbd_socket *sock;
904 	struct p_rs_uuid *p;
905 	u64 uuid;
906 
907 	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
908 
909 	uuid = device->ldev->md.uuid[UI_BITMAP];
910 	if (uuid && uuid != UUID_JUST_CREATED)
911 		uuid = uuid + UUID_NEW_BM_OFFSET;
912 	else
913 		get_random_bytes(&uuid, sizeof(u64));
914 	drbd_uuid_set(device, UI_BITMAP, uuid);
915 	drbd_print_uuids(device, "updated sync UUID");
916 	drbd_md_sync(device);
917 
918 	sock = &peer_device->connection->data;
919 	p = drbd_prepare_command(peer_device, sock);
920 	if (p) {
921 		p->uuid = cpu_to_be64(uuid);
922 		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
923 	}
924 }
925 
926 /* communicated if (agreed_features & DRBD_FF_WSAME) */
927 static void
928 assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
929 					struct request_queue *q)
930 {
931 	if (q) {
932 		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
933 		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
934 		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
935 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
936 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
937 		p->qlim->discard_enabled = blk_queue_discard(q);
938 		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
939 	} else {
940 		q = device->rq_queue;
941 		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
942 		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
943 		p->qlim->alignment_offset = 0;
944 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
945 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
946 		p->qlim->discard_enabled = 0;
947 		p->qlim->write_same_capable = 0;
948 	}
949 }
950 
951 int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
952 {
953 	struct drbd_device *device = peer_device->device;
954 	struct drbd_socket *sock;
955 	struct p_sizes *p;
956 	sector_t d_size, u_size;
957 	int q_order_type;
958 	unsigned int max_bio_size;
959 	unsigned int packet_size;
960 
961 	sock = &peer_device->connection->data;
962 	p = drbd_prepare_command(peer_device, sock);
963 	if (!p)
964 		return -EIO;
965 
966 	packet_size = sizeof(*p);
967 	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
968 		packet_size += sizeof(p->qlim[0]);
969 
970 	memset(p, 0, packet_size);
971 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
972 		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
973 		d_size = drbd_get_max_capacity(device->ldev);
974 		rcu_read_lock();
975 		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
976 		rcu_read_unlock();
977 		q_order_type = drbd_queue_order_type(device);
978 		max_bio_size = queue_max_hw_sectors(q) << 9;
979 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
980 		assign_p_sizes_qlim(device, p, q);
981 		put_ldev(device);
982 	} else {
983 		d_size = 0;
984 		u_size = 0;
985 		q_order_type = QUEUE_ORDERED_NONE;
986 		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
987 		assign_p_sizes_qlim(device, p, NULL);
988 	}
989 
990 	if (peer_device->connection->agreed_pro_version <= 94)
991 		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
992 	else if (peer_device->connection->agreed_pro_version < 100)
993 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
994 
995 	p->d_size = cpu_to_be64(d_size);
996 	p->u_size = cpu_to_be64(u_size);
997 	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
998 	p->max_bio_size = cpu_to_be32(max_bio_size);
999 	p->queue_order_type = cpu_to_be16(q_order_type);
1000 	p->dds_flags = cpu_to_be16(flags);
1001 
1002 	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
1003 }
1004 
1005 /**
1006  * drbd_send_current_state() - Sends the drbd state to the peer
1007  * @peer_device:	DRBD peer device.
1008  */
1009 int drbd_send_current_state(struct drbd_peer_device *peer_device)
1010 {
1011 	struct drbd_socket *sock;
1012 	struct p_state *p;
1013 
1014 	sock = &peer_device->connection->data;
1015 	p = drbd_prepare_command(peer_device, sock);
1016 	if (!p)
1017 		return -EIO;
1018 	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
1019 	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1020 }
1021 
1022 /**
1023  * drbd_send_state() - After a state change, sends the new state to the peer
1024  * @peer_device:      DRBD peer device.
1025  * @state:     the state to send, not necessarily the current state.
1026  *
1027  * Each state change queues an "after_state_ch" work, which will eventually
1028  * send the resulting new state to the peer. If more state changes happen
1029  * between queuing and processing of the after_state_ch work, we still
1030  * want to send each intermediary state in the order it occurred.
1031  */
1032 int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
1033 {
1034 	struct drbd_socket *sock;
1035 	struct p_state *p;
1036 
1037 	sock = &peer_device->connection->data;
1038 	p = drbd_prepare_command(peer_device, sock);
1039 	if (!p)
1040 		return -EIO;
1041 	p->state = cpu_to_be32(state.i); /* Within the send mutex */
1042 	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1043 }
1044 
1045 int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
1046 {
1047 	struct drbd_socket *sock;
1048 	struct p_req_state *p;
1049 
1050 	sock = &peer_device->connection->data;
1051 	p = drbd_prepare_command(peer_device, sock);
1052 	if (!p)
1053 		return -EIO;
1054 	p->mask = cpu_to_be32(mask.i);
1055 	p->val = cpu_to_be32(val.i);
1056 	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1057 }
1058 
1059 int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1060 {
1061 	enum drbd_packet cmd;
1062 	struct drbd_socket *sock;
1063 	struct p_req_state *p;
1064 
1065 	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1066 	sock = &connection->data;
1067 	p = conn_prepare_command(connection, sock);
1068 	if (!p)
1069 		return -EIO;
1070 	p->mask = cpu_to_be32(mask.i);
1071 	p->val = cpu_to_be32(val.i);
1072 	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1073 }
1074 
1075 void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
1076 {
1077 	struct drbd_socket *sock;
1078 	struct p_req_state_reply *p;
1079 
1080 	sock = &peer_device->connection->meta;
1081 	p = drbd_prepare_command(peer_device, sock);
1082 	if (p) {
1083 		p->retcode = cpu_to_be32(retcode);
1084 		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1085 	}
1086 }
1087 
1088 void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
1089 {
1090 	struct drbd_socket *sock;
1091 	struct p_req_state_reply *p;
1092 	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1093 
1094 	sock = &connection->meta;
1095 	p = conn_prepare_command(connection, sock);
1096 	if (p) {
1097 		p->retcode = cpu_to_be32(retcode);
1098 		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1099 	}
1100 }
1101 
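/* Layout of the p_compressed_bm encoding byte: bits 0-3 hold the bitmap
 * encoding (enum drbd_bitmap_code), bits 4-6 the number of pad bits at the
 * end of the bit stream, and bit 7 whether the first run describes set bits. */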
1102 static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1103 {
1104 	BUG_ON(code & ~0xf);
1105 	p->encoding = (p->encoding & ~0xf) | code;
1106 }
1107 
1108 static void dcbp_set_start(struct p_compressed_bm *p, int set)
1109 {
1110 	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1111 }
1112 
1113 static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1114 {
1115 	BUG_ON(n & ~0x7);
1116 	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1117 }
1118 
1119 static int fill_bitmap_rle_bits(struct drbd_device *device,
1120 			 struct p_compressed_bm *p,
1121 			 unsigned int size,
1122 			 struct bm_xfer_ctx *c)
1123 {
1124 	struct bitstream bs;
1125 	unsigned long plain_bits;
1126 	unsigned long tmp;
1127 	unsigned long rl;
1128 	unsigned len;
1129 	unsigned toggle;
1130 	int bits, use_rle;
1131 
1132 	/* may we use this feature? */
1133 	rcu_read_lock();
1134 	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1135 	rcu_read_unlock();
1136 	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1137 		return 0;
1138 
1139 	if (c->bit_offset >= c->bm_bits)
1140 		return 0; /* nothing to do. */
1141 
1142 	/* use at most this many bytes */
1143 	bitstream_init(&bs, p->code, size, 0);
1144 	memset(p->code, 0, size);
1145 	/* plain bits covered in this code string */
1146 	plain_bits = 0;
1147 
1148 	/* p->encoding & 0x80 stores whether the first run length is set.
1149 	 * bit offset is implicit.
1150 	 * start with toggle == 2 to be able to tell the first iteration */
1151 	toggle = 2;
1152 
1153 	/* see how many plain bits we can stuff into one packet
1154 	 * using RLE and VLI. */
1155 	do {
1156 		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1157 				    : _drbd_bm_find_next(device, c->bit_offset);
1158 		if (tmp == -1UL)
1159 			tmp = c->bm_bits;
1160 		rl = tmp - c->bit_offset;
1161 
1162 		if (toggle == 2) { /* first iteration */
1163 			if (rl == 0) {
1164 				/* the first checked bit was set,
1165 				 * store start value, */
1166 				dcbp_set_start(p, 1);
1167 				/* but skip encoding of zero run length */
1168 				toggle = !toggle;
1169 				continue;
1170 			}
1171 			dcbp_set_start(p, 0);
1172 		}
1173 
1174 		/* paranoia: catch zero runlength.
1175 		 * can only happen if bitmap is modified while we scan it. */
1176 		if (rl == 0) {
1177 			drbd_err(device, "unexpected zero runlength while encoding bitmap "
1178 			    "t:%u bo:%lu\n", toggle, c->bit_offset);
1179 			return -1;
1180 		}
1181 
1182 		bits = vli_encode_bits(&bs, rl);
1183 		if (bits == -ENOBUFS) /* buffer full */
1184 			break;
1185 		if (bits <= 0) {
1186 			drbd_err(device, "error while encoding bitmap: %d\n", bits);
1187 			return 0;
1188 		}
1189 
1190 		toggle = !toggle;
1191 		plain_bits += rl;
1192 		c->bit_offset = tmp;
1193 	} while (c->bit_offset < c->bm_bits);
1194 
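	/* bytes used by the code string: full bytes written plus one for a
	 * partially filled trailing byte, if any */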
1195 	len = bs.cur.b - p->code + !!bs.cur.bit;
1196 
1197 	if (plain_bits < (len << 3)) {
1198 		/* incompressible with this method.
1199 		 * we need to rewind both word and bit position. */
1200 		c->bit_offset -= plain_bits;
1201 		bm_xfer_ctx_bit_to_word_offset(c);
1202 		c->bit_offset = c->word_offset * BITS_PER_LONG;
1203 		return 0;
1204 	}
1205 
1206 	/* RLE + VLI was able to compress it just fine.
1207 	 * update c->word_offset. */
1208 	bm_xfer_ctx_bit_to_word_offset(c);
1209 
1210 	/* store pad_bits */
1211 	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1212 
1213 	return len;
1214 }
1215 
1216 /**
1217  * send_bitmap_rle_or_plain
1218  *
1219  * Return 0 when done, 1 when another iteration is needed, and a negative error
1220  * code upon failure.
1221  */
1222 static int
1223 send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1224 {
1225 	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1226 	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
1227 	struct p_compressed_bm *p = sock->sbuf + header_size;
1228 	int len, err;
1229 
1230 	len = fill_bitmap_rle_bits(device, p,
1231 			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
1232 	if (len < 0)
1233 		return -EIO;
1234 
1235 	if (len) {
1236 		dcbp_set_code(p, RLE_VLI_Bits);
1237 		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
1238 				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
1239 				     NULL, 0);
1240 		c->packets[0]++;
1241 		c->bytes[0] += header_size + sizeof(*p) + len;
1242 
1243 		if (c->bit_offset >= c->bm_bits)
1244 			len = 0; /* DONE */
1245 	} else {
1246 		/* was not compressible.
1247 		 * send a buffer full of plain text bits instead. */
1248 		unsigned int data_size;
1249 		unsigned long num_words;
1250 		unsigned long *p = sock->sbuf + header_size;
1251 
1252 		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1253 		num_words = min_t(size_t, data_size / sizeof(*p),
1254 				  c->bm_words - c->word_offset);
1255 		len = num_words * sizeof(*p);
1256 		if (len)
1257 			drbd_bm_get_lel(device, c->word_offset, num_words, p);
1258 		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1259 		c->word_offset += num_words;
1260 		c->bit_offset = c->word_offset * BITS_PER_LONG;
1261 
1262 		c->packets[1]++;
1263 		c->bytes[1] += header_size + len;
1264 
1265 		if (c->bit_offset > c->bm_bits)
1266 			c->bit_offset = c->bm_bits;
1267 	}
1268 	if (!err) {
1269 		if (len == 0) {
1270 			INFO_bm_xfer_stats(device, "send", c);
1271 			return 0;
1272 		} else
1273 			return 1;
1274 	}
1275 	return -EIO;
1276 }
1277 
1278 /* See the comment at receive_bitmap() */
1279 static int _drbd_send_bitmap(struct drbd_device *device)
1280 {
1281 	struct bm_xfer_ctx c;
1282 	int err;
1283 
1284 	if (!expect(device->bitmap))
1285 		return false;
1286 
1287 	if (get_ldev(device)) {
1288 		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1289 			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1290 			drbd_bm_set_all(device);
1291 			if (drbd_bm_write(device)) {
1292 				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
1293 				 * but otherwise process as per normal - need to tell other
1294 				 * side that a full resync is required! */
1295 				drbd_err(device, "Failed to write bitmap to disk!\n");
1296 			} else {
1297 				drbd_md_clear_flag(device, MDF_FULL_SYNC);
1298 				drbd_md_sync(device);
1299 			}
1300 		}
1301 		put_ldev(device);
1302 	}
1303 
1304 	c = (struct bm_xfer_ctx) {
1305 		.bm_bits = drbd_bm_bits(device),
1306 		.bm_words = drbd_bm_words(device),
1307 	};
1308 
1309 	do {
1310 		err = send_bitmap_rle_or_plain(device, &c);
1311 	} while (err > 0);
1312 
1313 	return err == 0;
1314 }
1315 
1316 int drbd_send_bitmap(struct drbd_device *device)
1317 {
1318 	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1319 	int err = -1;
1320 
1321 	mutex_lock(&sock->mutex);
1322 	if (sock->socket)
1323 		err = !_drbd_send_bitmap(device);
1324 	mutex_unlock(&sock->mutex);
1325 	return err;
1326 }
1327 
1328 void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
1329 {
1330 	struct drbd_socket *sock;
1331 	struct p_barrier_ack *p;
1332 
1333 	if (connection->cstate < C_WF_REPORT_PARAMS)
1334 		return;
1335 
1336 	sock = &connection->meta;
1337 	p = conn_prepare_command(connection, sock);
1338 	if (!p)
1339 		return;
1340 	p->barrier = barrier_nr;
1341 	p->set_size = cpu_to_be32(set_size);
1342 	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1343 }
1344 
1345 /**
1346  * _drbd_send_ack() - Sends an ack packet
1347  * @peer_device:	DRBD peer device.
1348  * @cmd:	Packet command code.
1349  * @sector:	sector, needs to be in big endian byte order
1350  * @blksize:	size in byte, needs to be in big endian byte order
1351  * @block_id:	Id, big endian byte order
1352  */
1353 static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1354 			  u64 sector, u32 blksize, u64 block_id)
1355 {
1356 	struct drbd_socket *sock;
1357 	struct p_block_ack *p;
1358 
1359 	if (peer_device->device->state.conn < C_CONNECTED)
1360 		return -EIO;
1361 
1362 	sock = &peer_device->connection->meta;
1363 	p = drbd_prepare_command(peer_device, sock);
1364 	if (!p)
1365 		return -EIO;
1366 	p->sector = sector;
1367 	p->block_id = block_id;
1368 	p->blksize = blksize;
1369 	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1370 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1371 }
1372 
1373 /* dp->sector and dp->block_id already/still in network byte order,
1374  * data_size is payload size according to dp->head,
1375  * and may need to be corrected for digest size. */
1376 void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1377 		      struct p_data *dp, int data_size)
1378 {
1379 	if (peer_device->connection->peer_integrity_tfm)
1380 		data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1381 	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
1382 		       dp->block_id);
1383 }
1384 
1385 void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1386 		      struct p_block_req *rp)
1387 {
1388 	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
1389 }
1390 
1391 /**
1392  * drbd_send_ack() - Sends an ack packet
1393  * @peer_device:	DRBD peer device
1394  * @cmd:	packet command code
1395  * @peer_req:	peer request
1396  */
1397 int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1398 		  struct drbd_peer_request *peer_req)
1399 {
1400 	return _drbd_send_ack(peer_device, cmd,
1401 			      cpu_to_be64(peer_req->i.sector),
1402 			      cpu_to_be32(peer_req->i.size),
1403 			      peer_req->block_id);
1404 }
1405 
1406 /* This function misuses the block_id field to signal if the blocks
1407  * are in sync or not. */
1408 int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1409 		     sector_t sector, int blksize, u64 block_id)
1410 {
1411 	return _drbd_send_ack(peer_device, cmd,
1412 			      cpu_to_be64(sector),
1413 			      cpu_to_be32(blksize),
1414 			      cpu_to_be64(block_id));
1415 }
1416 
1417 int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
1418 			     struct drbd_peer_request *peer_req)
1419 {
1420 	struct drbd_socket *sock;
1421 	struct p_block_desc *p;
1422 
1423 	sock = &peer_device->connection->data;
1424 	p = drbd_prepare_command(peer_device, sock);
1425 	if (!p)
1426 		return -EIO;
1427 	p->sector = cpu_to_be64(peer_req->i.sector);
1428 	p->blksize = cpu_to_be32(peer_req->i.size);
1429 	p->pad = 0;
1430 	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
1431 }
1432 
1433 int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
1434 		       sector_t sector, int size, u64 block_id)
1435 {
1436 	struct drbd_socket *sock;
1437 	struct p_block_req *p;
1438 
1439 	sock = &peer_device->connection->data;
1440 	p = drbd_prepare_command(peer_device, sock);
1441 	if (!p)
1442 		return -EIO;
1443 	p->sector = cpu_to_be64(sector);
1444 	p->block_id = block_id;
1445 	p->blksize = cpu_to_be32(size);
1446 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1447 }
1448 
1449 int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
1450 			    void *digest, int digest_size, enum drbd_packet cmd)
1451 {
1452 	struct drbd_socket *sock;
1453 	struct p_block_req *p;
1454 
1455 	/* FIXME: Put the digest into the preallocated socket buffer.  */
1456 
1457 	sock = &peer_device->connection->data;
1458 	p = drbd_prepare_command(peer_device, sock);
1459 	if (!p)
1460 		return -EIO;
1461 	p->sector = cpu_to_be64(sector);
1462 	p->block_id = ID_SYNCER /* unused */;
1463 	p->blksize = cpu_to_be32(size);
1464 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
1465 }
1466 
1467 int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
1468 {
1469 	struct drbd_socket *sock;
1470 	struct p_block_req *p;
1471 
1472 	sock = &peer_device->connection->data;
1473 	p = drbd_prepare_command(peer_device, sock);
1474 	if (!p)
1475 		return -EIO;
1476 	p->sector = cpu_to_be64(sector);
1477 	p->block_id = ID_SYNCER /* unused */;
1478 	p->blksize = cpu_to_be32(size);
1479 	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1480 }
1481 
1482 /* called on sndtimeo
1483  * returns false if we should retry,
1484  * true if we think the connection is dead
1485  */
1486 static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1487 {
1488 	int drop_it;
1489 	/* long elapsed = (long)(jiffies - device->last_received); */
1490 
1491 	drop_it =   connection->meta.socket == sock
1492 		|| !connection->ack_receiver.task
1493 		|| get_t_state(&connection->ack_receiver) != RUNNING
1494 		|| connection->cstate < C_WF_REPORT_PARAMS;
1495 
1496 	if (drop_it)
1497 		return true;
1498 
1499 	drop_it = !--connection->ko_count;
1500 	if (!drop_it) {
1501 		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1502 			 current->comm, current->pid, connection->ko_count);
1503 		request_ping(connection);
1504 	}
1505 
1506 	return drop_it; /* && (device->state == R_PRIMARY) */;
1507 }
1508 
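/* Mark the connection as congested once more than 4/5 of the socket send
 * buffer is queued; NET_CONGESTED is cleared again in _drbd_send_page(). */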
1509 static void drbd_update_congested(struct drbd_connection *connection)
1510 {
1511 	struct sock *sk = connection->data.socket->sk;
1512 	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1513 		set_bit(NET_CONGESTED, &connection->flags);
1514 }
1515 
1516 /* The idea of sendpage seems to be to put some kind of reference
1517  * to the page into the skb, and to hand it over to the NIC. In
1518  * this process get_page() gets called.
1519  *
1520  * As soon as the page was really sent over the network put_page()
1521  * gets called by some part of the network layer. [ NIC driver? ]
1522  *
1523  * [ get_page() / put_page() increment/decrement the count. If count
1524  *   reaches 0 the page will be freed. ]
1525  *
1526  * This works nicely with pages from FSs.
1527  * But this means that in protocol A we might signal IO completion too early!
1528  *
1529  * In order not to corrupt data during a resync we must make sure
1530  * that we do not reuse our own buffer pages (EEs) too early; therefore
1531  * we have the net_ee list.
1532  *
1533  * XFS seems to have problems, still, it submits pages with page_count == 0!
1534  * As a workaround, we disable sendpage on pages
1535  * with page_count == 0 or PageSlab.
1536  */
1537 static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
1538 			      int offset, size_t size, unsigned msg_flags)
1539 {
1540 	struct socket *socket;
1541 	void *addr;
1542 	int err;
1543 
1544 	socket = peer_device->connection->data.socket;
1545 	addr = kmap(page) + offset;
1546 	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
1547 	kunmap(page);
1548 	if (!err)
1549 		peer_device->device->send_cnt += size >> 9;
1550 	return err;
1551 }
1552 
1553 static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
1554 		    int offset, size_t size, unsigned msg_flags)
1555 {
1556 	struct socket *socket = peer_device->connection->data.socket;
1557 	int len = size;
1558 	int err = -EIO;
1559 
1560 	/* e.g. XFS meta- & log-data is in slab pages, which have a
1561 	 * page_count of 0 and/or have PageSlab() set.
1562 	 * we cannot use send_page for those, as that does get_page();
1563 	 * put_page(); and would cause either a VM_BUG directly, or
1564 	 * __page_cache_release a page that would actually still be referenced
1565 	 * by someone, leading to some obscure delayed Oops somewhere else. */
1566 	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1567 		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
1568 
1569 	msg_flags |= MSG_NOSIGNAL;
1570 	drbd_update_congested(peer_device->connection);
1571 	do {
1572 		int sent;
1573 
1574 		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1575 		if (sent <= 0) {
1576 			if (sent == -EAGAIN) {
1577 				if (we_should_drop_the_connection(peer_device->connection, socket))
1578 					break;
1579 				continue;
1580 			}
1581 			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1582 			     __func__, (int)size, len, sent);
1583 			if (sent < 0)
1584 				err = sent;
1585 			break;
1586 		}
1587 		len    -= sent;
1588 		offset += sent;
1589 	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1590 	clear_bit(NET_CONGESTED, &peer_device->connection->flags);
1591 
1592 	if (len == 0) {
1593 		err = 0;
1594 		peer_device->device->send_cnt += size >> 9;
1595 	}
1596 	return err;
1597 }
1598 
1599 static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1600 {
1601 	struct bio_vec bvec;
1602 	struct bvec_iter iter;
1603 
1604 	/* hint all but last page with MSG_MORE */
1605 	bio_for_each_segment(bvec, bio, iter) {
1606 		int err;
1607 
1608 		err = _drbd_no_send_page(peer_device, bvec.bv_page,
1609 					 bvec.bv_offset, bvec.bv_len,
1610 					 bio_iter_last(bvec, iter)
1611 					 ? 0 : MSG_MORE);
1612 		if (err)
1613 			return err;
1614 		/* REQ_OP_WRITE_SAME has only one segment */
1615 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
1616 			break;
1617 	}
1618 	return 0;
1619 }
1620 
1621 static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1622 {
1623 	struct bio_vec bvec;
1624 	struct bvec_iter iter;
1625 
1626 	/* hint all but last page with MSG_MORE */
1627 	bio_for_each_segment(bvec, bio, iter) {
1628 		int err;
1629 
1630 		err = _drbd_send_page(peer_device, bvec.bv_page,
1631 				      bvec.bv_offset, bvec.bv_len,
1632 				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1633 		if (err)
1634 			return err;
1635 		/* REQ_OP_WRITE_SAME has only one segment */
1636 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
1637 			break;
1638 	}
1639 	return 0;
1640 }
1641 
1642 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
1643 			    struct drbd_peer_request *peer_req)
1644 {
1645 	struct page *page = peer_req->pages;
1646 	unsigned len = peer_req->i.size;
1647 	int err;
1648 
1649 	/* hint all but last page with MSG_MORE */
1650 	page_chain_for_each(page) {
1651 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
1652 
1653 		err = _drbd_send_page(peer_device, page, 0, l,
1654 				      page_chain_next(page) ? MSG_MORE : 0);
1655 		if (err)
1656 			return err;
1657 		len -= l;
1658 	}
1659 	return 0;
1660 }
1661 
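/* Translate bio op/flags into the DP_* flags understood by the peer.
 * Peers speaking protocol < 95 only receive the REQ_SYNC hint. */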
1662 static u32 bio_flags_to_wire(struct drbd_connection *connection,
1663 			     struct bio *bio)
1664 {
1665 	if (connection->agreed_pro_version >= 95)
1666 		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1667 			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1668 			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
1669 			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
1670 			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
1671 			(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
1672 			  ((connection->agreed_features & DRBD_FF_WZEROES) ?
1673 			   (DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
1674 			   : DP_DISCARD)
1675 			: 0);
1676 	else
1677 		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
1678 }
1679 
1680 /* Used to send write or TRIM aka REQ_OP_DISCARD requests
1681  * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
1682  */
1683 int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
1684 {
1685 	struct drbd_device *device = peer_device->device;
1686 	struct drbd_socket *sock;
1687 	struct p_data *p;
1688 	struct p_wsame *wsame = NULL;
1689 	void *digest_out;
1690 	unsigned int dp_flags = 0;
1691 	int digest_size;
1692 	int err;
1693 
1694 	sock = &peer_device->connection->data;
1695 	p = drbd_prepare_command(peer_device, sock);
1696 	digest_size = peer_device->connection->integrity_tfm ?
1697 		      crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
1698 
1699 	if (!p)
1700 		return -EIO;
1701 	p->sector = cpu_to_be64(req->i.sector);
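	/* block_id carries the request pointer itself; the peer echoes it
	 * back in its acks, so the request can be looked up again */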
1702 	p->block_id = (unsigned long)req;
1703 	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1704 	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
1705 	if (device->state.conn >= C_SYNC_SOURCE &&
1706 	    device->state.conn <= C_PAUSED_SYNC_T)
1707 		dp_flags |= DP_MAY_SET_IN_SYNC;
1708 	if (peer_device->connection->agreed_pro_version >= 100) {
1709 		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1710 			dp_flags |= DP_SEND_RECEIVE_ACK;
1711 		/* During resync, request an explicit write ack,
1712 		 * even in protocol != C */
1713 		if (req->rq_state & RQ_EXP_WRITE_ACK
1714 		|| (dp_flags & DP_MAY_SET_IN_SYNC))
1715 			dp_flags |= DP_SEND_WRITE_ACK;
1716 	}
1717 	p->dp_flags = cpu_to_be32(dp_flags);
1718 
1719 	if (dp_flags & (DP_DISCARD|DP_ZEROES)) {
1720 		enum drbd_packet cmd = (dp_flags & DP_ZEROES) ? P_ZEROES : P_TRIM;
1721 		struct p_trim *t = (struct p_trim*)p;
1722 		t->size = cpu_to_be32(req->i.size);
1723 		err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
1724 		goto out;
1725 	}
1726 	if (dp_flags & DP_WSAME) {
1727 		/* this will only work if DRBD_FF_WSAME is set AND the
1728 		 * handshake agreed that all nodes and backend devices are
1729 		 * WRITE_SAME capable and agree on logical_block_size */
1730 		wsame = (struct p_wsame*)p;
1731 		digest_out = wsame + 1;
1732 		wsame->size = cpu_to_be32(req->i.size);
1733 	} else
1734 		digest_out = p + 1;
1735 
1736 	/* our digest is still only over the payload.
1737 	 * TRIM does not carry any payload. */
1738 	if (digest_size)
1739 		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
1740 	if (wsame) {
1741 		err =
1742 		    __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
1743 				   sizeof(*wsame) + digest_size, NULL,
1744 				   bio_iovec(req->master_bio).bv_len);
1745 	} else
1746 		err =
1747 		    __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1748 				   sizeof(*p) + digest_size, NULL, req->i.size);
1749 	if (!err) {
1750 		/* For protocol A, we have to memcpy the payload into
1751 		 * socket buffers, as the request may complete as soon as we have
1752 		 * handed it over to TCP, at which point the data pages may
1753 		 * become invalid.
1754 		 *
1755 		 * With data integrity enabled, we copy it as well, so that even
1756 		 * if the bio pages are still being modified, the data on the
1757 		 * wire does not change.  Thus, if the digest checks out OK after
1758 		 * sending on this side but does not match on the receiving side,
1759 		 * we have certainly detected corruption elsewhere.
1760 		 */
1761 		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
1762 			err = _drbd_send_bio(peer_device, req->master_bio);
1763 		else
1764 			err = _drbd_send_zc_bio(peer_device, req->master_bio);
1765 
1766 		/* double check digest, sometimes buffers have been modified in flight. */
1767 		if (digest_size > 0 && digest_size <= 64) {
1768 			/* 64 bytes (512 bits) is the largest digest size
1769 			 * currently supported by the kernel crypto API. */
1770 			unsigned char digest[64];
1771 			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
1772 			if (memcmp(p + 1, digest, digest_size)) {
1773 				drbd_warn(device,
1774 					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1775 					(unsigned long long)req->i.sector, req->i.size);
1776 			}
1777 		} /* else if (digest_size > 64) {
1778 		     ... Be noisy about digest too large ...
1779 		} */
1780 	}
1781 out:
1782 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1783 
1784 	return err;
1785 }
1786 
1787 /* answer packet, used to send data back for read requests:
1788  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
1789  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
1790  */
1791 int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1792 		    struct drbd_peer_request *peer_req)
1793 {
1794 	struct drbd_device *device = peer_device->device;
1795 	struct drbd_socket *sock;
1796 	struct p_data *p;
1797 	int err;
1798 	int digest_size;
1799 
1800 	sock = &peer_device->connection->data;
1801 	p = drbd_prepare_command(peer_device, sock);
1802 
1803 	digest_size = peer_device->connection->integrity_tfm ?
1804 		      crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
1805 
1806 	if (!p)
1807 		return -EIO;
1808 	p->sector = cpu_to_be64(peer_req->i.sector);
1809 	p->block_id = peer_req->block_id;
1810 	p->seq_num = 0;  /* unused */
1811 	p->dp_flags = 0;
1812 	if (digest_size)
1813 		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1814 	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1815 	if (!err)
1816 		err = _drbd_send_zc_ee(peer_device, peer_req);
1817 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1818 
1819 	return err;
1820 }
1821 
1822 int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
1823 {
1824 	struct drbd_socket *sock;
1825 	struct p_block_desc *p;
1826 
1827 	sock = &peer_device->connection->data;
1828 	p = drbd_prepare_command(peer_device, sock);
1829 	if (!p)
1830 		return -EIO;
1831 	p->sector = cpu_to_be64(req->i.sector);
1832 	p->blksize = cpu_to_be32(req->i.size);
1833 	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1834 }
1835 
1836 /*
1837   drbd_send distinguishes two cases:
1838 
1839   Packets sent via the data socket "sock"
1840   and packets sent via the meta data socket "msock"
1841 
1842 		    sock                      msock
1843   -----------------+-------------------------+------------------------------
1844   timeout           conf.timeout / 2          conf.timeout / 2
1845   timeout action    send a ping via msock     Abort communication
1846 					      and close all sockets
1847 */
1848 
1849 /*
1850  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1851  */
1852 int drbd_send(struct drbd_connection *connection, struct socket *sock,
1853 	      void *buf, size_t size, unsigned msg_flags)
1854 {
1855 	struct kvec iov = {.iov_base = buf, .iov_len = size};
1856 	struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
1857 	int rv, sent = 0;
1858 
1859 	if (!sock)
1860 		return -EBADR;
1861 
1862 	/* THINK  if (signal_pending) return ... ? */
1863 
1864 	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
1865 
1866 	if (sock == connection->data.socket) {
1867 		rcu_read_lock();
1868 		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1869 		rcu_read_unlock();
1870 		drbd_update_congested(connection);
1871 	}
1872 	do {
1873 		rv = sock_sendmsg(sock, &msg);
1874 		if (rv == -EAGAIN) {
1875 			if (we_should_drop_the_connection(connection, sock))
1876 				break;
1877 			else
1878 				continue;
1879 		}
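		/* -EINTR: flush the signal and retry; count it as zero bytes sent */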
1880 		if (rv == -EINTR) {
1881 			flush_signals(current);
1882 			rv = 0;
1883 		}
1884 		if (rv < 0)
1885 			break;
1886 		sent += rv;
1887 	} while (sent < size);
1888 
1889 	if (sock == connection->data.socket)
1890 		clear_bit(NET_CONGESTED, &connection->flags);
1891 
1892 	if (rv <= 0) {
1893 		if (rv != -EAGAIN) {
1894 			drbd_err(connection, "%s_sendmsg returned %d\n",
1895 				 sock == connection->meta.socket ? "msock" : "sock",
1896 				 rv);
1897 			conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1898 		} else
1899 			conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1900 	}
1901 
1902 	return sent;
1903 }
1904 
1905 /**
1906  * drbd_send_all  -  Send an entire buffer
1907  *
1908  * Returns 0 upon success and a negative error value otherwise.
1909  */
1910 int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1911 		  size_t size, unsigned msg_flags)
1912 {
1913 	int err;
1914 
1915 	err = drbd_send(connection, sock, buffer, size, msg_flags);
1916 	if (err < 0)
1917 		return err;
1918 	if (err != size)
1919 		return -EIO;
1920 	return 0;
1921 }
1922 
1923 static int drbd_open(struct block_device *bdev, fmode_t mode)
1924 {
1925 	struct drbd_device *device = bdev->bd_disk->private_data;
1926 	unsigned long flags;
1927 	int rv = 0;
1928 
1929 	mutex_lock(&drbd_main_mutex);
1930 	spin_lock_irqsave(&device->resource->req_lock, flags);
1931 	/* to have a stable device->state.role
1932 	 * and no race with updating open_cnt */
1933 
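	/* Only a Primary may be opened for writing; a read-only open on a
	 * Secondary is refused as well, unless drbd_allow_oos is set. */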
1934 	if (device->state.role != R_PRIMARY) {
1935 		if (mode & FMODE_WRITE)
1936 			rv = -EROFS;
1937 		else if (!drbd_allow_oos)
1938 			rv = -EMEDIUMTYPE;
1939 	}
1940 
1941 	if (!rv)
1942 		device->open_cnt++;
1943 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
1944 	mutex_unlock(&drbd_main_mutex);
1945 
1946 	return rv;
1947 }
1948 
1949 static void drbd_release(struct gendisk *gd, fmode_t mode)
1950 {
1951 	struct drbd_device *device = gd->private_data;
1952 	mutex_lock(&drbd_main_mutex);
1953 	device->open_cnt--;
1954 	mutex_unlock(&drbd_main_mutex);
1955 }
1956 
1957 /* need to hold resource->req_lock */
1958 void drbd_queue_unplug(struct drbd_device *device)
1959 {
1960 	if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
1961 		D_ASSERT(device, device->state.role == R_PRIMARY);
1962 		if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
1963 			drbd_queue_work_if_unqueued(
1964 				&first_peer_device(device)->connection->sender_work,
1965 				&device->unplug_work);
1966 		}
1967 	}
1968 }
1969 
1970 static void drbd_set_defaults(struct drbd_device *device)
1971 {
1972 	/* Beware! The actual layout differs
1973 	 * between big endian and little endian */
1974 	device->state = (union drbd_dev_state) {
1975 		{ .role = R_SECONDARY,
1976 		  .peer = R_UNKNOWN,
1977 		  .conn = C_STANDALONE,
1978 		  .disk = D_DISKLESS,
1979 		  .pdsk = D_UNKNOWN,
1980 		} };
1981 }
1982 
1983 void drbd_init_set_defaults(struct drbd_device *device)
1984 {
1985 	/* the memset(,0,) did most of this.
1986 	 * note: only assignments, no allocation in here */
1987 
1988 	drbd_set_defaults(device);
1989 
1990 	atomic_set(&device->ap_bio_cnt, 0);
1991 	atomic_set(&device->ap_actlog_cnt, 0);
1992 	atomic_set(&device->ap_pending_cnt, 0);
1993 	atomic_set(&device->rs_pending_cnt, 0);
1994 	atomic_set(&device->unacked_cnt, 0);
1995 	atomic_set(&device->local_cnt, 0);
1996 	atomic_set(&device->pp_in_use_by_net, 0);
1997 	atomic_set(&device->rs_sect_in, 0);
1998 	atomic_set(&device->rs_sect_ev, 0);
1999 	atomic_set(&device->ap_in_flight, 0);
2000 	atomic_set(&device->md_io.in_use, 0);
2001 
2002 	mutex_init(&device->own_state_mutex);
2003 	device->state_mutex = &device->own_state_mutex;
2004 
2005 	spin_lock_init(&device->al_lock);
2006 	spin_lock_init(&device->peer_seq_lock);
2007 
2008 	INIT_LIST_HEAD(&device->active_ee);
2009 	INIT_LIST_HEAD(&device->sync_ee);
2010 	INIT_LIST_HEAD(&device->done_ee);
2011 	INIT_LIST_HEAD(&device->read_ee);
2012 	INIT_LIST_HEAD(&device->net_ee);
2013 	INIT_LIST_HEAD(&device->resync_reads);
2014 	INIT_LIST_HEAD(&device->resync_work.list);
2015 	INIT_LIST_HEAD(&device->unplug_work.list);
2016 	INIT_LIST_HEAD(&device->bm_io_work.w.list);
2017 	INIT_LIST_HEAD(&device->pending_master_completion[0]);
2018 	INIT_LIST_HEAD(&device->pending_master_completion[1]);
2019 	INIT_LIST_HEAD(&device->pending_completion[0]);
2020 	INIT_LIST_HEAD(&device->pending_completion[1]);
2021 
2022 	device->resync_work.cb  = w_resync_timer;
2023 	device->unplug_work.cb  = w_send_write_hint;
2024 	device->bm_io_work.w.cb = w_bitmap_io;
2025 
2026 	timer_setup(&device->resync_timer, resync_timer_fn, 0);
2027 	timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
2028 	timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
2029 	timer_setup(&device->request_timer, request_timer_fn, 0);
2030 
2031 	init_waitqueue_head(&device->misc_wait);
2032 	init_waitqueue_head(&device->state_wait);
2033 	init_waitqueue_head(&device->ee_wait);
2034 	init_waitqueue_head(&device->al_wait);
2035 	init_waitqueue_head(&device->seq_wait);
2036 
2037 	device->resync_wenr = LC_FREE;
2038 	device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2039 	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2040 }
2041 
2042 static void _drbd_set_my_capacity(struct drbd_device *device, sector_t size)
2043 {
2044 	/* set_capacity(device->this_bdev->bd_disk, size); */
2045 	set_capacity(device->vdisk, size);
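	/* size is in 512-byte sectors; bd_inode->i_size is kept in bytes */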
2046 	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
2047 }
2048 
2049 void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
2050 {
2051 	char ppb[10];
2052 	_drbd_set_my_capacity(device, size);
2053 	drbd_info(device, "size = %s (%llu KB)\n",
2054 		ppsize(ppb, size>>1), (unsigned long long)size>>1);
2055 }
2056 
2057 void drbd_device_cleanup(struct drbd_device *device)
2058 {
2059 	int i;
2060 	if (first_peer_device(device)->connection->receiver.t_state != NONE)
2061 		drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2062 				first_peer_device(device)->connection->receiver.t_state);
2063 
2064 	device->al_writ_cnt  =
2065 	device->bm_writ_cnt  =
2066 	device->read_cnt     =
2067 	device->recv_cnt     =
2068 	device->send_cnt     =
2069 	device->writ_cnt     =
2070 	device->p_size       =
2071 	device->rs_start     =
2072 	device->rs_total     =
2073 	device->rs_failed    = 0;
2074 	device->rs_last_events = 0;
2075 	device->rs_last_sect_ev = 0;
2076 	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2077 		device->rs_mark_left[i] = 0;
2078 		device->rs_mark_time[i] = 0;
2079 	}
2080 	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2081 
2082 	_drbd_set_my_capacity(device, 0);
2083 	if (device->bitmap) {
2084 		/* maybe never allocated. */
2085 		drbd_bm_resize(device, 0, 1);
2086 		drbd_bm_cleanup(device);
2087 	}
2088 
2089 	drbd_backing_dev_free(device, device->ldev);
2090 	device->ldev = NULL;
2091 
2092 	clear_bit(AL_SUSPENDED, &device->flags);
2093 
2094 	D_ASSERT(device, list_empty(&device->active_ee));
2095 	D_ASSERT(device, list_empty(&device->sync_ee));
2096 	D_ASSERT(device, list_empty(&device->done_ee));
2097 	D_ASSERT(device, list_empty(&device->read_ee));
2098 	D_ASSERT(device, list_empty(&device->net_ee));
2099 	D_ASSERT(device, list_empty(&device->resync_reads));
2100 	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2101 	D_ASSERT(device, list_empty(&device->resync_work.list));
2102 	D_ASSERT(device, list_empty(&device->unplug_work.list));
2103 
2104 	drbd_set_defaults(device);
2105 }
2106 
2107 
2108 static void drbd_destroy_mempools(void)
2109 {
2110 	struct page *page;
2111 
2112 	while (drbd_pp_pool) {
2113 		page = drbd_pp_pool;
2114 		drbd_pp_pool = (struct page *)page_private(page);
2115 		__free_page(page);
2116 		drbd_pp_vacant--;
2117 	}
2118 
2119 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2120 
2121 	bioset_exit(&drbd_io_bio_set);
2122 	bioset_exit(&drbd_md_io_bio_set);
2123 	mempool_exit(&drbd_md_io_page_pool);
2124 	mempool_exit(&drbd_ee_mempool);
2125 	mempool_exit(&drbd_request_mempool);
2126 	kmem_cache_destroy(drbd_ee_cache);
2127 	kmem_cache_destroy(drbd_request_cache);
2128 	kmem_cache_destroy(drbd_bm_ext_cache);
2129 	kmem_cache_destroy(drbd_al_ext_cache);
2130 
2131 	drbd_ee_cache        = NULL;
2132 	drbd_request_cache   = NULL;
2133 	drbd_bm_ext_cache    = NULL;
2134 	drbd_al_ext_cache    = NULL;
2135 
2136 	return;
2137 }
2138 
2139 static int drbd_create_mempools(void)
2140 {
2141 	struct page *page;
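	/* enough pages for one maximum-sized bio per configured minor;
	 * the same count is reused to size the slab-backed mempools below */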
2142 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
2143 	int i, ret;
2144 
2145 	/* caches */
2146 	drbd_request_cache = kmem_cache_create(
2147 		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2148 	if (drbd_request_cache == NULL)
2149 		goto Enomem;
2150 
2151 	drbd_ee_cache = kmem_cache_create(
2152 		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2153 	if (drbd_ee_cache == NULL)
2154 		goto Enomem;
2155 
2156 	drbd_bm_ext_cache = kmem_cache_create(
2157 		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2158 	if (drbd_bm_ext_cache == NULL)
2159 		goto Enomem;
2160 
2161 	drbd_al_ext_cache = kmem_cache_create(
2162 		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2163 	if (drbd_al_ext_cache == NULL)
2164 		goto Enomem;
2165 
2166 	/* mempools */
2167 	ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
2168 	if (ret)
2169 		goto Enomem;
2170 
2171 	ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
2172 			  BIOSET_NEED_BVECS);
2173 	if (ret)
2174 		goto Enomem;
2175 
2176 	ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
2177 	if (ret)
2178 		goto Enomem;
2179 
2180 	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
2181 				     drbd_request_cache);
2182 	if (ret)
2183 		goto Enomem;
2184 
2185 	ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
2186 	if (ret)
2187 		goto Enomem;
2188 
2189 	/* drbd's page pool */
2190 	spin_lock_init(&drbd_pp_lock);
2191 
2192 	for (i = 0; i < number; i++) {
2193 		page = alloc_page(GFP_HIGHUSER);
2194 		if (!page)
2195 			goto Enomem;
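		/* chain the pages through their page_private field;
		 * the newest page becomes the new list head */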
2196 		set_page_private(page, (unsigned long)drbd_pp_pool);
2197 		drbd_pp_pool = page;
2198 	}
2199 	drbd_pp_vacant = number;
2200 
2201 	return 0;
2202 
2203 Enomem:
2204 	drbd_destroy_mempools(); /* in case we allocated some */
2205 	return -ENOMEM;
2206 }
2207 
2208 static void drbd_release_all_peer_reqs(struct drbd_device *device)
2209 {
2210 	int rr;
2211 
2212 	rr = drbd_free_peer_reqs(device, &device->active_ee);
2213 	if (rr)
2214 		drbd_err(device, "%d EEs in active list found!\n", rr);
2215 
2216 	rr = drbd_free_peer_reqs(device, &device->sync_ee);
2217 	if (rr)
2218 		drbd_err(device, "%d EEs in sync list found!\n", rr);
2219 
2220 	rr = drbd_free_peer_reqs(device, &device->read_ee);
2221 	if (rr)
2222 		drbd_err(device, "%d EEs in read list found!\n", rr);
2223 
2224 	rr = drbd_free_peer_reqs(device, &device->done_ee);
2225 	if (rr)
2226 		drbd_err(device, "%d EEs in done list found!\n", rr);
2227 
2228 	rr = drbd_free_peer_reqs(device, &device->net_ee);
2229 	if (rr)
2230 		drbd_err(device, "%d EEs in net list found!\n", rr);
2231 }
2232 
2233 /* caution. no locking. */
2234 void drbd_destroy_device(struct kref *kref)
2235 {
2236 	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2237 	struct drbd_resource *resource = device->resource;
2238 	struct drbd_peer_device *peer_device, *tmp_peer_device;
2239 
2240 	del_timer_sync(&device->request_timer);
2241 
2242 	/* paranoia asserts */
2243 	D_ASSERT(device, device->open_cnt == 0);
2244 	/* end paranoia asserts */
2245 
2246 	/* cleanup stuff that may have been allocated during
2247 	 * device (re-)configuration or state changes */
2248 
2249 	if (device->this_bdev)
2250 		bdput(device->this_bdev);
2251 
2252 	drbd_backing_dev_free(device, device->ldev);
2253 	device->ldev = NULL;
2254 
2255 	drbd_release_all_peer_reqs(device);
2256 
2257 	lc_destroy(device->act_log);
2258 	lc_destroy(device->resync);
2259 
2260 	kfree(device->p_uuid);
2261 	/* device->p_uuid = NULL; */
2262 
2263 	if (device->bitmap) /* should no longer be there. */
2264 		drbd_bm_cleanup(device);
2265 	__free_page(device->md_io.page);
2266 	put_disk(device->vdisk);
2267 	blk_cleanup_queue(device->rq_queue);
2268 	kfree(device->rs_plan_s);
2269 
2270 	/* not for_each_connection(connection, resource):
2271 	 * those may have been cleaned up and disassociated already.
2272 	 */
2273 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2274 		kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2275 		kfree(peer_device);
2276 	}
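	/* poison the about-to-be-freed struct to make use-after-free obvious */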
2277 	memset(device, 0xfd, sizeof(*device));
2278 	kfree(device);
2279 	kref_put(&resource->kref, drbd_destroy_resource);
2280 }
2281 
2282 /* One global retry thread, if we need to push back some bio and have it
2283  * reinserted through our make request function.
2284  */
2285 static struct retry_worker {
2286 	struct workqueue_struct *wq;
2287 	struct work_struct worker;
2288 
2289 	spinlock_t lock;
2290 	struct list_head writes;
2291 } retry;
2292 
2293 static void do_retry(struct work_struct *ws)
2294 {
2295 	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2296 	LIST_HEAD(writes);
2297 	struct drbd_request *req, *tmp;
2298 
2299 	spin_lock_irq(&retry->lock);
2300 	list_splice_init(&retry->writes, &writes);
2301 	spin_unlock_irq(&retry->lock);
2302 
2303 	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2304 		struct drbd_device *device = req->device;
2305 		struct bio *bio = req->master_bio;
2306 		unsigned long start_jif = req->start_jif;
2307 		bool expected;
2308 
2309 		expected =
2310 			expect(atomic_read(&req->completion_ref) == 0) &&
2311 			expect(req->rq_state & RQ_POSTPONED) &&
2312 			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2313 				(req->rq_state & RQ_LOCAL_ABORTED) != 0);
2314 
2315 		if (!expected)
2316 			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2317 				req, atomic_read(&req->completion_ref),
2318 				req->rq_state);
2319 
2320 		/* We still need to put one kref associated with the
2321 		 * "completion_ref" going zero in the code path that queued it
2322 		 * here.  The request object may still be referenced by a
2323 		 * frozen local req->private_bio, in case we force-detached.
2324 		 */
2325 		kref_put(&req->kref, drbd_req_destroy);
2326 
2327 		/* A single suspended or otherwise blocking device may stall
2328 		 * all others as well.  Fortunately, this code path is to
2329 		 * recover from a situation that "should not happen":
2330 		 * concurrent writes in multi-primary setup.
2331 		 * In a "normal" lifecycle, this workqueue is supposed to be
2332 		 * destroyed without ever doing anything.
2333 		 * If it turns out to be an issue anyways, we can do per
2334 		 * If it turns out to be an issue anyway, we can switch to
2335 		 * per-resource (replication group) or per-device (minor) retry
2336 		 * workqueues instead.
2337 
2338 		/* We are not just doing generic_make_request(),
2339 		 * as we want to keep the start_time information. */
2340 		inc_ap_bio(device);
2341 		__drbd_make_request(device, bio, start_jif);
2342 	}
2343 }
2344 
2345 /* called via drbd_req_put_completion_ref(),
2346  * holds resource->req_lock */
2347 void drbd_restart_request(struct drbd_request *req)
2348 {
2349 	unsigned long flags;
2350 	spin_lock_irqsave(&retry.lock, flags);
2351 	list_move_tail(&req->tl_requests, &retry.writes);
2352 	spin_unlock_irqrestore(&retry.lock, flags);
2353 
2354 	/* Drop the extra reference that would otherwise
2355 	 * have been dropped by complete_master_bio.
2356 	 * do_retry() needs to grab a new one. */
2357 	dec_ap_bio(req->device);
2358 
2359 	queue_work(retry.wq, &retry.worker);
2360 }
2361 
2362 void drbd_destroy_resource(struct kref *kref)
2363 {
2364 	struct drbd_resource *resource =
2365 		container_of(kref, struct drbd_resource, kref);
2366 
2367 	idr_destroy(&resource->devices);
2368 	free_cpumask_var(resource->cpu_mask);
2369 	kfree(resource->name);
2370 	memset(resource, 0xf2, sizeof(*resource));
2371 	kfree(resource);
2372 }
2373 
2374 void drbd_free_resource(struct drbd_resource *resource)
2375 {
2376 	struct drbd_connection *connection, *tmp;
2377 
2378 	for_each_connection_safe(connection, tmp, resource) {
2379 		list_del(&connection->connections);
2380 		drbd_debugfs_connection_cleanup(connection);
2381 		kref_put(&connection->kref, drbd_destroy_connection);
2382 	}
2383 	drbd_debugfs_resource_cleanup(resource);
2384 	kref_put(&resource->kref, drbd_destroy_resource);
2385 }
2386 
2387 static void drbd_cleanup(void)
2388 {
2389 	unsigned int i;
2390 	struct drbd_device *device;
2391 	struct drbd_resource *resource, *tmp;
2392 
2393 	/* first remove proc,
2394 	 * drbdsetup uses its presence to detect
2395 	 * whether DRBD is loaded.
2396 	 * If we got stuck in proc removal,
2397 	 * but had netlink already deregistered,
2398 	 * some drbdsetup commands may wait forever
2399 	 * for an answer.
2400 	 */
2401 	if (drbd_proc)
2402 		remove_proc_entry("drbd", NULL);
2403 
2404 	if (retry.wq)
2405 		destroy_workqueue(retry.wq);
2406 
2407 	drbd_genl_unregister();
2408 
2409 	idr_for_each_entry(&drbd_devices, device, i)
2410 		drbd_delete_device(device);
2411 
2412 	/* not _rcu, since there is no other updater anymore; genl is already unregistered */
2413 	for_each_resource_safe(resource, tmp, &drbd_resources) {
2414 		list_del(&resource->resources);
2415 		drbd_free_resource(resource);
2416 	}
2417 
2418 	drbd_debugfs_cleanup();
2419 
2420 	drbd_destroy_mempools();
2421 	unregister_blkdev(DRBD_MAJOR, "drbd");
2422 
2423 	idr_destroy(&drbd_devices);
2424 
2425 	pr_info("module cleanup done.\n");
2426 }
2427 
2428 /**
2429  * drbd_congested() - Callback for the flusher thread
2430  * @congested_data:	User data
2431  * @bdi_bits:		Bits the BDI flusher thread is currently interested in
2432  *
2433  * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2434  */
2435 static int drbd_congested(void *congested_data, int bdi_bits)
2436 {
2437 	struct drbd_device *device = congested_data;
2438 	struct request_queue *q;
2439 	char reason = '-';
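	/* reason codes set below: 'd' = IO frozen by DRBD, 'c' = usermode helper
	 * callback pending, 'b' = backing device congested, 'n' = network
	 * congested, 'a' = backing device and network congested */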
2440 	int r = 0;
2441 
2442 	if (!may_inc_ap_bio(device)) {
2443 		/* DRBD has frozen IO */
2444 		r = bdi_bits;
2445 		reason = 'd';
2446 		goto out;
2447 	}
2448 
2449 	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2450 		r |= (1 << WB_async_congested);
2451 		/* Without good local data, we would need to read from remote,
2452 		 * and that would need the worker thread as well, which is
2453 		 * currently blocked waiting for that usermode helper to
2454 		 * finish.
2455 		 */
2456 		if (!get_ldev_if_state(device, D_UP_TO_DATE))
2457 			r |= (1 << WB_sync_congested);
2458 		else
2459 			put_ldev(device);
2460 		r &= bdi_bits;
2461 		reason = 'c';
2462 		goto out;
2463 	}
2464 
2465 	if (get_ldev(device)) {
2466 		q = bdev_get_queue(device->ldev->backing_bdev);
2467 		r = bdi_congested(q->backing_dev_info, bdi_bits);
2468 		put_ldev(device);
2469 		if (r)
2470 			reason = 'b';
2471 	}
2472 
2473 	if (bdi_bits & (1 << WB_async_congested) &&
2474 	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2475 		r |= (1 << WB_async_congested);
2476 		reason = reason == 'b' ? 'a' : 'n';
2477 	}
2478 
2479 out:
2480 	device->congestion_reason = reason;
2481 	return r;
2482 }
2483 
2484 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2485 {
2486 	spin_lock_init(&wq->q_lock);
2487 	INIT_LIST_HEAD(&wq->q);
2488 	init_waitqueue_head(&wq->q_wait);
2489 }
2490 
2491 struct completion_work {
2492 	struct drbd_work w;
2493 	struct completion done;
2494 };
2495 
2496 static int w_complete(struct drbd_work *w, int cancel)
2497 {
2498 	struct completion_work *completion_work =
2499 		container_of(w, struct completion_work, w);
2500 
2501 	complete(&completion_work->done);
2502 	return 0;
2503 }
2504 
2505 void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2506 {
2507 	struct completion_work completion_work;
2508 
2509 	completion_work.w.cb = w_complete;
2510 	init_completion(&completion_work.done);
2511 	drbd_queue_work(work_queue, &completion_work.w);
2512 	wait_for_completion(&completion_work.done);
2513 }
2514 
2515 struct drbd_resource *drbd_find_resource(const char *name)
2516 {
2517 	struct drbd_resource *resource;
2518 
2519 	if (!name || !name[0])
2520 		return NULL;
2521 
2522 	rcu_read_lock();
2523 	for_each_resource_rcu(resource, &drbd_resources) {
2524 		if (!strcmp(resource->name, name)) {
2525 			kref_get(&resource->kref);
2526 			goto found;
2527 		}
2528 	}
2529 	resource = NULL;
2530 found:
2531 	rcu_read_unlock();
2532 	return resource;
2533 }
2534 
2535 struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2536 				     void *peer_addr, int peer_addr_len)
2537 {
2538 	struct drbd_resource *resource;
2539 	struct drbd_connection *connection;
2540 
2541 	rcu_read_lock();
2542 	for_each_resource_rcu(resource, &drbd_resources) {
2543 		for_each_connection_rcu(connection, resource) {
2544 			if (connection->my_addr_len == my_addr_len &&
2545 			    connection->peer_addr_len == peer_addr_len &&
2546 			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2547 			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2548 				kref_get(&connection->kref);
2549 				goto found;
2550 			}
2551 		}
2552 	}
2553 	connection = NULL;
2554 found:
2555 	rcu_read_unlock();
2556 	return connection;
2557 }
2558 
2559 static int drbd_alloc_socket(struct drbd_socket *socket)
2560 {
2561 	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2562 	if (!socket->rbuf)
2563 		return -ENOMEM;
2564 	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2565 	if (!socket->sbuf)
2566 		return -ENOMEM;
2567 	return 0;
2568 }
2569 
2570 static void drbd_free_socket(struct drbd_socket *socket)
2571 {
2572 	free_page((unsigned long) socket->sbuf);
2573 	free_page((unsigned long) socket->rbuf);
2574 }
2575 
2576 void conn_free_crypto(struct drbd_connection *connection)
2577 {
2578 	drbd_free_sock(connection);
2579 
2580 	crypto_free_shash(connection->csums_tfm);
2581 	crypto_free_shash(connection->verify_tfm);
2582 	crypto_free_shash(connection->cram_hmac_tfm);
2583 	crypto_free_shash(connection->integrity_tfm);
2584 	crypto_free_shash(connection->peer_integrity_tfm);
2585 	kfree(connection->int_dig_in);
2586 	kfree(connection->int_dig_vv);
2587 
2588 	connection->csums_tfm = NULL;
2589 	connection->verify_tfm = NULL;
2590 	connection->cram_hmac_tfm = NULL;
2591 	connection->integrity_tfm = NULL;
2592 	connection->peer_integrity_tfm = NULL;
2593 	connection->int_dig_in = NULL;
2594 	connection->int_dig_vv = NULL;
2595 }
2596 
2597 int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2598 {
2599 	struct drbd_connection *connection;
2600 	cpumask_var_t new_cpu_mask;
2601 	int err;
2602 
2603 	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2604 		return -ENOMEM;
2605 
2606 	/* silently ignore cpu mask on UP kernel */
2607 	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2608 		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2609 				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
2610 		if (err == -EOVERFLOW) {
2611 			/* So what. mask it out. */
2612 			cpumask_var_t tmp_cpu_mask;
2613 			if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2614 				cpumask_setall(tmp_cpu_mask);
2615 				cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2616 				drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2617 					res_opts->cpu_mask,
2618 					strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2619 					nr_cpu_ids);
2620 				free_cpumask_var(tmp_cpu_mask);
2621 				err = 0;
2622 			}
2623 		}
2624 		if (err) {
2625 			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2626 			/* retcode = ERR_CPU_MASK_PARSE; */
2627 			goto fail;
2628 		}
2629 	}
2630 	resource->res_opts = *res_opts;
2631 	if (cpumask_empty(new_cpu_mask))
2632 		drbd_calc_cpu_mask(&new_cpu_mask);
2633 	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2634 		cpumask_copy(resource->cpu_mask, new_cpu_mask);
2635 		for_each_connection_rcu(connection, resource) {
2636 			connection->receiver.reset_cpu_mask = 1;
2637 			connection->ack_receiver.reset_cpu_mask = 1;
2638 			connection->worker.reset_cpu_mask = 1;
2639 		}
2640 	}
2641 	err = 0;
2642 
2643 fail:
2644 	free_cpumask_var(new_cpu_mask);
2645 	return err;
2646 
2647 }
2648 
2649 struct drbd_resource *drbd_create_resource(const char *name)
2650 {
2651 	struct drbd_resource *resource;
2652 
2653 	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2654 	if (!resource)
2655 		goto fail;
2656 	resource->name = kstrdup(name, GFP_KERNEL);
2657 	if (!resource->name)
2658 		goto fail_free_resource;
2659 	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2660 		goto fail_free_name;
2661 	kref_init(&resource->kref);
2662 	idr_init(&resource->devices);
2663 	INIT_LIST_HEAD(&resource->connections);
2664 	resource->write_ordering = WO_BDEV_FLUSH;
2665 	list_add_tail_rcu(&resource->resources, &drbd_resources);
2666 	mutex_init(&resource->conf_update);
2667 	mutex_init(&resource->adm_mutex);
2668 	spin_lock_init(&resource->req_lock);
2669 	drbd_debugfs_resource_add(resource);
2670 	return resource;
2671 
2672 fail_free_name:
2673 	kfree(resource->name);
2674 fail_free_resource:
2675 	kfree(resource);
2676 fail:
2677 	return NULL;
2678 }
2679 
2680 /* caller must be under adm_mutex */
2681 struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2682 {
2683 	struct drbd_resource *resource;
2684 	struct drbd_connection *connection;
2685 
2686 	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2687 	if (!connection)
2688 		return NULL;
2689 
2690 	if (drbd_alloc_socket(&connection->data))
2691 		goto fail;
2692 	if (drbd_alloc_socket(&connection->meta))
2693 		goto fail;
2694 
2695 	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2696 	if (!connection->current_epoch)
2697 		goto fail;
2698 
2699 	INIT_LIST_HEAD(&connection->transfer_log);
2700 
2701 	INIT_LIST_HEAD(&connection->current_epoch->list);
2702 	connection->epochs = 1;
2703 	spin_lock_init(&connection->epoch_lock);
2704 
2705 	connection->send.seen_any_write_yet = false;
2706 	connection->send.current_epoch_nr = 0;
2707 	connection->send.current_epoch_writes = 0;
2708 
2709 	resource = drbd_create_resource(name);
2710 	if (!resource)
2711 		goto fail;
2712 
2713 	connection->cstate = C_STANDALONE;
2714 	mutex_init(&connection->cstate_mutex);
2715 	init_waitqueue_head(&connection->ping_wait);
2716 	idr_init(&connection->peer_devices);
2717 
2718 	drbd_init_workqueue(&connection->sender_work);
2719 	mutex_init(&connection->data.mutex);
2720 	mutex_init(&connection->meta.mutex);
2721 
2722 	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2723 	connection->receiver.connection = connection;
2724 	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2725 	connection->worker.connection = connection;
2726 	drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2727 	connection->ack_receiver.connection = connection;
2728 
2729 	kref_init(&connection->kref);
2730 
2731 	connection->resource = resource;
2732 
2733 	if (set_resource_options(resource, res_opts))
2734 		goto fail_resource;
2735 
2736 	kref_get(&resource->kref);
2737 	list_add_tail_rcu(&connection->connections, &resource->connections);
2738 	drbd_debugfs_connection_add(connection);
2739 	return connection;
2740 
2741 fail_resource:
2742 	list_del(&resource->resources);
2743 	drbd_free_resource(resource);
2744 fail:
2745 	kfree(connection->current_epoch);
2746 	drbd_free_socket(&connection->meta);
2747 	drbd_free_socket(&connection->data);
2748 	kfree(connection);
2749 	return NULL;
2750 }
2751 
2752 void drbd_destroy_connection(struct kref *kref)
2753 {
2754 	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2755 	struct drbd_resource *resource = connection->resource;
2756 
2757 	if (atomic_read(&connection->current_epoch->epoch_size) !=  0)
2758 		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2759 	kfree(connection->current_epoch);
2760 
2761 	idr_destroy(&connection->peer_devices);
2762 
2763 	drbd_free_socket(&connection->meta);
2764 	drbd_free_socket(&connection->data);
2765 	kfree(connection->int_dig_in);
2766 	kfree(connection->int_dig_vv);
2767 	memset(connection, 0xfc, sizeof(*connection));
2768 	kfree(connection);
2769 	kref_put(&resource->kref, drbd_destroy_resource);
2770 }
2771 
2772 static int init_submitter(struct drbd_device *device)
2773 {
2774 	/* opencoded create_singlethread_workqueue(),
2775 	 * to be able to say "drbd%d", ..., minor */
2776 	device->submit.wq =
2777 		alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2778 	if (!device->submit.wq)
2779 		return -ENOMEM;
2780 
2781 	INIT_WORK(&device->submit.worker, do_submit);
2782 	INIT_LIST_HEAD(&device->submit.writes);
2783 	return 0;
2784 }
2785 
2786 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2787 {
2788 	struct drbd_resource *resource = adm_ctx->resource;
2789 	struct drbd_connection *connection;
2790 	struct drbd_device *device;
2791 	struct drbd_peer_device *peer_device, *tmp_peer_device;
2792 	struct gendisk *disk;
2793 	struct request_queue *q;
2794 	int id;
2795 	int vnr = adm_ctx->volume;
2796 	enum drbd_ret_code err = ERR_NOMEM;
2797 
2798 	device = minor_to_device(minor);
2799 	if (device)
2800 		return ERR_MINOR_OR_VOLUME_EXISTS;
2801 
2802 	/* GFP_KERNEL, we are outside of all write-out paths */
2803 	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2804 	if (!device)
2805 		return ERR_NOMEM;
2806 	kref_init(&device->kref);
2807 
2808 	kref_get(&resource->kref);
2809 	device->resource = resource;
2810 	device->minor = minor;
2811 	device->vnr = vnr;
2812 
2813 	drbd_init_set_defaults(device);
2814 
2815 	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
2816 	if (!q)
2817 		goto out_no_q;
2818 	device->rq_queue = q;
2819 	q->queuedata   = device;
2820 
2821 	disk = alloc_disk(1);
2822 	if (!disk)
2823 		goto out_no_disk;
2824 	device->vdisk = disk;
2825 
2826 	set_disk_ro(disk, true);
2827 
2828 	disk->queue = q;
2829 	disk->major = DRBD_MAJOR;
2830 	disk->first_minor = minor;
2831 	disk->fops = &drbd_ops;
2832 	sprintf(disk->disk_name, "drbd%d", minor);
2833 	disk->private_data = device;
2834 
2835 	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2836 	/* we have no partitions. we contain only ourselves. */
2837 	device->this_bdev->bd_contains = device->this_bdev;
2838 
2839 	q->backing_dev_info->congested_fn = drbd_congested;
2840 	q->backing_dev_info->congested_data = device;
2841 
2842 	blk_queue_make_request(q, drbd_make_request);
2843 	blk_queue_write_cache(q, true, true);
2844 	/* Setting max_hw_sectors to an odd value of 8 KiB here
2845 	   triggers a max_bio_size message upon first attach or connect */
2846 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2847 
2848 	device->md_io.page = alloc_page(GFP_KERNEL);
2849 	if (!device->md_io.page)
2850 		goto out_no_io_page;
2851 
2852 	if (drbd_bm_init(device))
2853 		goto out_no_bitmap;
2854 	device->read_requests = RB_ROOT;
2855 	device->write_requests = RB_ROOT;
2856 
2857 	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2858 	if (id < 0) {
2859 		if (id == -ENOSPC)
2860 			err = ERR_MINOR_OR_VOLUME_EXISTS;
2861 		goto out_no_minor_idr;
2862 	}
2863 	kref_get(&device->kref);
2864 
2865 	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2866 	if (id < 0) {
2867 		if (id == -ENOSPC)
2868 			err = ERR_MINOR_OR_VOLUME_EXISTS;
2869 		goto out_idr_remove_minor;
2870 	}
2871 	kref_get(&device->kref);
2872 
2873 	INIT_LIST_HEAD(&device->peer_devices);
2874 	INIT_LIST_HEAD(&device->pending_bitmap_io);
2875 	for_each_connection(connection, resource) {
2876 		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2877 		if (!peer_device)
2878 			goto out_idr_remove_from_resource;
2879 		peer_device->connection = connection;
2880 		peer_device->device = device;
2881 
2882 		list_add(&peer_device->peer_devices, &device->peer_devices);
2883 		kref_get(&device->kref);
2884 
2885 		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2886 		if (id < 0) {
2887 			if (id == -ENOSPC)
2888 				err = ERR_INVALID_REQUEST;
2889 			goto out_idr_remove_from_resource;
2890 		}
2891 		kref_get(&connection->kref);
2892 		INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2893 	}
2894 
2895 	if (init_submitter(device)) {
2896 		err = ERR_NOMEM;
2897 		goto out_idr_remove_vol;
2898 	}
2899 
2900 	add_disk(disk);
2901 
2902 	/* inherit the connection state */
2903 	device->state.conn = first_connection(resource)->cstate;
2904 	if (device->state.conn == C_WF_REPORT_PARAMS) {
2905 		for_each_peer_device(peer_device, device)
2906 			drbd_connected(peer_device);
2907 	}
2908 	/* move to create_peer_device() */
2909 	for_each_peer_device(peer_device, device)
2910 		drbd_debugfs_peer_device_add(peer_device);
2911 	drbd_debugfs_device_add(device);
2912 	return NO_ERROR;
2913 
2914 out_idr_remove_vol:
2915 	idr_remove(&connection->peer_devices, vnr);
2916 out_idr_remove_from_resource:
2917 	for_each_connection(connection, resource) {
2918 		peer_device = idr_remove(&connection->peer_devices, vnr);
2919 		if (peer_device)
2920 			kref_put(&connection->kref, drbd_destroy_connection);
2921 	}
2922 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2923 		list_del(&peer_device->peer_devices);
2924 		kfree(peer_device);
2925 	}
2926 	idr_remove(&resource->devices, vnr);
2927 out_idr_remove_minor:
2928 	idr_remove(&drbd_devices, minor);
2929 	synchronize_rcu();
2930 out_no_minor_idr:
2931 	drbd_bm_cleanup(device);
2932 out_no_bitmap:
2933 	__free_page(device->md_io.page);
2934 out_no_io_page:
2935 	put_disk(disk);
2936 out_no_disk:
2937 	blk_cleanup_queue(q);
2938 out_no_q:
2939 	kref_put(&resource->kref, drbd_destroy_resource);
2940 	kfree(device);
2941 	return err;
2942 }
2943 
2944 void drbd_delete_device(struct drbd_device *device)
2945 {
2946 	struct drbd_resource *resource = device->resource;
2947 	struct drbd_connection *connection;
2948 	struct drbd_peer_device *peer_device;
2949 
2950 	/* move to free_peer_device() */
2951 	for_each_peer_device(peer_device, device)
2952 		drbd_debugfs_peer_device_cleanup(peer_device);
2953 	drbd_debugfs_device_cleanup(device);
2954 	for_each_connection(connection, resource) {
2955 		idr_remove(&connection->peer_devices, device->vnr);
2956 		kref_put(&device->kref, drbd_destroy_device);
2957 	}
2958 	idr_remove(&resource->devices, device->vnr);
2959 	kref_put(&device->kref, drbd_destroy_device);
2960 	idr_remove(&drbd_devices, device_to_minor(device));
2961 	kref_put(&device->kref, drbd_destroy_device);
2962 	del_gendisk(device->vdisk);
2963 	synchronize_rcu();
2964 	kref_put(&device->kref, drbd_destroy_device);
2965 }
2966 
2967 static int __init drbd_init(void)
2968 {
2969 	int err;
2970 
2971 	if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
2972 		pr_err("invalid minor_count (%d)\n", drbd_minor_count);
2973 #ifdef MODULE
2974 		return -EINVAL;
2975 #else
2976 		drbd_minor_count = DRBD_MINOR_COUNT_DEF;
2977 #endif
2978 	}
2979 
2980 	err = register_blkdev(DRBD_MAJOR, "drbd");
2981 	if (err) {
2982 		pr_err("unable to register block device major %d\n",
2983 		       DRBD_MAJOR);
2984 		return err;
2985 	}
2986 
2987 	/*
2988 	 * allocate all necessary structs
2989 	 */
2990 	init_waitqueue_head(&drbd_pp_wait);
2991 
2992 	drbd_proc = NULL; /* play safe for drbd_cleanup */
2993 	idr_init(&drbd_devices);
2994 
2995 	mutex_init(&resources_mutex);
2996 	INIT_LIST_HEAD(&drbd_resources);
2997 
2998 	err = drbd_genl_register();
2999 	if (err) {
3000 		pr_err("unable to register generic netlink family\n");
3001 		goto fail;
3002 	}
3003 
3004 	err = drbd_create_mempools();
3005 	if (err)
3006 		goto fail;
3007 
3008 	err = -ENOMEM;
3009 	drbd_proc = proc_create_single("drbd", S_IFREG | 0444 , NULL, drbd_seq_show);
3010 	if (!drbd_proc)	{
3011 		pr_err("unable to register proc file\n");
3012 		goto fail;
3013 	}
3014 
3015 	retry.wq = create_singlethread_workqueue("drbd-reissue");
3016 	if (!retry.wq) {
3017 		pr_err("unable to create retry workqueue\n");
3018 		goto fail;
3019 	}
3020 	INIT_WORK(&retry.worker, do_retry);
3021 	spin_lock_init(&retry.lock);
3022 	INIT_LIST_HEAD(&retry.writes);
3023 
3024 	if (drbd_debugfs_init())
3025 		pr_notice("failed to initialize debugfs -- will not be available\n");
3026 
3027 	pr_info("initialized. "
3028 	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3029 	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3030 	pr_info("%s\n", drbd_buildtag());
3031 	pr_info("registered as block device major %d\n", DRBD_MAJOR);
3032 	return 0; /* Success! */
3033 
3034 fail:
3035 	drbd_cleanup();
3036 	if (err == -ENOMEM)
3037 		pr_err("ran out of memory\n");
3038 	else
3039 		pr_err("initialization failure\n");
3040 	return err;
3041 }
3042 
3043 static void drbd_free_one_sock(struct drbd_socket *ds)
3044 {
3045 	struct socket *s;
3046 	mutex_lock(&ds->mutex);
3047 	s = ds->socket;
3048 	ds->socket = NULL;
3049 	mutex_unlock(&ds->mutex);
3050 	if (s) {
3051 		/* so debugfs does not need to mutex_lock() */
3052 		synchronize_rcu();
3053 		kernel_sock_shutdown(s, SHUT_RDWR);
3054 		sock_release(s);
3055 	}
3056 }
3057 
3058 void drbd_free_sock(struct drbd_connection *connection)
3059 {
3060 	if (connection->data.socket)
3061 		drbd_free_one_sock(&connection->data);
3062 	if (connection->meta.socket)
3063 		drbd_free_one_sock(&connection->meta);
3064 }
3065 
3066 /* meta data management */
3067 
3068 void conn_md_sync(struct drbd_connection *connection)
3069 {
3070 	struct drbd_peer_device *peer_device;
3071 	int vnr;
3072 
3073 	rcu_read_lock();
3074 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
3075 		struct drbd_device *device = peer_device->device;
3076 
3077 		kref_get(&device->kref);
3078 		rcu_read_unlock();
3079 		drbd_md_sync(device);
3080 		kref_put(&device->kref, drbd_destroy_device);
3081 		rcu_read_lock();
3082 	}
3083 	rcu_read_unlock();
3084 }
3085 
3086 /* aligned 4kByte */
3087 struct meta_data_on_disk {
3088 	u64 la_size_sect;      /* last agreed size. */
3089 	u64 uuid[UI_SIZE];   /* UUIDs. */
3090 	u64 device_uuid;
3091 	u64 reserved_u64_1;
3092 	u32 flags;             /* MDF */
3093 	u32 magic;
3094 	u32 md_size_sect;
3095 	u32 al_offset;         /* offset to this block */
3096 	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
3097 	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
3098 	u32 bm_offset;         /* offset to the bitmap, from here */
3099 	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3100 	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
3101 
3102 	/* see al_tr_number_to_on_disk_sector() */
3103 	u32 al_stripes;
3104 	u32 al_stripe_size_4k;
3105 
3106 	u8 reserved_u8[4096 - (7*8 + 10*4)];
3107 } __packed;
3108 
3109 
3110 
3111 void drbd_md_write(struct drbd_device *device, void *b)
3112 {
3113 	struct meta_data_on_disk *buffer = b;
3114 	sector_t sector;
3115 	int i;
3116 
3117 	memset(buffer, 0, sizeof(*buffer));
3118 
3119 	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
3120 	for (i = UI_CURRENT; i < UI_SIZE; i++)
3121 		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3122 	buffer->flags = cpu_to_be32(device->ldev->md.flags);
3123 	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3124 
3125 	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
3126 	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
3127 	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3128 	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3129 	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3130 
3131 	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3132 	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3133 
3134 	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3135 	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3136 
3137 	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3138 	sector = device->ldev->md.md_offset;
3139 
3140 	if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3141 		/* this was a try anyway ... */
3142 		drbd_err(device, "meta data update failed!\n");
3143 		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3144 	}
3145 }
3146 
3147 /**
3148  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3149  * @device:	DRBD device.
3150  */
3151 void drbd_md_sync(struct drbd_device *device)
3152 {
3153 	struct meta_data_on_disk *buffer;
3154 
3155 	/* Don't accidentally change the DRBD meta data layout. */
3156 	BUILD_BUG_ON(UI_SIZE != 4);
3157 	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3158 
3159 	del_timer(&device->md_sync_timer);
3160 	/* timer may be rearmed by drbd_md_mark_dirty() now. */
3161 	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3162 		return;
3163 
3164 	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
3165 	 * metadata even if we detach due to a disk failure! */
3166 	if (!get_ldev_if_state(device, D_FAILED))
3167 		return;
3168 
3169 	buffer = drbd_md_get_buffer(device, __func__);
3170 	if (!buffer)
3171 		goto out;
3172 
3173 	drbd_md_write(device, buffer);
3174 
3175 	/* Update device->ldev->md.la_size_sect,
3176 	 * since we just wrote the current capacity to the on-disk metadata. */
3177 	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
3178 
3179 	drbd_md_put_buffer(device);
3180 out:
3181 	put_ldev(device);
3182 }
3183 
3184 static int check_activity_log_stripe_size(struct drbd_device *device,
3185 		struct meta_data_on_disk *on_disk,
3186 		struct drbd_md *in_core)
3187 {
3188 	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3189 	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3190 	u64 al_size_4k;
3191 
3192 	/* both not set: default to old fixed size activity log */
3193 	if (al_stripes == 0 && al_stripe_size_4k == 0) {
3194 		al_stripes = 1;
3195 		al_stripe_size_4k = MD_32kB_SECT/8;
3196 	}
3197 
3198 	/* some paranoia plausibility checks */
3199 
3200 	/* we need both values to be set */
3201 	if (al_stripes == 0 || al_stripe_size_4k == 0)
3202 		goto err;
3203 
3204 	al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3205 
3206 	/* Upper limit of activity log area, to avoid potential overflow
3207 	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
3208 	 * than 72 * 4k blocks total only increases the amount of history,
3209 	 * limiting this arbitrarily to 16 GB is not a real limitation ;-)  */
3210 	if (al_size_4k > (16 * 1024 * 1024/4))
3211 		goto err;
3212 
3213 	/* Lower limit: we need at least 8 transaction slots (32kB)
3214 	 * to not break existing setups */
3215 	if (al_size_4k < MD_32kB_SECT/8)
3216 		goto err;
3217 
3218 	in_core->al_stripe_size_4k = al_stripe_size_4k;
3219 	in_core->al_stripes = al_stripes;
3220 	in_core->al_size_4k = al_size_4k;
3221 
3222 	return 0;
3223 err:
3224 	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3225 			al_stripes, al_stripe_size_4k);
3226 	return -EINVAL;
3227 }
3228 
3229 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3230 {
3231 	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3232 	struct drbd_md *in_core = &bdev->md;
3233 	s32 on_disk_al_sect;
3234 	s32 on_disk_bm_sect;
3235 
3236 	/* The on-disk size of the activity log, calculated from offsets, and
3237 	 * the size of the activity log calculated from the stripe settings,
3238 	 * should match.
3239 	 * Though we could relax this a bit: it is ok, if the striped activity log
3240 	 * fits in the available on-disk activity log size.
3241 	 * Right now, that would break how resize is implemented.
3242 	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3243 	 * of possible unused padding space in the on disk layout. */
3244 	if (in_core->al_offset < 0) {
3245 		if (in_core->bm_offset > in_core->al_offset)
3246 			goto err;
3247 		on_disk_al_sect = -in_core->al_offset;
3248 		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3249 	} else {
3250 		if (in_core->al_offset != MD_4kB_SECT)
3251 			goto err;
3252 		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3253 			goto err;
3254 
3255 		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3256 		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3257 	}
3258 
3259 	/* old fixed size meta data is exactly that: fixed. */
3260 	if (in_core->meta_dev_idx >= 0) {
3261 		if (in_core->md_size_sect != MD_128MB_SECT
3262 		||  in_core->al_offset != MD_4kB_SECT
3263 		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3264 		||  in_core->al_stripes != 1
3265 		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3266 			goto err;
3267 	}
3268 
3269 	if (capacity < in_core->md_size_sect)
3270 		goto err;
3271 	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3272 		goto err;
3273 
3274 	/* should be aligned, and at least 32k */
3275 	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3276 		goto err;
3277 
3278 	/* should fit (for now: exactly) into the available on-disk space;
3279 	 * overflow prevention is in check_activity_log_stripe_size() above. */
3280 	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3281 		goto err;
3282 
3283 	/* again, should be aligned */
3284 	if (in_core->bm_offset & 7)
3285 		goto err;
3286 
3287 	/* FIXME check for device grow with flex external meta data? */
3288 
3289 	/* can the available bitmap space cover the last agreed device size? */
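	/* one bitmap bit covers one 4k block (8 sectors): la_size_sect/8 bits,
	 * /8 gives bytes, /512 gives the number of 512-byte bitmap sectors */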
3290 	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3291 		goto err;
3292 
3293 	return 0;
3294 
3295 err:
3296 	drbd_err(device, "meta data offsets don't make sense: idx=%d "
3297 			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3298 			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3299 			in_core->meta_dev_idx,
3300 			in_core->al_stripes, in_core->al_stripe_size_4k,
3301 			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3302 			(unsigned long long)in_core->la_size_sect,
3303 			(unsigned long long)capacity);
3304 
3305 	return -EINVAL;
3306 }
3307 
3308 
3309 /**
3310  * drbd_md_read() - Reads in the meta data super block
3311  * @device:	DRBD device.
3312  * @bdev:	Device from which the meta data should be read in.
3313  *
3314  * Return NO_ERROR on success, and an enum drbd_ret_code in case
3315  * something goes wrong.
3316  *
3317  * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3318  * even before @bdev is assigned to @device->ldev.
3319  */
3320 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3321 {
3322 	struct meta_data_on_disk *buffer;
3323 	u32 magic, flags;
3324 	int i, rv = NO_ERROR;
3325 
3326 	if (device->state.disk != D_DISKLESS)
3327 		return ERR_DISK_CONFIGURED;
3328 
3329 	buffer = drbd_md_get_buffer(device, __func__);
3330 	if (!buffer)
3331 		return ERR_NOMEM;
3332 
3333 	/* First, figure out where our meta data superblock is located,
3334 	 * and read it. */
3335 	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3336 	bdev->md.md_offset = drbd_md_ss(bdev);
3337 	/* Even for (flexible or indexed) external meta data,
3338 	 * initially restrict ourselves to the 4k superblock for now.
3339 	 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3340 	bdev->md.md_size_sect = 8;
3341 
3342 	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3343 				 REQ_OP_READ)) {
3344 		/* NOTE: can't do normal error processing here as this is
3345 		   called BEFORE disk is attached */
3346 		drbd_err(device, "Error while reading metadata.\n");
3347 		rv = ERR_IO_MD_DISK;
3348 		goto err;
3349 	}
3350 
3351 	magic = be32_to_cpu(buffer->magic);
3352 	flags = be32_to_cpu(buffer->flags);
3353 	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3354 	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
		/* btw: that's Activity Log clean, not "all" clean. */
3356 		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3357 		rv = ERR_MD_UNCLEAN;
3358 		goto err;
3359 	}
3360 
3361 	rv = ERR_MD_INVALID;
3362 	if (magic != DRBD_MD_MAGIC_08) {
3363 		if (magic == DRBD_MD_MAGIC_07)
3364 			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3365 		else
3366 			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3367 		goto err;
3368 	}
3369 
3370 	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3371 		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3372 		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3373 		goto err;
3374 	}
3375 
3376 
3377 	/* convert to in_core endian */
3378 	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3379 	for (i = UI_CURRENT; i < UI_SIZE; i++)
3380 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3381 	bdev->md.flags = be32_to_cpu(buffer->flags);
3382 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3383 
3384 	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3385 	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3386 	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3387 
3388 	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3389 		goto err;
3390 	if (check_offsets_and_sizes(device, bdev))
3391 		goto err;
3392 
3393 	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3394 		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3395 		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3396 		goto err;
3397 	}
3398 	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3399 		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3400 		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3401 		goto err;
3402 	}
3403 
3404 	rv = NO_ERROR;
3405 
3406 	spin_lock_irq(&device->resource->req_lock);
3407 	if (device->state.conn < C_CONNECTED) {
3408 		unsigned int peer;
3409 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3410 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3411 		device->peer_max_bio_size = peer;
3412 	}
3413 	spin_unlock_irq(&device->resource->req_lock);
3414 
3415  err:
3416 	drbd_md_put_buffer(device);
3417 
3418 	return rv;
3419 }
3420 
3421 /**
3422  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3423  * @device:	DRBD device.
3424  *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures drbd_md_sync() is called within five seconds.
3428  */
3429 #ifdef DEBUG
3430 void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
3431 {
3432 	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3433 		mod_timer(&device->md_sync_timer, jiffies + HZ);
3434 		device->last_md_mark_dirty.line = line;
3435 		device->last_md_mark_dirty.func = func;
3436 	}
3437 }
3438 #else
3439 void drbd_md_mark_dirty(struct drbd_device *device)
3440 {
3441 	if (!test_and_set_bit(MD_DIRTY, &device->flags))
3442 		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3443 }
3444 #endif
3445 
3446 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3447 {
3448 	int i;
3449 
3450 	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3451 		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3452 }
3453 
3454 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3455 {
3456 	if (idx == UI_CURRENT) {
3457 		if (device->state.role == R_PRIMARY)
3458 			val |= 1;
3459 		else
3460 			val &= ~((u64)1);
3461 
3462 		drbd_set_ed_uuid(device, val);
3463 	}
3464 
3465 	device->ldev->md.uuid[idx] = val;
3466 	drbd_md_mark_dirty(device);
3467 }
3468 
3469 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3470 {
3471 	unsigned long flags;
3472 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3473 	__drbd_uuid_set(device, idx, val);
3474 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3475 }
3476 
3477 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3478 {
3479 	unsigned long flags;
3480 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3481 	if (device->ldev->md.uuid[idx]) {
3482 		drbd_uuid_move_history(device);
3483 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3484 	}
3485 	__drbd_uuid_set(device, idx, val);
3486 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3487 }
3488 
3489 /**
3490  * drbd_uuid_new_current() - Creates a new current UUID
3491  * @device:	DRBD device.
3492  *
3493  * Creates a new current UUID, and rotates the old current UUID into
3494  * the bitmap slot. Causes an incremental resync upon next connect.
3495  */
3496 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3497 {
3498 	u64 val;
3499 	unsigned long long bm_uuid;
3500 
3501 	get_random_bytes(&val, sizeof(u64));
3502 
3503 	spin_lock_irq(&device->ldev->md.uuid_lock);
3504 	bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3505 
3506 	if (bm_uuid)
3507 		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3508 
3509 	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3510 	__drbd_uuid_set(device, UI_CURRENT, val);
3511 	spin_unlock_irq(&device->ldev->md.uuid_lock);
3512 
3513 	drbd_print_uuids(device, "new current UUID");
3514 	/* get it to stable storage _now_ */
3515 	drbd_md_sync(device);
3516 }
3517 
3518 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3519 {
3520 	unsigned long flags;
3521 	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3522 		return;
3523 
3524 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3525 	if (val == 0) {
3526 		drbd_uuid_move_history(device);
3527 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3528 		device->ldev->md.uuid[UI_BITMAP] = 0;
3529 	} else {
3530 		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3531 		if (bm_uuid)
3532 			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3533 
3534 		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3535 	}
3536 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3537 
3538 	drbd_md_mark_dirty(device);
3539 }
3540 
3541 /**
3542  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3543  * @device:	DRBD device.
3544  *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 * MDF_FULL_SYNC is set (and synced to disk) before the bitmap is touched, and
 * cleared only after the bitmap write succeeded, so a crash in between still
 * results in a full sync.
3546  */
3547 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3548 {
3549 	int rv = -EIO;
3550 
3551 	drbd_md_set_flag(device, MDF_FULL_SYNC);
3552 	drbd_md_sync(device);
3553 	drbd_bm_set_all(device);
3554 
3555 	rv = drbd_bm_write(device);
3556 
3557 	if (!rv) {
3558 		drbd_md_clear_flag(device, MDF_FULL_SYNC);
3559 		drbd_md_sync(device);
3560 	}
3561 
3562 	return rv;
3563 }
3564 
3565 /**
3566  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3567  * @device:	DRBD device.
3568  *
3569  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3570  */
3571 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3572 {
3573 	drbd_resume_al(device);
3574 	drbd_bm_clear_all(device);
3575 	return drbd_bm_write(device);
3576 }
3577 
3578 static int w_bitmap_io(struct drbd_work *w, int unused)
3579 {
3580 	struct drbd_device *device =
3581 		container_of(w, struct drbd_device, bm_io_work.w);
3582 	struct bm_io_work *work = &device->bm_io_work;
3583 	int rv = -EIO;
3584 
3585 	if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3586 		int cnt = atomic_read(&device->ap_bio_cnt);
3587 		if (cnt)
3588 			drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3589 					cnt, work->why);
3590 	}
3591 
3592 	if (get_ldev(device)) {
3593 		drbd_bm_lock(device, work->why, work->flags);
3594 		rv = work->io_fn(device);
3595 		drbd_bm_unlock(device);
3596 		put_ldev(device);
3597 	}
3598 
3599 	clear_bit_unlock(BITMAP_IO, &device->flags);
3600 	wake_up(&device->misc_wait);
3601 
3602 	if (work->done)
3603 		work->done(device, rv);
3604 
3605 	clear_bit(BITMAP_IO_QUEUED, &device->flags);
3606 	work->why = NULL;
3607 	work->flags = 0;
3608 
3609 	return 0;
3610 }
3611 
3612 /**
3613  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3614  * @device:	DRBD device.
3615  * @io_fn:	IO callback to be called when bitmap IO is possible
3616  * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag), passed on to drbd_bm_lock()
 *
 * While IO on the bitmap is in progress, application IO is frozen; this
 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY
 * ONLY be called from worker context. It MUST NOT be used while a previous
 * such work is still pending!
 *
 * The worker function wraps the call to io_fn() in get_ldev() and
 * put_ldev().
3626  */
3627 void drbd_queue_bitmap_io(struct drbd_device *device,
3628 			  int (*io_fn)(struct drbd_device *),
3629 			  void (*done)(struct drbd_device *, int),
3630 			  char *why, enum bm_flag flags)
3631 {
3632 	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3633 
3634 	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3635 	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3636 	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3637 	if (device->bm_io_work.why)
3638 		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3639 			why, device->bm_io_work.why);
3640 
3641 	device->bm_io_work.io_fn = io_fn;
3642 	device->bm_io_work.done = done;
3643 	device->bm_io_work.why = why;
3644 	device->bm_io_work.flags = flags;
3645 
3646 	spin_lock_irq(&device->resource->req_lock);
3647 	set_bit(BITMAP_IO, &device->flags);
3648 	/* don't wait for pending application IO if the caller indicates that
3649 	 * application IO does not conflict anyways. */
3650 	if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3651 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3652 			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3653 					&device->bm_io_work.w);
3654 	}
3655 	spin_unlock_irq(&device->resource->req_lock);
3656 }
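/*
 * Usage sketch (informational, not an actual call site in this file): from
 * worker context, a full-bitmap write could be queued roughly like
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write,
 *			     &my_done_fn,
 *			     "example: set_n_write",
 *			     BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST);
 *
 * where my_done_fn is a hypothetical completion callback; w_bitmap_io()
 * invokes it with the io_fn's return value once the bitmap IO has finished.
 */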
3657 
3658 /**
3659  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3660  * @device:	DRBD device.
3661  * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag)
 *
 * Freezes application IO while the actual IO operation runs (only if @flags
 * lock out some bitmap operation). This function MAY NOT be called from
 * worker context.
3666  */
3667 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3668 		char *why, enum bm_flag flags)
3669 {
3670 	/* Only suspend io, if some operation is supposed to be locked out */
3671 	const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
3672 	int rv;
3673 
3674 	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3675 
3676 	if (do_suspend_io)
3677 		drbd_suspend_io(device);
3678 
3679 	drbd_bm_lock(device, why, flags);
3680 	rv = io_fn(device);
3681 	drbd_bm_unlock(device);
3682 
3683 	if (do_suspend_io)
3684 		drbd_resume_io(device);
3685 
3686 	return rv;
3687 }
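/*
 * Usage sketch (informational): from any context other than the worker, the
 * same io_fn can be run synchronously, e.g.
 *
 *	int err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
 *				 "example: clear_n_write",
 *				 BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST);
 *
 * Because the flags lock out bitmap modifications, application IO is
 * suspended for the duration of the call (see do_suspend_io above).
 */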
3688 
3689 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3690 {
3691 	if ((device->ldev->md.flags & flag) != flag) {
3692 		drbd_md_mark_dirty(device);
3693 		device->ldev->md.flags |= flag;
3694 	}
3695 }
3696 
3697 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3698 {
3699 	if ((device->ldev->md.flags & flag) != 0) {
3700 		drbd_md_mark_dirty(device);
3701 		device->ldev->md.flags &= ~flag;
3702 	}
3703 }
3704 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3705 {
3706 	return (bdev->md.flags & flag) != 0;
3707 }
3708 
3709 static void md_sync_timer_fn(struct timer_list *t)
3710 {
3711 	struct drbd_device *device = from_timer(device, t, md_sync_timer);
3712 	drbd_device_post_work(device, MD_SYNC);
3713 }
3714 
3715 const char *cmdname(enum drbd_packet cmd)
3716 {
3717 	/* THINK may need to become several global tables
3718 	 * when we want to support more than
3719 	 * one PRO_VERSION */
3720 	static const char *cmdnames[] = {
3721 		[P_DATA]	        = "Data",
3722 		[P_WSAME]	        = "WriteSame",
3723 		[P_TRIM]	        = "Trim",
3724 		[P_DATA_REPLY]	        = "DataReply",
3725 		[P_RS_DATA_REPLY]	= "RSDataReply",
3726 		[P_BARRIER]	        = "Barrier",
3727 		[P_BITMAP]	        = "ReportBitMap",
3728 		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
3729 		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
3730 		[P_UNPLUG_REMOTE]	= "UnplugRemote",
3731 		[P_DATA_REQUEST]	= "DataRequest",
3732 		[P_RS_DATA_REQUEST]     = "RSDataRequest",
3733 		[P_SYNC_PARAM]	        = "SyncParam",
3734 		[P_SYNC_PARAM89]	= "SyncParam89",
3735 		[P_PROTOCOL]            = "ReportProtocol",
3736 		[P_UUIDS]	        = "ReportUUIDs",
3737 		[P_SIZES]	        = "ReportSizes",
3738 		[P_STATE]	        = "ReportState",
3739 		[P_SYNC_UUID]           = "ReportSyncUUID",
3740 		[P_AUTH_CHALLENGE]      = "AuthChallenge",
3741 		[P_AUTH_RESPONSE]	= "AuthResponse",
3742 		[P_PING]		= "Ping",
3743 		[P_PING_ACK]	        = "PingAck",
3744 		[P_RECV_ACK]	        = "RecvAck",
3745 		[P_WRITE_ACK]	        = "WriteAck",
3746 		[P_RS_WRITE_ACK]	= "RSWriteAck",
3747 		[P_SUPERSEDED]          = "Superseded",
3748 		[P_NEG_ACK]	        = "NegAck",
3749 		[P_NEG_DREPLY]	        = "NegDReply",
3750 		[P_NEG_RS_DREPLY]	= "NegRSDReply",
3751 		[P_BARRIER_ACK]	        = "BarrierAck",
3752 		[P_STATE_CHG_REQ]       = "StateChgRequest",
3753 		[P_STATE_CHG_REPLY]     = "StateChgReply",
3754 		[P_OV_REQUEST]          = "OVRequest",
3755 		[P_OV_REPLY]            = "OVReply",
3756 		[P_OV_RESULT]           = "OVResult",
3757 		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
3758 		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
3759 		[P_COMPRESSED_BITMAP]   = "CBitmap",
3760 		[P_DELAY_PROBE]         = "DelayProbe",
3761 		[P_OUT_OF_SYNC]		= "OutOfSync",
3763 		[P_RS_CANCEL]		= "RSCancel",
3764 		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
3765 		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
3766 		[P_RETRY_WRITE]		= "retry_write",
3767 		[P_PROTOCOL_UPDATE]	= "protocol_update",
3768 		[P_RS_THIN_REQ]         = "rs_thin_req",
3769 		[P_RS_DEALLOCATED]      = "rs_deallocated",
3770 
3771 		/* enum drbd_packet, but not commands - obsoleted flags:
3772 		 *	P_MAY_IGNORE
3773 		 *	P_MAX_OPT_CMD
3774 		 */
3775 	};
3776 
3777 	/* too big for the array: 0xfffX */
3778 	if (cmd == P_INITIAL_META)
3779 		return "InitialMeta";
3780 	if (cmd == P_INITIAL_DATA)
3781 		return "InitialData";
3782 	if (cmd == P_CONNECTION_FEATURES)
3783 		return "ConnectionFeatures";
3784 	if (cmd >= ARRAY_SIZE(cmdnames))
3785 		return "Unknown";
3786 	return cmdnames[cmd];
3787 }
3788 
3789 /**
3790  * drbd_wait_misc  -  wait for a request to make progress
3791  * @device:	device associated with the request
3792  * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 *
 * Must be called with the req_lock held; the lock is released while
 * sleeping and re-acquired before returning.
 */
3795 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3796 {
3797 	struct net_conf *nc;
3798 	DEFINE_WAIT(wait);
3799 	long timeout;
3800 
3801 	rcu_read_lock();
3802 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3803 	if (!nc) {
3804 		rcu_read_unlock();
3805 		return -ETIMEDOUT;
3806 	}
3807 	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3808 	rcu_read_unlock();
3809 
3810 	/* Indicate to wake up device->misc_wait on progress.  */
3811 	i->waiting = true;
3812 	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3813 	spin_unlock_irq(&device->resource->req_lock);
3814 	timeout = schedule_timeout(timeout);
3815 	finish_wait(&device->misc_wait, &wait);
3816 	spin_lock_irq(&device->resource->req_lock);
3817 	if (!timeout || device->state.conn < C_CONNECTED)
3818 		return -ETIMEDOUT;
3819 	if (signal_pending(current))
3820 		return -ERESTARTSYS;
3821 	return 0;
3822 }
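/*
 * Usage sketch (informational): callers hold the req_lock and typically loop
 * until the conflicting request has gone away:
 *
 *	while (still_conflicting(device, i)) {
 *		if (drbd_wait_misc(device, i))
 *			break;
 *	}
 *
 * where still_conflicting() is a hypothetical helper; a non-zero return from
 * drbd_wait_misc() means the wait timed out or a signal is pending.
 */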
3823 
3824 void lock_all_resources(void)
3825 {
3826 	struct drbd_resource *resource;
3827 	int __maybe_unused i = 0;
3828 
3829 	mutex_lock(&resources_mutex);
3830 	local_irq_disable();
3831 	for_each_resource(resource, &drbd_resources)
3832 		spin_lock_nested(&resource->req_lock, i++);
3833 }
3834 
3835 void unlock_all_resources(void)
3836 {
3837 	struct drbd_resource *resource;
3838 
3839 	for_each_resource(resource, &drbd_resources)
3840 		spin_unlock(&resource->req_lock);
3841 	local_irq_enable();
3842 	mutex_unlock(&resources_mutex);
3843 }
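/*
 * These two are meant to be used as a pair; a sketch of the intended usage:
 *
 *	lock_all_resources();
 *	... walk drbd_resources while all req_locks are held ...
 *	unlock_all_resources();
 *
 * Interrupts stay disabled between the two calls, so only short,
 * non-sleeping sections belong in between.
 */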
3844 
3845 #ifdef CONFIG_DRBD_FAULT_INJECTION
3846 /* Fault insertion support including random number generator shamelessly
3847  * stolen from kernel/rcutorture.c */
3848 struct fault_random_state {
3849 	unsigned long state;
3850 	unsigned long count;
3851 };
3852 
3853 #define FAULT_RANDOM_MULT 39916801  /* prime */
3854 #define FAULT_RANDOM_ADD	479001701 /* prime */
3855 #define FAULT_RANDOM_REFRESH 10000
3856 
3857 /*
3858  * Crude but fast random-number generator.  Uses a linear congruential
3859  * generator, with occasional help from get_random_bytes().
3860  */
3861 static unsigned long
3862 _drbd_fault_random(struct fault_random_state *rsp)
3863 {
3864 	long refresh;
3865 
3866 	if (!rsp->count--) {
3867 		get_random_bytes(&refresh, sizeof(refresh));
3868 		rsp->state += refresh;
3869 		rsp->count = FAULT_RANDOM_REFRESH;
3870 	}
3871 	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3872 	return swahw32(rsp->state);
3873 }
3874 
3875 static char *
3876 _drbd_fault_str(unsigned int type) {
3877 	static char *_faults[] = {
3878 		[DRBD_FAULT_MD_WR] = "Meta-data write",
3879 		[DRBD_FAULT_MD_RD] = "Meta-data read",
3880 		[DRBD_FAULT_RS_WR] = "Resync write",
3881 		[DRBD_FAULT_RS_RD] = "Resync read",
3882 		[DRBD_FAULT_DT_WR] = "Data write",
3883 		[DRBD_FAULT_DT_RD] = "Data read",
3884 		[DRBD_FAULT_DT_RA] = "Data read ahead",
3885 		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
3886 		[DRBD_FAULT_AL_EE] = "EE allocation",
3887 		[DRBD_FAULT_RECEIVE] = "receive data corruption",
3888 	};
3889 
3890 	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3891 }
3892 
3893 unsigned int
3894 _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3895 {
3896 	static struct fault_random_state rrs = {0, 0};
3897 
3898 	unsigned int ret = (
3899 		(drbd_fault_devs == 0 ||
3900 			((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
3901 		(((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
3902 
3903 	if (ret) {
3904 		drbd_fault_count++;
3905 
3906 		if (__ratelimit(&drbd_ratelimit_state))
3907 			drbd_warn(device, "***Simulating %s failure\n",
3908 				_drbd_fault_str(type));
3909 	}
3910 
3911 	return ret;
3912 }
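/*
 * Example (informational): fault injection is steered by the module
 * parameters near the top of this file.  enable_faults is a bitmap of
 * DRBD_FAULT_* types, fault_rate a percentage, fault_devs a bitmap of
 * minors (0 = all devices).  Assuming DRBD_FAULT_MD_WR is bit 0, simulating
 * meta-data write failures on ~5% of requests could look like
 *
 *	modprobe drbd enable_faults=1 fault_rate=5
 *
 * or drbd.enable_faults=1 drbd.fault_rate=5 on the kernel command line when
 * built in.
 */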
3913 #endif
3914 
3915 const char *drbd_buildtag(void)
3916 {
	/* When DRBD is built from external sources, this holds a reference
	   to the git hash of the source code. */
3919 
3920 	static char buildtag[38] = "\0uilt-in";
3921 
3922 	if (buildtag[0] == 0) {
3923 #ifdef MODULE
3924 		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3925 #else
3926 		buildtag[0] = 'b';
3927 #endif
3928 	}
3929 
3930 	return buildtag;
3931 }
3932 
3933 module_init(drbd_init)
3934 module_exit(drbd_cleanup)
3935 
3936 EXPORT_SYMBOL(drbd_conn_str);
3937 EXPORT_SYMBOL(drbd_role_str);
3938 EXPORT_SYMBOL(drbd_disk_str);
3939 EXPORT_SYMBOL(drbd_set_st_err_str);
3940