Lines Matching +full:disk +full:- +full:activity
1 // SPDX-License-Identifier: GPL-2.0-only
7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
52 static int drbd_open(struct gendisk *disk, blk_mode_t mode);
59 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
63 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
67 /* thanks to these macros, if compiled into the kernel (not-module),
77 /* fault rate % value - applies to all enabled faults */
113 struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
138 atomic_inc(&device->local_cnt); in _get_ldev_if_state()
139 io_allowed = (device->state.disk >= mins); in _get_ldev_if_state()
141 if (atomic_dec_and_test(&device->local_cnt)) in _get_ldev_if_state()
142 wake_up(&device->misc_wait); in _get_ldev_if_state()
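
A minimal userspace sketch of the guarded-reference pattern visible in the _get_ldev_if_state() lines above: take a reference, re-check the disk state, and on failure drop the reference again, waking any waiter when the count hits zero. C11 atomics stand in for the kernel's atomic_t and wait queue; all names here are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>

enum disk_state { D_DISKLESS, D_INCONSISTENT, D_UP_TO_DATE };

struct dev {
	atomic_int local_cnt;   /* stands in for device->local_cnt */
	enum disk_state disk;   /* stands in for device->state.disk */
};

/* Returns true holding a reference iff the disk state is >= mins. */
static bool get_ldev_if_state(struct dev *d, enum disk_state mins)
{
	bool io_allowed;

	atomic_fetch_add(&d->local_cnt, 1);
	io_allowed = d->disk >= mins;
	if (!io_allowed) {
		/* the last holder out wakes device->misc_wait in the kernel */
		if (atomic_fetch_sub(&d->local_cnt, 1) == 1) {
			/* wake_up(&d->misc_wait) */
		}
	}
	return io_allowed;
}
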
150 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
156 * epoch of not yet barrier-acked requests, this function will cause a
167 spin_lock_irq(&connection->resource->req_lock); in tl_release()
169 /* find oldest not yet barrier-acked write request, in tl_release()
171 list_for_each_entry(r, &connection->transfer_log, tl_requests) { in tl_release()
172 const unsigned s = r->rq_state; in tl_release()
181 expect_epoch = req->epoch; in tl_release()
184 if (r->epoch != expect_epoch) in tl_release()
214 * to catch requests being barrier-acked "unexpectedly". in tl_release()
216 list_for_each_entry(req, &connection->transfer_log, tl_requests) in tl_release()
217 if (req->epoch == expect_epoch) { in tl_release()
221 req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests); in tl_release()
222 list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) { in tl_release()
224 if (req->epoch != expect_epoch) in tl_release()
226 peer_device = conn_peer_device(connection, req->device->vnr); in tl_release()
229 spin_unlock_irq(&connection->resource->req_lock); in tl_release()
234 spin_unlock_irq(&connection->resource->req_lock); in tl_release()
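
The two-pass structure of tl_release() above, reduced to a standalone sketch over a singly linked request list (types simplified and hypothetical): pass one finds the epoch of the oldest write that has not been barrier-acked yet, pass two marks exactly the requests of that epoch.

#include <stdbool.h>

struct req {
	unsigned epoch;
	bool is_write;
	bool barrier_acked;
	struct req *next;
};

/* Ack all requests of the oldest unacked write epoch; returns that
 * epoch number, or -1 if no pending write was found. */
static int tl_release_sketch(struct req *log)
{
	struct req *r;
	int expect_epoch = -1;

	/* pass 1: the oldest not yet barrier-acked write decides the epoch */
	for (r = log; r; r = r->next) {
		if (r->is_write && !r->barrier_acked) {
			expect_epoch = (int)r->epoch;
			break;
		}
	}
	if (expect_epoch < 0)
		return -1;

	/* pass 2: mark exactly the requests belonging to that epoch */
	for (r = log; r; r = r->next)
		if (r->epoch == (unsigned)expect_epoch)
			r->barrier_acked = true;

	return expect_epoch;
}
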
240 * _tl_restart() - Walks the transfer log, and applies an action to all requests
247 /* must hold resource->req_lock */
253 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) { in _tl_restart()
254 peer_device = conn_peer_device(connection, req->device->vnr); in _tl_restart()
261 spin_lock_irq(&connection->resource->req_lock); in tl_restart()
263 spin_unlock_irq(&connection->resource->req_lock); in tl_restart()
267 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
280 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
285 struct drbd_connection *connection = first_peer_device(device)->connection; in tl_abort_disk_io()
288 spin_lock_irq(&connection->resource->req_lock); in tl_abort_disk_io()
289 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) { in tl_abort_disk_io()
290 if (!(req->rq_state & RQ_LOCAL_PENDING)) in tl_abort_disk_io()
292 if (req->device != device) in tl_abort_disk_io()
296 spin_unlock_irq(&connection->resource->req_lock); in tl_abort_disk_io()
302 struct drbd_resource *resource = thi->resource; in drbd_thread_setup()
306 snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s", in drbd_thread_setup()
307 thi->name[0], in drbd_thread_setup()
308 resource->name); in drbd_thread_setup()
313 retval = thi->function(thi); in drbd_thread_setup()
315 spin_lock_irqsave(&thi->t_lock, flags); in drbd_thread_setup()
319 * if now a re-connect request comes in, conn state goes C_UNCONNECTED, in drbd_thread_setup()
327 if (thi->t_state == RESTARTING) { in drbd_thread_setup()
328 drbd_info(resource, "Restarting %s thread\n", thi->name); in drbd_thread_setup()
329 thi->t_state = RUNNING; in drbd_thread_setup()
330 spin_unlock_irqrestore(&thi->t_lock, flags); in drbd_thread_setup()
334 thi->task = NULL; in drbd_thread_setup()
335 thi->t_state = NONE; in drbd_thread_setup()
337 complete_all(&thi->stop); in drbd_thread_setup()
338 spin_unlock_irqrestore(&thi->t_lock, flags); in drbd_thread_setup()
340 drbd_info(resource, "Terminating %s\n", current->comm); in drbd_thread_setup()
344 if (thi->connection) in drbd_thread_setup()
345 kref_put(&thi->connection->kref, drbd_destroy_connection); in drbd_thread_setup()
346 kref_put(&resource->kref, drbd_destroy_resource); in drbd_thread_setup()
354 spin_lock_init(&thi->t_lock); in drbd_thread_init()
355 thi->task = NULL; in drbd_thread_init()
356 thi->t_state = NONE; in drbd_thread_init()
357 thi->function = func; in drbd_thread_init()
358 thi->resource = resource; in drbd_thread_init()
359 thi->connection = NULL; in drbd_thread_init()
360 thi->name = name; in drbd_thread_init()
365 struct drbd_resource *resource = thi->resource; in drbd_thread_start()
371 spin_lock_irqsave(&thi->t_lock, flags); in drbd_thread_start()
373 switch (thi->t_state) { in drbd_thread_start()
376 thi->name, current->comm, current->pid); in drbd_thread_start()
378 /* Get ref on module for thread - this is released when thread exits */ in drbd_thread_start()
381 spin_unlock_irqrestore(&thi->t_lock, flags); in drbd_thread_start()
385 kref_get(&resource->kref); in drbd_thread_start()
386 if (thi->connection) in drbd_thread_start()
387 kref_get(&thi->connection->kref); in drbd_thread_start()
389 init_completion(&thi->stop); in drbd_thread_start()
390 thi->reset_cpu_mask = 1; in drbd_thread_start()
391 thi->t_state = RUNNING; in drbd_thread_start()
392 spin_unlock_irqrestore(&thi->t_lock, flags); in drbd_thread_start()
393 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */ in drbd_thread_start()
396 "drbd_%c_%s", thi->name[0], thi->resource->name); in drbd_thread_start()
401 if (thi->connection) in drbd_thread_start()
402 kref_put(&thi->connection->kref, drbd_destroy_connection); in drbd_thread_start()
403 kref_put(&resource->kref, drbd_destroy_resource); in drbd_thread_start()
407 spin_lock_irqsave(&thi->t_lock, flags); in drbd_thread_start()
408 thi->task = nt; in drbd_thread_start()
409 thi->t_state = RUNNING; in drbd_thread_start()
410 spin_unlock_irqrestore(&thi->t_lock, flags); in drbd_thread_start()
414 thi->t_state = RESTARTING; in drbd_thread_start()
416 thi->name, current->comm, current->pid); in drbd_thread_start()
421 spin_unlock_irqrestore(&thi->t_lock, flags); in drbd_thread_start()
436 spin_lock_irqsave(&thi->t_lock, flags); in _drbd_thread_stop()
438 if (thi->t_state == NONE) { in _drbd_thread_stop()
439 spin_unlock_irqrestore(&thi->t_lock, flags); in _drbd_thread_stop()
445 if (thi->t_state != ns) { in _drbd_thread_stop()
446 if (thi->task == NULL) { in _drbd_thread_stop()
447 spin_unlock_irqrestore(&thi->t_lock, flags); in _drbd_thread_stop()
451 thi->t_state = ns; in _drbd_thread_stop()
453 init_completion(&thi->stop); in _drbd_thread_stop()
454 if (thi->task != current) in _drbd_thread_stop()
455 send_sig(DRBD_SIGKILL, thi->task, 1); in _drbd_thread_stop()
458 spin_unlock_irqrestore(&thi->t_lock, flags); in _drbd_thread_stop()
461 wait_for_completion(&thi->stop); in _drbd_thread_stop()
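
A compact sketch of the thread life cycle the drbd_thread_setup()/start()/stop() fragments above implement. A pthread mutex stands in for thi->t_lock; the EXITING state and the exact transitions are partly inferred from the fragments, so treat this as an approximation.

#include <pthread.h>
#include <stdbool.h>

enum t_state { T_NONE, T_RUNNING, T_EXITING, T_RESTARTING };

struct thread_info {
	pthread_mutex_t lock;   /* stands in for thi->t_lock */
	enum t_state state;
};

/* Start is only meaningful from NONE (spawn a worker) or RESTARTING
 * (the exiting worker loops around by itself, cf. drbd_thread_setup). */
static bool thread_start(struct thread_info *thi)
{
	bool ok = true;

	pthread_mutex_lock(&thi->lock);
	switch (thi->state) {
	case T_NONE:
		thi->state = T_RUNNING;  /* then actually spawn the thread */
		break;
	case T_RESTARTING:
		break;                   /* restart already under way */
	default:
		ok = false;              /* RUNNING or EXITING: nothing to do */
	}
	pthread_mutex_unlock(&thi->lock);
	return ok;
}

/* Stop requests EXITING (or RESTARTING for "stop, then start again"),
 * signals the worker, and would then wait on a completion. */
static void thread_stop(struct thread_info *thi, bool restart)
{
	enum t_state ns = restart ? T_RESTARTING : T_EXITING;

	pthread_mutex_lock(&thi->lock);
	if (thi->state != T_NONE)
		thi->state = ns;         /* plus send_sig() to the worker */
	pthread_mutex_unlock(&thi->lock);
	/* wait_for_completion(&thi->stop) in the kernel code */
}
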
466 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
483 for_each_cpu(cpu, resource->cpu_mask) in drbd_calc_cpu_mask()
503 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
511 struct drbd_resource *resource = thi->resource; in drbd_thread_current_set_cpu()
514 if (!thi->reset_cpu_mask) in drbd_thread_current_set_cpu()
516 thi->reset_cpu_mask = 0; in drbd_thread_current_set_cpu()
517 set_cpus_allowed_ptr(p, resource->cpu_mask); in drbd_thread_current_set_cpu()
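
What the reset_cpu_mask handshake above amounts to: a cheap flag test in the thread's hot path, with the expensive affinity change performed only after drbd_calc_cpu_mask() produced a new mask. Userspace sketch; pthread_setaffinity_np (Linux/glibc, _GNU_SOURCE) stands in for set_cpus_allowed_ptr().

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct thr {
	bool reset_cpu_mask;    /* set after the resource's mask changed */
	const cpu_set_t *mask;  /* stands in for resource->cpu_mask */
};

/* Called from the thread's main loop: flag test in the common case,
 * affinity syscall only right after a mask change. */
static void thread_current_set_cpu(struct thr *t)
{
	if (!t->reset_cpu_mask)
		return;
	t->reset_cpu_mask = false;
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), t->mask);
}
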
524 * drbd_header_size - size of a packet header
527 * word aligned on 64-bit architectures. (The bitmap send and receive code
532 if (connection->agreed_pro_version >= 100) { in drbd_header_size()
545 h->magic = cpu_to_be32(DRBD_MAGIC); in prepare_header80()
546 h->command = cpu_to_be16(cmd); in prepare_header80()
547 h->length = cpu_to_be16(size); in prepare_header80()
553 h->magic = cpu_to_be16(DRBD_MAGIC_BIG); in prepare_header95()
554 h->command = cpu_to_be16(cmd); in prepare_header95()
555 h->length = cpu_to_be32(size); in prepare_header95()
562 h->magic = cpu_to_be32(DRBD_MAGIC_100); in prepare_header100()
563 h->volume = cpu_to_be16(vnr); in prepare_header100()
564 h->command = cpu_to_be16(cmd); in prepare_header100()
565 h->length = cpu_to_be32(size); in prepare_header100()
566 h->pad = 0; in prepare_header100()
573 if (connection->agreed_pro_version >= 100) in prepare_header()
575 else if (connection->agreed_pro_version >= 95 && in prepare_header()
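
On-wire header layouts as implied by the prepare_header80/95/100 assignments and the version dispatch above. Struct names and the packing attribute are assumptions; field widths and order follow directly from the cpu_to_be16/32 calls.

#include <stdint.h>

/* protocol < 95: 8 bytes */
struct hdr80 {
	uint32_t magic;     /* be32: DRBD_MAGIC */
	uint16_t command;   /* be16 */
	uint16_t length;    /* be16: payload size */
} __attribute__((packed));

/* protocol 95..99: 16-bit magic frees room for a 32-bit length */
struct hdr95 {
	uint16_t magic;     /* be16: DRBD_MAGIC_BIG */
	uint16_t command;   /* be16 */
	uint32_t length;    /* be32 */
} __attribute__((packed));

/* protocol >= 100: adds a volume number and explicit padding, keeping
 * the header a multiple of 8 bytes (cf. the word-alignment remark in
 * the drbd_header_size() comment above) */
struct hdr100 {
	uint32_t magic;     /* be32: DRBD_MAGIC_100 */
	uint16_t volume;    /* be16: vnr */
	uint16_t command;   /* be16 */
	uint32_t length;    /* be32 */
	uint32_t pad;       /* zeroed */
} __attribute__((packed));
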
585 if (!sock->socket) in __conn_prepare_command()
587 return sock->sbuf + drbd_header_size(connection); in __conn_prepare_command()
594 mutex_lock(&sock->mutex); in conn_prepare_command()
597 mutex_unlock(&sock->mutex); in conn_prepare_command()
604 return conn_prepare_command(peer_device->connection, sock); in drbd_prepare_command()
624 header_size += prepare_header(connection, vnr, sock->sbuf, cmd, in __send_command()
626 err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size, in __send_command()
629 err = drbd_send_all(connection, sock->socket, data, size, 0); in __send_command()
633 tcp_sock_set_nodelay(sock->socket->sk); in __send_command()
652 mutex_unlock(&sock->mutex); in conn_send_command()
662 err = __send_command(peer_device->connection, peer_device->device->vnr, in drbd_send_command()
664 mutex_unlock(&sock->mutex); in drbd_send_command()
672 sock = &connection->meta; in drbd_send_ping()
674 return -EIO; in drbd_send_ping()
682 sock = &connection->meta; in drbd_send_ping_ack()
684 return -EIO; in drbd_send_ping_ack()
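
The prepare/fill/send/unlock discipline shared by all the senders above, with the header-only ping as the minimal case: conn_prepare_command() takes the per-socket mutex and hands out the payload area behind the header, and a torn-down socket surfaces as -EIO. A userspace sketch with hypothetical types:

#include <pthread.h>
#include <stddef.h>

struct dsock {
	pthread_mutex_t mutex;  /* sock->mutex */
	int fd;                 /* -1 once the connection is torn down */
	char sbuf[4096];        /* per-socket send staging buffer */
};

/* conn_prepare_command(): take the socket mutex, verify the socket
 * still exists, hand out the payload area behind the header. */
static void *prepare_command(struct dsock *s, size_t header_size)
{
	pthread_mutex_lock(&s->mutex);
	if (s->fd < 0) {
		pthread_mutex_unlock(&s->mutex);
		return NULL;    /* callers map this to -EIO */
	}
	return s->sbuf + header_size;
}

/* The matching send step writes the header into sbuf, transmits
 * header + payload, and unlocks s->mutex (cf. conn_send_command). */
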
693 const int apv = peer_device->connection->agreed_pro_version; in drbd_send_sync_param()
698 sock = &peer_device->connection->data; in drbd_send_sync_param()
701 return -EIO; in drbd_send_sync_param()
704 nc = rcu_dereference(peer_device->connection->net_conf); in drbd_send_sync_param()
708 + strlen(nc->verify_alg) + 1 in drbd_send_sync_param()
715 BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX); in drbd_send_sync_param()
716 memset(&p->algs, 0, sizeof(p->algs)); in drbd_send_sync_param()
718 if (get_ldev(peer_device->device)) { in drbd_send_sync_param()
719 dc = rcu_dereference(peer_device->device->ldev->disk_conf); in drbd_send_sync_param()
720 p->resync_rate = cpu_to_be32(dc->resync_rate); in drbd_send_sync_param()
721 p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead); in drbd_send_sync_param()
722 p->c_delay_target = cpu_to_be32(dc->c_delay_target); in drbd_send_sync_param()
723 p->c_fill_target = cpu_to_be32(dc->c_fill_target); in drbd_send_sync_param()
724 p->c_max_rate = cpu_to_be32(dc->c_max_rate); in drbd_send_sync_param()
725 put_ldev(peer_device->device); in drbd_send_sync_param()
727 p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF); in drbd_send_sync_param()
728 p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF); in drbd_send_sync_param()
729 p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF); in drbd_send_sync_param()
730 p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF); in drbd_send_sync_param()
731 p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF); in drbd_send_sync_param()
735 strcpy(p->verify_alg, nc->verify_alg); in drbd_send_sync_param()
737 strcpy(p->csums_alg, nc->csums_alg); in drbd_send_sync_param()
750 sock = &connection->data; in __drbd_send_protocol()
753 return -EIO; in __drbd_send_protocol()
756 nc = rcu_dereference(connection->net_conf); in __drbd_send_protocol()
758 if (nc->tentative && connection->agreed_pro_version < 92) { in __drbd_send_protocol()
760 drbd_err(connection, "--dry-run is not supported by peer"); in __drbd_send_protocol()
761 return -EOPNOTSUPP; in __drbd_send_protocol()
765 if (connection->agreed_pro_version >= 87) in __drbd_send_protocol()
766 size += strlen(nc->integrity_alg) + 1; in __drbd_send_protocol()
768 p->protocol = cpu_to_be32(nc->wire_protocol); in __drbd_send_protocol()
769 p->after_sb_0p = cpu_to_be32(nc->after_sb_0p); in __drbd_send_protocol()
770 p->after_sb_1p = cpu_to_be32(nc->after_sb_1p); in __drbd_send_protocol()
771 p->after_sb_2p = cpu_to_be32(nc->after_sb_2p); in __drbd_send_protocol()
772 p->two_primaries = cpu_to_be32(nc->two_primaries); in __drbd_send_protocol()
774 if (nc->discard_my_data) in __drbd_send_protocol()
776 if (nc->tentative) in __drbd_send_protocol()
778 p->conn_flags = cpu_to_be32(cf); in __drbd_send_protocol()
780 if (connection->agreed_pro_version >= 87) in __drbd_send_protocol()
781 strcpy(p->integrity_alg, nc->integrity_alg); in __drbd_send_protocol()
791 mutex_lock(&connection->data.mutex); in drbd_send_protocol()
793 mutex_unlock(&connection->data.mutex); in drbd_send_protocol()
800 struct drbd_device *device = peer_device->device; in _drbd_send_uuids()
808 sock = &peer_device->connection->data; in _drbd_send_uuids()
812 return -EIO; in _drbd_send_uuids()
814 spin_lock_irq(&device->ldev->md.uuid_lock); in _drbd_send_uuids()
816 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); in _drbd_send_uuids()
817 spin_unlock_irq(&device->ldev->md.uuid_lock); in _drbd_send_uuids()
819 device->comm_bm_set = drbd_bm_total_weight(device); in _drbd_send_uuids()
820 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set); in _drbd_send_uuids()
822 uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0; in _drbd_send_uuids()
824 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0; in _drbd_send_uuids()
825 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0; in _drbd_send_uuids()
826 p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags); in _drbd_send_uuids()
845 u64 *uuid = device->ldev->md.uuid; in drbd_print_uuids()
856 (unsigned long long)device->ed_uuid); in drbd_print_uuids()
862 struct drbd_device *device = peer_device->device; in drbd_gen_and_send_sync_uuid()
867 D_ASSERT(device, device->state.disk == D_UP_TO_DATE); in drbd_gen_and_send_sync_uuid()
869 uuid = device->ldev->md.uuid[UI_BITMAP]; in drbd_gen_and_send_sync_uuid()
878 sock = &peer_device->connection->data; in drbd_gen_and_send_sync_uuid()
881 p->uuid = cpu_to_be64(uuid); in drbd_gen_and_send_sync_uuid()
888 struct drbd_device *device = peer_device->device; in drbd_send_sizes()
896 sock = &peer_device->connection->data; in drbd_send_sizes()
899 return -EIO; in drbd_send_sizes()
902 if (peer_device->connection->agreed_features & DRBD_FF_WSAME) in drbd_send_sizes()
903 packet_size += sizeof(p->qlim[0]); in drbd_send_sizes()
907 struct block_device *bdev = device->ldev->backing_bdev; in drbd_send_sizes()
910 d_size = drbd_get_max_capacity(device->ldev); in drbd_send_sizes()
912 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; in drbd_send_sizes()
917 p->qlim->physical_block_size = in drbd_send_sizes()
919 p->qlim->logical_block_size = in drbd_send_sizes()
921 p->qlim->alignment_offset = in drbd_send_sizes()
923 p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev)); in drbd_send_sizes()
924 p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev)); in drbd_send_sizes()
925 p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev); in drbd_send_sizes()
928 struct request_queue *q = device->rq_queue; in drbd_send_sizes()
930 p->qlim->physical_block_size = in drbd_send_sizes()
932 p->qlim->logical_block_size = in drbd_send_sizes()
934 p->qlim->alignment_offset = 0; in drbd_send_sizes()
935 p->qlim->io_min = cpu_to_be32(queue_io_min(q)); in drbd_send_sizes()
936 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); in drbd_send_sizes()
937 p->qlim->discard_enabled = 0; in drbd_send_sizes()
945 if (peer_device->connection->agreed_pro_version <= 94) in drbd_send_sizes()
947 else if (peer_device->connection->agreed_pro_version < 100) in drbd_send_sizes()
950 p->d_size = cpu_to_be64(d_size); in drbd_send_sizes()
951 p->u_size = cpu_to_be64(u_size); in drbd_send_sizes()
953 p->c_size = 0; in drbd_send_sizes()
955 p->c_size = cpu_to_be64(get_capacity(device->vdisk)); in drbd_send_sizes()
956 p->max_bio_size = cpu_to_be32(max_bio_size); in drbd_send_sizes()
957 p->queue_order_type = cpu_to_be16(q_order_type); in drbd_send_sizes()
958 p->dds_flags = cpu_to_be16(flags); in drbd_send_sizes()
964 * drbd_send_current_state() - Sends the drbd state to the peer
972 sock = &peer_device->connection->data; in drbd_send_current_state()
975 return -EIO; in drbd_send_current_state()
976 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */ in drbd_send_current_state()
981 * drbd_send_state() - After a state change, sends the new state to the peer
995 sock = &peer_device->connection->data; in drbd_send_state()
998 return -EIO; in drbd_send_state()
999 p->state = cpu_to_be32(state.i); /* Within the send mutex */ in drbd_send_state()
1008 sock = &peer_device->connection->data; in drbd_send_state_req()
1011 return -EIO; in drbd_send_state_req()
1012 p->mask = cpu_to_be32(mask.i); in drbd_send_state_req()
1013 p->val = cpu_to_be32(val.i); in drbd_send_state_req()
1023 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ; in conn_send_state_req()
1024 sock = &connection->data; in conn_send_state_req()
1027 return -EIO; in conn_send_state_req()
1028 p->mask = cpu_to_be32(mask.i); in conn_send_state_req()
1029 p->val = cpu_to_be32(val.i); in conn_send_state_req()
1038 sock = &peer_device->connection->meta; in drbd_send_sr_reply()
1041 p->retcode = cpu_to_be32(retcode); in drbd_send_sr_reply()
1050 …enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_RE… in conn_send_sr_reply()
1052 sock = &connection->meta; in conn_send_sr_reply()
1055 p->retcode = cpu_to_be32(retcode); in conn_send_sr_reply()
1063 p->encoding = (p->encoding & ~0xf) | code; in dcbp_set_code()
1068 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0); in dcbp_set_start()
1074 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4); in dcbp_set_pad_bits()
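
Layout of the compressed-bitmap encoding byte, as defined by the masks in the three dcbp_set_* helpers above (a sketch; the packer name is descriptive, not the kernel's):

#include <stdint.h>

/* p_compressed_bm encoding byte:
 *   bit  7    : the first run length describes set bits  (dcbp_set_start)
 *   bits 6..4 : number of trailing pad bits, 0..7        (dcbp_set_pad_bits)
 *   bits 3..0 : encoding code, e.g. an RLE/VLI variant   (dcbp_set_code)
 */
static inline uint8_t dcbp_pack(unsigned code, int first_is_set,
				unsigned pad_bits)
{
	return (uint8_t)((code & 0xf) |
			 ((pad_bits & 0x7) << 4) |
			 (first_is_set ? 0x80 : 0));
}
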
1092 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle; in fill_bitmap_rle_bits()
1094 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90) in fill_bitmap_rle_bits()
1097 if (c->bit_offset >= c->bm_bits) in fill_bitmap_rle_bits()
1101 bitstream_init(&bs, p->code, size, 0); in fill_bitmap_rle_bits()
1102 memset(p->code, 0, size); in fill_bitmap_rle_bits()
1106 /* p->encoding & 0x80 stores whether the first run length is set. in fill_bitmap_rle_bits()
1114 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset) in fill_bitmap_rle_bits()
1115 : _drbd_bm_find_next(device, c->bit_offset); in fill_bitmap_rle_bits()
1116 if (tmp == -1UL) in fill_bitmap_rle_bits()
1117 tmp = c->bm_bits; in fill_bitmap_rle_bits()
1118 rl = tmp - c->bit_offset; in fill_bitmap_rle_bits()
1136 "t:%u bo:%lu\n", toggle, c->bit_offset); in fill_bitmap_rle_bits()
1137 return -1; in fill_bitmap_rle_bits()
1141 if (bits == -ENOBUFS) /* buffer full */ in fill_bitmap_rle_bits()
1150 c->bit_offset = tmp; in fill_bitmap_rle_bits()
1151 } while (c->bit_offset < c->bm_bits); in fill_bitmap_rle_bits()
1153 len = bs.cur.b - p->code + !!bs.cur.bit; in fill_bitmap_rle_bits()
1158 c->bit_offset -= plain_bits; in fill_bitmap_rle_bits()
1160 c->bit_offset = c->word_offset * BITS_PER_LONG; in fill_bitmap_rle_bits()
1165 * update c->word_offset. */ in fill_bitmap_rle_bits()
1169 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7); in fill_bitmap_rle_bits()
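
The core idea of fill_bitmap_rle_bits() above, stripped of the variable-length-integer bitstream: encode the bitmap as alternating run lengths of clear and set bits, remember what the first run describes (the start flag in the encoding byte carries that same information), and fall back to plain transmission when the output buffer fills up. Self-contained sketch with a plain array instead of the VLI code:

#include <stdbool.h>
#include <stddef.h>

/* Encode bits[0..nbits) as alternating run lengths; *first_is_set tells
 * the decoder what the first run describes. Returns the number of runs,
 * or -1 if runs[] is too small (i.e. "plain wins"). */
static int rle_encode(const bool *bits, size_t nbits,
		      size_t *runs, size_t max_runs, bool *first_is_set)
{
	size_t i = 0, n = 0;
	bool toggle;

	if (nbits == 0)
		return 0;
	toggle = bits[0];
	*first_is_set = toggle;

	while (i < nbits) {
		size_t start = i;

		while (i < nbits && bits[i] == toggle)
			i++;                /* find the end of the current run */
		if (n == max_runs)
			return -1;          /* buffer full: fall back to plain */
		runs[n++] = i - start;      /* run length, always >= 1 here */
		toggle = !toggle;
	}
	return (int)n;
}
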
1183 struct drbd_device *device = peer_device->device; in send_bitmap_rle_or_plain()
1184 struct drbd_socket *sock = &peer_device->connection->data; in send_bitmap_rle_or_plain()
1185 unsigned int header_size = drbd_header_size(peer_device->connection); in send_bitmap_rle_or_plain()
1186 struct p_compressed_bm *p = sock->sbuf + header_size; in send_bitmap_rle_or_plain()
1190 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c); in send_bitmap_rle_or_plain()
1192 return -EIO; in send_bitmap_rle_or_plain()
1196 err = __send_command(peer_device->connection, device->vnr, sock, in send_bitmap_rle_or_plain()
1199 c->packets[0]++; in send_bitmap_rle_or_plain()
1200 c->bytes[0] += header_size + sizeof(*p) + len; in send_bitmap_rle_or_plain()
1202 if (c->bit_offset >= c->bm_bits) in send_bitmap_rle_or_plain()
1209 unsigned long *p = sock->sbuf + header_size; in send_bitmap_rle_or_plain()
1211 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; in send_bitmap_rle_or_plain()
1213 c->bm_words - c->word_offset); in send_bitmap_rle_or_plain()
1216 drbd_bm_get_lel(device, c->word_offset, num_words, p); in send_bitmap_rle_or_plain()
1217 err = __send_command(peer_device->connection, device->vnr, sock, P_BITMAP, in send_bitmap_rle_or_plain()
1219 c->word_offset += num_words; in send_bitmap_rle_or_plain()
1220 c->bit_offset = c->word_offset * BITS_PER_LONG; in send_bitmap_rle_or_plain()
1222 c->packets[1]++; in send_bitmap_rle_or_plain()
1223 c->bytes[1] += header_size + len; in send_bitmap_rle_or_plain()
1225 if (c->bit_offset > c->bm_bits) in send_bitmap_rle_or_plain()
1226 c->bit_offset = c->bm_bits; in send_bitmap_rle_or_plain()
1235 return -EIO; in send_bitmap_rle_or_plain()
1245 if (!expect(device, device->bitmap)) in _drbd_send_bitmap()
1249 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) { in _drbd_send_bitmap()
1254 * but otherwise process as per normal - need to tell other in _drbd_send_bitmap()
1256 drbd_err(device, "Failed to write bitmap to disk!\n"); in _drbd_send_bitmap()
1279 struct drbd_socket *sock = &peer_device->connection->data; in drbd_send_bitmap()
1280 int err = -1; in drbd_send_bitmap()
1282 mutex_lock(&sock->mutex); in drbd_send_bitmap()
1283 if (sock->socket) in drbd_send_bitmap()
1285 mutex_unlock(&sock->mutex); in drbd_send_bitmap()
1294 if (connection->cstate < C_WF_REPORT_PARAMS) in drbd_send_b_ack()
1297 sock = &connection->meta; in drbd_send_b_ack()
1301 p->barrier = barrier_nr; in drbd_send_b_ack()
1302 p->set_size = cpu_to_be32(set_size); in drbd_send_b_ack()
1307 * _drbd_send_ack() - Sends an ack packet
1320 if (peer_device->device->state.conn < C_CONNECTED) in _drbd_send_ack()
1321 return -EIO; in _drbd_send_ack()
1323 sock = &peer_device->connection->meta; in _drbd_send_ack()
1326 return -EIO; in _drbd_send_ack()
1327 p->sector = sector; in _drbd_send_ack()
1328 p->block_id = block_id; in _drbd_send_ack()
1329 p->blksize = blksize; in _drbd_send_ack()
1330 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq)); in _drbd_send_ack()
1334 /* dp->sector and dp->block_id already/still in network byte order,
1335 * data_size is payload size according to dp->head,
1340 if (peer_device->connection->peer_integrity_tfm) in drbd_send_ack_dp()
1341 data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm); in drbd_send_ack_dp()
1342 _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size), in drbd_send_ack_dp()
1343 dp->block_id); in drbd_send_ack_dp()
1349 _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id); in drbd_send_ack_rp()
1353 * drbd_send_ack() - Sends an ack packet
1362 cpu_to_be64(peer_req->i.sector), in drbd_send_ack()
1363 cpu_to_be32(peer_req->i.size), in drbd_send_ack()
1364 peer_req->block_id); in drbd_send_ack()
1384 sock = &peer_device->connection->data; in drbd_send_rs_deallocated()
1387 return -EIO; in drbd_send_rs_deallocated()
1388 p->sector = cpu_to_be64(peer_req->i.sector); in drbd_send_rs_deallocated()
1389 p->blksize = cpu_to_be32(peer_req->i.size); in drbd_send_rs_deallocated()
1390 p->pad = 0; in drbd_send_rs_deallocated()
1400 sock = &peer_device->connection->data; in drbd_send_drequest()
1403 return -EIO; in drbd_send_drequest()
1404 p->sector = cpu_to_be64(sector); in drbd_send_drequest()
1405 p->block_id = block_id; in drbd_send_drequest()
1406 p->blksize = cpu_to_be32(size); in drbd_send_drequest()
1418 sock = &peer_device->connection->data; in drbd_send_drequest_csum()
1421 return -EIO; in drbd_send_drequest_csum()
1422 p->sector = cpu_to_be64(sector); in drbd_send_drequest_csum()
1423 p->block_id = ID_SYNCER /* unused */; in drbd_send_drequest_csum()
1424 p->blksize = cpu_to_be32(size); in drbd_send_drequest_csum()
1433 sock = &peer_device->connection->data; in drbd_send_ov_request()
1436 return -EIO; in drbd_send_ov_request()
1437 p->sector = cpu_to_be64(sector); in drbd_send_ov_request()
1438 p->block_id = ID_SYNCER /* unused */; in drbd_send_ov_request()
1439 p->blksize = cpu_to_be32(size); in drbd_send_ov_request()
1450 /* long elapsed = (long)(jiffies - device->last_received); */ in we_should_drop_the_connection()
1452 drop_it = connection->meta.socket == sock in we_should_drop_the_connection()
1453 || !connection->ack_receiver.task in we_should_drop_the_connection()
1454 || get_t_state(&connection->ack_receiver) != RUNNING in we_should_drop_the_connection()
1455 || connection->cstate < C_WF_REPORT_PARAMS; in we_should_drop_the_connection()
1460 drop_it = !--connection->ko_count; in we_should_drop_the_connection()
1463 current->comm, current->pid, connection->ko_count); in we_should_drop_the_connection()
1467 return drop_it; /* && (device->state == R_PRIMARY) */; in we_should_drop_the_connection()
1472 struct sock *sk = connection->data.socket->sk; in drbd_update_congested()
1473 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5) in drbd_update_congested()
1474 set_bit(NET_CONGESTED, &connection->flags); in drbd_update_congested()
1505 socket = peer_device->connection->data.socket; in _drbd_no_send_page()
1507 err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags); in _drbd_no_send_page()
1510 peer_device->device->send_cnt += size >> 9; in _drbd_no_send_page()
1517 struct socket *socket = peer_device->connection->data.socket; in _drbd_send_page()
1521 int err = -EIO; in _drbd_send_page()
1523 /* e.g. XFS meta- & log-data is in slab pages, which have a in _drbd_send_page()
1532 drbd_update_congested(peer_device->connection); in _drbd_send_page()
1541 if (sent == -EAGAIN) { in _drbd_send_page()
1542 if (we_should_drop_the_connection(peer_device->connection, socket)) in _drbd_send_page()
1546 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n", in _drbd_send_page()
1552 len -= sent; in _drbd_send_page()
1554 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/); in _drbd_send_page()
1555 clear_bit(NET_CONGESTED, &peer_device->connection->flags); in _drbd_send_page()
1559 peer_device->device->send_cnt += size >> 9; in _drbd_send_page()
1604 bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL); in _drbd_send_zc_ee()
1605 struct page *page = peer_req->pages; in _drbd_send_zc_ee()
1606 unsigned len = peer_req->i.size; in _drbd_send_zc_ee()
1622 len -= l; in _drbd_send_zc_ee()
1630 if (connection->agreed_pro_version >= 95) in bio_flags_to_wire()
1631 return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) | in bio_flags_to_wire()
1632 (bio->bi_opf & REQ_FUA ? DP_FUA : 0) | in bio_flags_to_wire()
1633 (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) | in bio_flags_to_wire()
1636 ((connection->agreed_features & DRBD_FF_WZEROES) ? in bio_flags_to_wire()
1637 (DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0)) in bio_flags_to_wire()
1641 return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0; in bio_flags_to_wire()
1645 * R_PRIMARY -> Peer (P_DATA, P_TRIM)
1649 struct drbd_device *device = peer_device->device; in drbd_send_dblock()
1657 sock = &peer_device->connection->data; in drbd_send_dblock()
1659 digest_size = peer_device->connection->integrity_tfm ? in drbd_send_dblock()
1660 crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0; in drbd_send_dblock()
1663 return -EIO; in drbd_send_dblock()
1664 p->sector = cpu_to_be64(req->i.sector); in drbd_send_dblock()
1665 p->block_id = (unsigned long)req; in drbd_send_dblock()
1666 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); in drbd_send_dblock()
1667 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio); in drbd_send_dblock()
1668 if (device->state.conn >= C_SYNC_SOURCE && in drbd_send_dblock()
1669 device->state.conn <= C_PAUSED_SYNC_T) in drbd_send_dblock()
1671 if (peer_device->connection->agreed_pro_version >= 100) { in drbd_send_dblock()
1672 if (req->rq_state & RQ_EXP_RECEIVE_ACK) in drbd_send_dblock()
1676 if (req->rq_state & RQ_EXP_WRITE_ACK in drbd_send_dblock()
1680 p->dp_flags = cpu_to_be32(dp_flags); in drbd_send_dblock()
1685 t->size = cpu_to_be32(req->i.size); in drbd_send_dblock()
1686 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0); in drbd_send_dblock()
1694 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out); in drbd_send_dblock()
1695 err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, in drbd_send_dblock()
1696 sizeof(*p) + digest_size, NULL, req->i.size); in drbd_send_dblock()
1703 * For data-integrity enabled, we copy it as well, so we can be in drbd_send_dblock()
1709 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size) in drbd_send_dblock()
1710 err = _drbd_send_bio(peer_device, req->master_bio); in drbd_send_dblock()
1712 err = _drbd_send_zc_bio(peer_device, req->master_bio); in drbd_send_dblock()
1719 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest); in drbd_send_dblock()
1723 (unsigned long long)req->i.sector, req->i.size); in drbd_send_dblock()
1730 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */ in drbd_send_dblock()
1736 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1737 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1742 struct drbd_device *device = peer_device->device; in drbd_send_block()
1748 sock = &peer_device->connection->data; in drbd_send_block()
1751 digest_size = peer_device->connection->integrity_tfm ? in drbd_send_block()
1752 crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0; in drbd_send_block()
1755 return -EIO; in drbd_send_block()
1756 p->sector = cpu_to_be64(peer_req->i.sector); in drbd_send_block()
1757 p->block_id = peer_req->block_id; in drbd_send_block()
1758 p->seq_num = 0; /* unused */ in drbd_send_block()
1759 p->dp_flags = 0; in drbd_send_block()
1761 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1); in drbd_send_block()
1762 …err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NU… in drbd_send_block()
1765 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */ in drbd_send_block()
1775 sock = &peer_device->connection->data; in drbd_send_out_of_sync()
1778 return -EIO; in drbd_send_out_of_sync()
1779 p->sector = cpu_to_be64(req->i.sector); in drbd_send_out_of_sync()
1780 p->blksize = cpu_to_be32(req->i.size); in drbd_send_out_of_sync()
1791 /* divider row of a comment table contrasting behavior on the data socket "sock" with the meta socket "msock" */
1808 return -EBADR; in drbd_send()
1814 if (sock == connection->data.socket) { in drbd_send()
1816 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count; in drbd_send()
1822 if (rv == -EAGAIN) { in drbd_send()
1828 if (rv == -EINTR) { in drbd_send()
1837 if (sock == connection->data.socket) in drbd_send()
1838 clear_bit(NET_CONGESTED, &connection->flags); in drbd_send()
1841 if (rv != -EAGAIN) { in drbd_send()
1843 sock == connection->meta.socket ? "msock" : "sock", in drbd_send()
1854 * drbd_send_all - Send an entire buffer
1867 return -EIO; in drbd_send_all()
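
drbd_send_all() reports -EIO unless the whole buffer went out. The same pattern over a plain blocking socket, including the -EINTR retry that the drbd_send() fragments above handle explicitly (POSIX send(2); MSG_NOSIGNAL is Linux-specific):

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Send the entire buffer or fail: 0 on success, -1 on error/short send. */
static int send_all(int fd, const void *buf, size_t size)
{
	const char *p = buf;
	size_t left = size;

	while (left > 0) {
		ssize_t n = send(fd, p, left, MSG_NOSIGNAL);

		if (n < 0) {
			if (errno == EINTR)
				continue;   /* cf. the -EINTR branch in drbd_send() */
			return -1;          /* drbd maps failures here to -EIO */
		}
		if (n == 0)
			return -1;          /* peer went away */
		p += n;
		left -= (size_t)n;
	}
	return 0;
}
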
1871 static int drbd_open(struct gendisk *disk, blk_mode_t mode) in drbd_open() argument
1873 struct drbd_device *device = disk->private_data; in drbd_open()
1878 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_open()
1879 /* to have a stable device->state.role in drbd_open()
1882 if (device->state.role != R_PRIMARY) { in drbd_open()
1884 rv = -EROFS; in drbd_open()
1886 rv = -EMEDIUMTYPE; in drbd_open()
1890 device->open_cnt++; in drbd_open()
1891 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_open()
1899 struct drbd_device *device = gd->private_data; in drbd_release()
1902 device->open_cnt--; in drbd_release()
1906 /* need to hold resource->req_lock */
1909 if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) { in drbd_queue_unplug()
1910 D_ASSERT(device, device->state.role == R_PRIMARY); in drbd_queue_unplug()
1911 if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) { in drbd_queue_unplug()
1913 &first_peer_device(device)->connection->sender_work, in drbd_queue_unplug()
1914 &device->unplug_work); in drbd_queue_unplug()
1923 device->state = (union drbd_dev_state) { in drbd_set_defaults()
1927 .disk = D_DISKLESS, in drbd_set_defaults()
1939 atomic_set(&device->ap_bio_cnt, 0); in drbd_init_set_defaults()
1940 atomic_set(&device->ap_actlog_cnt, 0); in drbd_init_set_defaults()
1941 atomic_set(&device->ap_pending_cnt, 0); in drbd_init_set_defaults()
1942 atomic_set(&device->rs_pending_cnt, 0); in drbd_init_set_defaults()
1943 atomic_set(&device->unacked_cnt, 0); in drbd_init_set_defaults()
1944 atomic_set(&device->local_cnt, 0); in drbd_init_set_defaults()
1945 atomic_set(&device->pp_in_use_by_net, 0); in drbd_init_set_defaults()
1946 atomic_set(&device->rs_sect_in, 0); in drbd_init_set_defaults()
1947 atomic_set(&device->rs_sect_ev, 0); in drbd_init_set_defaults()
1948 atomic_set(&device->ap_in_flight, 0); in drbd_init_set_defaults()
1949 atomic_set(&device->md_io.in_use, 0); in drbd_init_set_defaults()
1951 mutex_init(&device->own_state_mutex); in drbd_init_set_defaults()
1952 device->state_mutex = &device->own_state_mutex; in drbd_init_set_defaults()
1954 spin_lock_init(&device->al_lock); in drbd_init_set_defaults()
1955 spin_lock_init(&device->peer_seq_lock); in drbd_init_set_defaults()
1957 INIT_LIST_HEAD(&device->active_ee); in drbd_init_set_defaults()
1958 INIT_LIST_HEAD(&device->sync_ee); in drbd_init_set_defaults()
1959 INIT_LIST_HEAD(&device->done_ee); in drbd_init_set_defaults()
1960 INIT_LIST_HEAD(&device->read_ee); in drbd_init_set_defaults()
1961 INIT_LIST_HEAD(&device->resync_reads); in drbd_init_set_defaults()
1962 INIT_LIST_HEAD(&device->resync_work.list); in drbd_init_set_defaults()
1963 INIT_LIST_HEAD(&device->unplug_work.list); in drbd_init_set_defaults()
1964 INIT_LIST_HEAD(&device->bm_io_work.w.list); in drbd_init_set_defaults()
1965 INIT_LIST_HEAD(&device->pending_master_completion[0]); in drbd_init_set_defaults()
1966 INIT_LIST_HEAD(&device->pending_master_completion[1]); in drbd_init_set_defaults()
1967 INIT_LIST_HEAD(&device->pending_completion[0]); in drbd_init_set_defaults()
1968 INIT_LIST_HEAD(&device->pending_completion[1]); in drbd_init_set_defaults()
1970 device->resync_work.cb = w_resync_timer; in drbd_init_set_defaults()
1971 device->unplug_work.cb = w_send_write_hint; in drbd_init_set_defaults()
1972 device->bm_io_work.w.cb = w_bitmap_io; in drbd_init_set_defaults()
1974 timer_setup(&device->resync_timer, resync_timer_fn, 0); in drbd_init_set_defaults()
1975 timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0); in drbd_init_set_defaults()
1976 timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0); in drbd_init_set_defaults()
1977 timer_setup(&device->request_timer, request_timer_fn, 0); in drbd_init_set_defaults()
1979 init_waitqueue_head(&device->misc_wait); in drbd_init_set_defaults()
1980 init_waitqueue_head(&device->state_wait); in drbd_init_set_defaults()
1981 init_waitqueue_head(&device->ee_wait); in drbd_init_set_defaults()
1982 init_waitqueue_head(&device->al_wait); in drbd_init_set_defaults()
1983 init_waitqueue_head(&device->seq_wait); in drbd_init_set_defaults()
1985 device->resync_wenr = LC_FREE; in drbd_init_set_defaults()
1986 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; in drbd_init_set_defaults()
1987 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; in drbd_init_set_defaults()
1994 set_capacity_and_notify(device->vdisk, size); in drbd_set_my_capacity()
2003 if (first_peer_device(device)->connection->receiver.t_state != NONE) in drbd_device_cleanup()
2005 first_peer_device(device)->connection->receiver.t_state); in drbd_device_cleanup()
2007 device->al_writ_cnt = in drbd_device_cleanup()
2008 device->bm_writ_cnt = in drbd_device_cleanup()
2009 device->read_cnt = in drbd_device_cleanup()
2010 device->recv_cnt = in drbd_device_cleanup()
2011 device->send_cnt = in drbd_device_cleanup()
2012 device->writ_cnt = in drbd_device_cleanup()
2013 device->p_size = in drbd_device_cleanup()
2014 device->rs_start = in drbd_device_cleanup()
2015 device->rs_total = in drbd_device_cleanup()
2016 device->rs_failed = 0; in drbd_device_cleanup()
2017 device->rs_last_events = 0; in drbd_device_cleanup()
2018 device->rs_last_sect_ev = 0; in drbd_device_cleanup()
2020 device->rs_mark_left[i] = 0; in drbd_device_cleanup()
2021 device->rs_mark_time[i] = 0; in drbd_device_cleanup()
2023 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL); in drbd_device_cleanup()
2025 set_capacity_and_notify(device->vdisk, 0); in drbd_device_cleanup()
2026 if (device->bitmap) { in drbd_device_cleanup()
2032 drbd_backing_dev_free(device, device->ldev); in drbd_device_cleanup()
2033 device->ldev = NULL; in drbd_device_cleanup()
2035 clear_bit(AL_SUSPENDED, &device->flags); in drbd_device_cleanup()
2037 D_ASSERT(device, list_empty(&device->active_ee)); in drbd_device_cleanup()
2038 D_ASSERT(device, list_empty(&device->sync_ee)); in drbd_device_cleanup()
2039 D_ASSERT(device, list_empty(&device->done_ee)); in drbd_device_cleanup()
2040 D_ASSERT(device, list_empty(&device->read_ee)); in drbd_device_cleanup()
2041 D_ASSERT(device, list_empty(&device->resync_reads)); in drbd_device_cleanup()
2042 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); in drbd_device_cleanup()
2043 D_ASSERT(device, list_empty(&device->resync_work.list)); in drbd_device_cleanup()
2044 D_ASSERT(device, list_empty(&device->unplug_work.list)); in drbd_device_cleanup()
2130 return -ENOMEM; in drbd_create_mempools()
2137 rr = drbd_free_peer_reqs(device, &device->active_ee); in drbd_release_all_peer_reqs()
2141 rr = drbd_free_peer_reqs(device, &device->sync_ee); in drbd_release_all_peer_reqs()
2145 rr = drbd_free_peer_reqs(device, &device->read_ee); in drbd_release_all_peer_reqs()
2149 rr = drbd_free_peer_reqs(device, &device->done_ee); in drbd_release_all_peer_reqs()
2158 struct drbd_resource *resource = device->resource; in drbd_destroy_device()
2161 timer_shutdown_sync(&device->request_timer); in drbd_destroy_device()
2164 D_ASSERT(device, device->open_cnt == 0); in drbd_destroy_device()
2168 * device (re-)configuration or state changes */ in drbd_destroy_device()
2170 drbd_backing_dev_free(device, device->ldev); in drbd_destroy_device()
2171 device->ldev = NULL; in drbd_destroy_device()
2175 lc_destroy(device->act_log); in drbd_destroy_device()
2176 lc_destroy(device->resync); in drbd_destroy_device()
2178 kfree(device->p_uuid); in drbd_destroy_device()
2179 /* device->p_uuid = NULL; */ in drbd_destroy_device()
2181 if (device->bitmap) /* should no longer be there. */ in drbd_destroy_device()
2183 __free_page(device->md_io.page); in drbd_destroy_device()
2184 put_disk(device->vdisk); in drbd_destroy_device()
2185 kfree(device->rs_plan_s); in drbd_destroy_device()
2191 kref_put(&peer_device->connection->kref, drbd_destroy_connection); in drbd_destroy_device()
2194 if (device->submit.wq) in drbd_destroy_device()
2195 destroy_workqueue(device->submit.wq); in drbd_destroy_device()
2197 kref_put(&resource->kref, drbd_destroy_resource); in drbd_destroy_device()
2217 spin_lock_irq(&retry->lock); in do_retry()
2218 list_splice_init(&retry->writes, &writes); in do_retry()
2219 spin_unlock_irq(&retry->lock); in do_retry()
2222 struct drbd_device *device = req->device; in do_retry()
2223 struct bio *bio = req->master_bio; in do_retry()
2227 expect(device, atomic_read(&req->completion_ref) == 0) && in do_retry()
2228 expect(device, req->rq_state & RQ_POSTPONED) && in do_retry()
2229 expect(device, (req->rq_state & RQ_LOCAL_PENDING) == 0 || in do_retry()
2230 (req->rq_state & RQ_LOCAL_ABORTED) != 0); in do_retry()
2234 req, atomic_read(&req->completion_ref), in do_retry()
2235 req->rq_state); in do_retry()
2240 * frozen local req->private_bio, in case we force-detached. in do_retry()
2242 kref_put(&req->kref, drbd_req_destroy); in do_retry()
2247 * concurrent writes in multi-primary setup. in do_retry()
2263 * holds resource->req_lock */
2268 list_move_tail(&req->tl_requests, &retry.writes); in drbd_restart_request()
2274 dec_ap_bio(req->device); in drbd_restart_request()
2284 idr_destroy(&resource->devices); in drbd_destroy_resource()
2285 free_cpumask_var(resource->cpu_mask); in drbd_destroy_resource()
2286 kfree(resource->name); in drbd_destroy_resource()
2295 list_del(&connection->connections); in drbd_free_resource()
2297 kref_put(&connection->kref, drbd_destroy_connection); in drbd_free_resource()
2300 kref_put(&resource->kref, drbd_destroy_resource); in drbd_free_resource()
2330 list_del(&resource->resources); in drbd_cleanup()
2346 spin_lock_init(&wq->q_lock); in drbd_init_workqueue()
2347 INIT_LIST_HEAD(&wq->q); in drbd_init_workqueue()
2348 init_waitqueue_head(&wq->q_wait); in drbd_init_workqueue()
2361 complete(&completion_work->done); in w_complete()
2384 if (!strcmp(resource->name, name)) { in drbd_find_resource()
2385 kref_get(&resource->kref); in drbd_find_resource()
2404 if (connection->my_addr_len == my_addr_len && in conn_get_by_addrs()
2405 connection->peer_addr_len == peer_addr_len && in conn_get_by_addrs()
2406 !memcmp(&connection->my_addr, my_addr, my_addr_len) && in conn_get_by_addrs()
2407 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) { in conn_get_by_addrs()
2408 kref_get(&connection->kref); in conn_get_by_addrs()
2421 socket->rbuf = (void *) __get_free_page(GFP_KERNEL); in drbd_alloc_socket()
2422 if (!socket->rbuf) in drbd_alloc_socket()
2423 return -ENOMEM; in drbd_alloc_socket()
2424 socket->sbuf = (void *) __get_free_page(GFP_KERNEL); in drbd_alloc_socket()
2425 if (!socket->sbuf) in drbd_alloc_socket()
2426 return -ENOMEM; in drbd_alloc_socket()
2432 free_page((unsigned long) socket->sbuf); in drbd_free_socket()
2433 free_page((unsigned long) socket->rbuf); in drbd_free_socket()
2440 crypto_free_shash(connection->csums_tfm); in conn_free_crypto()
2441 crypto_free_shash(connection->verify_tfm); in conn_free_crypto()
2442 crypto_free_shash(connection->cram_hmac_tfm); in conn_free_crypto()
2443 crypto_free_shash(connection->integrity_tfm); in conn_free_crypto()
2444 crypto_free_shash(connection->peer_integrity_tfm); in conn_free_crypto()
2445 kfree(connection->int_dig_in); in conn_free_crypto()
2446 kfree(connection->int_dig_vv); in conn_free_crypto()
2448 connection->csums_tfm = NULL; in conn_free_crypto()
2449 connection->verify_tfm = NULL; in conn_free_crypto()
2450 connection->cram_hmac_tfm = NULL; in conn_free_crypto()
2451 connection->integrity_tfm = NULL; in conn_free_crypto()
2452 connection->peer_integrity_tfm = NULL; in conn_free_crypto()
2453 connection->int_dig_in = NULL; in conn_free_crypto()
2454 connection->int_dig_vv = NULL; in conn_free_crypto()
2464 return -ENOMEM; in set_resource_options()
2467 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { in set_resource_options()
2468 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE, in set_resource_options()
2470 if (err == -EOVERFLOW) { in set_resource_options()
2477 res_opts->cpu_mask, in set_resource_options()
2478 strlen(res_opts->cpu_mask) > 12 ? "..." : "", in set_resource_options()
2490 resource->res_opts = *res_opts; in set_resource_options()
2493 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) { in set_resource_options()
2494 cpumask_copy(resource->cpu_mask, new_cpu_mask); in set_resource_options()
2496 connection->receiver.reset_cpu_mask = 1; in set_resource_options()
2497 connection->ack_receiver.reset_cpu_mask = 1; in set_resource_options()
2498 connection->worker.reset_cpu_mask = 1; in set_resource_options()
2516 resource->name = kstrdup(name, GFP_KERNEL); in drbd_create_resource()
2517 if (!resource->name) in drbd_create_resource()
2519 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL)) in drbd_create_resource()
2521 kref_init(&resource->kref); in drbd_create_resource()
2522 idr_init(&resource->devices); in drbd_create_resource()
2523 INIT_LIST_HEAD(&resource->connections); in drbd_create_resource()
2524 resource->write_ordering = WO_BDEV_FLUSH; in drbd_create_resource()
2525 list_add_tail_rcu(&resource->resources, &drbd_resources); in drbd_create_resource()
2526 mutex_init(&resource->conf_update); in drbd_create_resource()
2527 mutex_init(&resource->adm_mutex); in drbd_create_resource()
2528 spin_lock_init(&resource->req_lock); in drbd_create_resource()
2533 kfree(resource->name); in drbd_create_resource()
2550 if (drbd_alloc_socket(&connection->data)) in conn_create()
2552 if (drbd_alloc_socket(&connection->meta)) in conn_create()
2555 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); in conn_create()
2556 if (!connection->current_epoch) in conn_create()
2559 INIT_LIST_HEAD(&connection->transfer_log); in conn_create()
2561 INIT_LIST_HEAD(&connection->current_epoch->list); in conn_create()
2562 connection->epochs = 1; in conn_create()
2563 spin_lock_init(&connection->epoch_lock); in conn_create()
2565 connection->send.seen_any_write_yet = false; in conn_create()
2566 connection->send.current_epoch_nr = 0; in conn_create()
2567 connection->send.current_epoch_writes = 0; in conn_create()
2573 connection->cstate = C_STANDALONE; in conn_create()
2574 mutex_init(&connection->cstate_mutex); in conn_create()
2575 init_waitqueue_head(&connection->ping_wait); in conn_create()
2576 idr_init(&connection->peer_devices); in conn_create()
2578 drbd_init_workqueue(&connection->sender_work); in conn_create()
2579 mutex_init(&connection->data.mutex); in conn_create()
2580 mutex_init(&connection->meta.mutex); in conn_create()
2582 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver"); in conn_create()
2583 connection->receiver.connection = connection; in conn_create()
2584 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker"); in conn_create()
2585 connection->worker.connection = connection; in conn_create()
2586 drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv"); in conn_create()
2587 connection->ack_receiver.connection = connection; in conn_create()
2589 kref_init(&connection->kref); in conn_create()
2591 connection->resource = resource; in conn_create()
2596 kref_get(&resource->kref); in conn_create()
2597 list_add_tail_rcu(&connection->connections, &resource->connections); in conn_create()
2602 list_del(&resource->resources); in conn_create()
2605 kfree(connection->current_epoch); in conn_create()
2606 drbd_free_socket(&connection->meta); in conn_create()
2607 drbd_free_socket(&connection->data); in conn_create()
2615 struct drbd_resource *resource = connection->resource; in drbd_destroy_connection()
2617 if (atomic_read(&connection->current_epoch->epoch_size) != 0) in drbd_destroy_connection()
2618 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size)); in drbd_destroy_connection()
2619 kfree(connection->current_epoch); in drbd_destroy_connection()
2621 idr_destroy(&connection->peer_devices); in drbd_destroy_connection()
2623 drbd_free_socket(&connection->meta); in drbd_destroy_connection()
2624 drbd_free_socket(&connection->data); in drbd_destroy_connection()
2625 kfree(connection->int_dig_in); in drbd_destroy_connection()
2626 kfree(connection->int_dig_vv); in drbd_destroy_connection()
2628 kref_put(&resource->kref, drbd_destroy_resource); in drbd_destroy_connection()
2635 device->submit.wq = in init_submitter()
2636 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor); in init_submitter()
2637 if (!device->submit.wq) in init_submitter()
2638 return -ENOMEM; in init_submitter()
2640 INIT_WORK(&device->submit.worker, do_submit); in init_submitter()
2641 INIT_LIST_HEAD(&device->submit.writes); in init_submitter()
2647 struct drbd_resource *resource = adm_ctx->resource; in drbd_create_device()
2651 struct gendisk *disk; in drbd_create_device() local
2653 int vnr = adm_ctx->volume; in drbd_create_device()
2671 /* GFP_KERNEL, we are outside of all write-out paths */ in drbd_create_device()
2675 kref_init(&device->kref); in drbd_create_device()
2677 kref_get(&resource->kref); in drbd_create_device()
2678 device->resource = resource; in drbd_create_device()
2679 device->minor = minor; in drbd_create_device()
2680 device->vnr = vnr; in drbd_create_device()
2684 disk = blk_alloc_disk(&lim, NUMA_NO_NODE); in drbd_create_device()
2685 if (IS_ERR(disk)) { in drbd_create_device()
2686 err = PTR_ERR(disk); in drbd_create_device()
2690 device->vdisk = disk; in drbd_create_device()
2691 device->rq_queue = disk->queue; in drbd_create_device()
2693 set_disk_ro(disk, true); in drbd_create_device()
2695 disk->major = DRBD_MAJOR; in drbd_create_device()
2696 disk->first_minor = minor; in drbd_create_device()
2697 disk->minors = 1; in drbd_create_device()
2698 disk->fops = &drbd_ops; in drbd_create_device()
2699 disk->flags |= GENHD_FL_NO_PART; in drbd_create_device()
2700 sprintf(disk->disk_name, "drbd%d", minor); in drbd_create_device()
2701 disk->private_data = device; in drbd_create_device()
2703 device->md_io.page = alloc_page(GFP_KERNEL); in drbd_create_device()
2704 if (!device->md_io.page) in drbd_create_device()
2709 device->read_requests = RB_ROOT; in drbd_create_device()
2710 device->write_requests = RB_ROOT; in drbd_create_device()
2714 if (id == -ENOSPC) in drbd_create_device()
2718 kref_get(&device->kref); in drbd_create_device()
2720 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL); in drbd_create_device()
2722 if (id == -ENOSPC) in drbd_create_device()
2726 kref_get(&device->kref); in drbd_create_device()
2728 INIT_LIST_HEAD(&device->peer_devices); in drbd_create_device()
2729 INIT_LIST_HEAD(&device->pending_bitmap_io); in drbd_create_device()
2734 peer_device->connection = connection; in drbd_create_device()
2735 peer_device->device = device; in drbd_create_device()
2737 list_add(&peer_device->peer_devices, &device->peer_devices); in drbd_create_device()
2738 kref_get(&device->kref); in drbd_create_device()
2740 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL); in drbd_create_device()
2742 if (id == -ENOSPC) in drbd_create_device()
2746 kref_get(&connection->kref); in drbd_create_device()
2747 INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf); in drbd_create_device()
2755 err = add_disk(disk); in drbd_create_device()
2760 device->state.conn = first_connection(resource)->cstate; in drbd_create_device()
2761 if (device->state.conn == C_WF_REPORT_PARAMS) { in drbd_create_device()
2772 destroy_workqueue(device->submit.wq); in drbd_create_device()
2775 peer_device = idr_remove(&connection->peer_devices, vnr); in drbd_create_device()
2777 kref_put(&connection->kref, drbd_destroy_connection); in drbd_create_device()
2780 list_del(&peer_device->peer_devices); in drbd_create_device()
2783 idr_remove(&resource->devices, vnr); in drbd_create_device()
2790 __free_page(device->md_io.page); in drbd_create_device()
2792 put_disk(disk); in drbd_create_device()
2794 kref_put(&resource->kref, drbd_destroy_resource); in drbd_create_device()
2801 struct drbd_resource *resource = device->resource; in drbd_delete_device()
2810 idr_remove(&connection->peer_devices, device->vnr); in drbd_delete_device()
2811 kref_put(&device->kref, drbd_destroy_device); in drbd_delete_device()
2813 idr_remove(&resource->devices, device->vnr); in drbd_delete_device()
2814 kref_put(&device->kref, drbd_destroy_device); in drbd_delete_device()
2816 kref_put(&device->kref, drbd_destroy_device); in drbd_delete_device()
2817 del_gendisk(device->vdisk); in drbd_delete_device()
2819 kref_put(&device->kref, drbd_destroy_device); in drbd_delete_device()
2829 return -EINVAL; in drbd_init()
2858 err = -ENOMEM; in drbd_init()
2865 retry.wq = create_singlethread_workqueue("drbd-reissue"); in drbd_init()
2877 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", in drbd_init()
2885 if (err == -ENOMEM) in drbd_init()
2895 mutex_lock(&ds->mutex); in drbd_free_one_sock()
2896 s = ds->socket; in drbd_free_one_sock()
2897 ds->socket = NULL; in drbd_free_one_sock()
2898 mutex_unlock(&ds->mutex); in drbd_free_one_sock()
2909 if (connection->data.socket) in drbd_free_sock()
2910 drbd_free_one_sock(&connection->data); in drbd_free_sock()
2911 if (connection->meta.socket) in drbd_free_sock()
2912 drbd_free_one_sock(&connection->meta); in drbd_free_sock()
2923 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_md_sync()
2924 struct drbd_device *device = peer_device->device; in conn_md_sync()
2926 kref_get(&device->kref); in conn_md_sync()
2929 kref_put(&device->kref, drbd_destroy_device); in conn_md_sync()
2946 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
2955 u8 reserved_u8[4096 - (7*8 + 10*4)];
2968 buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk)); in drbd_md_write()
2970 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); in drbd_md_write()
2971 buffer->flags = cpu_to_be32(device->ldev->md.flags); in drbd_md_write()
2972 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN); in drbd_md_write()
2974 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect); in drbd_md_write()
2975 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset); in drbd_md_write()
2976 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements); in drbd_md_write()
2977 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE); in drbd_md_write()
2978 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid); in drbd_md_write()
2980 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset); in drbd_md_write()
2981 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size); in drbd_md_write()
2983 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes); in drbd_md_write()
2984 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k); in drbd_md_write()
2986 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); in drbd_md_write()
2987 sector = device->ldev->md.md_offset; in drbd_md_write()
2989 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) { in drbd_md_write()
2997 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3008 timer_delete(&device->md_sync_timer); in drbd_md_sync()
3010 if (!test_and_clear_bit(MD_DIRTY, &device->flags)) in drbd_md_sync()
3014 * metadata even if we detach due to a disk failure! */ in drbd_md_sync()
3024 /* Update device->ldev->md.la_size_sect, in drbd_md_sync()
3026 device->ldev->md.la_size_sect = get_capacity(device->vdisk); in drbd_md_sync()
3037 u32 al_stripes = be32_to_cpu(on_disk->al_stripes); in check_activity_log_stripe_size()
3038 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k); in check_activity_log_stripe_size()
3041 /* both not set: default to old fixed size activity log */ in check_activity_log_stripe_size()
3055 /* Upper limit of activity log area, to avoid potential overflow in check_activity_log_stripe_size()
3058 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */ in check_activity_log_stripe_size()
3067 in_core->al_stripe_size_4k = al_stripe_size_4k; in check_activity_log_stripe_size()
3068 in_core->al_stripes = al_stripes; in check_activity_log_stripe_size()
3069 in_core->al_size_4k = al_size_4k; in check_activity_log_stripe_size()
3073 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n", in check_activity_log_stripe_size()
3075 return -EINVAL; in check_activity_log_stripe_size()
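
The plausibility checks of check_activity_log_stripe_size() above reduce to arithmetic on 4 KiB units. A sketch; the 16 GiB upper bound comes from the comment above (16 GiB / 4 KiB = 4,194,304 units), while the default geometry and the lower bound are assumed to be the classic fixed 32 KiB log (8 units):

#include <stdint.h>

/* Returns 0 if the activity log striping geometry is plausible. */
static int check_al_stripes(uint32_t al_stripes, uint32_t al_stripe_size_4k)
{
	uint64_t al_size_4k;

	/* both unset: fall back to the old fixed-size activity log
	 * (assumed: one stripe of 32 KiB = 8 four-KiB blocks) */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = 8;
	}
	/* otherwise both values must be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		return -1;

	al_size_4k = (uint64_t)al_stripes * al_stripe_size_4k;

	/* upper bound, "arbitrarily" 16 GiB: 16 GiB / 4 KiB units */
	if (al_size_4k > 16ULL * 1024 * 1024 * 1024 / 4096)
		return -1;
	/* lower bound (assumed): no smaller than the classic 32 KiB log */
	if (al_size_4k < 8)
		return -1;

	return 0;
}
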
3080 sector_t capacity = drbd_get_capacity(bdev->md_bdev); in check_offsets_and_sizes()
3081 struct drbd_md *in_core = &bdev->md; in check_offsets_and_sizes()
3085 /* The on-disk size of the activity log, calculated from offsets, and in check_offsets_and_sizes()
3086 * the size of the activity log calculated from the stripe settings, in check_offsets_and_sizes()
3088 * Though we could relax this a bit: it is ok, if the striped activity log in check_offsets_and_sizes()
3089 * fits in the available on-disk activity log size. in check_offsets_and_sizes()
3092 * of possible unused padding space in the on disk layout. */ in check_offsets_and_sizes()
3093 if (in_core->al_offset < 0) { in check_offsets_and_sizes()
3094 if (in_core->bm_offset > in_core->al_offset) in check_offsets_and_sizes()
3096 on_disk_al_sect = -in_core->al_offset; in check_offsets_and_sizes()
3097 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset; in check_offsets_and_sizes()
3099 if (in_core->al_offset != MD_4kB_SECT) in check_offsets_and_sizes()
3101 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT) in check_offsets_and_sizes()
3104 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT; in check_offsets_and_sizes()
3105 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset; in check_offsets_and_sizes()
3109 if (in_core->meta_dev_idx >= 0) { in check_offsets_and_sizes()
3110 if (in_core->md_size_sect != MD_128MB_SECT in check_offsets_and_sizes()
3111 || in_core->al_offset != MD_4kB_SECT in check_offsets_and_sizes()
3112 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT in check_offsets_and_sizes()
3113 || in_core->al_stripes != 1 in check_offsets_and_sizes()
3114 || in_core->al_stripe_size_4k != MD_32kB_SECT/8) in check_offsets_and_sizes()
3118 if (capacity < in_core->md_size_sect) in check_offsets_and_sizes()
3120 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev)) in check_offsets_and_sizes()
3127 /* should fit (for now: exactly) into the available on-disk space; in check_offsets_and_sizes()
3129 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT) in check_offsets_and_sizes()
3133 if (in_core->bm_offset & 7) in check_offsets_and_sizes()
3139 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512) in check_offsets_and_sizes()
3148 in_core->meta_dev_idx, in check_offsets_and_sizes()
3149 in_core->al_stripes, in_core->al_stripe_size_4k, in check_offsets_and_sizes()
3150 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect, in check_offsets_and_sizes()
3151 (unsigned long long)in_core->la_size_sect, in check_offsets_and_sizes()
3154 return -EINVAL; in check_offsets_and_sizes()
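/*
 * Sketch of the two layout branches in check_offsets_and_sizes() above
 * (units: 512-byte sectors, offsets relative to the superblock).
 * Internal meta data grows backwards: the superblock sits at the end of
 * the device, with the AL below it (negative al_offset) and the bitmap
 * below that. External/flexible meta data has the 4 kB superblock
 * first, then the AL, then the bitmap. MD_4KB_SECT is simply 4096/512;
 * everything else comes from the validated in-core values.
 */
#include <stdint.h>

#define MD_4KB_SECT 8	/* one 4 kB block in 512-byte sectors */

static int example_md_area_sizes(int32_t al_offset, int32_t bm_offset,
				 uint32_t md_size_sect, uint32_t al_size_4k,
				 uint64_t *al_sect, uint64_t *bm_sect)
{
	if (al_offset < 0) {
		/* internal: bitmap below AL, AL below the superblock */
		if (bm_offset > al_offset)
			return -1;
		*al_sect = (uint64_t)-al_offset;
		*bm_sect = (uint64_t)(al_offset - bm_offset);
	} else {
		/* external: superblock, then AL, then bitmap */
		if (al_offset != MD_4KB_SECT)
			return -1;
		if (bm_offset < al_offset + (int64_t)al_size_4k * MD_4KB_SECT)
			return -1;
		*al_sect = (uint64_t)bm_offset - MD_4KB_SECT;
		*bm_sect = (uint64_t)md_size_sect - bm_offset;
	}
	/* striped AL must (for now: exactly) fill the on-disk AL area */
	if (*al_sect != (uint64_t)al_size_4k * MD_4KB_SECT)
		return -1;
	return 0;
}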
3159 * drbd_md_read() - Reads in the meta data super block
3167 * even before @bdev is assigned to @device->ldev.
3175 if (device->state.disk != D_DISKLESS) in drbd_md_read()
3184 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx; in drbd_md_read()
3185 bdev->md.md_offset = drbd_md_ss(bdev); in drbd_md_read()
3188 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */ in drbd_md_read()
3189 bdev->md.md_size_sect = 8; in drbd_md_read()
3191 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, in drbd_md_read()
3194 called BEFORE disk is attached */ in drbd_md_read()
3200 magic = be32_to_cpu(buffer->magic); in drbd_md_read()
3201 flags = be32_to_cpu(buffer->flags); in drbd_md_read()
3204 /* btw: that's Activity Log clean, not "all" clean. */ in drbd_md_read()
3205 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n"); in drbd_md_read()
3213 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); in drbd_md_read()
3215 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n"); in drbd_md_read()
3219 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) { in drbd_md_read()
3221 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE); in drbd_md_read()
3227 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect); in drbd_md_read()
3229 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); in drbd_md_read()
3230 bdev->md.flags = be32_to_cpu(buffer->flags); in drbd_md_read()
3231 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); in drbd_md_read()
3233 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect); in drbd_md_read()
3234 bdev->md.al_offset = be32_to_cpu(buffer->al_offset); in drbd_md_read()
3235 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset); in drbd_md_read()
3237 if (check_activity_log_stripe_size(device, buffer, &bdev->md)) in drbd_md_read()
3242 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { in drbd_md_read()
3244 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); in drbd_md_read()
3247 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { in drbd_md_read()
3249 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); in drbd_md_read()
3255 spin_lock_irq(&device->resource->req_lock); in drbd_md_read()
3256 if (device->state.conn < C_CONNECTED) { in drbd_md_read()
3258 peer = be32_to_cpu(buffer->la_peer_max_bio_size); in drbd_md_read()
3260 device->peer_max_bio_size = peer; in drbd_md_read()
3262 spin_unlock_irq(&device->resource->req_lock); in drbd_md_read()
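/*
 * Read-side counterpart to the packing sketch further up, mirroring the
 * unpack-and-validate steps of drbd_md_read(): convert big-endian fields
 * to host order, then refuse to proceed on a bad magic or an "unclean"
 * superblock. EXAMPLE_MD_MAGIC and the flag bit are placeholder values
 * invented for this sketch; the real DRBD magics and MDF_* flag bits
 * live in the DRBD headers.
 */
#include <endian.h>
#include <stdint.h>

#define EXAMPLE_MD_MAGIC	0x12345678u	/* placeholder, not DRBD's */
#define EXAMPLE_MDF_AL_CLEAN	(1u << 0)	/* placeholder flag bit */

struct example_superblock {
	uint32_t magic;
	uint32_t flags;
	uint64_t la_size_sect;
};

static int example_md_unpack(const struct example_superblock *buf,
			     uint64_t *la_size_sect)
{
	uint32_t magic = be32toh(buf->magic);
	uint32_t flags = be32toh(buf->flags);

	if (magic != EXAMPLE_MD_MAGIC)
		return -1;	/* wrong/old magic: re-run "create-md" */
	if (!(flags & EXAMPLE_MDF_AL_CLEAN))
		return -1;	/* activity log unclean: "apply-al" first */
	*la_size_sect = be64toh(buf->la_size_sect);
	return 0;
}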
3271 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3275 * the meta-data super block. This function sets MD_DIRTY, and starts a
3280 if (!test_and_set_bit(MD_DIRTY, &device->flags)) in drbd_md_mark_dirty()
3281 mod_timer(&device->md_sync_timer, jiffies + 5*HZ); in drbd_md_mark_dirty()
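/*
 * Sketch of the MD_DIRTY handshake pairing drbd_md_mark_dirty() with
 * drbd_md_sync() above: marking dirty arms a delayed flush only on the
 * 0 -> 1 transition, and the sync side does the inverse test-and-clear,
 * so a flush happens at most once per dirtying. Plain C11 atomics stand
 * in for the kernel's atomic bitops and the 5-second timer.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool md_dirty;

static void example_arm_flush_timer(void)   { /* e.g. flush in 5 s */ }
static void example_flush_superblock(void)  { /* write the superblock */ }

static void example_md_mark_dirty(void)
{
	/* only the first writer after a flush re-arms the timer */
	if (!atomic_exchange(&md_dirty, true))
		example_arm_flush_timer();
}

static void example_md_sync(void)
{
	/* nothing to do unless someone dirtied the metadata */
	if (!atomic_exchange(&md_dirty, false))
		return;
	example_flush_superblock();
}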
3289 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i]; in drbd_uuid_move_history()
3295 if (device->state.role == R_PRIMARY) in __drbd_uuid_set()
3303 device->ldev->md.uuid[idx] = val; in __drbd_uuid_set()
3310 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); in _drbd_uuid_set()
3312 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in _drbd_uuid_set()
3318 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set()
3319 if (device->ldev->md.uuid[idx]) { in drbd_uuid_set()
3321 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx]; in drbd_uuid_set()
3324 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set()
3328 * drbd_uuid_new_current() - Creates a new current UUID
3341 spin_lock_irq(&device->ldev->md.uuid_lock); in drbd_uuid_new_current()
3342 bm_uuid = device->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_new_current()
3347 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT]; in drbd_uuid_new_current()
3349 spin_unlock_irq(&device->ldev->md.uuid_lock); in drbd_uuid_new_current()
3359 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
3360 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) { in drbd_uuid_set_bm()
3361 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
3367 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_set_bm()
3368 device->ldev->md.uuid[UI_BITMAP] = 0; in drbd_uuid_set_bm()
3370 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_set_bm()
3374 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); in drbd_uuid_set_bm()
3376 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
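/*
 * Sketch of the UUID slot rotation used by drbd_uuid_move_history() and
 * drbd_uuid_set() above: overwriting a non-empty slot first retires its
 * old value into the history ring, shifting older history entries down.
 * Slot indices mirror DRBD's UI_* enum (current, bitmap, then a short
 * history); locking is omitted here for brevity.
 */
#include <stdint.h>

enum { EX_UI_CURRENT, EX_UI_BITMAP,
       EX_UI_HISTORY_START, EX_UI_HISTORY_END, EX_UI_SIZE };

static void example_uuid_move_history(uint64_t uuid[EX_UI_SIZE])
{
	int i;

	/* shift history entries down by one, dropping the oldest */
	for (i = EX_UI_HISTORY_START; i < EX_UI_HISTORY_END; i++)
		uuid[i + 1] = uuid[i];
}

static void example_uuid_set(uint64_t uuid[EX_UI_SIZE], int idx, uint64_t val)
{
	/* retire the slot's old value into the history ring first */
	if (uuid[idx]) {
		example_uuid_move_history(uuid);
		uuid[EX_UI_HISTORY_START] = uuid[idx];
	}
	uuid[idx] = val;
}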
3382 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3392 int rv = -EIO; in drbd_bmio_set_n_write()
3409 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3428 struct bm_io_work *work = &device->bm_io_work; in w_bitmap_io()
3429 int rv = -EIO; in w_bitmap_io()
3431 if (work->flags != BM_LOCKED_CHANGE_ALLOWED) { in w_bitmap_io()
3432 int cnt = atomic_read(&device->ap_bio_cnt); in w_bitmap_io()
3435 cnt, work->why); in w_bitmap_io()
3439 drbd_bm_lock(device, work->why, work->flags); in w_bitmap_io()
3440 rv = work->io_fn(device, work->peer_device); in w_bitmap_io()
3445 clear_bit_unlock(BITMAP_IO, &device->flags); in w_bitmap_io()
3446 wake_up(&device->misc_wait); in w_bitmap_io()
3448 if (work->done) in w_bitmap_io()
3449 work->done(device, rv); in w_bitmap_io()
3451 clear_bit(BITMAP_IO_QUEUED, &device->flags); in w_bitmap_io()
3452 work->why = NULL; in w_bitmap_io()
3453 work->flags = 0; in w_bitmap_io()
3459 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3481 D_ASSERT(device, current == peer_device->connection->worker.task); in drbd_queue_bitmap_io()
3483 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags)); in drbd_queue_bitmap_io()
3484 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags)); in drbd_queue_bitmap_io()
3485 D_ASSERT(device, list_empty(&device->bm_io_work.w.list)); in drbd_queue_bitmap_io()
3486 if (device->bm_io_work.why) in drbd_queue_bitmap_io()
3488 why, device->bm_io_work.why); in drbd_queue_bitmap_io()
3490 device->bm_io_work.peer_device = peer_device; in drbd_queue_bitmap_io()
3491 device->bm_io_work.io_fn = io_fn; in drbd_queue_bitmap_io()
3492 device->bm_io_work.done = done; in drbd_queue_bitmap_io()
3493 device->bm_io_work.why = why; in drbd_queue_bitmap_io()
3494 device->bm_io_work.flags = flags; in drbd_queue_bitmap_io()
3496 spin_lock_irq(&device->resource->req_lock); in drbd_queue_bitmap_io()
3497 set_bit(BITMAP_IO, &device->flags); in drbd_queue_bitmap_io()
3500 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) { in drbd_queue_bitmap_io()
3501 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags)) in drbd_queue_bitmap_io()
3502 drbd_queue_work(&peer_device->connection->sender_work, in drbd_queue_bitmap_io()
3503 &device->bm_io_work.w); in drbd_queue_bitmap_io()
3505 spin_unlock_irq(&device->resource->req_lock); in drbd_queue_bitmap_io()
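/*
 * Sketch of the gating logic in drbd_queue_bitmap_io() above: bitmap I/O
 * is queued immediately only if no application bios are in flight (or
 * the bitmap is allowed to change under I/O); otherwise the BITMAP_IO
 * flag stays set and, by assumption in this sketch, whoever drops the
 * in-flight count to zero queues the deferred work instead. C11 atomics
 * stand in for the kernel's bitops and req_lock.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  ap_bio_cnt;
static atomic_bool bitmap_io_pending;	/* BITMAP_IO */
static atomic_bool bitmap_io_queued;	/* BITMAP_IO_QUEUED */

static void example_queue_work(void) { /* hand work to the worker thread */ }

static void example_queue_bitmap_io(bool change_allowed)
{
	atomic_store(&bitmap_io_pending, true);
	if (change_allowed || atomic_load(&ap_bio_cnt) == 0) {
		/* queue exactly once, even if raced */
		if (!atomic_exchange(&bitmap_io_queued, true))
			example_queue_work();
	}
}

static void example_put_ap_bio(void)
{
	/* last in-flight bio kicks the deferred bitmap I/O */
	if (atomic_fetch_sub(&ap_bio_cnt, 1) == 1 &&
	    atomic_load(&bitmap_io_pending)) {
		if (!atomic_exchange(&bitmap_io_queued, true))
			example_queue_work();
	}
}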
3509 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3528 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); in drbd_bitmap_io()
3545 if ((device->ldev->md.flags & flag) != flag) { in drbd_md_set_flag()
3547 device->ldev->md.flags |= flag; in drbd_md_set_flag()
3553 if ((device->ldev->md.flags & flag) != 0) { in drbd_md_clear_flag()
3555 device->ldev->md.flags &= ~flag; in drbd_md_clear_flag()
3560 return (bdev->md.flags & flag) != 0; in drbd_md_test_flag()
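/*
 * Sketch of the flag helpers above: the superblock is marked dirty only
 * when a set/clear actually changes the persisted flags word, so no-op
 * updates never trigger a metadata write. A plain struct and bool stand
 * in for device->ldev->md and drbd_md_mark_dirty().
 */
#include <stdbool.h>
#include <stdint.h>

struct example_md { uint32_t flags; bool dirty; };

static void example_md_set_flag(struct example_md *md, uint32_t flag)
{
	if ((md->flags & flag) != flag) {
		md->dirty = true;	/* stands in for drbd_md_mark_dirty() */
		md->flags |= flag;
	}
}

static void example_md_clear_flag(struct example_md *md, uint32_t flag)
{
	if ((md->flags & flag) != 0) {
		md->dirty = true;
		md->flags &= ~flag;
	}
}

static bool example_md_test_flag(const struct example_md *md, uint32_t flag)
{
	return (md->flags & flag) != 0;
}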
3626 /* enum drbd_packet, but not commands - obsoleted flags: in cmdname()
3645 * drbd_wait_misc - wait for a request to make progress
3657 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); in drbd_wait_misc()
3660 return -ETIMEDOUT; in drbd_wait_misc()
3662 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT; in drbd_wait_misc()
3665 /* Indicate to wake up device->misc_wait on progress. */ in drbd_wait_misc()
3666 i->waiting = true; in drbd_wait_misc()
3667 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE); in drbd_wait_misc()
3668 spin_unlock_irq(&device->resource->req_lock); in drbd_wait_misc()
3670 finish_wait(&device->misc_wait, &wait); in drbd_wait_misc()
3671 spin_lock_irq(&device->resource->req_lock); in drbd_wait_misc()
3672 if (!timeout || device->state.conn < C_CONNECTED) in drbd_wait_misc()
3673 return -ETIMEDOUT; in drbd_wait_misc()
3675 return -ERESTARTSYS; in drbd_wait_misc()
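/*
 * Worked example of the timeout computation in drbd_wait_misc() above
 * (values assumed): net_conf timeouts are configured in tenths of a
 * second, so timeout * HZ / 10 converts to jiffies, scaled by ko_count
 * retries before the wait gives up with -ETIMEDOUT.
 */
#include <stdio.h>

int main(void)
{
	const long HZ = 250;	/* assumed kernel tick rate */
	long timeout  = 60;	/* 6.0 s, in tenths of a second */
	long ko_count = 7;	/* retries before giving up */
	long jiffies  = timeout * HZ / 10 * ko_count;

	/* 60 * 250 / 10 * 7 = 10500 jiffies = 42 s */
	printf("%ld jiffies (%.1f s)\n", jiffies, (double)jiffies / HZ);
	return 0;
}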
3687 spin_lock_nested(&resource->req_lock, i++); in lock_all_resources()
3695 spin_unlock(&resource->req_lock); in unlock_all_resources()
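/*
 * Sketch of the all-resources locking pattern above: locks of the same
 * class are always taken in one stable (list) order to avoid ABBA
 * deadlocks; the kernel's spin_lock_nested(..., i++) merely tells
 * lockdep about that order. Here, pthread mutexes locked in ascending
 * array order play the same role; the resource count is an assumption.
 */
#include <pthread.h>

#define EX_NR_RESOURCES 4
static pthread_mutex_t ex_res_lock[EX_NR_RESOURCES] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void ex_lock_all_resources(void)
{
	/* always lock in ascending index order */
	for (int i = 0; i < EX_NR_RESOURCES; i++)
		pthread_mutex_lock(&ex_res_lock[i]);
}

static void ex_unlock_all_resources(void)
{
	for (int i = 0; i < EX_NR_RESOURCES; i++)
		pthread_mutex_unlock(&ex_res_lock[i]);
}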
3713 * Crude but fast random-number generator. Uses a linear congruential
3721 if (!rsp->count--) { in _drbd_fault_random()
3723 rsp->state += refresh; in _drbd_fault_random()
3724 rsp->count = FAULT_RANDOM_REFRESH; in _drbd_fault_random()
3726 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD; in _drbd_fault_random()
3727 return swahw32(rsp->state); in _drbd_fault_random()
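/*
 * Userspace sketch of the fault-injection RNG above: a linear
 * congruential generator whose state is topped up from a real entropy
 * source every FAULT_RANDOM_REFRESH calls. The multiplier/increment are
 * the classic Numerical Recipes constants, chosen for this example (the
 * kernel's values may differ); reading /dev/urandom stands in for
 * get_random_bytes(), and ex_swahw32() mirrors the kernel's 16-bit
 * halfword swap.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_FAULT_RANDOM_REFRESH	10000
#define EX_LCG_MULT		1664525u
#define EX_LCG_ADD		1013904223u

struct ex_fault_rand { int count; uint32_t state; };

static uint32_t ex_swahw32(uint32_t x)
{
	/* swap the two 16-bit halfwords within the 32-bit word */
	return (x << 16) | (x >> 16);
}

static uint32_t ex_fault_random(struct ex_fault_rand *rsp)
{
	if (!rsp->count--) {
		uint32_t refresh = 0;
		FILE *f = fopen("/dev/urandom", "rb");

		/* periodically fold in fresh entropy */
		if (f) {
			if (fread(&refresh, sizeof(refresh), 1, f) != 1)
				refresh = 0;
			fclose(f);
		}
		rsp->state += refresh;
		rsp->count = EX_FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * EX_LCG_MULT + EX_LCG_ADD;
	return ex_swahw32(rsp->state);
}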
3733 [DRBD_FAULT_MD_WR] = "Meta-data write", in _drbd_fault_str()
3734 [DRBD_FAULT_MD_RD] = "Meta-data read", in _drbd_fault_str()