Lines matching "assigned", "resolution", "bits" in drivers/block/drbd/drbd_int.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
29 #include <linux/backing-dev.h>
63 #define ID_SYNCER (-1ULL)
116 * stores total bits and long words
125 /* statistics; index: (h->command == P_BITMAP) */
143 c->word_offset = c->bit_offset >> 6; in bm_xfer_ctx_bit_to_word_offset()
145 c->word_offset = c->bit_offset >> 5; in bm_xfer_ctx_bit_to_word_offset()
146 c->word_offset &= ~(1UL); in bm_xfer_ctx_bit_to_word_offset()
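The two branches keep the transfer aligned to 64-bit words even on a 32-bit
host. A standalone sketch of the same arithmetic (the BITS_PER_LONG handling
is assumed from context; this is not the kernel code):

#include <stdio.h>

static unsigned long bit_to_word_offset(unsigned long bit_offset, int bits_per_long)
{
	unsigned long word_offset;

	if (bits_per_long == 64) {
		word_offset = bit_offset >> 6;	/* one long = 64 bits */
	} else {
		word_offset = bit_offset >> 5;	/* one long = 32 bits ... */
		word_offset &= ~1UL;		/* ... rounded down to a 64-bit boundary */
	}
	return word_offset;
}

int main(void)
{
	/* bit 100 lives in 64-bit word 1, i.e. 32-bit word pair {2,3};
	 * both variants thus start at the same byte offset (8). */
	printf("%lu %lu\n", bit_to_word_offset(100, 64),
	       bit_to_word_offset(100, 32));	/* prints: 1 2 */
	return 0;
}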
178 * --lge */ in get_t_state()
181 return thi->t_state; in get_t_state()
300 /* drbd_epoch flag bits */
325 /* see comments on ee flag bits below */
336 ((peer_req)->opf & REQ_OP_MASK)
338 /* ee flag bits.
342 * non-atomic modification to ee->flags is ok.
350 /* explicit zero-out requested, or
413 /* flag bits per device */
417 USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */
432 FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
438 AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
443 FLUSH_PENDING, /* if set, device->flush_jif is when we submitted that flush
447 GOING_DISKLESS, /* Disk is being detached, because of io-error, or admin request. */
460 /* definition of bits in bm_flags to be used in drbd_bm_lock
472 * and still allow all non-bulk operations */
478 /* testing bits, as well as setting new bits allowed, but clearing bits
479 * would be unexpected. Used during bitmap receive. Setting new bits
480 * requires sending of "out-of-sync" information, though. */
516 /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
531 struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
562 /* flag bits per connection */
597 struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */
652 struct crypto_shash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
676 * protected by resource->req_lock */
701 * with req->epoch == current_epoch_nr.
712 has_net_conf = rcu_dereference(connection->net_conf); in has_net_conf()
725 __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
727 __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
733 /* protected by ..->resource->req_lock */
829 /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
844 /* size of out-of-sync range in sectors. */
846 unsigned long ov_left; /* in bits */
880 unsigned long comm_bm_set; /* communicated number of set bits. */
884 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
892 struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
899 * are deferred to this single-threaded work queue */
905 struct list_head list; /* on device->pending_bitmap_io */
919 /* assigned from drbd_genlmsghdr */
921 /* assigned from request attributes, if present */
923 #define VOLUME_UNSPECIFIED (-1U)
947 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices); in first_peer_device()
953 return idr_find(&connection->peer_devices, volume_number); in conn_peer_device()
966 list_for_each_entry(connection, &resource->connections, connections)
969 list_for_each_entry_rcu(connection, &resource->connections, connections)
972 list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
975 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
978 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
981 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
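In the full header these loop bodies back iterator macros such as
for_each_connection(), for_each_peer_device() and their _rcu/_safe variants.
A hedged kernel-context usage sketch (not compilable on its own):

	/* walk all peer devices of a device under RCU protection */
	rcu_read_lock();
	for_each_peer_device_rcu(peer_device, device)
		n_peers++;
	rcu_read_unlock();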
985 return device->minor; in device_to_minor()
1092 * |----------- md_size_sect ------------------|
1096 * ==> bitmap sectors = md_size_sect - bm_offset
1102 * |----------- md_size_sect ------------------|
1105 * | bm_offset = al_offset - Y |
1106 * ==> bitmap sectors = Y = al_offset - bm_offset
1112 * which are written in a ring-buffer, or striped ring-buffer like fashion,
1129 * variables at create-md time (or even re-configurable at runtime?).
1143 #define AL_CONTEXT_PER_TRANSACTION 919 // (4096 - 36 - 6*64)/4
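The 919 checks out: a 4096-byte transaction block minus a 36-byte header and
6*64 bytes of update slots leaves 3676 bytes, i.e. 919 four-byte context
slots. A standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* 4 KiB AL transaction: 36-byte header, 6*64 bytes of update
	 * slots, remainder split into 4-byte context extent numbers */
	assert((4096 - 36 - 6 * 64) / 4 == 919);
	return 0;
}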
1160 int rs_left; /* number of bits set (out of sync) in this extent. */
1186 * at 4k per bit resolution) */
1195 #define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1196 #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1200 #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1204 #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1205 #define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1208 #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1211 /* how many bits are covered by one bitmap extent (resync extent) */
1212 #define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1214 #define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
1218 #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
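With the "4k per bit" resolution noted above, BM_BLOCK_SHIFT is 12, so one
bit covers 8 sectors. BM_EXT_SHIFT = 24 (16 MiB resync extents) is an
assumption here; the excerpt does not show its value. A standalone demo of
the conversions:

#include <stdio.h>

typedef unsigned long long sector_t;

#define BM_BLOCK_SHIFT	12	/* "4k per bit", per the comment above */
#define BM_EXT_SHIFT	24	/* assumed: 16 MiB resync extents */

#define BM_SECT_TO_BIT(x)	((x) >> (BM_BLOCK_SHIFT - 9))
#define BM_BIT_TO_SECT(x)	((sector_t)(x) << (BM_BLOCK_SHIFT - 9))
#define Bit2KB(bits)		((bits) << (BM_BLOCK_SHIFT - 10))
#define BM_BIT_TO_EXT(x)	((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

int main(void)
{
	sector_t sect = 2048;			/* 1 MiB into the device */
	unsigned long bit = BM_SECT_TO_BIT(sect);

	printf("sector %llu -> bit %lu\n", sect, bit);		/* bit 256 */
	printf("bit %lu -> sector %llu\n", bit, BM_BIT_TO_SECT(bit));
	printf("one bit covers %lu KB\n", Bit2KB(1UL));		/* 4 KB */
	printf("bit %lu -> resync extent %lu\n", bit, BM_BIT_TO_EXT(bit));
	return 0;
}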
1225 * bit 0 bit 37 bit 38 bit (512*8)-1
1227 * sect. 0 `296 `304 ^(512*8*8)-1
1235 /* we have a certain meta data variant that has a fixed on-disk size of 128
1241 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1252 /* corresponds to (1UL << 38) bits right now. */
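Plugging in the sizes implied by the names (128 MiB = 262144 sectors, 32 KiB
activity log = 64 sectors, 4 KiB superblock = 8 sectors) leaves 262072
bitmap sectors; each bitmap sector holds 4096 bits covering 8 data sectors
apiece, so the fixed-size variant tops out just under 4 TiB. A standalone
check (constant values assumed, not taken from the excerpt):

#include <stdio.h>

#define BM_EXT_SHIFT	24		/* assumed, as in the demo above */
#define MD_128MB_SECT	(128 * 2048)
#define MD_32kB_SECT	64
#define MD_4kB_SECT	8

int main(void)
{
	long long max_sect = (MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT)
			   * (1LL << (BM_EXT_SHIFT - 9));

	/* 1 GiB = 2^21 sectors of 512 bytes */
	printf("%lld sectors ~ %lld GiB\n", max_sect, max_sect >> 21);
	return 0;
}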
1257 * Since we may live in a mixed-platform cluster,
1281 /* set/clear/test only a few bits at a time */
1335 /* We also need a standard (emergency-reserve backed) page pool
1383 DS_ERROR_SHRINK = -3,
1384 DS_ERROR_SPACE_MD = -2,
1385 DS_ERROR = -1,
1429 struct drbd_device *device = peer_device->device; in ov_out_of_sync_print()
1431 if (device->ov_last_oos_size) { in ov_out_of_sync_print()
1433 (unsigned long long)device->ov_last_oos_start, in ov_out_of_sync_print()
1434 (unsigned long)device->ov_last_oos_size); in ov_out_of_sync_print()
1436 device->ov_last_oos_size = 0; in ov_out_of_sync_print()
1492 if (!bio->bi_bdev) { in drbd_submit_bio_noacct()
1493 drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n"); in drbd_submit_bio_noacct()
1494 bio->bi_status = BLK_STS_IOERR; in drbd_submit_bio_noacct()
1598 struct drbd_resource *resource = device->resource; in drbd_read_state()
1601 rv.i = device->state.i; in drbd_read_state()
1602 rv.susp = resource->susp; in drbd_read_state()
1603 rv.susp_nod = resource->susp_nod; in drbd_read_state()
1604 rv.susp_fen = resource->susp_fen; in drbd_read_state()
1624 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error; in __drbd_chk_io_error_()
1631 if (device->state.disk > D_INCONSISTENT) in __drbd_chk_io_error_()
1644 * blocks, which triggers block re-allocation in lower layers. in __drbd_chk_io_error_()
1649 * Force-detach is not really an IO error, but rather a in __drbd_chk_io_error_()
1658 set_bit(WAS_IO_ERROR, &device->flags); in __drbd_chk_io_error_()
1660 set_bit(WAS_READ_ERROR, &device->flags); in __drbd_chk_io_error_()
1662 set_bit(FORCE_DETACH, &device->flags); in __drbd_chk_io_error_()
1663 if (device->state.disk > D_FAILED) { in __drbd_chk_io_error_()
1686 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_chk_io_error_()
1688 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_chk_io_error_()
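The pairing above follows the usual kernel convention: the double-underscore
variant requires resource->req_lock to be held, while the plain wrapper takes
and releases it around the call. A minimal userspace mock of that convention
(names and types illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static int was_io_error;

static void __chk_io_error(void)
{
	/* caller must hold req_lock, like __drbd_chk_io_error_() */
	was_io_error = 1;
}

static void chk_io_error(void)
{
	pthread_mutex_lock(&req_lock);	/* spin_lock_irqsave() stand-in */
	__chk_io_error();
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	chk_io_error();
	printf("was_io_error = %d\n", was_io_error);
	return 0;
}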
1694 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1702 switch (bdev->md.meta_dev_idx) { in drbd_md_first_sector()
1705 return bdev->md.md_offset + bdev->md.bm_offset; in drbd_md_first_sector()
1708 return bdev->md.md_offset; in drbd_md_first_sector()
1713 * drbd_md_last_sector() - Return the last sector number of the meta data area
1718 switch (bdev->md.meta_dev_idx) { in drbd_md_last_sector()
1721 return bdev->md.md_offset + MD_4kB_SECT - 1; in drbd_md_last_sector()
1724 return bdev->md.md_offset + bdev->md.md_size_sect - 1; in drbd_md_last_sector()
1735 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1746 switch (bdev->md.meta_dev_idx) { in drbd_get_max_capacity()
1749 s = drbd_get_capacity(bdev->backing_bdev) in drbd_get_max_capacity()
1756 drbd_get_capacity(bdev->backing_bdev)); in drbd_get_max_capacity()
1759 BM_EXT_TO_SECT(bdev->md.md_size_sect in drbd_get_max_capacity()
1760 - bdev->md.bm_offset)); in drbd_get_max_capacity()
1764 drbd_get_capacity(bdev->backing_bdev)); in drbd_get_max_capacity()
1770 * drbd_md_ss() - Return the sector number of our meta data super block
1775 const int meta_dev_idx = bdev->md.meta_dev_idx; in drbd_md_ss()
1784 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8; in drbd_md_ss()
1787 return MD_128MB_SECT * bdev->md.meta_dev_idx; in drbd_md_ss()
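For internal meta data, (capacity & ~7ULL) - 8 places the superblock in the
last 4k-aligned 4 KiB (8 sectors) of the backing device; for indexed external
meta data, each index owns a 128 MiB slot. A standalone demo of both
placements (MD_128MB_SECT assumed as above):

#include <stdio.h>

typedef unsigned long long sector_t;

#define MD_128MB_SECT	(128 * 2048)

static sector_t md_ss_internal(sector_t capacity)
{
	return (capacity & ~7ULL) - 8;	/* last 4k-aligned 8 sectors */
}

int main(void)
{
	printf("%llu\n", md_ss_internal(1000005));	/* 999992 */
	printf("%llu\n", (sector_t)MD_128MB_SECT * 2);	/* slot for index 2 */
	return 0;
}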
1794 spin_lock_irqsave(&q->q_lock, flags); in drbd_queue_work()
1795 list_add_tail(&w->list, &q->q); in drbd_queue_work()
1796 spin_unlock_irqrestore(&q->q_lock, flags); in drbd_queue_work()
1797 wake_up(&q->q_wait); in drbd_queue_work()
1804 spin_lock_irqsave(&q->q_lock, flags); in drbd_queue_work_if_unqueued()
1805 if (list_empty_careful(&w->list)) in drbd_queue_work_if_unqueued()
1806 list_add_tail(&w->list, &q->q); in drbd_queue_work_if_unqueued()
1807 spin_unlock_irqrestore(&q->q_lock, flags); in drbd_queue_work_if_unqueued()
1808 wake_up(&q->q_wait); in drbd_queue_work_if_unqueued()
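Both helpers implement the classic lock, enqueue, unlock, wake sequence; the
_if_unqueued variant additionally tests list_empty_careful() so a work item
cannot be queued twice. A simplified userspace mock of the enqueue half (the
kernel uses list_head, a spinlock and a wait queue):

#include <pthread.h>
#include <stdio.h>

struct work { struct work *next; void (*cb)(void); };

static struct work *head, **tail = &head;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_work(struct work *w)
{
	pthread_mutex_lock(&q_lock);
	w->next = NULL;
	*tail = w;			/* list_add_tail() equivalent */
	tail = &w->next;
	pthread_mutex_unlock(&q_lock);
	/* the kernel code now does wake_up(&q->q_wait) */
}

static void say_hi(void) { printf("work ran\n"); }

int main(void)
{
	struct work w = { .cb = say_hi };
	struct work *i;

	queue_work(&w);
	for (i = head; i; i = i->next)	/* stand-in for the worker thread */
		i->cb();
	return 0;
}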
1814 if (!test_and_set_bit(work_bit, &device->flags)) { in drbd_device_post_work()
1816 first_peer_device(device)->connection; in drbd_device_post_work()
1817 struct drbd_work_queue *q = &connection->sender_work; in drbd_device_post_work()
1818 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags)) in drbd_device_post_work()
1819 wake_up(&q->q_wait); in drbd_device_post_work()
1826 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1831 struct task_struct *task = connection->ack_receiver.task; in wake_ack_receiver()
1832 if (task && get_t_state(&connection->ack_receiver) == RUNNING) in wake_ack_receiver()
1838 set_bit(SEND_PING, &connection->flags); in request_ping()
1879 * (drbd_make_request_common; recovery path on read io-error)
1895 atomic_inc(&device->ap_pending_cnt); in inc_ap_pending()
1901 int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt); in __dec_ap_pending()
1904 wake_up(&device->misc_wait); in __dec_ap_pending()
1908 /* counts how many resync-related answers we still expect from the peer
1916 atomic_inc(&peer_device->device->rs_pending_cnt); in inc_rs_pending()
1923 return atomic_dec_return(&peer_device->device->rs_pending_cnt); in __dec_rs_pending()
1937 atomic_inc(&device->unacked_cnt); in inc_unacked()
1943 return atomic_dec_return(&device->unacked_cnt); in __dec_unacked()
1949 return atomic_sub_return(n, &device->unacked_cnt); in __sub_unacked()
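All three counters follow the same discipline: increment when an answer
becomes outstanding, decrement when it arrives; the ap_pending variant above
additionally wakes device->misc_wait once the count drains to zero. A
userspace sketch of that discipline with C11 atomics (not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ap_pending_cnt;

static void inc_ap_pending(void)
{
	atomic_fetch_add(&ap_pending_cnt, 1);
}

static void dec_ap_pending(void)
{
	/* atomic_dec_return() equivalent: fetch_sub returns the old value */
	if (atomic_fetch_sub(&ap_pending_cnt, 1) - 1 == 0)
		printf("drained: wake_up(&device->misc_wait)\n");
}

int main(void)
{
	inc_ap_pending();
	inc_ap_pending();
	dec_ap_pending();
	dec_ap_pending();	/* this one reports the wake-up */
	return 0;
}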
1971 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
1975 * You have to call put_ldev() when finished working with device->ldev.
1984 enum drbd_disk_state disk_state = device->state.disk; in put_ldev()
1989 int i = atomic_dec_return(&device->local_cnt); in put_ldev()
2002 if (!test_and_set_bit(GOING_DISKLESS, &device->flags)) in put_ldev()
2004 wake_up(&device->misc_wait); in put_ldev()
2014 if (device->state.disk == D_DISKLESS) in _get_ldev_if_state()
2017 atomic_inc(&device->local_cnt); in _get_ldev_if_state()
2018 io_allowed = (device->state.disk >= mins); in _get_ldev_if_state()
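The intended call pattern, per the comment above (a sketch: in the full
header, get_ldev(device) wraps _get_ldev_if_state(device, D_INCONSISTENT),
and the helper called inside the block is hypothetical):

	if (get_ldev(device)) {
		/* device->ldev is pinned until the matching put_ldev() */
		examine_backing_dev(device->ldev);	/* hypothetical */
		put_ldev(device);
	}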
2027 /* this throttles on-the-fly application requests
2029 * maybe re-implement using semaphores? */
2036 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); in drbd_get_max_buffers()
2037 mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */ in drbd_get_max_buffers()
2045 union drbd_dev_state s = device->state; in drbd_state_is_stable()
2079 if (first_peer_device(device)->connection->agreed_pro_version < 96) in drbd_state_is_stable()
2115 struct drbd_resource *resource = device->resource; in drbd_suspended()
2117 return resource->susp || resource->susp_fen || resource->susp_nod; in drbd_suspended()
2126 if (atomic_read(&device->suspend_cnt)) in may_inc_ap_bio()
2139 if (atomic_read(&device->ap_bio_cnt) > mxb) in may_inc_ap_bio()
2141 if (test_bit(BITMAP_IO, &device->flags)) in may_inc_ap_bio()
2150 spin_lock_irq(&device->resource->req_lock); in inc_ap_bio_cond()
2153 atomic_inc(&device->ap_bio_cnt); in inc_ap_bio_cond()
2154 spin_unlock_irq(&device->resource->req_lock); in inc_ap_bio_cond()
2169 wait_event(device->misc_wait, inc_ap_bio_cond(device)); in inc_ap_bio()
2175 int ap_bio = atomic_dec_return(&device->ap_bio_cnt); in dec_ap_bio()
2179 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) { in dec_ap_bio()
2180 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags)) in dec_ap_bio()
2181 drbd_queue_work(&first_peer_device(device)-> in dec_ap_bio()
2182 connection->sender_work, in dec_ap_bio()
2183 &device->bm_io_work.w); in dec_ap_bio()
2190 wake_up(&device->misc_wait); in dec_ap_bio()
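inc_ap_bio() therefore blocks on device->misc_wait until may_inc_ap_bio()
admits another application BIO, and dec_ap_bio() wakes the waiters (besides
kicking the queued bitmap IO work seen above). A condensed userspace mock of
that throttle:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t misc_wait = PTHREAD_COND_INITIALIZER;
static int ap_bio_cnt;
static int mxb = 1000000;	/* cf. the nc->max_buffers fallback above */

static void inc_ap_bio(void)
{
	pthread_mutex_lock(&lock);
	while (ap_bio_cnt > mxb)	/* may_inc_ap_bio() stand-in */
		pthread_cond_wait(&misc_wait, &lock);
	ap_bio_cnt++;
	pthread_mutex_unlock(&lock);
}

static void dec_ap_bio(void)
{
	pthread_mutex_lock(&lock);
	ap_bio_cnt--;
	pthread_cond_broadcast(&misc_wait);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	inc_ap_bio();
	dec_ap_bio();
	return 0;
}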
2195 return first_peer_device(device)->connection->agreed_pro_version >= 97 && in verify_can_do_stop_sector()
2196 first_peer_device(device)->connection->agreed_pro_version != 100; in verify_can_do_stop_sector()
2201 int changed = device->ed_uuid != val; in drbd_set_ed_uuid()
2202 device->ed_uuid = val; in drbd_set_ed_uuid()
2218 return list_first_entry_or_null(&resource->connections, in first_connection()