/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif

/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I choose SIGHUP for now. */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
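
/*
 * Usage note (illustrative, not part of the interface): drbd_printk()
 * dispatches on the static type of its object argument, so the same
 * logging call works for devices, resources, connections and peer
 * devices alike, e.g.
 *
 *	drbd_info(connection, "Connection established\n");
 *	drbd_warn(device, "Out of sync: %llu sectors\n", (unsigned long long)n);
 *
 * Any other pointer type fails at link time, because the expression
 * ends up referencing the deliberately undefined
 * drbd_printk_with_wrong_object_type().
 */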

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
			        #exp, __func__); \
		_bool; \
		})
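
/*
 * Usage note (illustrative): unlike D_ASSERT(), expect() evaluates to
 * the tested condition, so callers can log the failure and still bail
 * out gracefully:
 *
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		return -EINVAL;
 *
 * Note that the macro picks up a variable named "device" from the
 * surrounding scope.
 */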

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/* read */
	DRBD_FAULT_RS_WR = 2,	/* resync */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient. */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
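
/*
 * Worked example (illustrative): with bit_offset == 100,
 * a 64-bit host yields word_offset = 100 >> 6 = 1 (one 64-bit word),
 * a 32-bit host yields (100 >> 5) & ~1UL = 3 & ~1UL = 2 (two 32-bit
 * words) -- the same byte position, rounded down to 64-bit alignment
 * on both, which is what makes the two encodings interoperable.
 */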

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer. 32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we do no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declaration of function defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_OP_DISCARD? */
	__EE_TRIM,
	/* explicit zero-out requested, or
	 * our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write same request */
	__EE_WRITE_SAME,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,

	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM                (1<<__EE_TRIM)
#define EE_ZEROOUT             (1<<__EE_ZEROOUT)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS    (1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK      (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE    (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED           (1<<__EE_SUBMITTED)
#define EE_WRITE               (1<<__EE_WRITE)
#define EE_WRITE_SAME          (1<<__EE_WRITE_SAME)
#define EE_APPLICATION         (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ         (1<<__EE_RS_THIN_REQ)
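
/*
 * Usage note (illustrative): while bios are in flight, only atomic
 * bitops on the __EE_* bit numbers are safe, e.g. from an endio
 * callback:
 *
 *	set_bit(__EE_WAS_ERROR, &peer_req->flags);
 *
 * Once no bios are in flight (or all have completed), plain tests of
 * the EE_* masks are fine:
 *
 *	if (peer_req->flags & EE_WAS_ERROR)
 *		...
 */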

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected. Used during bitmap receive. Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
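
/*
 * Usage sketch (illustrative): a bulk operation takes the bitmap lock
 * with the set of modifications it wants to rule out, e.g. a bulk
 * reader that must only test and count bits:
 *
 *	drbd_bm_lock(device, "some bulk operation", BM_LOCKED_TEST_ALLOWED);
 *	... bulk work ...
 *	drbd_bm_unlock(device);
 *
 * (drbd_bm_lock()/drbd_bm_unlock() are declared further down in this
 * header.)
 */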

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
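
/*
 * Usage sketch (illustrative): fifo_alloc() sizes the flexible values[]
 * array; the resync controller allocates its ring of correction values
 * roughly like
 *
 *	struct fifo_buffer *plan = fifo_alloc(plan_ahead);
 *	if (!plan)
 *		return -ENOMEM;
 *
 * where plan_ahead (a hypothetical variable here) is the number of
 * planning slots wanted.
 */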

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_shash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	/* empty member on older kernels without blk_start_plug() */
	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
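
/*
 * Usage note (illustrative; the callback name below is just an
 * example): the worker and receiver threads call these macros before
 * each callback they run, so debugfs can show the last
 * DRBD_THREAD_DETAILS_HIST callbacks together with their timing:
 *
 *	update_receiver_timing_details(connection, drbd_recv_header);
 *
 * The cb_nr counters simply keep incrementing; the history slot is
 * derived from them modulo DRBD_THREAD_DETAILS_HIST.
 */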

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;	/* partner's disk size */
	struct request_queue *rq_queue;
	struct gendisk *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;	/* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;	 /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* blocks not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks' time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};
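
/*
 * Usage note (illustrative): rs_mark_left[]/rs_mark_time[] form a small
 * ring buffer of resync progress snapshots, advanced roughly every
 * DRBD_SYNC_MARK_STEP jiffies; sync speed estimates are derived from
 * the difference between an older mark and the current state, with the
 * index wrapping as in
 *
 *	i = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
 */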

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}
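
/*
 * Usage sketch (illustrative): the _rcu iterators must run under
 * rcu_read_lock(), the plain ones under an appropriate lock, and the
 * _safe variants tolerate removal of the current entry, e.g.
 *
 *	rcu_read_lock();
 *	for_each_connection_rcu(connection, resource)
 *		drbd_info(connection, "still here\n");
 *	rcu_read_unlock();
 */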

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
extern void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void drbd_queue_unplug(struct drbd_device *device);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * Variants:
 * old, indexed fixed size meta data:
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like fashion.
 *  Its size used to be fixed at 32kB,
 *  but is about to become configurable.
 */

/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Which will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *	This many changes to the active set can be logged with one transaction.
 *	This number is arbitrary.
 * context per transaction:
 *	This many context extent numbers are logged with each transaction.
 *	This number is resulting from the transaction block size (4k), the layout
 *	of the transaction header, and the number of updates per transaction.
 *	See drbd_actlog.c:struct al_transaction_on_disk
 * */
#define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
#define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left;	/* number of bits set (out of sync) in this extent. */
	int rs_failed;	/* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES	0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED	1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY	2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
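
/*
 * Worked example (illustrative): with the hardcoded 4k-per-bit
 * relation, BM_BLOCK_SHIFT-9 == 3, so one bit covers 8 sectors:
 *
 *	BM_SECT_TO_BIT(1024) == 1024 >> 3 == 128
 *	BM_BIT_TO_SECT(128)  == 128 << 3  == 1024
 *	Bit2KB(1)            == 4	(one bit == 4 KiB)
 */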

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how many _storage_ sectors we have per bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)


/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 *		     of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37	  bit 38	    bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296		 `304		    ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXT )	 //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
/* we have a certain meta data variant that has a fixed on-disk size of 128
 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
 * log, leaving this many sectors for the bitmap.
 */

#define DRBD_MAX_SECTORS_FIXED_BM \
	((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif

/* Estimate max bio size as 256 * PAGE_SIZE,
 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit us to a platform agnostic constant here for now.
 * A followup commit may allow even bigger BIO sizes,
 * once we thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)    /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */

/* For now, don't allow more than half of what we can "activate" in one
 * activity log transaction to be discarded in one go. We may need to rework
 * drbd_al_begin_io() to allow for even larger discard ranges */
#define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
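
/*
 * Worked example (illustrative): with 64 updates per transaction and
 * 4 MiB activity log extents, this caps one batch at
 *
 *	64/2 * 4 MiB = 128 MiB  ==>  DRBD_MAX_BBIO_SECTORS == 262144
 */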

extern int  drbd_bm_init(struct drbd_device *device);
extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int  drbd_bm_set_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
	struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
 * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *device,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int  drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t	     drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t      drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;

/* drbd's page pool, used to buffer data received from the peer,
 * or data requested by the peer.
 *
 * This does not have an emergency reserve.
 *
 * When allocating from this pool, it first takes pages from the pool.
 * Only if the pool is depleted will it try to allocate from the system.
 *
 * The assumption is that pages taken from this pool will be processed,
 * and given back, "quickly", and then can be recycled, so we can avoid
 * frequent calls to alloc_page(), and still will be able to make progress even
 * under memory pressure.
 */
extern struct page *drbd_pp_pool;
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

/* We also need a standard (emergency-reserve backed) page pool
 * for meta data IO (activity log, bitmap).
 * We can keep it global, as long as it is used as "N pages at a time".
 * 128 should be plenty, currently we probably can get away with as few as 1.
 */
#define DRBD_MIN_POOL_PAGES	128
extern mempool_t drbd_md_io_page_pool;

/* We also need to make sure we get a bio
 * when we need it for housekeeping purposes */
extern struct bio_set drbd_md_io_bio_set;

/* And a bio_set for cloning */
extern struct bio_set drbd_io_bio_set;
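
/*
 * Usage sketch (illustrative): the emergency-reserve backed pools are
 * used through the standard mempool API, e.g. for a meta data IO page:
 *
 *	struct page *page = mempool_alloc(&drbd_md_io_page_pool, GFP_NOIO);
 *	...
 *	mempool_free(page, &drbd_md_io_page_pool);
 *
 * mempool_alloc() with a sleeping gfp mask such as GFP_NOIO may block,
 * but falls back to the reserve and is designed not to fail under
 * memory pressure.
 */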
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
			struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
					enum drbd_role new_role,
					int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

/* drbd_worker.c */
/* bi_end_io handlers */
extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);
/* maybe rather drbd_main.c ? */
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
		struct drbd_backing_dev *bdev, sector_t sector, int op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
		struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

static inline void ov_out_of_sync_print(struct drbd_device *device)
{
	if (device->ov_last_oos_size) {
		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
			(unsigned long long)device->ov_last_oos_start,
			(unsigned long)device->ov_last_oos_size);
	}
	device->ov_last_oos_size = 0;
}


extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
			 void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(struct timer_list *t);
extern void start_resync_timer_fn(struct timer_list *t);

extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
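/*
 * Illustrative only: the w_* worker callbacks above all share the
 * signature int (*)(struct drbd_work *, int cancel).  A minimal sketch of
 * that convention; "example_work" is made up and not part of DRBD.
 */
#if 0
static int example_work(struct drbd_work *w, int cancel)
{
	/* when the queue is being torn down, "cancel" is set and the
	 * callback should only clean up, not start new activity */
	if (cancel)
		return 0;
	/* ... do the actual work here ... */
	return 0;
}
#endif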
/* drbd_receiver.c */
extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
		sector_t start, unsigned int nr_sectors, int flags);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
extern void drbd_send_acks_wf(struct work_struct *ws);
extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
		bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
				    struct drbd_peer_request *, const unsigned,
				    const unsigned, const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
						     sector_t, unsigned int,
						     unsigned int,
						     gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
				 int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);

/* sets the number of 512 byte sectors of our virtual device */
void drbd_set_my_capacity(struct drbd_device *device, sector_t size);

/*
 * used to submit our private bio
 */
static inline void drbd_submit_bio_noacct(struct drbd_device *device,
					  int fault_type, struct bio *bio)
{
	__release(local);
	if (!bio->bi_bdev) {
		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	if (drbd_insert_fault(device, fault_type))
		bio_io_error(bio);
	else
		submit_bio_noacct(bio);
}

void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
			      enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
int drbd_seq_show(struct seq_file *seq, void *v);

/* drbd_actlog.c */
extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
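/*
 * Illustrative only: the activity-log bracket around a local write.
 * The helper and the on-stack interval setup are made up for this sketch;
 * the real call sites live in drbd_req.c and pass the interval embedded
 * in struct drbd_request.
 */
#if 0
static void example_write_with_al(struct drbd_device *device,
				  sector_t sector, unsigned int size)
{
	struct drbd_interval i = { .sector = sector, .size = size };

	drbd_al_begin_io(device, &i);	/* may block, may write an AL transaction */
	/* ... submit the data write of (size >> 9) sectors at "sector" ... */
	drbd_al_complete_io(device, &i);
}
#endif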
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);

enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
#define drbd_rs_failed_io(device, sector, size) \
	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_al_initialize(struct drbd_device *, void *);

/* drbd_nl.c */
/* state info broadcast */
struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

extern void notify_resource_state(struct sk_buff *,
				  unsigned int,
				  struct drbd_resource *,
				  struct resource_info *,
				  enum drbd_notification_type);
extern void notify_device_state(struct sk_buff *,
				unsigned int,
				struct drbd_device *,
				struct device_info *,
				enum drbd_notification_type);
extern void notify_connection_state(struct sk_buff *,
				    unsigned int,
				    struct drbd_connection *,
				    struct connection_info *,
				    enum drbd_notification_type);
extern void notify_peer_device_state(struct sk_buff *,
				     unsigned int,
				     struct drbd_peer_device *,
				     struct peer_device_info *,
				     enum drbd_notification_type);
extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
			  struct drbd_connection *, const char *, int);

/*
 * inline helper functions
 *************************/

/* see also page_chain_add and friends in drbd_receiver.c */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)


static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}
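/*
 * Illustrative only: counting the pages in a chain with the iterator
 * above.  "example_chain_len" is made up and not part of DRBD.
 */
#if 0
static unsigned int example_chain_len(struct page *page)
{
	unsigned int n = 0;

	/* the chain is linked through page_private(); the iterator
	 * prefetches the next link while we look at the current one */
	page_chain_for_each(page)
		n++;
	return n;
}
#endif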
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = resource->susp;
	rv.susp_nod = resource->susp_nod;
	rv.susp_fen = resource->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				drbd_err(device, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		fallthrough;	/* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
	case EP_DETACH:
	case EP_CALL_HELPER:
		/* Remember whether we saw a READ or WRITE error.
		 *
		 * Recovery of the affected area for WRITE failure is covered
		 * by the activity log.
		 * READ errors may fall outside that area though. Certain READ
		 * errors can be "healed" by writing good data to the affected
		 * blocks, which triggers block re-allocation in lower layers.
		 *
		 * If we cannot write the bitmap after a READ error,
		 * we may need to trigger a full sync (see w_go_diskless()).
		 *
		 * Force-detach is not really an IO error, but rather a
		 * desperate measure to try to deal with a completely
		 * unresponsive lower level IO stack.
		 * Still it should be treated as a WRITE error.
		 *
		 * Meta IO error is always WRITE error:
		 * we read meta data only once during attach,
		 * which will fail in case of errors.
		 */
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			drbd_err(device,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
 * @device:	 DRBD device.
 * @error:	 Error code passed to the IO completion callback
 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
 *
 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&device->resource->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
	}
}


/**
 * drbd_md_first_sector() - Returns the first sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * BTW, for internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}
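/*
 * Illustrative only: how an IO completion handler is expected to feed
 * errors into the on_io_error logic above.  The handler name is made up;
 * see drbd_request_endio() in drbd_worker.c for the real thing.
 */
#if 0
static void example_endio(struct bio *bio)
{
	struct drbd_device *device = bio->bi_private;

	if (bio->bi_status)
		drbd_chk_io_error(device, 1, DRBD_WRITE_ERROR);
	bio_put(bio);
}
#endif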
/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	return bdev ? bdev_nr_sectors(bdev) : 0;
}

/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * Returns the capacity we announce to our peer.  We clip ourselves at the
 * various MAX_SECTORS, because if we don't, the current implementation will
 * oops sooner or later.
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}
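/*
 * Illustrative only: a worked example of the "flexible" internal meta
 * data arithmetic used by drbd_md_ss() below.  For a backing device of
 * 2147483648 sectors (1 TiB of 512-byte sectors), the super block sits
 * in the last 4k-aligned 4k block: (2147483648 & ~7ULL) - 8 = 2147483640.
 */
#if 0
static sector_t example_internal_md_superblock(sector_t backing_capacity)
{
	/* round down to 4k alignment (8 sectors), step back one 4k block */
	return (backing_capacity & ~7ULL) - 8;
}
#endif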
/**
 * drbd_md_ss() - Return the sector number of our meta data super block
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	/* Since drbd08, internal meta data is always "flexible".
	 * position: last 4k aligned block of 4k size */
	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	/* external, some index; this is the old fixed size layout */
	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	if (list_empty_careful(&w->list))
		list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_device_post_work(struct drbd_device *device, int work_bit)
{
	if (!test_and_set_bit(work_bit, &device->flags)) {
		struct drbd_connection *connection =
			first_peer_device(device)->connection;
		struct drbd_work_queue *q = &connection->sender_work;
		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
			wake_up(&q->q_wait);
	}
}

extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);

/* To get the ack_receiver out of the blocking network stack,
 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
 * and send a ping, we need to send a signal.
 * Which signal we send is irrelevant. */
static inline void wake_ack_receiver(struct drbd_connection *connection)
{
	struct task_struct *task = connection->ack_receiver.task;
	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
		send_sig(SIGXCPU, task, 1);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_ack_receiver(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}
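/*
 * Illustrative only: queueing a one-shot work item on a connection's
 * sender_work queue.  The callback and helper are made up; real callers
 * embed struct drbd_work in a larger object and set ->cb up front.
 */
#if 0
static int example_cb(struct drbd_work *w, int cancel)
{
	return 0;
}

static void example_queue_it(struct drbd_connection *connection,
			     struct drbd_work *w)
{
	w->cb = example_cb;
	drbd_queue_work(&connection->sender_work, w);
}
#endif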
/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 *    (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
 *  _req_mod(req, DATA_RECEIVED)
 *     [from receive_DataReply]
 *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 *     for some reason it is NOT decreased in got_NegAck,
 *     but in the resulting cleanup code from report_params.
 *     we should try to remember the reason for that...
 *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
 *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
 *     [from tl_clear_barrier]
 */
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

/* counts how many resync-related answers we still expect from the peer
 *		      increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					  (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}
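/*
 * Illustrative only: the inc/dec pairing for the pending counters.  A
 * sync target bumps rs_pending_cnt when it queues a resync request and
 * drops it when the corresponding reply is processed; the helper below
 * is made up for this sketch.
 */
#if 0
static void example_request_resync_block(struct drbd_device *device)
{
	inc_rs_pending(device);
	/* ... send P_RS_DATA_REQUEST; when P_RS_DATA_REPLY arrives, the
	 * ack path calls dec_rs_pending(device) ... */
}
#endif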
/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_target_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_TARGET ||
	       connection_state == C_PAUSED_SYNC_T;
}

static inline bool is_sync_source_state(enum drbd_conns connection_state)
{
	return connection_state == C_SYNC_SOURCE ||
	       connection_state == C_PAUSED_SYNC_S;
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
	return is_sync_source_state(connection_state) ||
	       is_sync_target_state(connection_state);
}

/**
 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
 * @_device:	DRBD device.
 * @_min_state:	Minimum device state required for success.
 *
 * You have to call put_ldev() when finished working with device->ldev.
 */
#define get_ldev_if_state(_device, _min_state) \
	(_get_ldev_if_state((_device), (_min_state)) ? \
	 ({ __acquire(x); true; }) : false)
#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
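/*
 * Illustrative only: the canonical get_ldev()/put_ldev() bracket around
 * access to the local backing device.  The helper is made up for this
 * sketch; the pattern appears throughout drbd_main.c and drbd_req.c.
 */
#if 0
static void example_use_ldev(struct drbd_device *device)
{
	if (!get_ldev(device))
		return;	/* no local disk, or disk state below D_INCONSISTENT */

	/* ... safe to dereference device->ldev here ... */

	put_ldev(device);
}
#endif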
static inline void put_ldev(struct drbd_device *device)
{
	enum drbd_disk_state disk_state = device->state.disk;
	/* We must check the state *before* the atomic_dec becomes visible,
	 * or we have a theoretical race where someone hitting zero,
	 * while state still D_FAILED, would then see D_DISKLESS in the
	 * condition below and call into destroy, which it must not do yet. */
	int i = atomic_dec_return(&device->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(device, i >= 0);
	if (i == 0) {
		if (disk_state == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_device_post_work(device, DESTROY_DISK);
		if (disk_state == D_FAILED)
			/* all application IO references gone. */
			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
				drbd_device_post_work(device, GO_DISKLESS);
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;	/* arbitrary limit on open requests */
	rcu_read_unlock();

	return mxb;
}
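/*
 * Illustrative only: the same RCU read pattern as drbd_get_max_buffers(),
 * applied to a different net_conf field.  "timeout" is assumed to be a
 * net_conf member; the helper itself is made up for this sketch.
 */
#if 0
static unsigned int example_get_timeout(struct drbd_device *device)
{
	struct net_conf *nc;
	unsigned int t;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	t = nc ? nc->timeout : 0;
	rcu_read_unlock();

	return t;
}
#endif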
static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
		/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

		/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

		/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		/* disk state is stable as well. */
		break;

		/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;

	return resource->susp || resource->susp_fen || resource->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (atomic_read(&device->suspend_cnt))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(device))
		return false;

	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyway, we have this workaround. */
	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&device->resource->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&device->resource->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	/* we wait here
	 *    as long as the device is suspended,
	 *    while the bitmap is still on the fly during the connection
	 *    handshake, and as long as we would exceed the max_buffers limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}
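/*
 * Illustrative only: the inc_ap_bio()/dec_ap_bio() bracket around one
 * application bio.  The helper is made up for this sketch; the real
 * pairing lives in the request code, where dec_ap_bio() runs on
 * completion.
 */
#if 0
static void example_submit_app_bio(struct drbd_device *device, struct bio *bio)
{
	inc_ap_bio(device);	/* may sleep until IO is allowed again */
	/* ... hand the bio to the request code; its completion path
	 * ends with dec_ap_bio(device) ... */
}
#endif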
static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(device, ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->
				connection->sender_work,
				&device->bm_io_work.w);
	}

	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry_or_null(&resource->connections,
				struct drbd_connection, connections);
}

#endif