/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern int disable_sendpage;
extern int allow_oos;
extern unsigned int cn_idx;

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

/* All EEs on the free list should have ID_VACANT (== 0)
 * freshly allocated EEs get !ID_VACANT (== 1)
 * so if it says "cannot dereference null pointer at address 0x00000001",
 * it is most likely one of these :( */

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)

#define ID_SYNCER (-1ULL)
#define ID_VACANT 0
#define is_syncer_block_id(id) ((id) == ID_SYNCER)

struct drbd_conf;


/* to shorten dev_warn(DEV, "msg"); and relatives statements */
#define DEV (disk_to_dev(mdev->vdisk))

#define D_ASSERT(exp)	if (!(exp)) \
	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)

#define ERR_IF(exp) if (({				\
	int _b = (exp) != 0;				\
	if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",	\
		__func__, #exp, __FILE__, __LINE__);	\
	_b;						\
	}))

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(mdev, type);
}
#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))

#else
#define FAULT_ACTIVE(_m, _t) (0)
#endif
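/* Illustrative sketch (not part of the driver): enable_faults is a bitmask
 * indexed by the fault types above, so enabling only meta data write faults
 * would amount to module parameter settings like
 *
 *	enable_faults = 1 << DRBD_FAULT_MD_WR;	// == 0x01
 *	fault_rate = 1;		// assumption: rate is interpreted by _drbd_insert_fault()
 *
 * after which FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR) fires pseudo-randomly
 * while all other fault types stay disabled.
 */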
/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

/* drbd_meta-data.c (still in drbd_main.c) */
/* 4th incarnation of the disk layout. */
#define DRBD_MD_MAGIC (DRBD_MAGIC+4)

extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;

/* on the wire */
enum drbd_packets {
	/* receiver (data socket) */
	P_DATA		      = 0x00,
	P_DATA_REPLY	      = 0x01, /* Response to P_DATA_REQUEST */
	P_RS_DATA_REPLY	      = 0x02, /* Response to P_RS_DATA_REQUEST */
	P_BARRIER	      = 0x03,
	P_BITMAP	      = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE	      = 0x07, /* Used at various times to hint the peer */
	P_DATA_REQUEST	      = 0x08, /* Used to ask for a data block */
	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
	P_SYNC_PARAM	      = 0x0a,
	P_PROTOCOL	      = 0x0b,
	P_UUIDS		      = 0x0c,
	P_SIZES		      = 0x0d,
	P_STATE		      = 0x0e,
	P_SYNC_UUID	      = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE	      = 0x11,
	P_STATE_CHG_REQ	      = 0x12,

	/* asender (meta socket) */
	P_PING		      = 0x13,
	P_PING_ACK	      = 0x14,
	P_RECV_ACK	      = 0x15, /* Used in protocol B */
	P_WRITE_ACK	      = 0x16, /* Used in protocol C */
	P_RS_WRITE_ACK	      = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
	P_DISCARD_ACK	      = 0x18, /* Used in proto C, two-primaries conflict detection */
	P_NEG_ACK	      = 0x19, /* Sent if local disk is unusable */
	P_NEG_DREPLY	      = 0x1a, /* Local disk is broken... */
	P_NEG_RS_DREPLY	      = 0x1b, /* Local disk is broken... */
	P_BARRIER_ACK	      = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	/* "new" commands, no longer fitting into the ordering scheme above */

	P_OV_REQUEST	      = 0x1e, /* data socket */
	P_OV_REPLY	      = 0x1f,
	P_OV_RESULT	      = 0x20, /* meta socket */
	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
	/* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
	/* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
	P_DELAY_PROBE	      = 0x27, /* is used on BOTH sockets */

	P_MAX_CMD	      = 0x28,
	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
	P_MAX_OPT_CMD	      = 0x101,

	/* special command ids for handshake */

	P_HAND_SHAKE_M	      = 0xfff1, /* First Packet on the MetaSock */
	P_HAND_SHAKE_S	      = 0xfff2, /* First Packet on the Socket */

	P_HAND_SHAKE	      = 0xfffe	/* FIXED for the next century! */
};
static inline const char *cmdname(enum drbd_packets cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]		= "Data",
		[P_DATA_REPLY]		= "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]		= "Barrier",
		[P_BITMAP]		= "ReportBitMap",
		[P_BECOME_SYNC_TARGET]	= "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]	= "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]	= "RSDataRequest",
		[P_SYNC_PARAM]		= "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]		= "ReportProtocol",
		[P_UUIDS]		= "ReportUUIDs",
		[P_SIZES]		= "ReportSizes",
		[P_STATE]		= "ReportState",
		[P_SYNC_UUID]		= "ReportSyncUUID",
		[P_AUTH_CHALLENGE]	= "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]		= "PingAck",
		[P_RECV_ACK]		= "RecvAck",
		[P_WRITE_ACK]		= "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_DISCARD_ACK]		= "DiscardAck",
		[P_NEG_ACK]		= "NegAck",
		[P_NEG_DREPLY]		= "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]		= "BarrierAck",
		[P_STATE_CHG_REQ]	= "StateChgRequest",
		[P_STATE_CHG_REPLY]	= "StateChgReply",
		[P_OV_REQUEST]		= "OVRequest",
		[P_OV_REPLY]		= "OVReply",
		[P_OV_RESULT]		= "OVResult",
		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]	= "CBitmap",
		[P_DELAY_PROBE]		= "DelayProbe",
		[P_MAX_CMD]		= NULL,
	};

	if (cmd == P_HAND_SHAKE_M)
		return "HandShakeM";
	if (cmd == P_HAND_SHAKE_S)
		return "HandShakeS";
	if (cmd == P_HAND_SHAKE)
		return "HandShake";
	if (cmd >= P_MAX_CMD)
		return "Unknown";
	return cmdnames[cmd];
}

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
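/* Worked example (illustrative): on a 32 bit arch with bit_offset == 100,
 * 100 >> 5 yields word 3; clearing the low bit re-aligns to word 2, i.e.
 * bit 64, the last 64 bit aligned boundary at or below bit 100.  A clear
 * text packet transmitted in between thus always starts 64 bit aligned.
 */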
#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* This is the layout for a packet on the wire.
 * The byteorder is the network byte order.
 *     (except block_id and barrier fields.
 *	these are pointers to local structs
 *	and have no relevance for the partner,
 *	which just echoes them as received.)
 *
 * NOTE that the payload starts at a long aligned offset,
 * regardless of 32 or 64 bit arch!
 */
struct p_header {
	u32	  magic;
	u16	  command;
	u16	  length;	/* bytes of data after this header */
	u8	  payload[0];
} __packed;
/* 8 bytes. packet FIXED for the next century! */
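/* Minimal sketch (illustrative, not the driver's actual send path) of
 * putting such a header on the wire; multi-byte fields are converted to
 * network byte order first, so the peer can decode them portably
 * (drbd_send() is declared further down in this header):
 *
 *	struct p_header h;
 *	h.magic   = cpu_to_be32(DRBD_MAGIC);
 *	h.command = cpu_to_be16(P_PING);
 *	h.length  = cpu_to_be16(0);	// no payload
 *	drbd_send(mdev, mdev->meta.socket, &h, sizeof(h), 0);
 */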
/*
 * short commands, packets without payload, plain p_header:
 *   P_PING
 *   P_PING_ACK
 *   P_BECOME_SYNC_TARGET
 *   P_BECOME_SYNC_SOURCE
 *   P_UNPLUG_REMOTE
 */

/*
 * commands with out-of-struct payload:
 *   P_BITMAP    (no additional fields)
 *   P_DATA, P_DATA_REPLY (see p_data)
 *   P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
 */

/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER	      1
#define DP_RW_SYNC	      2
#define DP_MAY_SET_IN_SYNC    4

struct p_data {
	struct p_header head;
	u64	    sector;    /* 64 bits sector number */
	u64	    block_id;  /* to identify the request in protocol B&C */
	u32	    seq_num;
	u32	    dp_flags;
} __packed;

/*
 * commands which share a struct:
 *  p_block_ack:
 *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *   P_DISCARD_ACK (proto C, two-primaries conflict detection)
 *  p_block_req:
 *   P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {
	struct p_header head;
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;


struct p_block_req {
	struct p_header head;
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

/*
 * commands with their own struct for additional fields:
 *   P_HAND_SHAKE
 *   P_BARRIER
 *   P_BARRIER_ACK
 *   P_SYNC_PARAM
 *   ReportParams
 */

struct p_handshake {
	struct p_header head;	/* 8 bytes */
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	/* should be more than enough for future enhancements
	 * for now, feature_flags and the reserved array shall be zero.
	 */

	u32 _pad;
	u64 reserved[7];
} __packed;
/* 80 bytes, FIXED for the next century */

struct p_barrier {
	struct p_header head;
	u32 barrier;	/* barrier number _handle_ only */
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

struct p_barrier_ack {
	struct p_header head;
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	struct p_header head;
	u32 rate;

	/* Since protocol version 88 and higher. */
	char verify_alg[0];
} __packed;

struct p_rs_param_89 {
	struct p_header head;
	u32 rate;
	/* protocol version 89: */
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

enum drbd_conn_flags {
	CF_WANT_LOSE = 1,
	CF_DRY_RUN = 2,
};

struct p_protocol {
	struct p_header head;
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

	/* Since protocol version 87 and higher. */
	char integrity_alg[0];

} __packed;

struct p_uuids {
	struct p_header head;
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	struct p_header head;
	u64	    uuid;
} __packed;

struct p_sizes {
	struct p_header head;
	u64	    d_size;  /* size of disk */
	u64	    u_size;  /* user requested size */
	u64	    c_size;  /* current exported size */
	u32	    max_segment_size;  /* Maximal size of a BIO */
	u16	    queue_order_type;  /* not yet implemented in DRBD */
	u16	    dds_flags; /* use enum dds_flags here. */
} __packed;

struct p_state {
	struct p_header head;
	u32	    state;
} __packed;

struct p_req_state {
	struct p_header head;
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	struct p_header head;
	u32	    retcode;
} __packed;

struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_discard {
	struct p_header head;
	u64	    block_id;
	u32	    seq_num;
	u32	    pad;
} __packed;

/* Valid values for the encoding field.
 * Bump proto version when changing this. */
enum drbd_bitmap_code {
	/* RLE_VLI_Bytes = 0,
	 * and other bit variants had been defined during
	 * algorithm evaluation. */
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	struct p_header head;
	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
	 * (encoding & 0x80): polarity (set/unset) of first runlength
	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
	 * used to pad up to head.length bytes
	 */
	u8 encoding;

	u8 code[0];
} __packed;

struct p_delay_probe {
	struct p_header head;
	u32	seq_num; /* sequence number to match the two probe packets */
	u32	offset;	 /* usecs the probe got sent after the reference time point */
} __packed;

struct delay_probe {
	struct list_head list;
	unsigned int seq_num;
	struct timeval time;
};

/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static inline void
DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static inline int
DCBP_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static inline void
DCBP_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static inline int
DCBP_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}

static inline void
DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	/* mask must be ~(0x7 << 4): clear only the pad bits (4..6);
	 * (~0x7 << 4) would also wipe the code nibble */
	p->encoding = (p->encoding & ~(0x7 << 4)) | (n << 4);
}
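/* Example (illustrative only): packing RLE_VLI_Bits, 3 pad bits and a
 * first run of set bits into the single encoding byte:
 *
 *	p->encoding = 0;
 *	DCBP_set_code(p, RLE_VLI_Bits);	// low nibble:  0x02
 *	DCBP_set_pad_bits(p, 3);	// bits 4..6:  0x30
 *	DCBP_set_start(p, 1);		// bit 7:      0x80
 *	// p->encoding == 0xb2; the getters above recover each field.
 */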
/* one bitmap packet, including the p_header,
 * should fit within one _architecture independent_ page.
 * so we need to use the fixed size 4KiB page size
 * most architectures have used for a long time.
 */
#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
#if (PAGE_SIZE < 4096)
/* drbd_send_bitmap / receive_bitmap would break horribly */
#error "PAGE_SIZE too small"
#endif

union p_polymorph {
	struct p_header		 header;
	struct p_handshake	 handshake;
	struct p_data		 data;
	struct p_block_ack	 block_ack;
	struct p_barrier	 barrier;
	struct p_barrier_ack	 barrier_ack;
	struct p_rs_param_89	 rs_param_89;
	struct p_protocol	 protocol;
	struct p_sizes		 sizes;
	struct p_uuids		 uuids;
	struct p_state		 state;
	struct p_req_state	 req_state;
	struct p_req_state_reply req_state_reply;
	struct p_block_req	 block_req;
} __packed;

/**********************************************************************/
enum drbd_thread_state {
	None,
	Running,
	Exiting,
	Restarting
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_conf *mdev;
	int reset_cpu_mask;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}


/*
 * Having this as the first member of a struct provides sort of "inheritance".
 * "derived" structs can be "drbd_queue_work()"ed.
 * The callback should know and cast back to the descendant struct.
 * drbd_request and drbd_epoch_entry are descendants of drbd_work.
 */
struct drbd_work;
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
	struct list_head list;
	drbd_work_cb cb;
};
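/* Sketch of the "inheritance" idiom (illustrative; my_req_cb is hypothetical):
 * since w is the first member, a callback may cast the drbd_work pointer
 * back to the containing struct (container_of() would work as well):
 *
 *	static int my_req_cb(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 *	{
 *		struct drbd_request *req = (struct drbd_request *)w;
 *		// ... act on req ...
 *		return 1;
 *	}
 *
 *	req->w.cb = my_req_cb;
 *	drbd_queue_work(&mdev->data.work, &req->w);
 */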
struct drbd_tl_epoch;
struct drbd_request {
	struct drbd_work w;
	struct drbd_conf *mdev;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_endio_pri(). */
	struct bio *private_bio;

	struct hlist_node colision;
	sector_t sector;
	unsigned int size;
	unsigned int epoch; /* barrier_nr */

	/* barrier_nr: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * starting a new epoch...
	 */

	/* up to here, the struct layout is identical to drbd_epoch_entry;
	 * we might be able to use that to our advantage...  */

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */
	unsigned long rq_state; /* see comments above _req_mod() */
	int seq_num;
	unsigned long start_time;
};

struct drbd_tl_epoch {
	struct drbd_work w;
	struct list_head requests; /* requests before */
	struct drbd_tl_epoch *next; /* pointer to the next barrier */
	unsigned int br_number;  /* the barrier's identifier. */
	int n_req;	/* number of requests attached before this barrier */
};

struct drbd_request;

/* These Tl_epoch_entries may be in one of these lists:
   active_ee .. data packet being written
   sync_ee   .. syncer block being written
   done_ee   .. block written, need to send P_WRITE_ACK
   read_ee   .. [RS]P_DATA_REQUEST being read
*/

struct drbd_epoch {
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* drbd_epoch flag bits */
enum {
	DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
	DE_BARRIER_IN_NEXT_EPOCH_DONE,
	DE_CONTAINS_A_BARRIER,
	DE_HAVE_BARRIER_NUMBER,
	DE_IS_FINISHING,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BARRIER_DONE,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct drbd_wq_barrier {
	struct drbd_work w;
	struct completion done;
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_epoch_entry {
	struct drbd_work w;
	struct hlist_node colision;
	struct drbd_epoch *epoch;
	struct drbd_conf *mdev;
	struct page *pages;
	atomic_t pending_bios;
	unsigned int size;
	/* see comments on ee flag bits below */
	unsigned long flags;
	sector_t sector;
	u64 block_id;
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* This epoch entry closes an epoch using a barrier.
	 * On successful completion, the epoch is released,
	 * and the P_BARRIER_ACK sent. */
	__EE_IS_BARRIER,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per epoch entry.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
/* global flag bits */
enum {
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SEND_PING,		/* whether asender should send a ping asap */

	STOP_SYNC_TIMER,	/* tell timer to cancel itself */
	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CLUSTER_ST_CHANGE,	/* Cluster wide state change going on... */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
	CONSIDER_RESYNC,

	MD_NO_BARRIER,		/* meta data device does not support barriers,
				 * so don't even try */
	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				 * once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	NET_CONGESTED,		/* The data socket is congested */

	CONFIG_PENDING,		/* serialization of (re)configuration requests.
				 * if set, also prevents the device from dying */
	DEVICE_DYING,		/* device became unconfigured,
				 * but worker thread is still handling the cleanup.
				 * reconfiguring (nl_disk_conf, nl_net_conf) is disallowed,
				 * while this is set. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, misc wait gets woken */
};

struct drbd_bitmap; /* opaque for drbd_conf */

/* TODO sort members for performance
 * MAYBE group them further */

/* THINK maybe we actually want to use the default "event/%s" worker threads
 * or similar in linux 2.6, which uses per cpu data and threads.
 *
 * To be general, this might need a spin_lock member.
 * For now, please use the mdev->req_lock to protect list_head,
 * see drbd_queue_work below.
 */
struct drbd_work_queue {
	struct list_head q;
	struct semaphore s; /* producers up it, worker down()s it */
	spinlock_t q_lock;  /* to protect the list. */
};

struct drbd_socket {
	struct drbd_work_queue work;
	struct mutex mutex;
	struct socket	 *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	union p_polymorph sbuf;
	union p_polymorph rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to al area */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* u32 al_nr_extents;	   important for restoring the AL
	 * is stored into  sync_conf.al_extents, which in turn
	 * gets applied to act_log->nr_elements
	 */
};

/* for sync_conf and other types... */
#define NL_PACKET(name, number, fields) struct name { fields };
#define NL_INTEGER(pn,pr,member) int member;
#define NL_INT64(pn,pr,member) __u64 member;
#define NL_BIT(pn,pr,member) unsigned member:1;
#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
#include "linux/drbd_nl.h"
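/* Illustrative x-macro expansion (assuming drbd_nl.h contains an entry of
 * roughly this shape; the exact fields live in that header): a definition
 * like
 *
 *	NL_PACKET(syncer_conf, 8,
 *		NL_INTEGER(30, T_MAY_IGNORE, rate)
 *		NL_STRING(52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX)
 *	)
 *
 * expands, with the NL_* definitions above, to
 *
 *	struct syncer_conf {
 *		int rate;
 *		unsigned char verify_alg[SHARED_SECRET_MAX];
 *		int verify_alg_len;
 *	};
 */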
struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct file *lo_file;
	struct file *md_file;
	struct drbd_md md;
	struct disk_conf dc; /* The user provided config... */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct drbd_conf *mdev;
	struct completion event;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	int (*io_fn)(struct drbd_conf *mdev);
	void (*done)(struct drbd_conf *mdev, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
	WO_bio_barrier
};

struct drbd_conf {
	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
	struct syncer_conf sync_conf;
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	struct drbd_socket data; /* data/barrier/cstate/parameter packets */
	struct drbd_socket meta; /* ping/ack (metadata) packets */
	int agreed_pro_version;  /* actually used protocol version */
	unsigned long last_received; /* in jiffies, either socket */
	unsigned int ko_count;
	struct drbd_work  resync_work,
			  unplug_work,
			  md_sync_work,
			  delay_probe_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list delay_probe_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t net_cnt;	 /* Users of net_conf */
	spinlock_t req_lock;
	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
	struct drbd_tl_epoch *newest_tle;
	struct drbd_tl_epoch *oldest_tle;
	struct list_head out_of_sequence_requests;
	struct hlist_head *tl_hash;
	unsigned int tl_hash_s;

	/* blocks to sync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of sync IOs that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left;
	/* mark's time [unit jiffies] */
	unsigned long rs_mark_time;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;

	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;
	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	enum write_ordering_e write_ordering;
	struct list_head active_ee; /* IO in progress */
	struct list_head sync_ee;   /* IO in progress */
	struct list_head done_ee;   /* send ack */
	struct list_head read_ee;   /* IO in progress */
	struct list_head net_ee;    /* zero-copy network send in progress */
	struct hlist_head *ee_hash; /* is protected by req_lock! */
	unsigned int ee_hash_s;

	/* this one is protected by ee_lock, single thread */
	struct drbd_epoch_entry *last_write_w_barrier;

	int next_barrier_nr;
	struct hlist_head *app_reads_hash; /* is protected by req_lock */
	struct list_head resync_reads;
	atomic_t pp_in_use;
	wait_queue_head_t ee_wait;
	struct page *md_io_page;	/* one page buffer for md_io */
	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
	struct mutex md_io_mutex;	/* protects the md_io_buffer */
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	int al_tr_pos;   /* position of the next transaction in the journal */
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
	struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
	void *int_dig_out;
	void *int_dig_in;
	void *int_dig_vv;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	cpumask_var_t cpu_mask;
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex state_mutex;
	char congestion_reason;  /* Why we were congested... */
	struct list_head delay_probes; /* protected by peer_seq_lock */
	int data_delay;   /* Delay of packets on the data-sock behind meta-sock */
	unsigned int delay_seq; /* To generate sequence numbers of delay probes */
	struct timeval dps_time; /* delay-probes-start-time */
	unsigned int dp_volume_last;  /* send_cnt of last delay probe */
	int c_sync_rate; /* current resync rate after delay_probe magic */
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
	struct drbd_conf *mdev;

	mdev = minor < minor_count ? minor_table[minor] : NULL;

	return mdev;
}

static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}

/* returns 1 if it was successful,
 * returns 0 if there was no data socket.
 * so wherever you are going to use the data.socket, e.g. do
 * if (!drbd_get_data_sock(mdev))
 *	return 0;
 * CODE();
 * drbd_put_data_sock(mdev);
 */
static inline int drbd_get_data_sock(struct drbd_conf *mdev)
{
	mutex_lock(&mdev->data.mutex);
	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (unlikely(mdev->data.socket == NULL)) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}
	return 1;
}
static inline void drbd_put_data_sock(struct drbd_conf *mdev)
{
	mutex_unlock(&mdev->data.mutex);
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum chg_state_flags {
	CS_HARD		 = 1,
	CS_VERBOSE	 = 2,
	CS_WAIT_COMPLETE = 4,
	CS_SERIALIZE	 = 8,
	CS_ORDERED	 = CS_WAIT_COMPLETE + CS_SERIALIZE,
};

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
			union drbd_state mask, union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
			union drbd_state);
extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
			union drbd_state, enum chg_state_flags);
extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
			enum chg_state_flags, struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
			union drbd_state, int);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
extern void drbd_free_resources(struct drbd_conf *mdev);
extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_conf *mdev);
extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
extern void drbd_free_sock(struct drbd_conf *mdev);
extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
			void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
			enum drbd_packets cmd, struct p_header *h,
			size_t size, unsigned msg_flags);
#define USE_DATA_SOCKET 1
#define USE_META_SOCKET 0
extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
			enum drbd_packets cmd, struct p_header *h,
			size_t size);
extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
			char *data, size_t size);
extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
			u32 set_size);
extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct drbd_epoch_entry *e);
extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_block_req *rp);
extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_data *dp);
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
			sector_t sector, int blksize, u64 block_id);
extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
			   struct drbd_epoch_entry *e);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int _drbd_send_barrier(struct drbd_conf *mdev,
			struct drbd_tl_epoch *barrier);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
				   sector_t sector, int size,
				   void *digest, int digest_size,
				   enum drbd_packets cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_conf *mdev);
extern int _drbd_send_bitmap(struct drbd_conf *mdev);
extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);

/* drbd_meta-data.c (still in drbd_main.c) */
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
/* maybe define them below as inline? */
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
				 int (*io_fn)(struct drbd_conf *),
				 void (*done)(struct drbd_conf *, int),
				 char *why);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);


/* Meta data layout
   We reserve a 128MB block (4k aligned)
   * either at the end of the backing device
   * or on a separate meta data device. */

#define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
/* The following numbers are sectors */
#define MD_AL_OFFSET 8	    /* 8 Sectors after start of meta area */
#define MD_AL_MAX_SIZE 64   /* = 32 kB LOG  ~ 3776 extents ~ 14 GB Storage */
/* Allows up to about 3.8TB */
#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
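/* Worked example (illustrative): MD_RESERVED_SECT is 128 << 11 == 262144
 * sectors of 512 bytes == 128 MB.  Sector 0 of that area holds the super
 * block, the activity log starts at sector MD_AL_OFFSET == 8, and the
 * bitmap starts at sector MD_BM_OFFSET == 8 + 64 == 72, leaving
 * 262144 - 72 sectors for the on disk bitmap.
 */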
/* Since the smallest IO unit is usually 512 bytes */
#define MD_SECTOR_SHIFT	 9
#define MD_SECTOR_SIZE	 (1<<MD_SECTOR_SHIFT)

/* activity log */
#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
#define AL_EXTENT_SHIFT 22		 /* One extent represents 4M Storage */
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define BM_BLOCK_SHIFT	12	/* 4k per bit */
#define BM_BLOCK_SIZE	(1<<BM_BLOCK_SHIFT)
/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
 * per sector of on disk bitmap */
#define BM_EXT_SHIFT	(BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3)  /* = 24 */
#define BM_EXT_SIZE	(1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to kilobyte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))

/* how many _storage_ sectors we have per bitmap sector */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
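/* Worked example (illustrative): BM_BLOCK_SHIFT - 9 == 3, so one bitmap
 * bit covers 4 KiB == 8 sectors: BM_SECT_TO_BIT(24) == 24 >> 3 == 3 and
 * BM_BIT_TO_SECT(3) == 3 << 3 == 24.  Likewise Bit2KB(1) == 4, and one
 * bitmap extent spans BM_SECT_PER_EXT == 1 << 15 == 32768 sectors (16 MB).
 */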
/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 *		     of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	  `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )	 //   4
*/

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
#define DRBD_MAX_SECTORS_BM \
	  ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
#endif
#endif

/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
 * With a value of 6 all IO in one 32K block makes it to the same slot of the
 * hash table. */
#define HT_SHIFT 6
#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
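/* Arithmetic check (illustrative): DRBD_MAX_SEGMENT_SIZE == 1U << 15 ==
 * 32768 bytes == 64 sectors, so requests shifted right by HT_SHIFT share
 * one hash key per 32K block; the actual hash functions live in the .c files.
 */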
/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15

extern int  drbd_bm_init(struct drbd_conf *mdev);
extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
extern int  drbd_bm_set_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock */
extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
		unsigned long al_enr);
extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
extern int drbd_bm_rs_done(struct drbd_conf *mdev);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap and drbd_bm_write_sect */
extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
extern void drbd_bm_unlock(struct drbd_conf *mdev);

extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* epoch entries */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

extern struct page *drbd_pp_pool; /* drbd's page pool */
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

extern rwlock_t global_state_lock;

extern struct drbd_conf *drbd_new_device(unsigned int minor);
extern void drbd_free_mdev(struct drbd_conf *mdev);

extern int proc_details;

/* drbd_req */
extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


/* drbd_nl.c */
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
		int force);
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);

/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);
/* maybe rather drbd_main.c ? */
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
		struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_oos_found(struct drbd_conf *, sector_t, int);

static inline void ov_oos_print(struct drbd_conf *mdev)
{
	if (mdev->ov_last_oos_size) {
		dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
		     (unsigned long long)mdev->ov_last_oos_start,
		     (unsigned long)mdev->ov_last_oos_size);
	}
	mdev->ov_last_oos_size = 0;
}


extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);

/* drbd_receiver.c */
extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
					    u64 id,
					    sector_t sector,
					    unsigned int data_size,
					    gfp_t gfp_mask) __must_hold(local);
extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e);
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
		struct list_head *head);
extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
		struct list_head *head);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
extern void drbd_flush_workqueue(struct drbd_conf *mdev);

/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
 * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
			char __user *optval, int optlen)
{
	int err;
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, optval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, optval,
					    optlen);
	return err;
}

static inline void drbd_tcp_cork(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int __user val = 0;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	int __user val = 2;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char __user *)&val, sizeof(val));
}
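/* Typical usage sketch (illustrative): batch several small sends into
 * fewer TCP segments by corking around them:
 *
 *	drbd_tcp_cork(mdev->data.socket);
 *	// ... several drbd_send() calls ...
 *	drbd_tcp_uncork(mdev->data.socket);	// flushes the pending data
 */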
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c */
extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
		sector_t sector, int size);
extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);


/* drbd_nl.c */

void drbd_nl_cleanup(void);
int __init drbd_nl_init(void);
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
void drbd_bcast_sync_progress(struct drbd_conf *mdev);
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e);


/**
 * DOC: DRBD State macros
 *
 * These macros are used to express state changes in easily readable form.
 *
 * The NS macros expand to a mask and a value, that can be bit-or'ed onto
 * the current state as soon as the spinlock (req_lock) was taken.
 *
 * The _NS macros are used for state functions that get called with the
 * spinlock held.  These macros expand directly to the new state value.
 *
 * Besides the basic forms NS() and _NS() additional _?NS[23] are defined
 * to express state changes that affect more than one aspect of the state.
 *
 * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
 * Means that the network connection was established and that the peer
 * is in secondary role.
 */
#define role_MASK R_MASK
#define peer_MASK R_MASK
#define disk_MASK D_MASK
#define pdsk_MASK D_MASK
#define conn_MASK C_MASK
#define susp_MASK 1
#define user_isp_MASK 1
#define aftr_isp_MASK 1

#define NS(T, S) \
	({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val.T3 = (S3); val; })

#define _NS(D, T, S) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
#define _NS2(D, T1, S1, T2, S2) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	__ns.T2 = (S2); __ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	__ns.T2 = (S2); __ns.T3 = (S3); __ns; })
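/* Usage sketch (illustrative): outside the spinlock the NS forms supply
 * the mask/val pair, under req_lock the _NS forms supply the precomputed
 * new state (both callees are declared/defined in this header):
 *
 *	drbd_request_state(mdev, NS(conn, C_DISCONNECTING));
 *	_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
 */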
/*
 * inline helper functions
 *************************/

/* see also page_chain_add and friends in drbd_receiver.c */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_bio_has_active_page(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, bio, i, 0) {
		if (page_count(bvec->bv_page) > 1)
			return 1;
	}

	return 0;
}

static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
{
	struct page *page = e->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}


static inline void drbd_state_lock(struct drbd_conf *mdev)
{
	wait_event(mdev->misc_wait,
		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
}

static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

static inline int _drbd_set_state(struct drbd_conf *mdev,
		union drbd_state ns, enum chg_state_flags flags,
		struct completion *done)
{
	int rv;

	read_lock(&global_state_lock);
	rv = __drbd_set_state(mdev, ns, flags, done);
	read_unlock(&global_state_lock);

	return rv;
}

/**
 * drbd_request_state() - Request a state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * This is the most graceful way of requesting a state change. It is quite
 * verbose in case the state change is not possible, and all those
 * state changes are globally serialized.
 */
static inline int drbd_request_state(struct drbd_conf *mdev,
				     union drbd_state mask,
				     union drbd_state val)
{
	return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
}

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
{
	switch (mdev->ldev->dc.on_io_error) {
	case EP_PASS_ON:
		if (!forcedetach) {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s. "
					"Passing error on...\n", where);
			break;
		}
		/* NOTE fall through to detach case if forcedetach set */
	case EP_DETACH:
	case EP_CALL_HELPER:
		if (mdev->state.disk > D_FAILED) {
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV, "Local IO failed in %s. "
				"Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error() - Handle the on_io_error setting, should be called from all io completion handlers
 * @mdev:	 DRBD device.
 * @error:	 Error code passed to the IO completion callback
 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
 *
 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
	int error, int forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&mdev->req_lock, flags);
		__drbd_chk_io_error_(mdev, forcedetach, where);
		spin_unlock_irqrestore(&mdev->req_lock, flags);
	}
}
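/* Illustrative only, not part of the original header: the intended call site
 * is an IO completion handler.  forcedetach = FALSE defers to the configured
 * on_io_error policy; meta data IO passes TRUE and therefore always detaches. */
#if 0
	static void some_endio(struct bio *bio, int error)
	{
		struct drbd_conf *mdev = bio->bi_private;

		drbd_chk_io_error(mdev, error, FALSE);
		bio_put(bio);
	}
#endif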
/**
 * drbd_md_first_sector() - Returns the first sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * BTW, for internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_AL_OFFSET - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
	return bdev ? bdev->bd_inode->i_size >> 9 : 0;
}

/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * Returns the capacity we announce to our peer. We clip ourselves at the
 * various MAX_SECTORS, because if we don't, the current implementation will
 * oops sooner or later.
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
					drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}
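/* Illustrative only, not part of the original header: a worked sketch of the
 * flexible internal layout that drbd_md_first_sector()/drbd_md_last_sector()
 * above and drbd_md_ss__() below encode.  The 4k super block sits in the last
 * 4k-aligned 4k block of the backing device; bm_offset is negative for
 * internal meta data, so bitmap and activity log live just below it:
 *
 *	md_offset = (drbd_get_capacity(backing_bdev) & ~7ULL) - MD_AL_OFFSET
 *	first     = md_offset + bdev->md.bm_offset	(start of meta area)
 *	last      = md_offset + MD_AL_OFFSET - 1	(end of super block)
 *
 * Everything below "first" remains usable as data area, which is why, for
 * internal meta data, drbd_md_first_sector() doubles as the maximum capacity
 * we can offer to the peer. */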
/**
 * drbd_md_ss__() - Return the sector number of our meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
				    struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	default: /* external, some index */
		return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
	case DRBD_MD_INDEX_INTERNAL:
		/* with drbd08, internal meta data is always "flexible" */
	case DRBD_MD_INDEX_FLEX_INT:
		/* sizeof(struct md_on_disk_07) == 4k
		 * position: last 4k aligned block of 4k size */
		if (!bdev->backing_bdev) {
			if (__ratelimit(&drbd_ratelimit_state)) {
				dev_err(DEV, "bdev->backing_bdev==NULL\n");
				dump_stack();
			}
			return 0;
		}
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
			- MD_AL_OFFSET;
	case DRBD_MD_INDEX_FLEX_EXT:
		return 0;
	}
}

static inline void
_drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	list_add_tail(&w->list, &q->q);
	up(&q->s);
}

static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);
	up(&q->s); /* within the spinlock,
		      see comment near end of drbd_worker() */
	spin_unlock_irqrestore(&q->q_lock, flags);
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	up(&q->s); /* within the spinlock,
		      see comment near end of drbd_worker() */
	spin_unlock_irqrestore(&q->q_lock, flags);
}
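/* Illustrative only, not part of the original header: queueing deferred work
 * for the worker thread.  A struct drbd_work carries a callback with the same
 * signature as w_e_reissue() above; the worker dequeues the item and invokes
 * w->cb. */
#if 0
	static int my_work_cb(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
	{
		/* do the deferred work; "cancel" is set during teardown */
		return 1;
	}

	/* then, from some context that owns a struct drbd_work *w: */
	w->cb = my_work_cb;
	drbd_queue_work(&mdev->data.work, w);		/* normal: append at tail */
	drbd_queue_work_front(&mdev->data.work, w);	/* urgent: insert at head */
#endif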
static inline void wake_asender(struct drbd_conf *mdev)
{
	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
		force_sig(DRBD_SIG, mdev->asender.task);
}

static inline void request_ping(struct drbd_conf *mdev)
{
	set_bit(SEND_PING, &mdev->flags);
	wake_asender(mdev);
}

static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
	enum drbd_packets cmd)
{
	struct p_header h;
	return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
}

static inline int drbd_send_ping(struct drbd_conf *mdev)
{
	struct p_header h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
}

static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
{
	struct p_header h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
}

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, FALSE, TRUE);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, FALSE, FALSE);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, TRUE, FALSE);
}

/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, queue_for_net_write or queue_for_net_read);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 *    (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
 *  _req_mod(req, data_received)
 *     [from receive_DataReply]
 *  _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 *     for some reason it is NOT decreased in got_NegAck,
 *     but in the resulting cleanup code from report_params.
 *     we should try to remember the reason for that...
 *  _req_mod(req, send_failed or send_canceled)
 *  _req_mod(req, connection_lost_while_pending)
 *     [from tl_clear_barrier]
 */
static inline void inc_ap_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which)				\
	if (atomic_read(&mdev->which) < 0)			\
		dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",	\
			__func__ , __LINE__ ,			\
			atomic_read(&mdev->which))

#define dec_ap_pending(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	if (atomic_dec_and_test(&mdev->ap_pending_cnt))		\
		wake_up(&mdev->misc_wait);			\
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)

/* counts how many resync-related answers we still expect from the peer
 *		     increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					   (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->rs_pending_cnt);
}

#define dec_rs_pending(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->rs_pending_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)

/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->unacked_cnt);
}

#define dec_unacked(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->unacked_cnt);				\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)

#define sub_unacked(mdev, n)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_sub(n, &mdev->unacked_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)


static inline void put_net_conf(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->net_cnt))
		wake_up(&mdev->misc_wait);
}

/**
 * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
 * @mdev:	DRBD device.
 *
 * You have to call put_net_conf() when finished working with mdev->net_conf.
 */
static inline int get_net_conf(struct drbd_conf *mdev)
{
	int have_net_conf;

	atomic_inc(&mdev->net_cnt);
	have_net_conf = mdev->state.conn >= C_UNCONNECTED;
	if (!have_net_conf)
		put_net_conf(mdev);
	return have_net_conf;
}
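/* Illustrative only, not part of the original header: the reference pattern
 * established by get_net_conf()/put_net_conf().  While the reference is held,
 * mdev->net_conf may be dereferenced; copy out what you need and drop the
 * reference again (drbd_get_max_buffers() below is a real user). */
#if 0
	if (get_net_conf(mdev)) {
		int mxb = mdev->net_conf->max_buffers;
		put_net_conf(mdev);
		/* ... work with the copied value, not the pointer ... */
	}
#endif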
/**
 * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
 * @M:		DRBD device.
 *
 * You have to call put_ldev() when finished working with mdev->ldev.
 */
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))

static inline void put_ldev(struct drbd_conf *mdev)
{
	__release(local);
	if (atomic_dec_and_test(&mdev->local_cnt))
		wake_up(&mdev->misc_wait);
	D_ASSERT(atomic_read(&mdev->local_cnt) >= 0);
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed)
		put_ldev(mdev);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
#endif
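/* Illustrative only, not part of the original header: the local-disk
 * reference pattern.  get_ldev() succeeds only while the disk state is at
 * least D_INCONSISTENT; get_ldev_if_state() lets a caller demand a higher
 * minimum (drbd_kick_lo() below is a real user of get_ldev()). */
#if 0
	if (get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		/* mdev->ldev is safe to use until the matching put */
		sector_t cap = drbd_get_capacity(mdev->ldev->backing_bdev);
		put_ldev(mdev);
	}
#endif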
/* you must have a "get_ldev" reference */
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	/*
	 * this is to break it at compile time when we change that
	 * (we may feel 4TB maximum storage per drbd is not enough)
	 */
	typecheck(unsigned long, mdev->rs_total);

	/* note: both rs_total and rs_left are in bits, i.e. in
	 * units of BM_BLOCK_SIZE.
	 * for the percentage, we don't care. */

	*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
	/* >> 10 to prevent overflow,
	 * +1 to prevent division by zero */
	if (*bits_left > mdev->rs_total) {
		/* doh. maybe a logic bug somewhere.
		 * may also be just a race condition
		 * between this and a disconnect during sync.
		 * for now, just prevent in-kernel buffer overflow.
		 */
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(mdev->state.conn),
				*bits_left, mdev->rs_total, mdev->rs_failed);
		*per_mil_done = 0;
	} else {
		/* make sure the calculation happens in long context */
		unsigned long tmp = 1000UL -
				(*bits_left >> 10)*1000UL
				/ ((mdev->rs_total >> 10) + 1UL);
		*per_mil_done = tmp;
	}
}


/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
	int mxb = 1000000; /* arbitrary limit on open requests */
	if (get_net_conf(mdev)) {
		mxb = mdev->net_conf->max_buffers;
		put_net_conf(mdev);
	}
	return mxb;
}

static inline int drbd_state_is_stable(union drbd_state s)
{

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
		/* maybe stable, look at the disk state */
		break;

	/* no new io accepted during transitional states
	 * like handshake or teardown */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
	case C_WF_BITMAP_S:
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_FAILED:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);

	if (mdev->state.susp)
		return 0;
	if (test_bit(SUSPEND_IO, &mdev->flags))
		return 0;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(mdev->state))
		return 0;

	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyways, we have this workaround. */
	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
		return 0;
	if (test_bit(BITMAP_IO, &mdev->flags))
		return 0;
	return 1;
}

/* I'd like to use wait_event_lock_irq,
 * but I'm not sure when it got introduced,
 * and not sure when it has 3 or 4 arguments */
static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
{
	/* compare with after_state_ch,
	 * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
	DEFINE_WAIT(wait);

	/* we wait here
	 *    as long as the device is suspended,
	 *    while the bitmap is still on the fly during connection handshake,
	 *    and as long as we would exceed the max_buffer limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	spin_lock_irq(&mdev->req_lock);
	while (!__inc_ap_bio_cond(mdev)) {
		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		schedule();
		finish_wait(&mdev->misc_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
	atomic_add(count, &mdev->ap_bio_cnt);
	spin_unlock_irq(&mdev->req_lock);
}
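/* Illustrative only, not part of the original header: inc_ap_bio() and
 * dec_ap_bio() (below) bracket the lifetime of an application bio.  The
 * request path takes a slot, possibly sleeping until the state is stable
 * and a slot is free; the completion path releases it. */
#if 0
	inc_ap_bio(mdev, 1);	/* before handing the request on */
	/* ... submit bio, eventually complete ... */
	dec_ap_bio(mdev);	/* from the completion path */
#endif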
static inline void dec_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);
	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&mdev->misc_wait);
	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}
}

static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
	mdev->ed_uuid = val;
}

static inline int seq_cmp(u32 a, u32 b)
{
	/* we assume wrap around at 32bit.
	 * for wrap around at 24bit (old atomic_t),
	 * we'd have to
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)(a) - (s32)(b);
}
#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
/* CAUTION: please no side effects in arguments! */
#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))

static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
{
	unsigned int m;
	spin_lock(&mdev->peer_seq_lock);
	m = seq_max(mdev->peer_seq, new_seq);
	mdev->peer_seq = m;
	spin_unlock(&mdev->peer_seq_lock);
	if (m == new_seq)
		wake_up(&mdev->seq_wait);
}

static inline void drbd_update_congested(struct drbd_conf *mdev)
{
	struct sock *sk = mdev->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &mdev->flags);
}

static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline void drbd_blk_run_queue(struct request_queue *q)
{
	if (q && q->unplug_fn)
		q->unplug_fn(q);
}

static inline void drbd_kick_lo(struct drbd_conf *mdev)
{
	if (get_ldev(mdev)) {
		drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
		put_ldev(mdev);
	}
}

static inline void drbd_md_flush(struct drbd_conf *mdev)
{
	int r;

	if (test_bit(MD_NO_BARRIER, &mdev->flags))
		return;

	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
			       BLKDEV_IFL_WAIT);
	if (r) {
		set_bit(MD_NO_BARRIER, &mdev->flags);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}

#endif