1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 drbd_receiver.c 4 5 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 6 7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 10 11 */ 12 13 14 #include <linux/module.h> 15 16 #include <linux/uaccess.h> 17 #include <net/sock.h> 18 19 #include <linux/drbd.h> 20 #include <linux/fs.h> 21 #include <linux/file.h> 22 #include <linux/in.h> 23 #include <linux/mm.h> 24 #include <linux/memcontrol.h> 25 #include <linux/mm_inline.h> 26 #include <linux/slab.h> 27 #include <uapi/linux/sched/types.h> 28 #include <linux/sched/signal.h> 29 #include <linux/pkt_sched.h> 30 #define __KERNEL_SYSCALLS__ 31 #include <linux/unistd.h> 32 #include <linux/vmalloc.h> 33 #include <linux/random.h> 34 #include <linux/string.h> 35 #include <linux/scatterlist.h> 36 #include <linux/part_stat.h> 37 #include "drbd_int.h" 38 #include "drbd_protocol.h" 39 #include "drbd_req.h" 40 #include "drbd_vli.h" 41 42 #define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME|DRBD_FF_WZEROES) 43 44 struct packet_info { 45 enum drbd_packet cmd; 46 unsigned int size; 47 unsigned int vnr; 48 void *data; 49 }; 50 51 enum finish_epoch { 52 FE_STILL_LIVE, 53 FE_DESTROYED, 54 FE_RECYCLED, 55 }; 56 57 static int drbd_do_features(struct drbd_connection *connection); 58 static int drbd_do_auth(struct drbd_connection *connection); 59 static int drbd_disconnected(struct drbd_peer_device *); 60 static void conn_wait_active_ee_empty(struct drbd_connection *connection); 61 static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event); 62 static int e_end_block(struct drbd_work *, int); 63 64 65 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) 66 67 /* 68 * some helper functions to deal with single linked page lists, 69 * page->private being our "next" pointer. 70 */ 71 72 /* If at least n pages are linked at head, get n pages off. 73 * Otherwise, don't modify head, and return NULL. 74 * Locking is the responsibility of the caller. 75 */ 76 static struct page *page_chain_del(struct page **head, int n) 77 { 78 struct page *page; 79 struct page *tmp; 80 81 BUG_ON(!n); 82 BUG_ON(!head); 83 84 page = *head; 85 86 if (!page) 87 return NULL; 88 89 while (page) { 90 tmp = page_chain_next(page); 91 if (--n == 0) 92 break; /* found sufficient pages */ 93 if (tmp == NULL) 94 /* insufficient pages, don't use any of them. */ 95 return NULL; 96 page = tmp; 97 } 98 99 /* add end of list marker for the returned list */ 100 set_page_private(page, 0); 101 /* actual return value, and adjustment of head */ 102 page = *head; 103 *head = tmp; 104 return page; 105 } 106 107 /* may be used outside of locks to find the tail of a (usually short) 108 * "private" page chain, before adding it back to a global chain head 109 * with page_chain_add() under a spinlock. 
*/ 110 static struct page *page_chain_tail(struct page *page, int *len) 111 { 112 struct page *tmp; 113 int i = 1; 114 while ((tmp = page_chain_next(page))) { 115 ++i; 116 page = tmp; 117 } 118 if (len) 119 *len = i; 120 return page; 121 } 122 123 static int page_chain_free(struct page *page) 124 { 125 struct page *tmp; 126 int i = 0; 127 page_chain_for_each_safe(page, tmp) { 128 put_page(page); 129 ++i; 130 } 131 return i; 132 } 133 134 static void page_chain_add(struct page **head, 135 struct page *chain_first, struct page *chain_last) 136 { 137 #if 1 138 struct page *tmp; 139 tmp = page_chain_tail(chain_first, NULL); 140 BUG_ON(tmp != chain_last); 141 #endif 142 143 /* add chain to head */ 144 set_page_private(chain_last, (unsigned long)*head); 145 *head = chain_first; 146 } 147 148 static struct page *__drbd_alloc_pages(struct drbd_device *device, 149 unsigned int number) 150 { 151 struct page *page = NULL; 152 struct page *tmp = NULL; 153 unsigned int i = 0; 154 155 /* Yes, testing drbd_pp_vacant outside the lock is racy. 156 * So what. It saves a spin_lock. */ 157 if (drbd_pp_vacant >= number) { 158 spin_lock(&drbd_pp_lock); 159 page = page_chain_del(&drbd_pp_pool, number); 160 if (page) 161 drbd_pp_vacant -= number; 162 spin_unlock(&drbd_pp_lock); 163 if (page) 164 return page; 165 } 166 167 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD 168 * "criss-cross" setup, that might cause write-out on some other DRBD, 169 * which in turn might block on the other node at this very place. */ 170 for (i = 0; i < number; i++) { 171 tmp = alloc_page(GFP_TRY); 172 if (!tmp) 173 break; 174 set_page_private(tmp, (unsigned long)page); 175 page = tmp; 176 } 177 178 if (i == number) 179 return page; 180 181 /* Not enough pages immediately available this time. 182 * No need to jump around here, drbd_alloc_pages will retry this 183 * function "soon". */ 184 if (page) { 185 tmp = page_chain_tail(page, NULL); 186 spin_lock(&drbd_pp_lock); 187 page_chain_add(&drbd_pp_pool, page, tmp); 188 drbd_pp_vacant += i; 189 spin_unlock(&drbd_pp_lock); 190 } 191 return NULL; 192 } 193 194 static void reclaim_finished_net_peer_reqs(struct drbd_device *device, 195 struct list_head *to_be_freed) 196 { 197 struct drbd_peer_request *peer_req, *tmp; 198 199 /* The EEs are always appended to the end of the list. Since 200 they are sent in order over the wire, they have to finish 201 in order. As soon as we see the first not finished we can 202 stop to examine the list... 
	 */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device: DRBD device.
 * @number: number of pages requested
 * @retry: whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as a hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned int nr_pages = PFN_UP(payload_size);

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver. It is not endianness
	 * converted, and sent back to the sender unchanged.
397 */ 398 peer_req->block_id = id; 399 400 return peer_req; 401 402 fail: 403 mempool_free(peer_req, &drbd_ee_mempool); 404 return NULL; 405 } 406 407 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, 408 int is_net) 409 { 410 might_sleep(); 411 if (peer_req->flags & EE_HAS_DIGEST) 412 kfree(peer_req->digest); 413 drbd_free_pages(device, peer_req->pages, is_net); 414 D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); 415 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); 416 if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) { 417 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; 418 drbd_al_complete_io(device, &peer_req->i); 419 } 420 mempool_free(peer_req, &drbd_ee_mempool); 421 } 422 423 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) 424 { 425 LIST_HEAD(work_list); 426 struct drbd_peer_request *peer_req, *t; 427 int count = 0; 428 int is_net = list == &device->net_ee; 429 430 spin_lock_irq(&device->resource->req_lock); 431 list_splice_init(list, &work_list); 432 spin_unlock_irq(&device->resource->req_lock); 433 434 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { 435 __drbd_free_peer_req(device, peer_req, is_net); 436 count++; 437 } 438 return count; 439 } 440 441 /* 442 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier. 443 */ 444 static int drbd_finish_peer_reqs(struct drbd_device *device) 445 { 446 LIST_HEAD(work_list); 447 LIST_HEAD(reclaimed); 448 struct drbd_peer_request *peer_req, *t; 449 int err = 0; 450 451 spin_lock_irq(&device->resource->req_lock); 452 reclaim_finished_net_peer_reqs(device, &reclaimed); 453 list_splice_init(&device->done_ee, &work_list); 454 spin_unlock_irq(&device->resource->req_lock); 455 456 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 457 drbd_free_net_peer_req(device, peer_req); 458 459 /* possible callbacks here: 460 * e_end_block, and e_end_resync_block, e_send_superseded. 461 * all ignore the last argument. 462 */ 463 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { 464 int err2; 465 466 /* list_del not necessary, next/prev members not touched */ 467 err2 = peer_req->w.cb(&peer_req->w, !!err); 468 if (!err) 469 err = err2; 470 drbd_free_peer_req(device, peer_req); 471 } 472 wake_up(&device->ee_wait); 473 474 return err; 475 } 476 477 static void _drbd_wait_ee_list_empty(struct drbd_device *device, 478 struct list_head *head) 479 { 480 DEFINE_WAIT(wait); 481 482 /* avoids spin_lock/unlock 483 * and calling prepare_to_wait in the fast path */ 484 while (!list_empty(head)) { 485 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE); 486 spin_unlock_irq(&device->resource->req_lock); 487 io_schedule(); 488 finish_wait(&device->ee_wait, &wait); 489 spin_lock_irq(&device->resource->req_lock); 490 } 491 } 492 493 static void drbd_wait_ee_list_empty(struct drbd_device *device, 494 struct list_head *head) 495 { 496 spin_lock_irq(&device->resource->req_lock); 497 _drbd_wait_ee_list_empty(device, head); 498 spin_unlock_irq(&device->resource->req_lock); 499 } 500 501 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags) 502 { 503 struct kvec iov = { 504 .iov_base = buf, 505 .iov_len = size, 506 }; 507 struct msghdr msg = { 508 .msg_flags = (flags ? 
flags : MSG_WAITALL | MSG_NOSIGNAL) 509 }; 510 iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size); 511 return sock_recvmsg(sock, &msg, msg.msg_flags); 512 } 513 514 static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size) 515 { 516 int rv; 517 518 rv = drbd_recv_short(connection->data.socket, buf, size, 0); 519 520 if (rv < 0) { 521 if (rv == -ECONNRESET) 522 drbd_info(connection, "sock was reset by peer\n"); 523 else if (rv != -ERESTARTSYS) 524 drbd_err(connection, "sock_recvmsg returned %d\n", rv); 525 } else if (rv == 0) { 526 if (test_bit(DISCONNECT_SENT, &connection->flags)) { 527 long t; 528 rcu_read_lock(); 529 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10; 530 rcu_read_unlock(); 531 532 t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t); 533 534 if (t) 535 goto out; 536 } 537 drbd_info(connection, "sock was shut down by peer\n"); 538 } 539 540 if (rv != size) 541 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD); 542 543 out: 544 return rv; 545 } 546 547 static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size) 548 { 549 int err; 550 551 err = drbd_recv(connection, buf, size); 552 if (err != size) { 553 if (err >= 0) 554 err = -EIO; 555 } else 556 err = 0; 557 return err; 558 } 559 560 static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size) 561 { 562 int err; 563 564 err = drbd_recv_all(connection, buf, size); 565 if (err && !signal_pending(current)) 566 drbd_warn(connection, "short read (expected size %d)\n", (int)size); 567 return err; 568 } 569 570 /* quoting tcp(7): 571 * On individual connections, the socket buffer size must be set prior to the 572 * listen(2) or connect(2) calls in order to have it take effect. 573 * This is our wrapper to do so. 
574 */ 575 static void drbd_setbufsize(struct socket *sock, unsigned int snd, 576 unsigned int rcv) 577 { 578 /* open coded SO_SNDBUF, SO_RCVBUF */ 579 if (snd) { 580 sock->sk->sk_sndbuf = snd; 581 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 582 } 583 if (rcv) { 584 sock->sk->sk_rcvbuf = rcv; 585 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 586 } 587 } 588 589 static struct socket *drbd_try_connect(struct drbd_connection *connection) 590 { 591 const char *what; 592 struct socket *sock; 593 struct sockaddr_in6 src_in6; 594 struct sockaddr_in6 peer_in6; 595 struct net_conf *nc; 596 int err, peer_addr_len, my_addr_len; 597 int sndbuf_size, rcvbuf_size, connect_int; 598 int disconnect_on_error = 1; 599 600 rcu_read_lock(); 601 nc = rcu_dereference(connection->net_conf); 602 if (!nc) { 603 rcu_read_unlock(); 604 return NULL; 605 } 606 sndbuf_size = nc->sndbuf_size; 607 rcvbuf_size = nc->rcvbuf_size; 608 connect_int = nc->connect_int; 609 rcu_read_unlock(); 610 611 my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6)); 612 memcpy(&src_in6, &connection->my_addr, my_addr_len); 613 614 if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6) 615 src_in6.sin6_port = 0; 616 else 617 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */ 618 619 peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6)); 620 memcpy(&peer_in6, &connection->peer_addr, peer_addr_len); 621 622 what = "sock_create_kern"; 623 err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family, 624 SOCK_STREAM, IPPROTO_TCP, &sock); 625 if (err < 0) { 626 sock = NULL; 627 goto out; 628 } 629 630 sock->sk->sk_rcvtimeo = 631 sock->sk->sk_sndtimeo = connect_int * HZ; 632 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size); 633 634 /* explicitly bind to the configured IP as source IP 635 * for the outgoing connections. 636 * This is needed for multihomed hosts and to be 637 * able to use lo: interfaces for drbd. 638 * Make sure to use 0 as port number, so linux selects 639 * a free one dynamically. 640 */ 641 what = "bind before connect"; 642 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len); 643 if (err < 0) 644 goto out; 645 646 /* connect may fail, peer not yet available. 647 * stay C_WF_CONNECTION, don't go Disconnecting! 
*/ 648 disconnect_on_error = 0; 649 what = "connect"; 650 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0); 651 652 out: 653 if (err < 0) { 654 if (sock) { 655 sock_release(sock); 656 sock = NULL; 657 } 658 switch (-err) { 659 /* timeout, busy, signal pending */ 660 case ETIMEDOUT: case EAGAIN: case EINPROGRESS: 661 case EINTR: case ERESTARTSYS: 662 /* peer not (yet) available, network problem */ 663 case ECONNREFUSED: case ENETUNREACH: 664 case EHOSTDOWN: case EHOSTUNREACH: 665 disconnect_on_error = 0; 666 break; 667 default: 668 drbd_err(connection, "%s failed, err = %d\n", what, err); 669 } 670 if (disconnect_on_error) 671 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 672 } 673 674 return sock; 675 } 676 677 struct accept_wait_data { 678 struct drbd_connection *connection; 679 struct socket *s_listen; 680 struct completion door_bell; 681 void (*original_sk_state_change)(struct sock *sk); 682 683 }; 684 685 static void drbd_incoming_connection(struct sock *sk) 686 { 687 struct accept_wait_data *ad = sk->sk_user_data; 688 void (*state_change)(struct sock *sk); 689 690 state_change = ad->original_sk_state_change; 691 if (sk->sk_state == TCP_ESTABLISHED) 692 complete(&ad->door_bell); 693 state_change(sk); 694 } 695 696 static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad) 697 { 698 int err, sndbuf_size, rcvbuf_size, my_addr_len; 699 struct sockaddr_in6 my_addr; 700 struct socket *s_listen; 701 struct net_conf *nc; 702 const char *what; 703 704 rcu_read_lock(); 705 nc = rcu_dereference(connection->net_conf); 706 if (!nc) { 707 rcu_read_unlock(); 708 return -EIO; 709 } 710 sndbuf_size = nc->sndbuf_size; 711 rcvbuf_size = nc->rcvbuf_size; 712 rcu_read_unlock(); 713 714 my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6)); 715 memcpy(&my_addr, &connection->my_addr, my_addr_len); 716 717 what = "sock_create_kern"; 718 err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family, 719 SOCK_STREAM, IPPROTO_TCP, &s_listen); 720 if (err) { 721 s_listen = NULL; 722 goto out; 723 } 724 725 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ 726 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size); 727 728 what = "bind before listen"; 729 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len); 730 if (err < 0) 731 goto out; 732 733 ad->s_listen = s_listen; 734 write_lock_bh(&s_listen->sk->sk_callback_lock); 735 ad->original_sk_state_change = s_listen->sk->sk_state_change; 736 s_listen->sk->sk_state_change = drbd_incoming_connection; 737 s_listen->sk->sk_user_data = ad; 738 write_unlock_bh(&s_listen->sk->sk_callback_lock); 739 740 what = "listen"; 741 err = s_listen->ops->listen(s_listen, 5); 742 if (err < 0) 743 goto out; 744 745 return 0; 746 out: 747 if (s_listen) 748 sock_release(s_listen); 749 if (err < 0) { 750 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) { 751 drbd_err(connection, "%s failed, err = %d\n", what, err); 752 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 753 } 754 } 755 756 return -EIO; 757 } 758 759 static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad) 760 { 761 write_lock_bh(&sk->sk_callback_lock); 762 sk->sk_state_change = ad->original_sk_state_change; 763 sk->sk_user_data = NULL; 764 write_unlock_bh(&sk->sk_callback_lock); 765 } 766 767 static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad) 768 { 769 
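	/* Wait, with some random jitter on the configured connect_int, for the
	 * peer to connect to our prepared listen socket, then accept it.
	 * Returns the established socket, or NULL on timeout or error. */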
int timeo, connect_int, err = 0; 770 struct socket *s_estab = NULL; 771 struct net_conf *nc; 772 773 rcu_read_lock(); 774 nc = rcu_dereference(connection->net_conf); 775 if (!nc) { 776 rcu_read_unlock(); 777 return NULL; 778 } 779 connect_int = nc->connect_int; 780 rcu_read_unlock(); 781 782 timeo = connect_int * HZ; 783 /* 28.5% random jitter */ 784 timeo += get_random_u32_below(2) ? timeo / 7 : -timeo / 7; 785 786 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo); 787 if (err <= 0) 788 return NULL; 789 790 err = kernel_accept(ad->s_listen, &s_estab, 0); 791 if (err < 0) { 792 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) { 793 drbd_err(connection, "accept failed, err = %d\n", err); 794 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 795 } 796 } 797 798 if (s_estab) 799 unregister_state_change(s_estab->sk, ad); 800 801 return s_estab; 802 } 803 804 static int decode_header(struct drbd_connection *, void *, struct packet_info *); 805 806 static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock, 807 enum drbd_packet cmd) 808 { 809 if (!conn_prepare_command(connection, sock)) 810 return -EIO; 811 return conn_send_command(connection, sock, cmd, 0, NULL, 0); 812 } 813 814 static int receive_first_packet(struct drbd_connection *connection, struct socket *sock) 815 { 816 unsigned int header_size = drbd_header_size(connection); 817 struct packet_info pi; 818 struct net_conf *nc; 819 int err; 820 821 rcu_read_lock(); 822 nc = rcu_dereference(connection->net_conf); 823 if (!nc) { 824 rcu_read_unlock(); 825 return -EIO; 826 } 827 sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10; 828 rcu_read_unlock(); 829 830 err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0); 831 if (err != header_size) { 832 if (err >= 0) 833 err = -EIO; 834 return err; 835 } 836 err = decode_header(connection, connection->data.rbuf, &pi); 837 if (err) 838 return err; 839 return pi.cmd; 840 } 841 842 /** 843 * drbd_socket_okay() - Free the socket if its connection is not okay 844 * @sock: pointer to the pointer to the socket. 845 */ 846 static bool drbd_socket_okay(struct socket **sock) 847 { 848 int rr; 849 char tb[4]; 850 851 if (!*sock) 852 return false; 853 854 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); 855 856 if (rr > 0 || rr == -EAGAIN) { 857 return true; 858 } else { 859 sock_release(*sock); 860 *sock = NULL; 861 return false; 862 } 863 } 864 865 static bool connection_established(struct drbd_connection *connection, 866 struct socket **sock1, 867 struct socket **sock2) 868 { 869 struct net_conf *nc; 870 int timeout; 871 bool ok; 872 873 if (!*sock1 || !*sock2) 874 return false; 875 876 rcu_read_lock(); 877 nc = rcu_dereference(connection->net_conf); 878 timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10; 879 rcu_read_unlock(); 880 schedule_timeout_interruptible(timeout); 881 882 ok = drbd_socket_okay(sock1); 883 ok = drbd_socket_okay(sock2) && ok; 884 885 return ok; 886 } 887 888 /* Gets called if a connection is established, or if a new minor gets created 889 in a connection */ 890 int drbd_connected(struct drbd_peer_device *peer_device) 891 { 892 struct drbd_device *device = peer_device->device; 893 int err; 894 895 atomic_set(&device->packet_seq, 0); 896 device->peer_seq = 0; 897 898 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ? 
899 &peer_device->connection->cstate_mutex : 900 &device->own_state_mutex; 901 902 err = drbd_send_sync_param(peer_device); 903 if (!err) 904 err = drbd_send_sizes(peer_device, 0, 0); 905 if (!err) 906 err = drbd_send_uuids(peer_device); 907 if (!err) 908 err = drbd_send_current_state(peer_device); 909 clear_bit(USE_DEGR_WFC_T, &device->flags); 910 clear_bit(RESIZE_PENDING, &device->flags); 911 atomic_set(&device->ap_in_flight, 0); 912 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */ 913 return err; 914 } 915 916 /* 917 * return values: 918 * 1 yes, we have a valid connection 919 * 0 oops, did not work out, please try again 920 * -1 peer talks different language, 921 * no point in trying again, please go standalone. 922 * -2 We do not have a network config... 923 */ 924 static int conn_connect(struct drbd_connection *connection) 925 { 926 struct drbd_socket sock, msock; 927 struct drbd_peer_device *peer_device; 928 struct net_conf *nc; 929 int vnr, timeout, h; 930 bool discard_my_data, ok; 931 enum drbd_state_rv rv; 932 struct accept_wait_data ad = { 933 .connection = connection, 934 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell), 935 }; 936 937 clear_bit(DISCONNECT_SENT, &connection->flags); 938 if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS) 939 return -2; 940 941 mutex_init(&sock.mutex); 942 sock.sbuf = connection->data.sbuf; 943 sock.rbuf = connection->data.rbuf; 944 sock.socket = NULL; 945 mutex_init(&msock.mutex); 946 msock.sbuf = connection->meta.sbuf; 947 msock.rbuf = connection->meta.rbuf; 948 msock.socket = NULL; 949 950 /* Assume that the peer only understands protocol 80 until we know better. */ 951 connection->agreed_pro_version = 80; 952 953 if (prepare_listen_socket(connection, &ad)) 954 return 0; 955 956 do { 957 struct socket *s; 958 959 s = drbd_try_connect(connection); 960 if (s) { 961 if (!sock.socket) { 962 sock.socket = s; 963 send_first_packet(connection, &sock, P_INITIAL_DATA); 964 } else if (!msock.socket) { 965 clear_bit(RESOLVE_CONFLICTS, &connection->flags); 966 msock.socket = s; 967 send_first_packet(connection, &msock, P_INITIAL_META); 968 } else { 969 drbd_err(connection, "Logic error in conn_connect()\n"); 970 goto out_release_sockets; 971 } 972 } 973 974 if (connection_established(connection, &sock.socket, &msock.socket)) 975 break; 976 977 retry: 978 s = drbd_wait_for_connect(connection, &ad); 979 if (s) { 980 int fp = receive_first_packet(connection, s); 981 drbd_socket_okay(&sock.socket); 982 drbd_socket_okay(&msock.socket); 983 switch (fp) { 984 case P_INITIAL_DATA: 985 if (sock.socket) { 986 drbd_warn(connection, "initial packet S crossed\n"); 987 sock_release(sock.socket); 988 sock.socket = s; 989 goto randomize; 990 } 991 sock.socket = s; 992 break; 993 case P_INITIAL_META: 994 set_bit(RESOLVE_CONFLICTS, &connection->flags); 995 if (msock.socket) { 996 drbd_warn(connection, "initial packet M crossed\n"); 997 sock_release(msock.socket); 998 msock.socket = s; 999 goto randomize; 1000 } 1001 msock.socket = s; 1002 break; 1003 default: 1004 drbd_warn(connection, "Error receiving initial packet\n"); 1005 sock_release(s); 1006 randomize: 1007 if (get_random_u32_below(2)) 1008 goto retry; 1009 } 1010 } 1011 1012 if (connection->cstate <= C_DISCONNECTING) 1013 goto out_release_sockets; 1014 if (signal_pending(current)) { 1015 flush_signals(current); 1016 smp_rmb(); 1017 if (get_t_state(&connection->receiver) == EXITING) 1018 goto out_release_sockets; 1019 } 1020 1021 ok = 
connection_established(connection, &sock.socket, &msock.socket); 1022 } while (!ok); 1023 1024 if (ad.s_listen) 1025 sock_release(ad.s_listen); 1026 1027 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ 1028 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ 1029 1030 sock.socket->sk->sk_allocation = GFP_NOIO; 1031 msock.socket->sk->sk_allocation = GFP_NOIO; 1032 1033 sock.socket->sk->sk_use_task_frag = false; 1034 msock.socket->sk->sk_use_task_frag = false; 1035 1036 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; 1037 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE; 1038 1039 /* NOT YET ... 1040 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10; 1041 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 1042 * first set it to the P_CONNECTION_FEATURES timeout, 1043 * which we set to 4x the configured ping_timeout. */ 1044 rcu_read_lock(); 1045 nc = rcu_dereference(connection->net_conf); 1046 1047 sock.socket->sk->sk_sndtimeo = 1048 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10; 1049 1050 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ; 1051 timeout = nc->timeout * HZ / 10; 1052 discard_my_data = nc->discard_my_data; 1053 rcu_read_unlock(); 1054 1055 msock.socket->sk->sk_sndtimeo = timeout; 1056 1057 /* we don't want delays. 1058 * we use TCP_CORK where appropriate, though */ 1059 tcp_sock_set_nodelay(sock.socket->sk); 1060 tcp_sock_set_nodelay(msock.socket->sk); 1061 1062 connection->data.socket = sock.socket; 1063 connection->meta.socket = msock.socket; 1064 connection->last_received = jiffies; 1065 1066 h = drbd_do_features(connection); 1067 if (h <= 0) 1068 return h; 1069 1070 if (connection->cram_hmac_tfm) { 1071 /* drbd_request_state(device, NS(conn, WFAuth)); */ 1072 switch (drbd_do_auth(connection)) { 1073 case -1: 1074 drbd_err(connection, "Authentication of peer failed\n"); 1075 return -1; 1076 case 0: 1077 drbd_err(connection, "Authentication of peer failed, trying again.\n"); 1078 return 0; 1079 } 1080 } 1081 1082 connection->data.socket->sk->sk_sndtimeo = timeout; 1083 connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 1084 1085 if (drbd_send_protocol(connection) == -EOPNOTSUPP) 1086 return -1; 1087 1088 /* Prevent a race between resync-handshake and 1089 * being promoted to Primary. 1090 * 1091 * Grab and release the state mutex, so we know that any current 1092 * drbd_set_role() is finished, and any incoming drbd_set_role 1093 * will see the STATE_SENT flag, and wait for it to be cleared. 
1094 */ 1095 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) 1096 mutex_lock(peer_device->device->state_mutex); 1097 1098 /* avoid a race with conn_request_state( C_DISCONNECTING ) */ 1099 spin_lock_irq(&connection->resource->req_lock); 1100 set_bit(STATE_SENT, &connection->flags); 1101 spin_unlock_irq(&connection->resource->req_lock); 1102 1103 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) 1104 mutex_unlock(peer_device->device->state_mutex); 1105 1106 rcu_read_lock(); 1107 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 1108 struct drbd_device *device = peer_device->device; 1109 kref_get(&device->kref); 1110 rcu_read_unlock(); 1111 1112 if (discard_my_data) 1113 set_bit(DISCARD_MY_DATA, &device->flags); 1114 else 1115 clear_bit(DISCARD_MY_DATA, &device->flags); 1116 1117 drbd_connected(peer_device); 1118 kref_put(&device->kref, drbd_destroy_device); 1119 rcu_read_lock(); 1120 } 1121 rcu_read_unlock(); 1122 1123 rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE); 1124 if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) { 1125 clear_bit(STATE_SENT, &connection->flags); 1126 return 0; 1127 } 1128 1129 drbd_thread_start(&connection->ack_receiver); 1130 /* opencoded create_singlethread_workqueue(), 1131 * to be able to use format string arguments */ 1132 connection->ack_sender = 1133 alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name); 1134 if (!connection->ack_sender) { 1135 drbd_err(connection, "Failed to create workqueue ack_sender\n"); 1136 return 0; 1137 } 1138 1139 mutex_lock(&connection->resource->conf_update); 1140 /* The discard_my_data flag is a single-shot modifier to the next 1141 * connection attempt, the handshake of which is now well underway. 1142 * No need for rcu style copying of the whole struct 1143 * just to clear a single value. 
*/ 1144 connection->net_conf->discard_my_data = 0; 1145 mutex_unlock(&connection->resource->conf_update); 1146 1147 return h; 1148 1149 out_release_sockets: 1150 if (ad.s_listen) 1151 sock_release(ad.s_listen); 1152 if (sock.socket) 1153 sock_release(sock.socket); 1154 if (msock.socket) 1155 sock_release(msock.socket); 1156 return -1; 1157 } 1158 1159 static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi) 1160 { 1161 unsigned int header_size = drbd_header_size(connection); 1162 1163 if (header_size == sizeof(struct p_header100) && 1164 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) { 1165 struct p_header100 *h = header; 1166 if (h->pad != 0) { 1167 drbd_err(connection, "Header padding is not zero\n"); 1168 return -EINVAL; 1169 } 1170 pi->vnr = be16_to_cpu(h->volume); 1171 pi->cmd = be16_to_cpu(h->command); 1172 pi->size = be32_to_cpu(h->length); 1173 } else if (header_size == sizeof(struct p_header95) && 1174 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) { 1175 struct p_header95 *h = header; 1176 pi->cmd = be16_to_cpu(h->command); 1177 pi->size = be32_to_cpu(h->length); 1178 pi->vnr = 0; 1179 } else if (header_size == sizeof(struct p_header80) && 1180 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) { 1181 struct p_header80 *h = header; 1182 pi->cmd = be16_to_cpu(h->command); 1183 pi->size = be16_to_cpu(h->length); 1184 pi->vnr = 0; 1185 } else { 1186 drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n", 1187 be32_to_cpu(*(__be32 *)header), 1188 connection->agreed_pro_version); 1189 return -EINVAL; 1190 } 1191 pi->data = header + header_size; 1192 return 0; 1193 } 1194 1195 static void drbd_unplug_all_devices(struct drbd_connection *connection) 1196 { 1197 if (current->plug == &connection->receiver_plug) { 1198 blk_finish_plug(&connection->receiver_plug); 1199 blk_start_plug(&connection->receiver_plug); 1200 } /* else: maybe just schedule() ?? */ 1201 } 1202 1203 static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi) 1204 { 1205 void *buffer = connection->data.rbuf; 1206 int err; 1207 1208 err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection)); 1209 if (err) 1210 return err; 1211 1212 err = decode_header(connection, buffer, pi); 1213 connection->last_received = jiffies; 1214 1215 return err; 1216 } 1217 1218 static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi) 1219 { 1220 void *buffer = connection->data.rbuf; 1221 unsigned int size = drbd_header_size(connection); 1222 int err; 1223 1224 err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT); 1225 if (err != size) { 1226 /* If we have nothing in the receive buffer now, to reduce 1227 * application latency, try to drain the backend queues as 1228 * quickly as possible, and let remote TCP know what we have 1229 * received so far. */ 1230 if (err == -EAGAIN) { 1231 tcp_sock_set_quickack(connection->data.socket->sk, 2); 1232 drbd_unplug_all_devices(connection); 1233 } 1234 if (err > 0) { 1235 buffer += err; 1236 size -= err; 1237 } 1238 err = drbd_recv_all_warn(connection, buffer, size); 1239 if (err) 1240 return err; 1241 } 1242 1243 err = decode_header(connection, connection->data.rbuf, pi); 1244 connection->last_received = jiffies; 1245 1246 return err; 1247 } 1248 /* This is blkdev_issue_flush, but asynchronous. 1249 * We want to submit to all component volumes in parallel, 1250 * then wait for all completions. 
 */
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

static void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_status) {
		ctx->error = blk_status_to_errno(bio->bi_status);
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
				    REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);

	if (!octx) {
		drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now? disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

		atomic_set(&ctx.pending, 1);
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @connection: DRBD connection.
 * @epoch: Epoch object.
 * @ev: Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
	struct disk_conf *dc;

	dc = rcu_dereference(bdev->disk_conf);

	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
		wo = WO_DRAIN_IO;
	if (wo == WO_DRAIN_IO && !dc->disk_drain)
		wo = WO_NONE;

	return wo;
}

/*
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @wo: Write ordering method to try.
1448 */ 1449 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, 1450 enum write_ordering_e wo) 1451 { 1452 struct drbd_device *device; 1453 enum write_ordering_e pwo; 1454 int vnr; 1455 static char *write_ordering_str[] = { 1456 [WO_NONE] = "none", 1457 [WO_DRAIN_IO] = "drain", 1458 [WO_BDEV_FLUSH] = "flush", 1459 }; 1460 1461 pwo = resource->write_ordering; 1462 if (wo != WO_BDEV_FLUSH) 1463 wo = min(pwo, wo); 1464 rcu_read_lock(); 1465 idr_for_each_entry(&resource->devices, device, vnr) { 1466 if (get_ldev(device)) { 1467 wo = max_allowed_wo(device->ldev, wo); 1468 if (device->ldev == bdev) 1469 bdev = NULL; 1470 put_ldev(device); 1471 } 1472 } 1473 1474 if (bdev) 1475 wo = max_allowed_wo(bdev, wo); 1476 1477 rcu_read_unlock(); 1478 1479 resource->write_ordering = wo; 1480 if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH) 1481 drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]); 1482 } 1483 1484 /* 1485 * Mapping "discard" to ZEROOUT with UNMAP does not work for us: 1486 * Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it 1487 * will directly go to fallback mode, submitting normal writes, and 1488 * never even try to UNMAP. 1489 * 1490 * And dm-thin does not do this (yet), mostly because in general it has 1491 * to assume that "skip_block_zeroing" is set. See also: 1492 * https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html 1493 * https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html 1494 * 1495 * We *may* ignore the discard-zeroes-data setting, if so configured. 1496 * 1497 * Assumption is that this "discard_zeroes_data=0" is only because the backend 1498 * may ignore partial unaligned discards. 1499 * 1500 * LVM/DM thin as of at least 1501 * LVM version: 2.02.115(2)-RHEL7 (2015-01-28) 1502 * Library version: 1.02.93-RHEL7 (2015-01-28) 1503 * Driver version: 4.29.0 1504 * still behaves this way. 1505 * 1506 * For unaligned (wrt. alignment and granularity) or too small discards, 1507 * we zero-out the initial (and/or) trailing unaligned partial chunks, 1508 * but discard all the aligned full chunks. 1509 * 1510 * At least for LVM/DM thin, with skip_block_zeroing=false, 1511 * the result is effectively "discard_zeroes_data=1". 1512 */ 1513 /* flags: EE_TRIM|EE_ZEROOUT */ 1514 int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags) 1515 { 1516 struct block_device *bdev = device->ldev->backing_bdev; 1517 sector_t tmp, nr; 1518 unsigned int max_discard_sectors, granularity; 1519 int alignment; 1520 int err = 0; 1521 1522 if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM)) 1523 goto zero_out; 1524 1525 /* Zero-sector (unknown) and one-sector granularities are the same. 
*/ 1526 granularity = max(bdev_discard_granularity(bdev) >> 9, 1U); 1527 alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; 1528 1529 max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22)); 1530 max_discard_sectors -= max_discard_sectors % granularity; 1531 if (unlikely(!max_discard_sectors)) 1532 goto zero_out; 1533 1534 if (nr_sectors < granularity) 1535 goto zero_out; 1536 1537 tmp = start; 1538 if (sector_div(tmp, granularity) != alignment) { 1539 if (nr_sectors < 2*granularity) 1540 goto zero_out; 1541 /* start + gran - (start + gran - align) % gran */ 1542 tmp = start + granularity - alignment; 1543 tmp = start + granularity - sector_div(tmp, granularity); 1544 1545 nr = tmp - start; 1546 /* don't flag BLKDEV_ZERO_NOUNMAP, we don't know how many 1547 * layers are below us, some may have smaller granularity */ 1548 err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0); 1549 nr_sectors -= nr; 1550 start = tmp; 1551 } 1552 while (nr_sectors >= max_discard_sectors) { 1553 err |= blkdev_issue_discard(bdev, start, max_discard_sectors, 1554 GFP_NOIO); 1555 nr_sectors -= max_discard_sectors; 1556 start += max_discard_sectors; 1557 } 1558 if (nr_sectors) { 1559 /* max_discard_sectors is unsigned int (and a multiple of 1560 * granularity, we made sure of that above already); 1561 * nr is < max_discard_sectors; 1562 * I don't need sector_div here, even though nr is sector_t */ 1563 nr = nr_sectors; 1564 nr -= (unsigned int)nr % granularity; 1565 if (nr) { 1566 err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO); 1567 nr_sectors -= nr; 1568 start += nr; 1569 } 1570 } 1571 zero_out: 1572 if (nr_sectors) { 1573 err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 1574 (flags & EE_TRIM) ? 0 : BLKDEV_ZERO_NOUNMAP); 1575 } 1576 return err != 0; 1577 } 1578 1579 static bool can_do_reliable_discards(struct drbd_device *device) 1580 { 1581 struct disk_conf *dc; 1582 bool can_do; 1583 1584 if (!bdev_max_discard_sectors(device->ldev->backing_bdev)) 1585 return false; 1586 1587 rcu_read_lock(); 1588 dc = rcu_dereference(device->ldev->disk_conf); 1589 can_do = dc->discard_zeroes_if_aligned; 1590 rcu_read_unlock(); 1591 return can_do; 1592 } 1593 1594 static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req) 1595 { 1596 /* If the backend cannot discard, or does not guarantee 1597 * read-back zeroes in discarded ranges, we fall back to 1598 * zero-out. Unless configuration specifically requested 1599 * otherwise. */ 1600 if (!can_do_reliable_discards(device)) 1601 peer_req->flags |= EE_ZEROOUT; 1602 1603 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, 1604 peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM))) 1605 peer_req->flags |= EE_WAS_ERROR; 1606 drbd_endio_write_sec_final(peer_req); 1607 } 1608 1609 static int peer_request_fault_type(struct drbd_peer_request *peer_req) 1610 { 1611 if (peer_req_op(peer_req) == REQ_OP_READ) { 1612 return peer_req->flags & EE_APPLICATION ? 1613 DRBD_FAULT_DT_RD : DRBD_FAULT_RS_RD; 1614 } else { 1615 return peer_req->flags & EE_APPLICATION ? 1616 DRBD_FAULT_DT_WR : DRBD_FAULT_RS_WR; 1617 } 1618 } 1619 1620 /** 1621 * drbd_submit_peer_request() 1622 * @peer_req: peer request 1623 * 1624 * May spread the pages to multiple bios, 1625 * depending on bio_add_page restrictions. 1626 * 1627 * Returns 0 if all bios have been submitted, 1628 * -ENOMEM if we could not allocate enough bios, 1629 * -ENOSPC (any better suggestion?) 
if we have not been able to bio_add_page a 1630 * single page to an empty bio (which should never happen and likely indicates 1631 * that the lower level IO stack is in some way broken). This has been observed 1632 * on certain Xen deployments. 1633 */ 1634 /* TODO allocate from our own bio_set. */ 1635 int drbd_submit_peer_request(struct drbd_peer_request *peer_req) 1636 { 1637 struct drbd_device *device = peer_req->peer_device->device; 1638 struct bio *bios = NULL; 1639 struct bio *bio; 1640 struct page *page = peer_req->pages; 1641 sector_t sector = peer_req->i.sector; 1642 unsigned int data_size = peer_req->i.size; 1643 unsigned int n_bios = 0; 1644 unsigned int nr_pages = PFN_UP(data_size); 1645 1646 /* TRIM/DISCARD: for now, always use the helper function 1647 * blkdev_issue_zeroout(..., discard=true). 1648 * It's synchronous, but it does the right thing wrt. bio splitting. 1649 * Correctness first, performance later. Next step is to code an 1650 * asynchronous variant of the same. 1651 */ 1652 if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) { 1653 /* wait for all pending IO completions, before we start 1654 * zeroing things out. */ 1655 conn_wait_active_ee_empty(peer_req->peer_device->connection); 1656 /* add it to the active list now, 1657 * so we can find it to present it in debugfs */ 1658 peer_req->submit_jif = jiffies; 1659 peer_req->flags |= EE_SUBMITTED; 1660 1661 /* If this was a resync request from receive_rs_deallocated(), 1662 * it is already on the sync_ee list */ 1663 if (list_empty(&peer_req->w.list)) { 1664 spin_lock_irq(&device->resource->req_lock); 1665 list_add_tail(&peer_req->w.list, &device->active_ee); 1666 spin_unlock_irq(&device->resource->req_lock); 1667 } 1668 1669 drbd_issue_peer_discard_or_zero_out(device, peer_req); 1670 return 0; 1671 } 1672 1673 /* In most cases, we will only need one bio. But in case the lower 1674 * level restrictions happen to be different at this offset on this 1675 * side than those of the sending peer, we may need to submit the 1676 * request in more than one bio. 1677 * 1678 * Plain bio_alloc is good enough here, this is no DRBD internally 1679 * generated bio, but a bio allocated on behalf of the peer. 1680 */ 1681 next_bio: 1682 /* _DISCARD, _WRITE_ZEROES handled above. 1683 * REQ_OP_FLUSH (empty flush) not expected, 1684 * should have been mapped to a "drbd protocol barrier". 1685 * REQ_OP_SECURE_ERASE: I don't see how we could ever support that. 
1686 */ 1687 if (!(peer_req_op(peer_req) == REQ_OP_WRITE || 1688 peer_req_op(peer_req) == REQ_OP_READ)) { 1689 drbd_err(device, "Invalid bio op received: 0x%x\n", peer_req->opf); 1690 return -EINVAL; 1691 } 1692 1693 bio = bio_alloc(device->ldev->backing_bdev, nr_pages, peer_req->opf, GFP_NOIO); 1694 /* > peer_req->i.sector, unless this is the first bio */ 1695 bio->bi_iter.bi_sector = sector; 1696 bio->bi_private = peer_req; 1697 bio->bi_end_io = drbd_peer_request_endio; 1698 1699 bio->bi_next = bios; 1700 bios = bio; 1701 ++n_bios; 1702 1703 page_chain_for_each(page) { 1704 unsigned len = min_t(unsigned, data_size, PAGE_SIZE); 1705 if (!bio_add_page(bio, page, len, 0)) 1706 goto next_bio; 1707 data_size -= len; 1708 sector += len >> 9; 1709 --nr_pages; 1710 } 1711 D_ASSERT(device, data_size == 0); 1712 D_ASSERT(device, page == NULL); 1713 1714 atomic_set(&peer_req->pending_bios, n_bios); 1715 /* for debugfs: update timestamp, mark as submitted */ 1716 peer_req->submit_jif = jiffies; 1717 peer_req->flags |= EE_SUBMITTED; 1718 do { 1719 bio = bios; 1720 bios = bios->bi_next; 1721 bio->bi_next = NULL; 1722 1723 drbd_submit_bio_noacct(device, peer_request_fault_type(peer_req), bio); 1724 } while (bios); 1725 return 0; 1726 } 1727 1728 static void drbd_remove_epoch_entry_interval(struct drbd_device *device, 1729 struct drbd_peer_request *peer_req) 1730 { 1731 struct drbd_interval *i = &peer_req->i; 1732 1733 drbd_remove_interval(&device->write_requests, i); 1734 drbd_clear_interval(i); 1735 1736 /* Wake up any processes waiting for this peer request to complete. */ 1737 if (i->waiting) 1738 wake_up(&device->misc_wait); 1739 } 1740 1741 static void conn_wait_active_ee_empty(struct drbd_connection *connection) 1742 { 1743 struct drbd_peer_device *peer_device; 1744 int vnr; 1745 1746 rcu_read_lock(); 1747 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 1748 struct drbd_device *device = peer_device->device; 1749 1750 kref_get(&device->kref); 1751 rcu_read_unlock(); 1752 drbd_wait_ee_list_empty(device, &device->active_ee); 1753 kref_put(&device->kref, drbd_destroy_device); 1754 rcu_read_lock(); 1755 } 1756 rcu_read_unlock(); 1757 } 1758 1759 static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi) 1760 { 1761 int rv; 1762 struct p_barrier *p = pi->data; 1763 struct drbd_epoch *epoch; 1764 1765 /* FIXME these are unacked on connection, 1766 * not a specific (peer)device. 1767 */ 1768 connection->current_epoch->barrier_nr = p->barrier; 1769 connection->current_epoch->connection = connection; 1770 rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR); 1771 1772 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from 1773 * the activity log, which means it would not be resynced in case the 1774 * R_PRIMARY crashes now. 1775 * Therefore we must send the barrier_ack after the barrier request was 1776 * completed. */ 1777 switch (connection->resource->write_ordering) { 1778 case WO_NONE: 1779 if (rv == FE_RECYCLED) 1780 return 0; 1781 1782 /* receiver context, in the writeout path of the other node. 
1783 * avoid potential distributed deadlock */ 1784 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1785 if (epoch) 1786 break; 1787 else 1788 drbd_warn(connection, "Allocation of an epoch failed, slowing down\n"); 1789 fallthrough; 1790 1791 case WO_BDEV_FLUSH: 1792 case WO_DRAIN_IO: 1793 conn_wait_active_ee_empty(connection); 1794 drbd_flush(connection); 1795 1796 if (atomic_read(&connection->current_epoch->epoch_size)) { 1797 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1798 if (epoch) 1799 break; 1800 } 1801 1802 return 0; 1803 default: 1804 drbd_err(connection, "Strangeness in connection->write_ordering %d\n", 1805 connection->resource->write_ordering); 1806 return -EIO; 1807 } 1808 1809 epoch->flags = 0; 1810 atomic_set(&epoch->epoch_size, 0); 1811 atomic_set(&epoch->active, 0); 1812 1813 spin_lock(&connection->epoch_lock); 1814 if (atomic_read(&connection->current_epoch->epoch_size)) { 1815 list_add(&epoch->list, &connection->current_epoch->list); 1816 connection->current_epoch = epoch; 1817 connection->epochs++; 1818 } else { 1819 /* The current_epoch got recycled while we allocated this one... */ 1820 kfree(epoch); 1821 } 1822 spin_unlock(&connection->epoch_lock); 1823 1824 return 0; 1825 } 1826 1827 /* quick wrapper in case payload size != request_size (write same) */ 1828 static void drbd_csum_ee_size(struct crypto_shash *h, 1829 struct drbd_peer_request *r, void *d, 1830 unsigned int payload_size) 1831 { 1832 unsigned int tmp = r->i.size; 1833 r->i.size = payload_size; 1834 drbd_csum_ee(h, r, d); 1835 r->i.size = tmp; 1836 } 1837 1838 /* used from receive_RSDataReply (recv_resync_read) 1839 * and from receive_Data. 1840 * data_size: actual payload ("data in") 1841 * for normal writes that is bi_size. 1842 * for discards, that is zero. 1843 * for write same, it is logical_block_size. 1844 * both trim and write same have the bi_size ("data len to be affected") 1845 * as extra argument in the packet header. 1846 */ 1847 static struct drbd_peer_request * 1848 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, 1849 struct packet_info *pi) __must_hold(local) 1850 { 1851 struct drbd_device *device = peer_device->device; 1852 const sector_t capacity = get_capacity(device->vdisk); 1853 struct drbd_peer_request *peer_req; 1854 struct page *page; 1855 int digest_size, err; 1856 unsigned int data_size = pi->size, ds; 1857 void *dig_in = peer_device->connection->int_dig_in; 1858 void *dig_vv = peer_device->connection->int_dig_vv; 1859 unsigned long *data; 1860 struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL; 1861 struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL; 1862 1863 digest_size = 0; 1864 if (!trim && peer_device->connection->peer_integrity_tfm) { 1865 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm); 1866 /* 1867 * FIXME: Receive the incoming digest into the receive buffer 1868 * here, together with its struct p_data? 1869 */ 1870 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); 1871 if (err) 1872 return NULL; 1873 data_size -= digest_size; 1874 } 1875 1876 /* assume request_size == data_size, but special case trim. 
*/
1877 ds = data_size;
1878 if (trim) {
1879 if (!expect(peer_device, data_size == 0))
1880 return NULL;
1881 ds = be32_to_cpu(trim->size);
1882 } else if (zeroes) {
1883 if (!expect(peer_device, data_size == 0))
1884 return NULL;
1885 ds = be32_to_cpu(zeroes->size);
1886 }
1887
1888 if (!expect(peer_device, IS_ALIGNED(ds, 512)))
1889 return NULL;
1890 if (trim || zeroes) {
1891 if (!expect(peer_device, ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1892 return NULL;
1893 } else if (!expect(peer_device, ds <= DRBD_MAX_BIO_SIZE))
1894 return NULL;
1895
1896 /* even though we trust our peer,
1897 * we sometimes have to double check. */
1898 if (sector + (ds>>9) > capacity) {
1899 drbd_err(device, "request from peer beyond end of local disk: "
1900 "capacity: %llus < sector: %llus + size: %u\n",
1901 (unsigned long long)capacity,
1902 (unsigned long long)sector, ds);
1903 return NULL;
1904 }
1905
1906 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1907 * "criss-cross" setup, that might cause write-out on some other DRBD,
1908 * which in turn might block on the other node at this very place. */
1909 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1910 if (!peer_req)
1911 return NULL;
1912
1913 peer_req->flags |= EE_WRITE;
1914 if (trim) {
1915 peer_req->flags |= EE_TRIM;
1916 return peer_req;
1917 }
1918 if (zeroes) {
1919 peer_req->flags |= EE_ZEROOUT;
1920 return peer_req;
1921 }
1922
1923 /* receive payload size bytes into page chain */
1924 ds = data_size;
1925 page = peer_req->pages;
1926 page_chain_for_each(page) {
1927 unsigned len = min_t(int, ds, PAGE_SIZE);
1928 data = kmap(page);
1929 err = drbd_recv_all_warn(peer_device->connection, data, len);
1930 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1931 drbd_err(device, "Fault injection: Corrupting data on receive\n");
1932 data[0] = data[0] ^ (unsigned long)-1;
1933 }
1934 kunmap(page);
1935 if (err) {
1936 drbd_free_peer_req(device, peer_req);
1937 return NULL;
1938 }
1939 ds -= len;
1940 }
1941
1942 if (digest_size) {
1943 drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1944 if (memcmp(dig_in, dig_vv, digest_size)) {
1945 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1946 (unsigned long long)sector, data_size);
1947 drbd_free_peer_req(device, peer_req);
1948 return NULL;
1949 }
1950 }
1951 device->recv_cnt += data_size >> 9;
1952 return peer_req;
1953 }
1954
1955 /* drbd_drain_block() just takes a data block
1956 * out of the socket input buffer, and discards it.
1957 */ 1958 static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size) 1959 { 1960 struct page *page; 1961 int err = 0; 1962 void *data; 1963 1964 if (!data_size) 1965 return 0; 1966 1967 page = drbd_alloc_pages(peer_device, 1, 1); 1968 1969 data = kmap(page); 1970 while (data_size) { 1971 unsigned int len = min_t(int, data_size, PAGE_SIZE); 1972 1973 err = drbd_recv_all_warn(peer_device->connection, data, len); 1974 if (err) 1975 break; 1976 data_size -= len; 1977 } 1978 kunmap(page); 1979 drbd_free_pages(peer_device->device, page, 0); 1980 return err; 1981 } 1982 1983 static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req, 1984 sector_t sector, int data_size) 1985 { 1986 struct bio_vec bvec; 1987 struct bvec_iter iter; 1988 struct bio *bio; 1989 int digest_size, err, expect; 1990 void *dig_in = peer_device->connection->int_dig_in; 1991 void *dig_vv = peer_device->connection->int_dig_vv; 1992 1993 digest_size = 0; 1994 if (peer_device->connection->peer_integrity_tfm) { 1995 digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm); 1996 err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); 1997 if (err) 1998 return err; 1999 data_size -= digest_size; 2000 } 2001 2002 /* optimistically update recv_cnt. if receiving fails below, 2003 * we disconnect anyways, and counters will be reset. */ 2004 peer_device->device->recv_cnt += data_size>>9; 2005 2006 bio = req->master_bio; 2007 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); 2008 2009 bio_for_each_segment(bvec, bio, iter) { 2010 void *mapped = bvec_kmap_local(&bvec); 2011 expect = min_t(int, data_size, bvec.bv_len); 2012 err = drbd_recv_all_warn(peer_device->connection, mapped, expect); 2013 kunmap_local(mapped); 2014 if (err) 2015 return err; 2016 data_size -= expect; 2017 } 2018 2019 if (digest_size) { 2020 drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv); 2021 if (memcmp(dig_in, dig_vv, digest_size)) { 2022 drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n"); 2023 return -EINVAL; 2024 } 2025 } 2026 2027 D_ASSERT(peer_device->device, data_size == 0); 2028 return 0; 2029 } 2030 2031 /* 2032 * e_end_resync_block() is called in ack_sender context via 2033 * drbd_finish_peer_reqs(). 
2034 */ 2035 static int e_end_resync_block(struct drbd_work *w, int unused) 2036 { 2037 struct drbd_peer_request *peer_req = 2038 container_of(w, struct drbd_peer_request, w); 2039 struct drbd_peer_device *peer_device = peer_req->peer_device; 2040 struct drbd_device *device = peer_device->device; 2041 sector_t sector = peer_req->i.sector; 2042 int err; 2043 2044 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); 2045 2046 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { 2047 drbd_set_in_sync(peer_device, sector, peer_req->i.size); 2048 err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req); 2049 } else { 2050 /* Record failure to sync */ 2051 drbd_rs_failed_io(peer_device, sector, peer_req->i.size); 2052 2053 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); 2054 } 2055 dec_unacked(device); 2056 2057 return err; 2058 } 2059 2060 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, 2061 struct packet_info *pi) __releases(local) 2062 { 2063 struct drbd_device *device = peer_device->device; 2064 struct drbd_peer_request *peer_req; 2065 2066 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); 2067 if (!peer_req) 2068 goto fail; 2069 2070 dec_rs_pending(peer_device); 2071 2072 inc_unacked(device); 2073 /* corresponding dec_unacked() in e_end_resync_block() 2074 * respective _drbd_clear_done_ee */ 2075 2076 peer_req->w.cb = e_end_resync_block; 2077 peer_req->opf = REQ_OP_WRITE; 2078 peer_req->submit_jif = jiffies; 2079 2080 spin_lock_irq(&device->resource->req_lock); 2081 list_add_tail(&peer_req->w.list, &device->sync_ee); 2082 spin_unlock_irq(&device->resource->req_lock); 2083 2084 atomic_add(pi->size >> 9, &device->rs_sect_ev); 2085 if (drbd_submit_peer_request(peer_req) == 0) 2086 return 0; 2087 2088 /* don't care for the reason here */ 2089 drbd_err(device, "submit failed, triggering re-connect\n"); 2090 spin_lock_irq(&device->resource->req_lock); 2091 list_del(&peer_req->w.list); 2092 spin_unlock_irq(&device->resource->req_lock); 2093 2094 drbd_free_peer_req(device, peer_req); 2095 fail: 2096 put_ldev(device); 2097 return -EIO; 2098 } 2099 2100 static struct drbd_request * 2101 find_request(struct drbd_device *device, struct rb_root *root, u64 id, 2102 sector_t sector, bool missing_ok, const char *func) 2103 { 2104 struct drbd_request *req; 2105 2106 /* Request object according to our peer */ 2107 req = (struct drbd_request *)(unsigned long)id; 2108 if (drbd_contains_interval(root, sector, &req->i) && req->i.local) 2109 return req; 2110 if (!missing_ok) { 2111 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func, 2112 (unsigned long)id, (unsigned long long)sector); 2113 } 2114 return NULL; 2115 } 2116 2117 static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi) 2118 { 2119 struct drbd_peer_device *peer_device; 2120 struct drbd_device *device; 2121 struct drbd_request *req; 2122 sector_t sector; 2123 int err; 2124 struct p_data *p = pi->data; 2125 2126 peer_device = conn_peer_device(connection, pi->vnr); 2127 if (!peer_device) 2128 return -EIO; 2129 device = peer_device->device; 2130 2131 sector = be64_to_cpu(p->sector); 2132 2133 spin_lock_irq(&device->resource->req_lock); 2134 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); 2135 spin_unlock_irq(&device->resource->req_lock); 2136 if (unlikely(!req)) 2137 return -EIO; 2138 2139 err = recv_dless_read(peer_device, req, sector, pi->size); 2140 if (!err) 2141 req_mod(req, DATA_RECEIVED, 
peer_device); 2142 /* else: nothing. handled from drbd_disconnect... 2143 * I don't think we may complete this just yet 2144 * in case we are "on-disconnect: freeze" */ 2145 2146 return err; 2147 } 2148 2149 static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi) 2150 { 2151 struct drbd_peer_device *peer_device; 2152 struct drbd_device *device; 2153 sector_t sector; 2154 int err; 2155 struct p_data *p = pi->data; 2156 2157 peer_device = conn_peer_device(connection, pi->vnr); 2158 if (!peer_device) 2159 return -EIO; 2160 device = peer_device->device; 2161 2162 sector = be64_to_cpu(p->sector); 2163 D_ASSERT(device, p->block_id == ID_SYNCER); 2164 2165 if (get_ldev(device)) { 2166 /* data is submitted to disk within recv_resync_read. 2167 * corresponding put_ldev done below on error, 2168 * or in drbd_peer_request_endio. */ 2169 err = recv_resync_read(peer_device, sector, pi); 2170 } else { 2171 if (drbd_ratelimit()) 2172 drbd_err(device, "Can not write resync data to local disk.\n"); 2173 2174 err = drbd_drain_block(peer_device, pi->size); 2175 2176 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); 2177 } 2178 2179 atomic_add(pi->size >> 9, &device->rs_sect_in); 2180 2181 return err; 2182 } 2183 2184 static void restart_conflicting_writes(struct drbd_device *device, 2185 sector_t sector, int size) 2186 { 2187 struct drbd_interval *i; 2188 struct drbd_request *req; 2189 2190 drbd_for_each_overlap(i, &device->write_requests, sector, size) { 2191 if (!i->local) 2192 continue; 2193 req = container_of(i, struct drbd_request, i); 2194 if (req->rq_state & RQ_LOCAL_PENDING || 2195 !(req->rq_state & RQ_POSTPONED)) 2196 continue; 2197 /* as it is RQ_POSTPONED, this will cause it to 2198 * be queued on the retry workqueue. */ 2199 __req_mod(req, CONFLICT_RESOLVED, NULL, NULL); 2200 } 2201 } 2202 2203 /* 2204 * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs(). 2205 */ 2206 static int e_end_block(struct drbd_work *w, int cancel) 2207 { 2208 struct drbd_peer_request *peer_req = 2209 container_of(w, struct drbd_peer_request, w); 2210 struct drbd_peer_device *peer_device = peer_req->peer_device; 2211 struct drbd_device *device = peer_device->device; 2212 sector_t sector = peer_req->i.sector; 2213 int err = 0, pcmd; 2214 2215 if (peer_req->flags & EE_SEND_WRITE_ACK) { 2216 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { 2217 pcmd = (device->state.conn >= C_SYNC_SOURCE && 2218 device->state.conn <= C_PAUSED_SYNC_T && 2219 peer_req->flags & EE_MAY_SET_IN_SYNC) ? 2220 P_RS_WRITE_ACK : P_WRITE_ACK; 2221 err = drbd_send_ack(peer_device, pcmd, peer_req); 2222 if (pcmd == P_RS_WRITE_ACK) 2223 drbd_set_in_sync(peer_device, sector, peer_req->i.size); 2224 } else { 2225 err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); 2226 /* we expect it to be marked out of sync anyways... 2227 * maybe assert this? */ 2228 } 2229 dec_unacked(device); 2230 } 2231 2232 /* we delete from the conflict detection hash _after_ we sent out the 2233 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. 
*/ 2234 if (peer_req->flags & EE_IN_INTERVAL_TREE) { 2235 spin_lock_irq(&device->resource->req_lock); 2236 D_ASSERT(device, !drbd_interval_empty(&peer_req->i)); 2237 drbd_remove_epoch_entry_interval(device, peer_req); 2238 if (peer_req->flags & EE_RESTART_REQUESTS) 2239 restart_conflicting_writes(device, sector, peer_req->i.size); 2240 spin_unlock_irq(&device->resource->req_lock); 2241 } else 2242 D_ASSERT(device, drbd_interval_empty(&peer_req->i)); 2243 2244 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 2245 2246 return err; 2247 } 2248 2249 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack) 2250 { 2251 struct drbd_peer_request *peer_req = 2252 container_of(w, struct drbd_peer_request, w); 2253 struct drbd_peer_device *peer_device = peer_req->peer_device; 2254 int err; 2255 2256 err = drbd_send_ack(peer_device, ack, peer_req); 2257 dec_unacked(peer_device->device); 2258 2259 return err; 2260 } 2261 2262 static int e_send_superseded(struct drbd_work *w, int unused) 2263 { 2264 return e_send_ack(w, P_SUPERSEDED); 2265 } 2266 2267 static int e_send_retry_write(struct drbd_work *w, int unused) 2268 { 2269 struct drbd_peer_request *peer_req = 2270 container_of(w, struct drbd_peer_request, w); 2271 struct drbd_connection *connection = peer_req->peer_device->connection; 2272 2273 return e_send_ack(w, connection->agreed_pro_version >= 100 ? 2274 P_RETRY_WRITE : P_SUPERSEDED); 2275 } 2276 2277 static bool seq_greater(u32 a, u32 b) 2278 { 2279 /* 2280 * We assume 32-bit wrap-around here. 2281 * For 24-bit wrap-around, we would have to shift: 2282 * a <<= 8; b <<= 8; 2283 */ 2284 return (s32)a - (s32)b > 0; 2285 } 2286 2287 static u32 seq_max(u32 a, u32 b) 2288 { 2289 return seq_greater(a, b) ? a : b; 2290 } 2291 2292 static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq) 2293 { 2294 struct drbd_device *device = peer_device->device; 2295 unsigned int newest_peer_seq; 2296 2297 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) { 2298 spin_lock(&device->peer_seq_lock); 2299 newest_peer_seq = seq_max(device->peer_seq, peer_seq); 2300 device->peer_seq = newest_peer_seq; 2301 spin_unlock(&device->peer_seq_lock); 2302 /* wake up only if we actually changed device->peer_seq */ 2303 if (peer_seq == newest_peer_seq) 2304 wake_up(&device->seq_wait); 2305 } 2306 } 2307 2308 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2) 2309 { 2310 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9))); 2311 } 2312 2313 /* maybe change sync_ee into interval trees as well? */ 2314 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req) 2315 { 2316 struct drbd_peer_request *rs_req; 2317 bool rv = false; 2318 2319 spin_lock_irq(&device->resource->req_lock); 2320 list_for_each_entry(rs_req, &device->sync_ee, w.list) { 2321 if (overlaps(peer_req->i.sector, peer_req->i.size, 2322 rs_req->i.sector, rs_req->i.size)) { 2323 rv = true; 2324 break; 2325 } 2326 } 2327 spin_unlock_irq(&device->resource->req_lock); 2328 2329 return rv; 2330 } 2331 2332 /* Called from receive_Data. 2333 * Synchronize packets on sock with packets on msock. 2334 * 2335 * This is here so even when a P_DATA packet traveling via sock overtook an Ack 2336 * packet traveling on msock, they are still processed in the order they have 2337 * been sent. 2338 * 2339 * Note: we don't care for Ack packets overtaking P_DATA packets. 
2340 * 2341 * In case packet_seq is larger than device->peer_seq number, there are 2342 * outstanding packets on the msock. We wait for them to arrive. 2343 * In case we are the logically next packet, we update device->peer_seq 2344 * ourselves. Correctly handles 32bit wrap around. 2345 * 2346 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, 2347 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds 2348 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have 2349 * 1<<9 == 512 seconds aka ages for the 32bit wrap around... 2350 * 2351 * returns 0 if we may process the packet, 2352 * -ERESTARTSYS if we were interrupted (by disconnect signal). */ 2353 static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq) 2354 { 2355 struct drbd_device *device = peer_device->device; 2356 DEFINE_WAIT(wait); 2357 long timeout; 2358 int ret = 0, tp; 2359 2360 if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) 2361 return 0; 2362 2363 spin_lock(&device->peer_seq_lock); 2364 for (;;) { 2365 if (!seq_greater(peer_seq - 1, device->peer_seq)) { 2366 device->peer_seq = seq_max(device->peer_seq, peer_seq); 2367 break; 2368 } 2369 2370 if (signal_pending(current)) { 2371 ret = -ERESTARTSYS; 2372 break; 2373 } 2374 2375 rcu_read_lock(); 2376 tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries; 2377 rcu_read_unlock(); 2378 2379 if (!tp) 2380 break; 2381 2382 /* Only need to wait if two_primaries is enabled */ 2383 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE); 2384 spin_unlock(&device->peer_seq_lock); 2385 rcu_read_lock(); 2386 timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10; 2387 rcu_read_unlock(); 2388 timeout = schedule_timeout(timeout); 2389 spin_lock(&device->peer_seq_lock); 2390 if (!timeout) { 2391 ret = -ETIMEDOUT; 2392 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n"); 2393 break; 2394 } 2395 } 2396 spin_unlock(&device->peer_seq_lock); 2397 finish_wait(&device->seq_wait, &wait); 2398 return ret; 2399 } 2400 2401 static enum req_op wire_flags_to_bio_op(u32 dpf) 2402 { 2403 if (dpf & DP_ZEROES) 2404 return REQ_OP_WRITE_ZEROES; 2405 if (dpf & DP_DISCARD) 2406 return REQ_OP_DISCARD; 2407 else 2408 return REQ_OP_WRITE; 2409 } 2410 2411 /* see also bio_flags_to_wire() */ 2412 static blk_opf_t wire_flags_to_bio(struct drbd_connection *connection, u32 dpf) 2413 { 2414 return wire_flags_to_bio_op(dpf) | 2415 (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 2416 (dpf & DP_FUA ? REQ_FUA : 0) | 2417 (dpf & DP_FLUSH ? 
REQ_PREFLUSH : 0); 2418 } 2419 2420 static void fail_postponed_requests(struct drbd_device *device, sector_t sector, 2421 unsigned int size) 2422 { 2423 struct drbd_peer_device *peer_device = first_peer_device(device); 2424 struct drbd_interval *i; 2425 2426 repeat: 2427 drbd_for_each_overlap(i, &device->write_requests, sector, size) { 2428 struct drbd_request *req; 2429 struct bio_and_error m; 2430 2431 if (!i->local) 2432 continue; 2433 req = container_of(i, struct drbd_request, i); 2434 if (!(req->rq_state & RQ_POSTPONED)) 2435 continue; 2436 req->rq_state &= ~RQ_POSTPONED; 2437 __req_mod(req, NEG_ACKED, peer_device, &m); 2438 spin_unlock_irq(&device->resource->req_lock); 2439 if (m.bio) 2440 complete_master_bio(device, &m); 2441 spin_lock_irq(&device->resource->req_lock); 2442 goto repeat; 2443 } 2444 } 2445 2446 static int handle_write_conflicts(struct drbd_device *device, 2447 struct drbd_peer_request *peer_req) 2448 { 2449 struct drbd_connection *connection = peer_req->peer_device->connection; 2450 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags); 2451 sector_t sector = peer_req->i.sector; 2452 const unsigned int size = peer_req->i.size; 2453 struct drbd_interval *i; 2454 bool equal; 2455 int err; 2456 2457 /* 2458 * Inserting the peer request into the write_requests tree will prevent 2459 * new conflicting local requests from being added. 2460 */ 2461 drbd_insert_interval(&device->write_requests, &peer_req->i); 2462 2463 repeat: 2464 drbd_for_each_overlap(i, &device->write_requests, sector, size) { 2465 if (i == &peer_req->i) 2466 continue; 2467 if (i->completed) 2468 continue; 2469 2470 if (!i->local) { 2471 /* 2472 * Our peer has sent a conflicting remote request; this 2473 * should not happen in a two-node setup. Wait for the 2474 * earlier peer request to complete. 2475 */ 2476 err = drbd_wait_misc(device, i); 2477 if (err) 2478 goto out; 2479 goto repeat; 2480 } 2481 2482 equal = i->sector == sector && i->size == size; 2483 if (resolve_conflicts) { 2484 /* 2485 * If the peer request is fully contained within the 2486 * overlapping request, it can be considered overwritten 2487 * and thus superseded; otherwise, it will be retried 2488 * once all overlapping requests have completed. 2489 */ 2490 bool superseded = i->sector <= sector && i->sector + 2491 (i->size >> 9) >= sector + (size >> 9); 2492 2493 if (!equal) 2494 drbd_alert(device, "Concurrent writes detected: " 2495 "local=%llus +%u, remote=%llus +%u, " 2496 "assuming %s came first\n", 2497 (unsigned long long)i->sector, i->size, 2498 (unsigned long long)sector, size, 2499 superseded ? "local" : "remote"); 2500 2501 peer_req->w.cb = superseded ? e_send_superseded : 2502 e_send_retry_write; 2503 list_add_tail(&peer_req->w.list, &device->done_ee); 2504 queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); 2505 2506 err = -ENOENT; 2507 goto out; 2508 } else { 2509 struct drbd_request *req = 2510 container_of(i, struct drbd_request, i); 2511 2512 if (!equal) 2513 drbd_alert(device, "Concurrent writes detected: " 2514 "local=%llus +%u, remote=%llus +%u\n", 2515 (unsigned long long)i->sector, i->size, 2516 (unsigned long long)sector, size); 2517 2518 if (req->rq_state & RQ_LOCAL_PENDING || 2519 !(req->rq_state & RQ_POSTPONED)) { 2520 /* 2521 * Wait for the node with the discard flag to 2522 * decide if this request has been superseded 2523 * or needs to be retried. 2524 * Requests that have been superseded will 2525 * disappear from the write_requests tree. 
2526 * 2527 * In addition, wait for the conflicting 2528 * request to finish locally before submitting 2529 * the conflicting peer request. 2530 */ 2531 err = drbd_wait_misc(device, &req->i); 2532 if (err) { 2533 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD); 2534 fail_postponed_requests(device, sector, size); 2535 goto out; 2536 } 2537 goto repeat; 2538 } 2539 /* 2540 * Remember to restart the conflicting requests after 2541 * the new peer request has completed. 2542 */ 2543 peer_req->flags |= EE_RESTART_REQUESTS; 2544 } 2545 } 2546 err = 0; 2547 2548 out: 2549 if (err) 2550 drbd_remove_epoch_entry_interval(device, peer_req); 2551 return err; 2552 } 2553 2554 /* mirrored write */ 2555 static int receive_Data(struct drbd_connection *connection, struct packet_info *pi) 2556 { 2557 struct drbd_peer_device *peer_device; 2558 struct drbd_device *device; 2559 struct net_conf *nc; 2560 sector_t sector; 2561 struct drbd_peer_request *peer_req; 2562 struct p_data *p = pi->data; 2563 u32 peer_seq = be32_to_cpu(p->seq_num); 2564 u32 dp_flags; 2565 int err, tp; 2566 2567 peer_device = conn_peer_device(connection, pi->vnr); 2568 if (!peer_device) 2569 return -EIO; 2570 device = peer_device->device; 2571 2572 if (!get_ldev(device)) { 2573 int err2; 2574 2575 err = wait_for_and_update_peer_seq(peer_device, peer_seq); 2576 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); 2577 atomic_inc(&connection->current_epoch->epoch_size); 2578 err2 = drbd_drain_block(peer_device, pi->size); 2579 if (!err) 2580 err = err2; 2581 return err; 2582 } 2583 2584 /* 2585 * Corresponding put_ldev done either below (on various errors), or in 2586 * drbd_peer_request_endio, if we successfully submit the data at the 2587 * end of this function. 2588 */ 2589 2590 sector = be64_to_cpu(p->sector); 2591 peer_req = read_in_block(peer_device, p->block_id, sector, pi); 2592 if (!peer_req) { 2593 put_ldev(device); 2594 return -EIO; 2595 } 2596 2597 peer_req->w.cb = e_end_block; 2598 peer_req->submit_jif = jiffies; 2599 peer_req->flags |= EE_APPLICATION; 2600 2601 dp_flags = be32_to_cpu(p->dp_flags); 2602 peer_req->opf = wire_flags_to_bio(connection, dp_flags); 2603 if (pi->cmd == P_TRIM) { 2604 D_ASSERT(peer_device, peer_req->i.size > 0); 2605 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_DISCARD); 2606 D_ASSERT(peer_device, peer_req->pages == NULL); 2607 /* need to play safe: an older DRBD sender 2608 * may mean zero-out while sending P_TRIM. */ 2609 if (0 == (connection->agreed_features & DRBD_FF_WZEROES)) 2610 peer_req->flags |= EE_ZEROOUT; 2611 } else if (pi->cmd == P_ZEROES) { 2612 D_ASSERT(peer_device, peer_req->i.size > 0); 2613 D_ASSERT(peer_device, peer_req_op(peer_req) == REQ_OP_WRITE_ZEROES); 2614 D_ASSERT(peer_device, peer_req->pages == NULL); 2615 /* Do (not) pass down BLKDEV_ZERO_NOUNMAP? 
*/
2616 if (dp_flags & DP_DISCARD)
2617 peer_req->flags |= EE_TRIM;
2618 } else if (peer_req->pages == NULL) {
2619 D_ASSERT(device, peer_req->i.size == 0);
2620 D_ASSERT(device, dp_flags & DP_FLUSH);
2621 }
2622
2623 if (dp_flags & DP_MAY_SET_IN_SYNC)
2624 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2625
2626 spin_lock(&connection->epoch_lock);
2627 peer_req->epoch = connection->current_epoch;
2628 atomic_inc(&peer_req->epoch->epoch_size);
2629 atomic_inc(&peer_req->epoch->active);
2630 spin_unlock(&connection->epoch_lock);
2631
2632 rcu_read_lock();
2633 nc = rcu_dereference(peer_device->connection->net_conf);
2634 tp = nc->two_primaries;
2635 if (peer_device->connection->agreed_pro_version < 100) {
2636 switch (nc->wire_protocol) {
2637 case DRBD_PROT_C:
2638 dp_flags |= DP_SEND_WRITE_ACK;
2639 break;
2640 case DRBD_PROT_B:
2641 dp_flags |= DP_SEND_RECEIVE_ACK;
2642 break;
2643 }
2644 }
2645 rcu_read_unlock();
2646
2647 if (dp_flags & DP_SEND_WRITE_ACK) {
2648 peer_req->flags |= EE_SEND_WRITE_ACK;
2649 inc_unacked(device);
2650 /* corresponding dec_unacked() in e_end_block()
2651 * respective _drbd_clear_done_ee */
2652 }
2653
2654 if (dp_flags & DP_SEND_RECEIVE_ACK) {
2655 /* I really don't like it that the receiver thread
2656 * sends on the msock, but anyways */
2657 drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2658 }
2659
2660 if (tp) {
2661 /* two primaries implies protocol C */
2662 D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2663 peer_req->flags |= EE_IN_INTERVAL_TREE;
2664 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2665 if (err)
2666 goto out_interrupted;
2667 spin_lock_irq(&device->resource->req_lock);
2668 err = handle_write_conflicts(device, peer_req);
2669 if (err) {
2670 spin_unlock_irq(&device->resource->req_lock);
2671 if (err == -ENOENT) {
2672 put_ldev(device);
2673 return 0;
2674 }
2675 goto out_interrupted;
2676 }
2677 } else {
2678 update_peer_seq(peer_device, peer_seq);
2679 spin_lock_irq(&device->resource->req_lock);
2680 }
2681 /* TRIM and ZEROES are processed synchronously:
2682 * we wait for all pending requests, respectively wait for
2683 * active_ee to become empty in drbd_submit_peer_request();
2684 * better not add ourselves here.
*/ 2685 if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0) 2686 list_add_tail(&peer_req->w.list, &device->active_ee); 2687 spin_unlock_irq(&device->resource->req_lock); 2688 2689 if (device->state.conn == C_SYNC_TARGET) 2690 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); 2691 2692 if (device->state.pdsk < D_INCONSISTENT) { 2693 /* In case we have the only disk of the cluster, */ 2694 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size); 2695 peer_req->flags &= ~EE_MAY_SET_IN_SYNC; 2696 drbd_al_begin_io(device, &peer_req->i); 2697 peer_req->flags |= EE_CALL_AL_COMPLETE_IO; 2698 } 2699 2700 err = drbd_submit_peer_request(peer_req); 2701 if (!err) 2702 return 0; 2703 2704 /* don't care for the reason here */ 2705 drbd_err(device, "submit failed, triggering re-connect\n"); 2706 spin_lock_irq(&device->resource->req_lock); 2707 list_del(&peer_req->w.list); 2708 drbd_remove_epoch_entry_interval(device, peer_req); 2709 spin_unlock_irq(&device->resource->req_lock); 2710 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) { 2711 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; 2712 drbd_al_complete_io(device, &peer_req->i); 2713 } 2714 2715 out_interrupted: 2716 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP); 2717 put_ldev(device); 2718 drbd_free_peer_req(device, peer_req); 2719 return err; 2720 } 2721 2722 /* We may throttle resync, if the lower device seems to be busy, 2723 * and current sync rate is above c_min_rate. 2724 * 2725 * To decide whether or not the lower device is busy, we use a scheme similar 2726 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant" 2727 * (more than 64 sectors) of activity we cannot account for with our own resync 2728 * activity, it obviously is "busy". 2729 * 2730 * The current sync rate used here uses only the most recent two step marks, 2731 * to have a short time average so we can react faster. 2732 */ 2733 bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector, 2734 bool throttle_if_app_is_waiting) 2735 { 2736 struct drbd_device *device = peer_device->device; 2737 struct lc_element *tmp; 2738 bool throttle = drbd_rs_c_min_rate_throttle(device); 2739 2740 if (!throttle || throttle_if_app_is_waiting) 2741 return throttle; 2742 2743 spin_lock_irq(&device->al_lock); 2744 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); 2745 if (tmp) { 2746 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); 2747 if (test_bit(BME_PRIORITY, &bm_ext->flags)) 2748 throttle = false; 2749 /* Do not slow down if app IO is already waiting for this extent, 2750 * and our progress is necessary for application IO to complete. */ 2751 } 2752 spin_unlock_irq(&device->al_lock); 2753 2754 return throttle; 2755 } 2756 2757 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) 2758 { 2759 struct gendisk *disk = device->ldev->backing_bdev->bd_disk; 2760 unsigned long db, dt, dbdt; 2761 unsigned int c_min_rate; 2762 int curr_events; 2763 2764 rcu_read_lock(); 2765 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; 2766 rcu_read_unlock(); 2767 2768 /* feature disabled? 
*/
2769 if (c_min_rate == 0)
2770 return false;
2771
2772 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
2773 atomic_read(&device->rs_sect_ev);
2774
2775 if (atomic_read(&device->ap_actlog_cnt)
2776 || curr_events - device->rs_last_events > 64) {
2777 unsigned long rs_left;
2778 int i;
2779
2780 device->rs_last_events = curr_events;
2781
2782 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2783 * approx. */
2784 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2785
2786 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2787 rs_left = device->ov_left;
2788 else
2789 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2790
2791 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2792 if (!dt)
2793 dt++;
2794 db = device->rs_mark_left[i] - rs_left;
2795 dbdt = Bit2KB(db/dt);
2796
2797 if (dbdt > c_min_rate)
2798 return true;
2799 }
2800 return false;
2801 }
2802
2803 static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2804 {
2805 struct drbd_peer_device *peer_device;
2806 struct drbd_device *device;
2807 sector_t sector;
2808 sector_t capacity;
2809 struct drbd_peer_request *peer_req;
2810 struct digest_info *di = NULL;
2811 int size, verb;
2812 struct p_block_req *p = pi->data;
2813
2814 peer_device = conn_peer_device(connection, pi->vnr);
2815 if (!peer_device)
2816 return -EIO;
2817 device = peer_device->device;
2818 capacity = get_capacity(device->vdisk);
2819
2820 sector = be64_to_cpu(p->sector);
2821 size = be32_to_cpu(p->blksize);
2822
2823 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2824 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2825 (unsigned long long)sector, size);
2826 return -EINVAL;
2827 }
2828 if (sector + (size>>9) > capacity) {
2829 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2830 (unsigned long long)sector, size);
2831 return -EINVAL;
2832 }
2833
2834 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2835 verb = 1;
2836 switch (pi->cmd) {
2837 case P_DATA_REQUEST:
2838 drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
2839 break;
2840 case P_RS_THIN_REQ:
2841 case P_RS_DATA_REQUEST:
2842 case P_CSUM_RS_REQUEST:
2843 case P_OV_REQUEST:
2844 drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY, p);
2845 break;
2846 case P_OV_REPLY:
2847 verb = 0;
2848 dec_rs_pending(peer_device);
2849 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2850 break;
2851 default:
2852 BUG();
2853 }
2854 if (verb && drbd_ratelimit())
2855 drbd_err(device, "Can not satisfy peer's read request, "
2856 "no local data.\n");
2857
2858 /* drain possible payload */
2859 return drbd_drain_block(peer_device, pi->size);
2860 }
2861
2862 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2863 * "criss-cross" setup, that might cause write-out on some other DRBD,
2864 * which in turn might block on the other node at this very place.
*/ 2865 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, 2866 size, GFP_NOIO); 2867 if (!peer_req) { 2868 put_ldev(device); 2869 return -ENOMEM; 2870 } 2871 peer_req->opf = REQ_OP_READ; 2872 2873 switch (pi->cmd) { 2874 case P_DATA_REQUEST: 2875 peer_req->w.cb = w_e_end_data_req; 2876 /* application IO, don't drbd_rs_begin_io */ 2877 peer_req->flags |= EE_APPLICATION; 2878 goto submit; 2879 2880 case P_RS_THIN_REQ: 2881 /* If at some point in the future we have a smart way to 2882 find out if this data block is completely deallocated, 2883 then we would do something smarter here than reading 2884 the block... */ 2885 peer_req->flags |= EE_RS_THIN_REQ; 2886 fallthrough; 2887 case P_RS_DATA_REQUEST: 2888 peer_req->w.cb = w_e_end_rsdata_req; 2889 /* used in the sector offset progress display */ 2890 device->bm_resync_fo = BM_SECT_TO_BIT(sector); 2891 break; 2892 2893 case P_OV_REPLY: 2894 case P_CSUM_RS_REQUEST: 2895 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO); 2896 if (!di) 2897 goto out_free_e; 2898 2899 di->digest_size = pi->size; 2900 di->digest = (((char *)di)+sizeof(struct digest_info)); 2901 2902 peer_req->digest = di; 2903 peer_req->flags |= EE_HAS_DIGEST; 2904 2905 if (drbd_recv_all(peer_device->connection, di->digest, pi->size)) 2906 goto out_free_e; 2907 2908 if (pi->cmd == P_CSUM_RS_REQUEST) { 2909 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89); 2910 peer_req->w.cb = w_e_end_csum_rs_req; 2911 /* used in the sector offset progress display */ 2912 device->bm_resync_fo = BM_SECT_TO_BIT(sector); 2913 /* remember to report stats in drbd_resync_finished */ 2914 device->use_csums = true; 2915 } else if (pi->cmd == P_OV_REPLY) { 2916 /* track progress, we may need to throttle */ 2917 atomic_add(size >> 9, &device->rs_sect_in); 2918 peer_req->w.cb = w_e_end_ov_reply; 2919 dec_rs_pending(peer_device); 2920 /* drbd_rs_begin_io done when we sent this request, 2921 * but accounting still needs to be done. */ 2922 goto submit_for_resync; 2923 } 2924 break; 2925 2926 case P_OV_REQUEST: 2927 if (device->ov_start_sector == ~(sector_t)0 && 2928 peer_device->connection->agreed_pro_version >= 90) { 2929 unsigned long now = jiffies; 2930 int i; 2931 device->ov_start_sector = sector; 2932 device->ov_position = sector; 2933 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector); 2934 device->rs_total = device->ov_left; 2935 for (i = 0; i < DRBD_SYNC_MARKS; i++) { 2936 device->rs_mark_left[i] = device->ov_left; 2937 device->rs_mark_time[i] = now; 2938 } 2939 drbd_info(device, "Online Verify start sector: %llu\n", 2940 (unsigned long long)sector); 2941 } 2942 peer_req->w.cb = w_e_end_ov_req; 2943 break; 2944 2945 default: 2946 BUG(); 2947 } 2948 2949 /* Throttle, drbd_rs_begin_io and submit should become asynchronous 2950 * wrt the receiver, but it is not as straightforward as it may seem. 2951 * Various places in the resync start and stop logic assume resync 2952 * requests are processed in order, requeuing this on the worker thread 2953 * introduces a bunch of new code for synchronization between threads. 2954 * 2955 * Unlimited throttling before drbd_rs_begin_io may stall the resync 2956 * "forever", throttling after drbd_rs_begin_io will lock that extent 2957 * for application writes for the same time. For now, just throttle 2958 * here, where the rest of the code expects the receiver to sleep for 2959 * a while, anyways. 
2960 */
2961
2962 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2963 * this defers syncer requests for some time, before letting at least
2964 * one request through. The resync controller on the receiving side
2965 * will adapt to the incoming rate accordingly.
2966 *
2967 * We cannot throttle here if remote is Primary/SyncTarget:
2968 * we would also throttle its application reads.
2969 * In that case, throttling is done on the SyncTarget only.
2970 */
2971
2972 /* Even though this may be a resync request, we do add to "read_ee";
2973 * "sync_ee" is only used for resync WRITEs.
2974 * Add to list early, so debugfs can find this request
2975 * even if we have to sleep below. */
2976 spin_lock_irq(&device->resource->req_lock);
2977 list_add_tail(&peer_req->w.list, &device->read_ee);
2978 spin_unlock_irq(&device->resource->req_lock);
2979
2980 update_receiver_timing_details(connection, drbd_rs_should_slow_down);
2981 if (device->state.peer != R_PRIMARY
2982 && drbd_rs_should_slow_down(peer_device, sector, false))
2983 schedule_timeout_uninterruptible(HZ/10);
2984 update_receiver_timing_details(connection, drbd_rs_begin_io);
2985 if (drbd_rs_begin_io(device, sector))
2986 goto out_free_e;
2987
2988 submit_for_resync:
2989 atomic_add(size >> 9, &device->rs_sect_ev);
2990
2991 submit:
2992 update_receiver_timing_details(connection, drbd_submit_peer_request);
2993 inc_unacked(device);
2994 if (drbd_submit_peer_request(peer_req) == 0)
2995 return 0;
2996
2997 /* don't care for the reason here */
2998 drbd_err(device, "submit failed, triggering re-connect\n");
2999
3000 out_free_e:
3001 spin_lock_irq(&device->resource->req_lock);
3002 list_del(&peer_req->w.list);
3003 spin_unlock_irq(&device->resource->req_lock);
3004 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
3005
3006 put_ldev(device);
3007 drbd_free_peer_req(device, peer_req);
3008 return -EIO;
3009 }
3010
3011 /*
3012 * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries
3013 */
3014 static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
3015 {
3016 struct drbd_device *device = peer_device->device;
3017 int self, peer, rv = -100;
3018 unsigned long ch_self, ch_peer;
3019 enum drbd_after_sb_p after_sb_0p;
3020
3021 self = device->ldev->md.uuid[UI_BITMAP] & 1;
3022 peer = device->p_uuid[UI_BITMAP] & 1;
3023
3024 ch_peer = device->p_uuid[UI_SIZE];
3025 ch_self = device->comm_bm_set;
3026
3027 rcu_read_lock();
3028 after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
3029 rcu_read_unlock();
3030 switch (after_sb_0p) {
3031 case ASB_CONSENSUS:
3032 case ASB_DISCARD_SECONDARY:
3033 case ASB_CALL_HELPER:
3034 case ASB_VIOLENTLY:
3035 drbd_err(device, "Configuration error.\n");
3036 break;
3037 case ASB_DISCONNECT:
3038 break;
3039 case ASB_DISCARD_YOUNGER_PRI:
3040 if (self == 0 && peer == 1) {
3041 rv = -1;
3042 break;
3043 }
3044 if (self == 1 && peer == 0) {
3045 rv = 1;
3046 break;
3047 }
3048 fallthrough; /* to one of the other strategies */
3049 case ASB_DISCARD_OLDER_PRI:
3050 if (self == 0 && peer == 1) {
3051 rv = 1;
3052 break;
3053 }
3054 if (self == 1 && peer == 0) {
3055 rv = -1;
3056 break;
3057 }
3058 /* Else fall through to one of the other strategies...
*/ 3059 drbd_warn(device, "Discard younger/older primary did not find a decision\n" 3060 "Using discard-least-changes instead\n"); 3061 fallthrough; 3062 case ASB_DISCARD_ZERO_CHG: 3063 if (ch_peer == 0 && ch_self == 0) { 3064 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) 3065 ? -1 : 1; 3066 break; 3067 } else { 3068 if (ch_peer == 0) { rv = 1; break; } 3069 if (ch_self == 0) { rv = -1; break; } 3070 } 3071 if (after_sb_0p == ASB_DISCARD_ZERO_CHG) 3072 break; 3073 fallthrough; 3074 case ASB_DISCARD_LEAST_CHG: 3075 if (ch_self < ch_peer) 3076 rv = -1; 3077 else if (ch_self > ch_peer) 3078 rv = 1; 3079 else /* ( ch_self == ch_peer ) */ 3080 /* Well, then use something else. */ 3081 rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) 3082 ? -1 : 1; 3083 break; 3084 case ASB_DISCARD_LOCAL: 3085 rv = -1; 3086 break; 3087 case ASB_DISCARD_REMOTE: 3088 rv = 1; 3089 } 3090 3091 return rv; 3092 } 3093 3094 /* 3095 * drbd_asb_recover_1p - Recover after split-brain with one remaining primary 3096 */ 3097 static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local) 3098 { 3099 struct drbd_device *device = peer_device->device; 3100 int hg, rv = -100; 3101 enum drbd_after_sb_p after_sb_1p; 3102 3103 rcu_read_lock(); 3104 after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p; 3105 rcu_read_unlock(); 3106 switch (after_sb_1p) { 3107 case ASB_DISCARD_YOUNGER_PRI: 3108 case ASB_DISCARD_OLDER_PRI: 3109 case ASB_DISCARD_LEAST_CHG: 3110 case ASB_DISCARD_LOCAL: 3111 case ASB_DISCARD_REMOTE: 3112 case ASB_DISCARD_ZERO_CHG: 3113 drbd_err(device, "Configuration error.\n"); 3114 break; 3115 case ASB_DISCONNECT: 3116 break; 3117 case ASB_CONSENSUS: 3118 hg = drbd_asb_recover_0p(peer_device); 3119 if (hg == -1 && device->state.role == R_SECONDARY) 3120 rv = hg; 3121 if (hg == 1 && device->state.role == R_PRIMARY) 3122 rv = hg; 3123 break; 3124 case ASB_VIOLENTLY: 3125 rv = drbd_asb_recover_0p(peer_device); 3126 break; 3127 case ASB_DISCARD_SECONDARY: 3128 return device->state.role == R_PRIMARY ? 1 : -1; 3129 case ASB_CALL_HELPER: 3130 hg = drbd_asb_recover_0p(peer_device); 3131 if (hg == -1 && device->state.role == R_PRIMARY) { 3132 enum drbd_state_rv rv2; 3133 3134 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 3135 * we might be here in C_WF_REPORT_PARAMS which is transient. 3136 * we do not need to wait for the after state change work either. 
*/ 3137 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); 3138 if (rv2 != SS_SUCCESS) { 3139 drbd_khelper(device, "pri-lost-after-sb"); 3140 } else { 3141 drbd_warn(device, "Successfully gave up primary role.\n"); 3142 rv = hg; 3143 } 3144 } else 3145 rv = hg; 3146 } 3147 3148 return rv; 3149 } 3150 3151 /* 3152 * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries 3153 */ 3154 static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local) 3155 { 3156 struct drbd_device *device = peer_device->device; 3157 int hg, rv = -100; 3158 enum drbd_after_sb_p after_sb_2p; 3159 3160 rcu_read_lock(); 3161 after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p; 3162 rcu_read_unlock(); 3163 switch (after_sb_2p) { 3164 case ASB_DISCARD_YOUNGER_PRI: 3165 case ASB_DISCARD_OLDER_PRI: 3166 case ASB_DISCARD_LEAST_CHG: 3167 case ASB_DISCARD_LOCAL: 3168 case ASB_DISCARD_REMOTE: 3169 case ASB_CONSENSUS: 3170 case ASB_DISCARD_SECONDARY: 3171 case ASB_DISCARD_ZERO_CHG: 3172 drbd_err(device, "Configuration error.\n"); 3173 break; 3174 case ASB_VIOLENTLY: 3175 rv = drbd_asb_recover_0p(peer_device); 3176 break; 3177 case ASB_DISCONNECT: 3178 break; 3179 case ASB_CALL_HELPER: 3180 hg = drbd_asb_recover_0p(peer_device); 3181 if (hg == -1) { 3182 enum drbd_state_rv rv2; 3183 3184 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 3185 * we might be here in C_WF_REPORT_PARAMS which is transient. 3186 * we do not need to wait for the after state change work either. */ 3187 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); 3188 if (rv2 != SS_SUCCESS) { 3189 drbd_khelper(device, "pri-lost-after-sb"); 3190 } else { 3191 drbd_warn(device, "Successfully gave up primary role.\n"); 3192 rv = hg; 3193 } 3194 } else 3195 rv = hg; 3196 } 3197 3198 return rv; 3199 } 3200 3201 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, 3202 u64 bits, u64 flags) 3203 { 3204 if (!uuid) { 3205 drbd_info(device, "%s uuid info vanished while I was looking!\n", text); 3206 return; 3207 } 3208 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 3209 text, 3210 (unsigned long long)uuid[UI_CURRENT], 3211 (unsigned long long)uuid[UI_BITMAP], 3212 (unsigned long long)uuid[UI_HISTORY_START], 3213 (unsigned long long)uuid[UI_HISTORY_END], 3214 (unsigned long long)bits, 3215 (unsigned long long)flags); 3216 } 3217 3218 /* 3219 100 after split brain try auto recover 3220 2 C_SYNC_SOURCE set BitMap 3221 1 C_SYNC_SOURCE use BitMap 3222 0 no Sync 3223 -1 C_SYNC_TARGET use BitMap 3224 -2 C_SYNC_TARGET set BitMap 3225 -100 after split brain, disconnect 3226 -1000 unrelated data 3227 -1091 requires proto 91 3228 -1096 requires proto 96 3229 */ 3230 3231 static int drbd_uuid_compare(struct drbd_peer_device *const peer_device, 3232 enum drbd_role const peer_role, int *rule_nr) __must_hold(local) 3233 { 3234 struct drbd_connection *const connection = peer_device->connection; 3235 struct drbd_device *device = peer_device->device; 3236 u64 self, peer; 3237 int i, j; 3238 3239 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 3240 peer = device->p_uuid[UI_CURRENT] & ~((u64)1); 3241 3242 *rule_nr = 10; 3243 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) 3244 return 0; 3245 3246 *rule_nr = 20; 3247 if ((self == UUID_JUST_CREATED || self == (u64)0) && 3248 peer != UUID_JUST_CREATED) 3249 return -2; 3250 3251 *rule_nr = 30; 3252 if (self != UUID_JUST_CREATED && 3253 (peer == 
UUID_JUST_CREATED || peer == (u64)0))
3254 return 2;
3255
3256 if (self == peer) {
3257 int rct, dc; /* roles at crash time */
3258
3259 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
3260
3261 if (connection->agreed_pro_version < 91)
3262 return -1091;
3263
3264 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3265 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
3266 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
3267 drbd_uuid_move_history(device);
3268 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3269 device->ldev->md.uuid[UI_BITMAP] = 0;
3270
3271 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3272 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3273 *rule_nr = 34;
3274 } else {
3275 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
3276 *rule_nr = 36;
3277 }
3278
3279 return 1;
3280 }
3281
3282 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
3283
3284 if (connection->agreed_pro_version < 91)
3285 return -1091;
3286
3287 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3288 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
3289 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
3290
3291 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3292 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3293 device->p_uuid[UI_BITMAP] = 0UL;
3294
3295 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3296 *rule_nr = 35;
3297 } else {
3298 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
3299 *rule_nr = 37;
3300 }
3301
3302 return -1;
3303 }
3304
3305 /* Common power [off|failure] */
3306 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3307 (device->p_uuid[UI_FLAGS] & 2);
3308 /* lowest bit is set when we were primary,
3309 * next bit (weight 2) is set when peer was primary */
3310 *rule_nr = 40;
3311
3312 /* Neither has the "crashed primary" flag set,
3313 * only a replication link hiccup. */
3314 if (rct == 0)
3315 return 0;
3316
3317 /* Current UUID equal and no bitmap uuid; does not necessarily
3318 * mean this was a "simultaneous hard crash", maybe IO was
3319 * frozen, so no UUID-bump happened.
3320 * This is a protocol change, overload DRBD_FF_WSAME as flag
3321 * for "new-enough" peer DRBD version. */
3322 if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3323 *rule_nr = 41;
3324 if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3325 drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3326 return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3327 }
3328 if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3329 /* At least one has the "crashed primary" bit set,
3330 * both are primary now, but neither has rotated its UUIDs?
3331 * "Can not happen." */
3332 drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3333 return -100;
3334 }
3335 if (device->state.role == R_PRIMARY)
3336 return 1;
3337 return -1;
3338 }
3339
3340 /* Both are secondary.
3341 * Really looks like recovery from simultaneous hard crash.
3342 * Check which had been primary before, and arbitrate. */
3343 switch (rct) {
3344 case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
3345 case 1: /* self_pri && !peer_pri */ return 1;
3346 case 2: /* !self_pri && peer_pri */ return -1;
3347 case 3: /* self_pri && peer_pri */
3348 dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
3349 return dc ? -1 : 1;
3350 }
3351 }
3352
3353 *rule_nr = 50;
3354 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3355 if (self == peer)
3356 return -1;
3357
3358 *rule_nr = 51;
3359 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
3360 if (self == peer) {
3361 if (connection->agreed_pro_version < 96 ?
3362 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3363 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3364 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
3365 /* The last P_SYNC_UUID did not get through. Undo the last start of
3366 resync as sync source modifications of the peer's UUIDs. */
3367
3368 if (connection->agreed_pro_version < 91)
3369 return -1091;
3370
3371 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3372 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
3373
3374 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
3375 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3376
3377 return -1;
3378 }
3379 }
3380
3381 *rule_nr = 60;
3382 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3383 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3384 peer = device->p_uuid[i] & ~((u64)1);
3385 if (self == peer)
3386 return -2;
3387 }
3388
3389 *rule_nr = 70;
3390 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3391 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3392 if (self == peer)
3393 return 1;
3394
3395 *rule_nr = 71;
3396 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
3397 if (self == peer) {
3398 if (connection->agreed_pro_version < 96 ?
3399 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3400 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3401 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
3402 /* The last P_SYNC_UUID did not get through. Undo the last start of
3403 resync as sync source modifications of our UUIDs. */
3404
3405 if (connection->agreed_pro_version < 91)
3406 return -1091;
3407
3408 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3409 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
3410
3411 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
3412 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3413 device->state.disk >= D_NEGOTIATING ?
drbd_bm_total_weight(device) : 0, 0); 3414 3415 return 1; 3416 } 3417 } 3418 3419 3420 *rule_nr = 80; 3421 peer = device->p_uuid[UI_CURRENT] & ~((u64)1); 3422 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 3423 self = device->ldev->md.uuid[i] & ~((u64)1); 3424 if (self == peer) 3425 return 2; 3426 } 3427 3428 *rule_nr = 90; 3429 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 3430 peer = device->p_uuid[UI_BITMAP] & ~((u64)1); 3431 if (self == peer && self != ((u64)0)) 3432 return 100; 3433 3434 *rule_nr = 100; 3435 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 3436 self = device->ldev->md.uuid[i] & ~((u64)1); 3437 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { 3438 peer = device->p_uuid[j] & ~((u64)1); 3439 if (self == peer) 3440 return -100; 3441 } 3442 } 3443 3444 return -1000; 3445 } 3446 3447 /* drbd_sync_handshake() returns the new conn state on success, or 3448 CONN_MASK (-1) on failure. 3449 */ 3450 static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, 3451 enum drbd_role peer_role, 3452 enum drbd_disk_state peer_disk) __must_hold(local) 3453 { 3454 struct drbd_device *device = peer_device->device; 3455 enum drbd_conns rv = C_MASK; 3456 enum drbd_disk_state mydisk; 3457 struct net_conf *nc; 3458 int hg, rule_nr, rr_conflict, tentative, always_asbp; 3459 3460 mydisk = device->state.disk; 3461 if (mydisk == D_NEGOTIATING) 3462 mydisk = device->new_state_tmp.disk; 3463 3464 drbd_info(device, "drbd_sync_handshake:\n"); 3465 3466 spin_lock_irq(&device->ldev->md.uuid_lock); 3467 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0); 3468 drbd_uuid_dump(device, "peer", device->p_uuid, 3469 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); 3470 3471 hg = drbd_uuid_compare(peer_device, peer_role, &rule_nr); 3472 spin_unlock_irq(&device->ldev->md.uuid_lock); 3473 3474 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr); 3475 3476 if (hg == -1000) { 3477 drbd_alert(device, "Unrelated data, aborting!\n"); 3478 return C_MASK; 3479 } 3480 if (hg < -0x10000) { 3481 int proto, fflags; 3482 hg = -hg; 3483 proto = hg & 0xff; 3484 fflags = (hg >> 8) & 0xff; 3485 drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n", 3486 proto, fflags); 3487 return C_MASK; 3488 } 3489 if (hg < -1000) { 3490 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); 3491 return C_MASK; 3492 } 3493 3494 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || 3495 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { 3496 int f = (hg == -100) || abs(hg) == 2; 3497 hg = mydisk > D_INCONSISTENT ? 1 : -1; 3498 if (f) 3499 hg = hg*2; 3500 drbd_info(device, "Becoming sync %s due to disk states.\n", 3501 hg > 0 ? 
"source" : "target"); 3502 } 3503 3504 if (abs(hg) == 100) 3505 drbd_khelper(device, "initial-split-brain"); 3506 3507 rcu_read_lock(); 3508 nc = rcu_dereference(peer_device->connection->net_conf); 3509 always_asbp = nc->always_asbp; 3510 rr_conflict = nc->rr_conflict; 3511 tentative = nc->tentative; 3512 rcu_read_unlock(); 3513 3514 if (hg == 100 || (hg == -100 && always_asbp)) { 3515 int pcount = (device->state.role == R_PRIMARY) 3516 + (peer_role == R_PRIMARY); 3517 int forced = (hg == -100); 3518 3519 switch (pcount) { 3520 case 0: 3521 hg = drbd_asb_recover_0p(peer_device); 3522 break; 3523 case 1: 3524 hg = drbd_asb_recover_1p(peer_device); 3525 break; 3526 case 2: 3527 hg = drbd_asb_recover_2p(peer_device); 3528 break; 3529 } 3530 if (abs(hg) < 100) { 3531 drbd_warn(device, "Split-Brain detected, %d primaries, " 3532 "automatically solved. Sync from %s node\n", 3533 pcount, (hg < 0) ? "peer" : "this"); 3534 if (forced) { 3535 drbd_warn(device, "Doing a full sync, since" 3536 " UUIDs where ambiguous.\n"); 3537 hg = hg*2; 3538 } 3539 } 3540 } 3541 3542 if (hg == -100) { 3543 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1)) 3544 hg = -1; 3545 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1)) 3546 hg = 1; 3547 3548 if (abs(hg) < 100) 3549 drbd_warn(device, "Split-Brain detected, manually solved. " 3550 "Sync from %s node\n", 3551 (hg < 0) ? "peer" : "this"); 3552 } 3553 3554 if (hg == -100) { 3555 /* FIXME this log message is not correct if we end up here 3556 * after an attempted attach on a diskless node. 3557 * We just refuse to attach -- well, we drop the "connection" 3558 * to that disk, in a way... */ 3559 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n"); 3560 drbd_khelper(device, "split-brain"); 3561 return C_MASK; 3562 } 3563 3564 if (hg > 0 && mydisk <= D_INCONSISTENT) { 3565 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n"); 3566 return C_MASK; 3567 } 3568 3569 if (hg < 0 && /* by intention we do not use mydisk here. */ 3570 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) { 3571 switch (rr_conflict) { 3572 case ASB_CALL_HELPER: 3573 drbd_khelper(device, "pri-lost"); 3574 fallthrough; 3575 case ASB_DISCONNECT: 3576 drbd_err(device, "I shall become SyncTarget, but I am primary!\n"); 3577 return C_MASK; 3578 case ASB_VIOLENTLY: 3579 drbd_warn(device, "Becoming SyncTarget, violating the stable-data" 3580 "assumption\n"); 3581 } 3582 } 3583 3584 if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) { 3585 if (hg == 0) 3586 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n"); 3587 else 3588 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.", 3589 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), 3590 abs(hg) >= 2 ? "full" : "bit-map based"); 3591 return C_MASK; 3592 } 3593 3594 if (abs(hg) >= 2) { 3595 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 3596 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", 3597 BM_LOCKED_SET_ALLOWED, NULL)) 3598 return C_MASK; 3599 } 3600 3601 if (hg > 0) { /* become sync source. 
*/ 3602 rv = C_WF_BITMAP_S; 3603 } else if (hg < 0) { /* become sync target */ 3604 rv = C_WF_BITMAP_T; 3605 } else { 3606 rv = C_CONNECTED; 3607 if (drbd_bm_total_weight(device)) { 3608 drbd_info(device, "No resync, but %lu bits in bitmap!\n", 3609 drbd_bm_total_weight(device)); 3610 } 3611 } 3612 3613 return rv; 3614 } 3615 3616 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer) 3617 { 3618 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ 3619 if (peer == ASB_DISCARD_REMOTE) 3620 return ASB_DISCARD_LOCAL; 3621 3622 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ 3623 if (peer == ASB_DISCARD_LOCAL) 3624 return ASB_DISCARD_REMOTE; 3625 3626 /* everything else is valid if they are equal on both sides. */ 3627 return peer; 3628 } 3629 3630 static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi) 3631 { 3632 struct p_protocol *p = pi->data; 3633 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 3634 int p_proto, p_discard_my_data, p_two_primaries, cf; 3635 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL; 3636 char integrity_alg[SHARED_SECRET_MAX] = ""; 3637 struct crypto_shash *peer_integrity_tfm = NULL; 3638 void *int_dig_in = NULL, *int_dig_vv = NULL; 3639 3640 p_proto = be32_to_cpu(p->protocol); 3641 p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 3642 p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 3643 p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 3644 p_two_primaries = be32_to_cpu(p->two_primaries); 3645 cf = be32_to_cpu(p->conn_flags); 3646 p_discard_my_data = cf & CF_DISCARD_MY_DATA; 3647 3648 if (connection->agreed_pro_version >= 87) { 3649 int err; 3650 3651 if (pi->size > sizeof(integrity_alg)) 3652 return -EIO; 3653 err = drbd_recv_all(connection, integrity_alg, pi->size); 3654 if (err) 3655 return err; 3656 integrity_alg[SHARED_SECRET_MAX - 1] = 0; 3657 } 3658 3659 if (pi->cmd != P_PROTOCOL_UPDATE) { 3660 clear_bit(CONN_DRY_RUN, &connection->flags); 3661 3662 if (cf & CF_DRY_RUN) 3663 set_bit(CONN_DRY_RUN, &connection->flags); 3664 3665 rcu_read_lock(); 3666 nc = rcu_dereference(connection->net_conf); 3667 3668 if (p_proto != nc->wire_protocol) { 3669 drbd_err(connection, "incompatible %s settings\n", "protocol"); 3670 goto disconnect_rcu_unlock; 3671 } 3672 3673 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) { 3674 drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri"); 3675 goto disconnect_rcu_unlock; 3676 } 3677 3678 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) { 3679 drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri"); 3680 goto disconnect_rcu_unlock; 3681 } 3682 3683 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) { 3684 drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri"); 3685 goto disconnect_rcu_unlock; 3686 } 3687 3688 if (p_discard_my_data && nc->discard_my_data) { 3689 drbd_err(connection, "incompatible %s settings\n", "discard-my-data"); 3690 goto disconnect_rcu_unlock; 3691 } 3692 3693 if (p_two_primaries != nc->two_primaries) { 3694 drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries"); 3695 goto disconnect_rcu_unlock; 3696 } 3697 3698 if (strcmp(integrity_alg, nc->integrity_alg)) { 3699 drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg"); 3700 goto disconnect_rcu_unlock; 3701 } 3702 3703 rcu_read_unlock(); 3704 } 3705 3706 if (integrity_alg[0]) { 3707 int hash_size; 3708 3709 /* 3710 * We can only change the peer data integrity algorithm 3711 * 
here. Changing our own data integrity algorithm 3712 * requires that we send a P_PROTOCOL_UPDATE packet at 3713 * the same time; otherwise, the peer has no way to 3714 * tell between which packets the algorithm should 3715 * change. 3716 */ 3717 3718 peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0); 3719 if (IS_ERR(peer_integrity_tfm)) { 3720 peer_integrity_tfm = NULL; 3721 drbd_err(connection, "peer data-integrity-alg %s not supported\n", 3722 integrity_alg); 3723 goto disconnect; 3724 } 3725 3726 hash_size = crypto_shash_digestsize(peer_integrity_tfm); 3727 int_dig_in = kmalloc(hash_size, GFP_KERNEL); 3728 int_dig_vv = kmalloc(hash_size, GFP_KERNEL); 3729 if (!(int_dig_in && int_dig_vv)) { 3730 drbd_err(connection, "Allocation of buffers for data integrity checking failed\n"); 3731 goto disconnect; 3732 } 3733 } 3734 3735 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); 3736 if (!new_net_conf) 3737 goto disconnect; 3738 3739 mutex_lock(&connection->data.mutex); 3740 mutex_lock(&connection->resource->conf_update); 3741 old_net_conf = connection->net_conf; 3742 *new_net_conf = *old_net_conf; 3743 3744 new_net_conf->wire_protocol = p_proto; 3745 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p); 3746 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p); 3747 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p); 3748 new_net_conf->two_primaries = p_two_primaries; 3749 3750 rcu_assign_pointer(connection->net_conf, new_net_conf); 3751 mutex_unlock(&connection->resource->conf_update); 3752 mutex_unlock(&connection->data.mutex); 3753 3754 crypto_free_shash(connection->peer_integrity_tfm); 3755 kfree(connection->int_dig_in); 3756 kfree(connection->int_dig_vv); 3757 connection->peer_integrity_tfm = peer_integrity_tfm; 3758 connection->int_dig_in = int_dig_in; 3759 connection->int_dig_vv = int_dig_vv; 3760 3761 if (strcmp(old_net_conf->integrity_alg, integrity_alg)) 3762 drbd_info(connection, "peer data-integrity-alg: %s\n", 3763 integrity_alg[0] ? integrity_alg : "(none)"); 3764 3765 kvfree_rcu_mightsleep(old_net_conf); 3766 return 0; 3767 3768 disconnect_rcu_unlock: 3769 rcu_read_unlock(); 3770 disconnect: 3771 crypto_free_shash(peer_integrity_tfm); 3772 kfree(int_dig_in); 3773 kfree(int_dig_vv); 3774 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 3775 return -EIO; 3776 } 3777 3778 /* helper function 3779 * input: alg name, feature name 3780 * return: NULL (alg name was "") 3781 * ERR_PTR(error) if something goes wrong 3782 * or the crypto hash ptr, if it worked out ok. 
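 *
 * Typical use, slightly simplified from receive_SyncParam() below:
 *	tfm = drbd_crypto_alloc_digest_safe(device, p->verify_alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;
 * A NULL return just means the peer did not name an algorithm for this
 * feature, which is not an error.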
*/ 3783 static struct crypto_shash *drbd_crypto_alloc_digest_safe( 3784 const struct drbd_device *device, 3785 const char *alg, const char *name) 3786 { 3787 struct crypto_shash *tfm; 3788 3789 if (!alg[0]) 3790 return NULL; 3791 3792 tfm = crypto_alloc_shash(alg, 0, 0); 3793 if (IS_ERR(tfm)) { 3794 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n", 3795 alg, name, PTR_ERR(tfm)); 3796 return tfm; 3797 } 3798 return tfm; 3799 } 3800 3801 static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi) 3802 { 3803 void *buffer = connection->data.rbuf; 3804 int size = pi->size; 3805 3806 while (size) { 3807 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE); 3808 s = drbd_recv(connection, buffer, s); 3809 if (s <= 0) { 3810 if (s < 0) 3811 return s; 3812 break; 3813 } 3814 size -= s; 3815 } 3816 if (size) 3817 return -EIO; 3818 return 0; 3819 } 3820 3821 /* 3822 * config_unknown_volume - device configuration command for unknown volume 3823 * 3824 * When a device is added to an existing connection, the node on which the 3825 * device is added first will send configuration commands to its peer but the 3826 * peer will not know about the device yet. It will warn and ignore these 3827 * commands. Once the device is added on the second node, the second node will 3828 * send the same device configuration commands, but in the other direction. 3829 * 3830 * (We can also end up here if drbd is misconfigured.) 3831 */ 3832 static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi) 3833 { 3834 drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n", 3835 cmdname(pi->cmd), pi->vnr); 3836 return ignore_remaining_packet(connection, pi); 3837 } 3838 3839 static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi) 3840 { 3841 struct drbd_peer_device *peer_device; 3842 struct drbd_device *device; 3843 struct p_rs_param_95 *p; 3844 unsigned int header_size, data_size, exp_max_sz; 3845 struct crypto_shash *verify_tfm = NULL; 3846 struct crypto_shash *csums_tfm = NULL; 3847 struct net_conf *old_net_conf, *new_net_conf = NULL; 3848 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; 3849 const int apv = connection->agreed_pro_version; 3850 struct fifo_buffer *old_plan = NULL, *new_plan = NULL; 3851 unsigned int fifo_size = 0; 3852 int err; 3853 3854 peer_device = conn_peer_device(connection, pi->vnr); 3855 if (!peer_device) 3856 return config_unknown_volume(connection, pi); 3857 device = peer_device->device; 3858 3859 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 3860 : apv == 88 ? sizeof(struct p_rs_param) 3861 + SHARED_SECRET_MAX 3862 : apv <= 94 ? 
sizeof(struct p_rs_param_89) 3863 : /* apv >= 95 */ sizeof(struct p_rs_param_95); 3864 3865 if (pi->size > exp_max_sz) { 3866 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n", 3867 pi->size, exp_max_sz); 3868 return -EIO; 3869 } 3870 3871 if (apv <= 88) { 3872 header_size = sizeof(struct p_rs_param); 3873 data_size = pi->size - header_size; 3874 } else if (apv <= 94) { 3875 header_size = sizeof(struct p_rs_param_89); 3876 data_size = pi->size - header_size; 3877 D_ASSERT(device, data_size == 0); 3878 } else { 3879 header_size = sizeof(struct p_rs_param_95); 3880 data_size = pi->size - header_size; 3881 D_ASSERT(device, data_size == 0); 3882 } 3883 3884 /* initialize verify_alg and csums_alg */ 3885 p = pi->data; 3886 BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX); 3887 memset(&p->algs, 0, sizeof(p->algs)); 3888 3889 err = drbd_recv_all(peer_device->connection, p, header_size); 3890 if (err) 3891 return err; 3892 3893 mutex_lock(&connection->resource->conf_update); 3894 old_net_conf = peer_device->connection->net_conf; 3895 if (get_ldev(device)) { 3896 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 3897 if (!new_disk_conf) { 3898 put_ldev(device); 3899 mutex_unlock(&connection->resource->conf_update); 3900 drbd_err(device, "Allocation of new disk_conf failed\n"); 3901 return -ENOMEM; 3902 } 3903 3904 old_disk_conf = device->ldev->disk_conf; 3905 *new_disk_conf = *old_disk_conf; 3906 3907 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate); 3908 } 3909 3910 if (apv >= 88) { 3911 if (apv == 88) { 3912 if (data_size > SHARED_SECRET_MAX || data_size == 0) { 3913 drbd_err(device, "verify-alg of wrong size, " 3914 "peer wants %u, accepting only up to %u byte\n", 3915 data_size, SHARED_SECRET_MAX); 3916 goto reconnect; 3917 } 3918 3919 err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size); 3920 if (err) 3921 goto reconnect; 3922 /* we expect NUL terminated string */ 3923 /* but just in case someone tries to be evil */ 3924 D_ASSERT(device, p->verify_alg[data_size-1] == 0); 3925 p->verify_alg[data_size-1] = 0; 3926 3927 } else /* apv >= 89 */ { 3928 /* we still expect NUL terminated strings */ 3929 /* but just in case someone tries to be evil */ 3930 D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0); 3931 D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0); 3932 p->verify_alg[SHARED_SECRET_MAX-1] = 0; 3933 p->csums_alg[SHARED_SECRET_MAX-1] = 0; 3934 } 3935 3936 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) { 3937 if (device->state.conn == C_WF_REPORT_PARAMS) { 3938 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", 3939 old_net_conf->verify_alg, p->verify_alg); 3940 goto disconnect; 3941 } 3942 verify_tfm = drbd_crypto_alloc_digest_safe(device, 3943 p->verify_alg, "verify-alg"); 3944 if (IS_ERR(verify_tfm)) { 3945 verify_tfm = NULL; 3946 goto disconnect; 3947 } 3948 } 3949 3950 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) { 3951 if (device->state.conn == C_WF_REPORT_PARAMS) { 3952 drbd_err(device, "Different csums-alg settings. 
me=\"%s\" peer=\"%s\"\n", 3953 old_net_conf->csums_alg, p->csums_alg); 3954 goto disconnect; 3955 } 3956 csums_tfm = drbd_crypto_alloc_digest_safe(device, 3957 p->csums_alg, "csums-alg"); 3958 if (IS_ERR(csums_tfm)) { 3959 csums_tfm = NULL; 3960 goto disconnect; 3961 } 3962 } 3963 3964 if (apv > 94 && new_disk_conf) { 3965 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead); 3966 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target); 3967 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target); 3968 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate); 3969 3970 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ; 3971 if (fifo_size != device->rs_plan_s->size) { 3972 new_plan = fifo_alloc(fifo_size); 3973 if (!new_plan) { 3974 drbd_err(device, "kmalloc of fifo_buffer failed"); 3975 put_ldev(device); 3976 goto disconnect; 3977 } 3978 } 3979 } 3980 3981 if (verify_tfm || csums_tfm) { 3982 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 3983 if (!new_net_conf) 3984 goto disconnect; 3985 3986 *new_net_conf = *old_net_conf; 3987 3988 if (verify_tfm) { 3989 strcpy(new_net_conf->verify_alg, p->verify_alg); 3990 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; 3991 crypto_free_shash(peer_device->connection->verify_tfm); 3992 peer_device->connection->verify_tfm = verify_tfm; 3993 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); 3994 } 3995 if (csums_tfm) { 3996 strcpy(new_net_conf->csums_alg, p->csums_alg); 3997 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; 3998 crypto_free_shash(peer_device->connection->csums_tfm); 3999 peer_device->connection->csums_tfm = csums_tfm; 4000 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg); 4001 } 4002 rcu_assign_pointer(connection->net_conf, new_net_conf); 4003 } 4004 } 4005 4006 if (new_disk_conf) { 4007 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 4008 put_ldev(device); 4009 } 4010 4011 if (new_plan) { 4012 old_plan = device->rs_plan_s; 4013 rcu_assign_pointer(device->rs_plan_s, new_plan); 4014 } 4015 4016 mutex_unlock(&connection->resource->conf_update); 4017 synchronize_rcu(); 4018 if (new_net_conf) 4019 kfree(old_net_conf); 4020 kfree(old_disk_conf); 4021 kfree(old_plan); 4022 4023 return 0; 4024 4025 reconnect: 4026 if (new_disk_conf) { 4027 put_ldev(device); 4028 kfree(new_disk_conf); 4029 } 4030 mutex_unlock(&connection->resource->conf_update); 4031 return -EIO; 4032 4033 disconnect: 4034 kfree(new_plan); 4035 if (new_disk_conf) { 4036 put_ldev(device); 4037 kfree(new_disk_conf); 4038 } 4039 mutex_unlock(&connection->resource->conf_update); 4040 /* just for completeness: actually not needed, 4041 * as this is not reached if csums_tfm was ok. */ 4042 crypto_free_shash(csums_tfm); 4043 /* but free the verify_tfm again, if csums_tfm did not work out */ 4044 crypto_free_shash(verify_tfm); 4045 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4046 return -EIO; 4047 } 4048 4049 /* warn if the arguments differ by more than 12.5% */ 4050 static void warn_if_differ_considerably(struct drbd_device *device, 4051 const char *s, sector_t a, sector_t b) 4052 { 4053 sector_t d; 4054 if (a == 0 || b == 0) 4055 return; 4056 d = (a > b) ? (a - b) : (b - a); 4057 if (d > (a>>3) || d > (b>>3)) 4058 drbd_warn(device, "Considerable difference in %s: %llus vs. 
%llus\n", s, 4059 (unsigned long long)a, (unsigned long long)b); 4060 } 4061 4062 static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi) 4063 { 4064 struct drbd_peer_device *peer_device; 4065 struct drbd_device *device; 4066 struct p_sizes *p = pi->data; 4067 struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL; 4068 enum determine_dev_size dd = DS_UNCHANGED; 4069 sector_t p_size, p_usize, p_csize, my_usize; 4070 sector_t new_size, cur_size; 4071 int ldsc = 0; /* local disk size changed */ 4072 enum dds_flags ddsf; 4073 4074 peer_device = conn_peer_device(connection, pi->vnr); 4075 if (!peer_device) 4076 return config_unknown_volume(connection, pi); 4077 device = peer_device->device; 4078 cur_size = get_capacity(device->vdisk); 4079 4080 p_size = be64_to_cpu(p->d_size); 4081 p_usize = be64_to_cpu(p->u_size); 4082 p_csize = be64_to_cpu(p->c_size); 4083 4084 /* just store the peer's disk size for now. 4085 * we still need to figure out whether we accept that. */ 4086 device->p_size = p_size; 4087 4088 if (get_ldev(device)) { 4089 rcu_read_lock(); 4090 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size; 4091 rcu_read_unlock(); 4092 4093 warn_if_differ_considerably(device, "lower level device sizes", 4094 p_size, drbd_get_max_capacity(device->ldev)); 4095 warn_if_differ_considerably(device, "user requested size", 4096 p_usize, my_usize); 4097 4098 /* if this is the first connect, or an otherwise expected 4099 * param exchange, choose the minimum */ 4100 if (device->state.conn == C_WF_REPORT_PARAMS) 4101 p_usize = min_not_zero(my_usize, p_usize); 4102 4103 /* Never shrink a device with usable data during connect, 4104 * or "attach" on the peer. 4105 * But allow online shrinking if we are connected. */ 4106 new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0); 4107 if (new_size < cur_size && 4108 device->state.disk >= D_OUTDATED && 4109 (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) { 4110 drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n", 4111 (unsigned long long)new_size, (unsigned long long)cur_size); 4112 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4113 put_ldev(device); 4114 return -EIO; 4115 } 4116 4117 if (my_usize != p_usize) { 4118 struct disk_conf *old_disk_conf, *new_disk_conf = NULL; 4119 4120 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 4121 if (!new_disk_conf) { 4122 put_ldev(device); 4123 return -ENOMEM; 4124 } 4125 4126 mutex_lock(&connection->resource->conf_update); 4127 old_disk_conf = device->ldev->disk_conf; 4128 *new_disk_conf = *old_disk_conf; 4129 new_disk_conf->disk_size = p_usize; 4130 4131 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 4132 mutex_unlock(&connection->resource->conf_update); 4133 kvfree_rcu_mightsleep(old_disk_conf); 4134 4135 drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n", 4136 (unsigned long)p_usize, (unsigned long)my_usize); 4137 } 4138 4139 put_ldev(device); 4140 } 4141 4142 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); 4143 /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size(). 4144 In case we cleared the QUEUE_FLAG_DISCARD from our queue in 4145 drbd_reconsider_queue_parameters(), we can be sure that after 4146 drbd_determine_dev_size() no REQ_DISCARDs are in the queue. 
*/ 4147 4148 ddsf = be16_to_cpu(p->dds_flags); 4149 if (get_ldev(device)) { 4150 drbd_reconsider_queue_parameters(device, device->ldev, o); 4151 dd = drbd_determine_dev_size(device, ddsf, NULL); 4152 put_ldev(device); 4153 if (dd == DS_ERROR) 4154 return -EIO; 4155 drbd_md_sync(device); 4156 } else { 4157 /* 4158 * I am diskless, need to accept the peer's *current* size. 4159 * I must NOT accept the peers backing disk size, 4160 * it may have been larger than mine all along... 4161 * 4162 * At this point, the peer knows more about my disk, or at 4163 * least about what we last agreed upon, than myself. 4164 * So if his c_size is less than his d_size, the most likely 4165 * reason is that *my* d_size was smaller last time we checked. 4166 * 4167 * However, if he sends a zero current size, 4168 * take his (user-capped or) backing disk size anyways. 4169 * 4170 * Unless of course he does not have a disk himself. 4171 * In which case we ignore this completely. 4172 */ 4173 sector_t new_size = p_csize ?: p_usize ?: p_size; 4174 drbd_reconsider_queue_parameters(device, NULL, o); 4175 if (new_size == 0) { 4176 /* Ignore, peer does not know nothing. */ 4177 } else if (new_size == cur_size) { 4178 /* nothing to do */ 4179 } else if (cur_size != 0 && p_size == 0) { 4180 drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n", 4181 (unsigned long long)new_size, (unsigned long long)cur_size); 4182 } else if (new_size < cur_size && device->state.role == R_PRIMARY) { 4183 drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n", 4184 (unsigned long long)new_size, (unsigned long long)cur_size); 4185 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4186 return -EIO; 4187 } else { 4188 /* I believe the peer, if 4189 * - I don't have a current size myself 4190 * - we agree on the size anyways 4191 * - I do have a current size, am Secondary, 4192 * and he has the only disk 4193 * - I do have a current size, am Primary, 4194 * and he has the only disk, 4195 * which is larger than my current size 4196 */ 4197 drbd_set_my_capacity(device, new_size); 4198 } 4199 } 4200 4201 if (get_ldev(device)) { 4202 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) { 4203 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); 4204 ldsc = 1; 4205 } 4206 4207 put_ldev(device); 4208 } 4209 4210 if (device->state.conn > C_WF_REPORT_PARAMS) { 4211 if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) || 4212 ldsc) { 4213 /* we have different sizes, probably peer 4214 * needs to know my new size... 
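 * so reply with our own sizes (drbd_send_sizes()) right away.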
*/ 4215 drbd_send_sizes(peer_device, 0, ddsf); 4216 } 4217 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) || 4218 (dd == DS_GREW && device->state.conn == C_CONNECTED)) { 4219 if (device->state.pdsk >= D_INCONSISTENT && 4220 device->state.disk >= D_INCONSISTENT) { 4221 if (ddsf & DDSF_NO_RESYNC) 4222 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n"); 4223 else 4224 resync_after_online_grow(device); 4225 } else 4226 set_bit(RESYNC_AFTER_NEG, &device->flags); 4227 } 4228 } 4229 4230 return 0; 4231 } 4232 4233 static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi) 4234 { 4235 struct drbd_peer_device *peer_device; 4236 struct drbd_device *device; 4237 struct p_uuids *p = pi->data; 4238 u64 *p_uuid; 4239 int i, updated_uuids = 0; 4240 4241 peer_device = conn_peer_device(connection, pi->vnr); 4242 if (!peer_device) 4243 return config_unknown_volume(connection, pi); 4244 device = peer_device->device; 4245 4246 p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO); 4247 if (!p_uuid) 4248 return false; 4249 4250 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 4251 p_uuid[i] = be64_to_cpu(p->uuid[i]); 4252 4253 kfree(device->p_uuid); 4254 device->p_uuid = p_uuid; 4255 4256 if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) && 4257 device->state.disk < D_INCONSISTENT && 4258 device->state.role == R_PRIMARY && 4259 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 4260 drbd_err(device, "Can only connect to data with current UUID=%016llX\n", 4261 (unsigned long long)device->ed_uuid); 4262 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4263 return -EIO; 4264 } 4265 4266 if (get_ldev(device)) { 4267 int skip_initial_sync = 4268 device->state.conn == C_CONNECTED && 4269 peer_device->connection->agreed_pro_version >= 90 && 4270 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 4271 (p_uuid[UI_FLAGS] & 8); 4272 if (skip_initial_sync) { 4273 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n"); 4274 drbd_bitmap_io(device, &drbd_bmio_clear_n_write, 4275 "clear_n_write from receive_uuids", 4276 BM_LOCKED_TEST_ALLOWED, NULL); 4277 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]); 4278 _drbd_uuid_set(device, UI_BITMAP, 0); 4279 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 4280 CS_VERBOSE, NULL); 4281 drbd_md_sync(device); 4282 updated_uuids = 1; 4283 } 4284 put_ldev(device); 4285 } else if (device->state.disk < D_INCONSISTENT && 4286 device->state.role == R_PRIMARY) { 4287 /* I am a diskless primary, the peer just created a new current UUID 4288 for me. */ 4289 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); 4290 } 4291 4292 /* Before we test for the disk state, we should wait until an eventually 4293 ongoing cluster wide state change is finished. That is important if 4294 we are primary and are detaching from our disk. We need to see the 4295 new disk state... */ 4296 mutex_lock(device->state_mutex); 4297 mutex_unlock(device->state_mutex); 4298 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT) 4299 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); 4300 4301 if (updated_uuids) 4302 drbd_print_uuids(device, "receiver updated UUIDs to"); 4303 4304 return 0; 4305 } 4306 4307 /** 4308 * convert_state() - Converts the peer's view of the cluster state to our point of view 4309 * @ps: The state as seen by the peer. 
4310 */ 4311 static union drbd_state convert_state(union drbd_state ps) 4312 { 4313 union drbd_state ms; 4314 4315 static enum drbd_conns c_tab[] = { 4316 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS, 4317 [C_CONNECTED] = C_CONNECTED, 4318 4319 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, 4320 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, 4321 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ 4322 [C_VERIFY_S] = C_VERIFY_T, 4323 [C_MASK] = C_MASK, 4324 }; 4325 4326 ms.i = ps.i; 4327 4328 ms.conn = c_tab[ps.conn]; 4329 ms.peer = ps.role; 4330 ms.role = ps.peer; 4331 ms.pdsk = ps.disk; 4332 ms.disk = ps.pdsk; 4333 ms.peer_isp = (ps.aftr_isp | ps.user_isp); 4334 4335 return ms; 4336 } 4337 4338 static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi) 4339 { 4340 struct drbd_peer_device *peer_device; 4341 struct drbd_device *device; 4342 struct p_req_state *p = pi->data; 4343 union drbd_state mask, val; 4344 enum drbd_state_rv rv; 4345 4346 peer_device = conn_peer_device(connection, pi->vnr); 4347 if (!peer_device) 4348 return -EIO; 4349 device = peer_device->device; 4350 4351 mask.i = be32_to_cpu(p->mask); 4352 val.i = be32_to_cpu(p->val); 4353 4354 if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) && 4355 mutex_is_locked(device->state_mutex)) { 4356 drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG); 4357 return 0; 4358 } 4359 4360 mask = convert_state(mask); 4361 val = convert_state(val); 4362 4363 rv = drbd_change_state(device, CS_VERBOSE, mask, val); 4364 drbd_send_sr_reply(peer_device, rv); 4365 4366 drbd_md_sync(device); 4367 4368 return 0; 4369 } 4370 4371 static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi) 4372 { 4373 struct p_req_state *p = pi->data; 4374 union drbd_state mask, val; 4375 enum drbd_state_rv rv; 4376 4377 mask.i = be32_to_cpu(p->mask); 4378 val.i = be32_to_cpu(p->val); 4379 4380 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) && 4381 mutex_is_locked(&connection->cstate_mutex)) { 4382 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG); 4383 return 0; 4384 } 4385 4386 mask = convert_state(mask); 4387 val = convert_state(val); 4388 4389 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL); 4390 conn_send_sr_reply(connection, rv); 4391 4392 return 0; 4393 } 4394 4395 static int receive_state(struct drbd_connection *connection, struct packet_info *pi) 4396 { 4397 struct drbd_peer_device *peer_device; 4398 struct drbd_device *device; 4399 struct p_state *p = pi->data; 4400 union drbd_state os, ns, peer_state; 4401 enum drbd_disk_state real_peer_disk; 4402 enum chg_state_flags cs_flags; 4403 int rv; 4404 4405 peer_device = conn_peer_device(connection, pi->vnr); 4406 if (!peer_device) 4407 return config_unknown_volume(connection, pi); 4408 device = peer_device->device; 4409 4410 peer_state.i = be32_to_cpu(p->state); 4411 4412 real_peer_disk = peer_state.disk; 4413 if (peer_state.disk == D_NEGOTIATING) { 4414 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; 4415 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 4416 } 4417 4418 spin_lock_irq(&device->resource->req_lock); 4419 retry: 4420 os = ns = drbd_read_state(device); 4421 spin_unlock_irq(&device->resource->req_lock); 4422 4423 /* If some other part of the code (ack_receiver thread, timeout) 4424 * already decided to close the connection again, 4425 * we must not "re-establish" it here. 
*/ 4426 if (os.conn <= C_TEAR_DOWN) 4427 return -ECONNRESET; 4428 4429 /* If this is the "end of sync" confirmation, usually the peer disk 4430 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits 4431 * set) resync started in PausedSyncT, or if the timing of pause-/ 4432 * unpause-sync events has been "just right", the peer disk may 4433 * transition from D_CONSISTENT to D_UP_TO_DATE as well. 4434 */ 4435 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) && 4436 real_peer_disk == D_UP_TO_DATE && 4437 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { 4438 /* If we are (becoming) SyncSource, but peer is still in sync 4439 * preparation, ignore its uptodate-ness to avoid flapping, it 4440 * will change to inconsistent once the peer reaches active 4441 * syncing states. 4442 * It may have changed syncer-paused flags, however, so we 4443 * cannot ignore this completely. */ 4444 if (peer_state.conn > C_CONNECTED && 4445 peer_state.conn < C_SYNC_SOURCE) 4446 real_peer_disk = D_INCONSISTENT; 4447 4448 /* if peer_state changes to connected at the same time, 4449 * it explicitly notifies us that it finished resync. 4450 * Maybe we should finish it up, too? */ 4451 else if (os.conn >= C_SYNC_SOURCE && 4452 peer_state.conn == C_CONNECTED) { 4453 if (drbd_bm_total_weight(device) <= device->rs_failed) 4454 drbd_resync_finished(peer_device); 4455 return 0; 4456 } 4457 } 4458 4459 /* explicit verify finished notification, stop sector reached. */ 4460 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE && 4461 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) { 4462 ov_out_of_sync_print(peer_device); 4463 drbd_resync_finished(peer_device); 4464 return 0; 4465 } 4466 4467 /* peer says his disk is inconsistent, while we think it is uptodate, 4468 * and this happens while the peer still thinks we have a sync going on, 4469 * but we think we are already done with the sync. 4470 * We ignore this to avoid flapping pdsk. 4471 * This should not happen, if the peer is a recent version of drbd. */ 4472 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && 4473 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) 4474 real_peer_disk = D_UP_TO_DATE; 4475 4476 if (ns.conn == C_WF_REPORT_PARAMS) 4477 ns.conn = C_CONNECTED; 4478 4479 if (peer_state.conn == C_AHEAD) 4480 ns.conn = C_BEHIND; 4481 4482 /* TODO: 4483 * if (primary and diskless and peer uuid != effective uuid) 4484 * abort attach on peer; 4485 * 4486 * If this node does not have good data, was already connected, but 4487 * the peer did a late attach only now, trying to "negotiate" with me, 4488 * AND I am currently Primary, possibly frozen, with some specific 4489 * "effective" uuid, this should never be reached, really, because 4490 * we first send the uuids, then the current state. 4491 * 4492 * In this scenario, we already dropped the connection hard 4493 * when we received the unsuitable uuids (receive_uuids(). 4494 * 4495 * Should we want to change this, that is: not drop the connection in 4496 * receive_uuids() already, then we would need to add a branch here 4497 * that aborts the attach of "unsuitable uuids" on the peer in case 4498 * this node is currently Diskless Primary. 
4499 */ 4500 4501 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING && 4502 get_ldev_if_state(device, D_NEGOTIATING)) { 4503 int cr; /* consider resync */ 4504 4505 /* if we established a new connection */ 4506 cr = (os.conn < C_CONNECTED); 4507 /* if we had an established connection 4508 * and one of the nodes newly attaches a disk */ 4509 cr |= (os.conn == C_CONNECTED && 4510 (peer_state.disk == D_NEGOTIATING || 4511 os.disk == D_NEGOTIATING)); 4512 /* if we have both been inconsistent, and the peer has been 4513 * forced to be UpToDate with --force */ 4514 cr |= test_bit(CONSIDER_RESYNC, &device->flags); 4515 /* if we had been plain connected, and the admin requested to 4516 * start a sync by "invalidate" or "invalidate-remote" */ 4517 cr |= (os.conn == C_CONNECTED && 4518 (peer_state.conn >= C_STARTING_SYNC_S && 4519 peer_state.conn <= C_WF_BITMAP_T)); 4520 4521 if (cr) 4522 ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk); 4523 4524 put_ldev(device); 4525 if (ns.conn == C_MASK) { 4526 ns.conn = C_CONNECTED; 4527 if (device->state.disk == D_NEGOTIATING) { 4528 drbd_force_state(device, NS(disk, D_FAILED)); 4529 } else if (peer_state.disk == D_NEGOTIATING) { 4530 drbd_err(device, "Disk attach process on the peer node was aborted.\n"); 4531 peer_state.disk = D_DISKLESS; 4532 real_peer_disk = D_DISKLESS; 4533 } else { 4534 if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags)) 4535 return -EIO; 4536 D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS); 4537 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4538 return -EIO; 4539 } 4540 } 4541 } 4542 4543 spin_lock_irq(&device->resource->req_lock); 4544 if (os.i != drbd_read_state(device).i) 4545 goto retry; 4546 clear_bit(CONSIDER_RESYNC, &device->flags); 4547 ns.peer = peer_state.role; 4548 ns.pdsk = real_peer_disk; 4549 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); 4550 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) 4551 ns.disk = device->new_state_tmp.disk; 4552 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD); 4553 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && 4554 test_bit(NEW_CUR_UUID, &device->flags)) { 4555 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this 4556 for temporal network outages! */ 4557 spin_unlock_irq(&device->resource->req_lock); 4558 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); 4559 tl_clear(peer_device->connection); 4560 drbd_uuid_new_current(device); 4561 clear_bit(NEW_CUR_UUID, &device->flags); 4562 conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); 4563 return -EIO; 4564 } 4565 rv = _drbd_set_state(device, ns, cs_flags, NULL); 4566 ns = drbd_read_state(device); 4567 spin_unlock_irq(&device->resource->req_lock); 4568 4569 if (rv < SS_SUCCESS) { 4570 conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 4571 return -EIO; 4572 } 4573 4574 if (os.conn > C_WF_REPORT_PARAMS) { 4575 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED && 4576 peer_state.disk != D_NEGOTIATING ) { 4577 /* we want resync, peer has not yet decided to sync... 
*/ 4578 /* Nowadays only used when forcing a node into primary role and 4579 setting its disk to UpToDate with that */ 4580 drbd_send_uuids(peer_device); 4581 drbd_send_current_state(peer_device); 4582 } 4583 } 4584 4585 clear_bit(DISCARD_MY_DATA, &device->flags); 4586 4587 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */ 4588 4589 return 0; 4590 } 4591 4592 static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi) 4593 { 4594 struct drbd_peer_device *peer_device; 4595 struct drbd_device *device; 4596 struct p_rs_uuid *p = pi->data; 4597 4598 peer_device = conn_peer_device(connection, pi->vnr); 4599 if (!peer_device) 4600 return -EIO; 4601 device = peer_device->device; 4602 4603 wait_event(device->misc_wait, 4604 device->state.conn == C_WF_SYNC_UUID || 4605 device->state.conn == C_BEHIND || 4606 device->state.conn < C_CONNECTED || 4607 device->state.disk < D_NEGOTIATING); 4608 4609 /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */ 4610 4611 /* Here the _drbd_uuid_ functions are right, current should 4612 _not_ be rotated into the history */ 4613 if (get_ldev_if_state(device, D_NEGOTIATING)) { 4614 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid)); 4615 _drbd_uuid_set(device, UI_BITMAP, 0UL); 4616 4617 drbd_print_uuids(device, "updated sync uuid"); 4618 drbd_start_resync(device, C_SYNC_TARGET); 4619 4620 put_ldev(device); 4621 } else 4622 drbd_err(device, "Ignoring SyncUUID packet!\n"); 4623 4624 return 0; 4625 } 4626 4627 /* 4628 * receive_bitmap_plain 4629 * 4630 * Return 0 when done, 1 when another iteration is needed, and a negative error 4631 * code upon failure. 4632 */ 4633 static int 4634 receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size, 4635 unsigned long *p, struct bm_xfer_ctx *c) 4636 { 4637 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - 4638 drbd_header_size(peer_device->connection); 4639 unsigned int num_words = min_t(size_t, data_size / sizeof(*p), 4640 c->bm_words - c->word_offset); 4641 unsigned int want = num_words * sizeof(*p); 4642 int err; 4643 4644 if (want != size) { 4645 drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size); 4646 return -EIO; 4647 } 4648 if (want == 0) 4649 return 0; 4650 err = drbd_recv_all(peer_device->connection, p, want); 4651 if (err) 4652 return err; 4653 4654 drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p); 4655 4656 c->word_offset += num_words; 4657 c->bit_offset = c->word_offset * BITS_PER_LONG; 4658 if (c->bit_offset > c->bm_bits) 4659 c->bit_offset = c->bm_bits; 4660 4661 return 1; 4662 } 4663 4664 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p) 4665 { 4666 return (enum drbd_bitmap_code)(p->encoding & 0x0f); 4667 } 4668 4669 static int dcbp_get_start(struct p_compressed_bm *p) 4670 { 4671 return (p->encoding & 0x80) != 0; 4672 } 4673 4674 static int dcbp_get_pad_bits(struct p_compressed_bm *p) 4675 { 4676 return (p->encoding >> 4) & 0x7; 4677 } 4678 4679 /* 4680 * recv_bm_rle_bits 4681 * 4682 * Return 0 when done, 1 when another iteration is needed, and a negative error 4683 * code upon failure. 
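 *
 * The payload is a bit stream of VLI-encoded run lengths (see drbd_vli.h).
 * Runs alternately describe in-sync (clear) and out-of-sync (set) bits; the
 * polarity of the first run is carried in the encoding byte (dcbp_get_start()).
 * For example, a start value of 0 with run lengths 1000, 3, 500 describes
 * 1000 in-sync bits, 3 out-of-sync bits, then 500 in-sync bits.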
4684 */ 4685 static int 4686 recv_bm_rle_bits(struct drbd_peer_device *peer_device, 4687 struct p_compressed_bm *p, 4688 struct bm_xfer_ctx *c, 4689 unsigned int len) 4690 { 4691 struct bitstream bs; 4692 u64 look_ahead; 4693 u64 rl; 4694 u64 tmp; 4695 unsigned long s = c->bit_offset; 4696 unsigned long e; 4697 int toggle = dcbp_get_start(p); 4698 int have; 4699 int bits; 4700 4701 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p)); 4702 4703 bits = bitstream_get_bits(&bs, &look_ahead, 64); 4704 if (bits < 0) 4705 return -EIO; 4706 4707 for (have = bits; have > 0; s += rl, toggle = !toggle) { 4708 bits = vli_decode_bits(&rl, look_ahead); 4709 if (bits <= 0) 4710 return -EIO; 4711 4712 if (toggle) { 4713 e = s + rl -1; 4714 if (e >= c->bm_bits) { 4715 drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 4716 return -EIO; 4717 } 4718 _drbd_bm_set_bits(peer_device->device, s, e); 4719 } 4720 4721 if (have < bits) { 4722 drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 4723 have, bits, look_ahead, 4724 (unsigned int)(bs.cur.b - p->code), 4725 (unsigned int)bs.buf_len); 4726 return -EIO; 4727 } 4728 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */ 4729 if (likely(bits < 64)) 4730 look_ahead >>= bits; 4731 else 4732 look_ahead = 0; 4733 have -= bits; 4734 4735 bits = bitstream_get_bits(&bs, &tmp, 64 - have); 4736 if (bits < 0) 4737 return -EIO; 4738 look_ahead |= tmp << have; 4739 have += bits; 4740 } 4741 4742 c->bit_offset = s; 4743 bm_xfer_ctx_bit_to_word_offset(c); 4744 4745 return (s != c->bm_bits); 4746 } 4747 4748 /* 4749 * decode_bitmap_c 4750 * 4751 * Return 0 when done, 1 when another iteration is needed, and a negative error 4752 * code upon failure. 4753 */ 4754 static int 4755 decode_bitmap_c(struct drbd_peer_device *peer_device, 4756 struct p_compressed_bm *p, 4757 struct bm_xfer_ctx *c, 4758 unsigned int len) 4759 { 4760 if (dcbp_get_code(p) == RLE_VLI_Bits) 4761 return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p)); 4762 4763 /* other variants had been implemented for evaluation, 4764 * but have been dropped as this one turned out to be "best" 4765 * during all our tests. */ 4766 4767 drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding); 4768 conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 4769 return -EIO; 4770 } 4771 4772 void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device, 4773 const char *direction, struct bm_xfer_ctx *c) 4774 { 4775 /* what would it take to transfer it "plaintext" */ 4776 unsigned int header_size = drbd_header_size(peer_device->connection); 4777 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; 4778 unsigned int plain = 4779 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + 4780 c->bm_words * sizeof(unsigned long); 4781 unsigned int total = c->bytes[0] + c->bytes[1]; 4782 unsigned int r; 4783 4784 /* total can not be zero. but just in case: */ 4785 if (total == 0) 4786 return; 4787 4788 /* don't report if not compressed */ 4789 if (total >= plain) 4790 return; 4791 4792 /* total < plain. check for overflow, still */ 4793 r = (total > UINT_MAX/1000) ? 
(total / (plain/1000)) 4794 : (1000 * total / plain); 4795 4796 if (r > 1000) 4797 r = 1000; 4798 4799 r = 1000 - r; 4800 drbd_info(peer_device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " 4801 "total %u; compression: %u.%u%%\n", 4802 direction, 4803 c->bytes[1], c->packets[1], 4804 c->bytes[0], c->packets[0], 4805 total, r/10, r % 10); 4806 } 4807 4808 /* Since we are processing the bitfield from lower addresses to higher, 4809 it does not matter if the process it in 32 bit chunks or 64 bit 4810 chunks as long as it is little endian. (Understand it as byte stream, 4811 beginning with the lowest byte...) If we would use big endian 4812 we would need to process it from the highest address to the lowest, 4813 in order to be agnostic to the 32 vs 64 bits issue. 4814 4815 returns 0 on failure, 1 if we successfully received it. */ 4816 static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi) 4817 { 4818 struct drbd_peer_device *peer_device; 4819 struct drbd_device *device; 4820 struct bm_xfer_ctx c; 4821 int err; 4822 4823 peer_device = conn_peer_device(connection, pi->vnr); 4824 if (!peer_device) 4825 return -EIO; 4826 device = peer_device->device; 4827 4828 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED); 4829 /* you are supposed to send additional out-of-sync information 4830 * if you actually set bits during this phase */ 4831 4832 c = (struct bm_xfer_ctx) { 4833 .bm_bits = drbd_bm_bits(device), 4834 .bm_words = drbd_bm_words(device), 4835 }; 4836 4837 for(;;) { 4838 if (pi->cmd == P_BITMAP) 4839 err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c); 4840 else if (pi->cmd == P_COMPRESSED_BITMAP) { 4841 /* MAYBE: sanity check that we speak proto >= 90, 4842 * and the feature is enabled! */ 4843 struct p_compressed_bm *p = pi->data; 4844 4845 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) { 4846 drbd_err(device, "ReportCBitmap packet too large\n"); 4847 err = -EIO; 4848 goto out; 4849 } 4850 if (pi->size <= sizeof(*p)) { 4851 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size); 4852 err = -EIO; 4853 goto out; 4854 } 4855 err = drbd_recv_all(peer_device->connection, p, pi->size); 4856 if (err) 4857 goto out; 4858 err = decode_bitmap_c(peer_device, p, &c, pi->size); 4859 } else { 4860 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd); 4861 err = -EIO; 4862 goto out; 4863 } 4864 4865 c.packets[pi->cmd == P_BITMAP]++; 4866 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size; 4867 4868 if (err <= 0) { 4869 if (err < 0) 4870 goto out; 4871 break; 4872 } 4873 err = drbd_recv_header(peer_device->connection, pi); 4874 if (err) 4875 goto out; 4876 } 4877 4878 INFO_bm_xfer_stats(peer_device, "receive", &c); 4879 4880 if (device->state.conn == C_WF_BITMAP_T) { 4881 enum drbd_state_rv rv; 4882 4883 err = drbd_send_bitmap(device, peer_device); 4884 if (err) 4885 goto out; 4886 /* Omit CS_ORDERED with this state transition to avoid deadlocks. 
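Having answered with our own bitmap above, we move to C_WF_SYNC_UUID and wait for the peer's P_SYNC_UUID packet, which starts the actual resync in receive_sync_uuid().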
*/ 4887 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 4888 D_ASSERT(device, rv == SS_SUCCESS); 4889 } else if (device->state.conn != C_WF_BITMAP_S) { 4890 /* admin may have requested C_DISCONNECTING, 4891 * other threads may have noticed network errors */ 4892 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n", 4893 drbd_conn_str(device->state.conn)); 4894 } 4895 err = 0; 4896 4897 out: 4898 drbd_bm_unlock(device); 4899 if (!err && device->state.conn == C_WF_BITMAP_S) 4900 drbd_start_resync(device, C_SYNC_SOURCE); 4901 return err; 4902 } 4903 4904 static int receive_skip(struct drbd_connection *connection, struct packet_info *pi) 4905 { 4906 drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n", 4907 pi->cmd, pi->size); 4908 4909 return ignore_remaining_packet(connection, pi); 4910 } 4911 4912 static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi) 4913 { 4914 /* Make sure we've acked all the TCP data associated 4915 * with the data requests being unplugged */ 4916 tcp_sock_set_quickack(connection->data.socket->sk, 2); 4917 return 0; 4918 } 4919 4920 static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi) 4921 { 4922 struct drbd_peer_device *peer_device; 4923 struct drbd_device *device; 4924 struct p_block_desc *p = pi->data; 4925 4926 peer_device = conn_peer_device(connection, pi->vnr); 4927 if (!peer_device) 4928 return -EIO; 4929 device = peer_device->device; 4930 4931 switch (device->state.conn) { 4932 case C_WF_SYNC_UUID: 4933 case C_WF_BITMAP_T: 4934 case C_BEHIND: 4935 break; 4936 default: 4937 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", 4938 drbd_conn_str(device->state.conn)); 4939 } 4940 4941 drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); 4942 4943 return 0; 4944 } 4945 4946 static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi) 4947 { 4948 struct drbd_peer_device *peer_device; 4949 struct p_block_desc *p = pi->data; 4950 struct drbd_device *device; 4951 sector_t sector; 4952 int size, err = 0; 4953 4954 peer_device = conn_peer_device(connection, pi->vnr); 4955 if (!peer_device) 4956 return -EIO; 4957 device = peer_device->device; 4958 4959 sector = be64_to_cpu(p->sector); 4960 size = be32_to_cpu(p->blksize); 4961 4962 dec_rs_pending(peer_device); 4963 4964 if (get_ldev(device)) { 4965 struct drbd_peer_request *peer_req; 4966 4967 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, 4968 size, 0, GFP_NOIO); 4969 if (!peer_req) { 4970 put_ldev(device); 4971 return -ENOMEM; 4972 } 4973 4974 peer_req->w.cb = e_end_resync_block; 4975 peer_req->opf = REQ_OP_DISCARD; 4976 peer_req->submit_jif = jiffies; 4977 peer_req->flags |= EE_TRIM; 4978 4979 spin_lock_irq(&device->resource->req_lock); 4980 list_add_tail(&peer_req->w.list, &device->sync_ee); 4981 spin_unlock_irq(&device->resource->req_lock); 4982 4983 atomic_add(pi->size >> 9, &device->rs_sect_ev); 4984 err = drbd_submit_peer_request(peer_req); 4985 4986 if (err) { 4987 spin_lock_irq(&device->resource->req_lock); 4988 list_del(&peer_req->w.list); 4989 spin_unlock_irq(&device->resource->req_lock); 4990 4991 drbd_free_peer_req(device, peer_req); 4992 put_ldev(device); 4993 err = 0; 4994 goto fail; 4995 } 4996 4997 inc_unacked(device); 4998 4999 /* No put_ldev() here. 
Gets called in drbd_endio_write_sec_final(), 5000 as well as drbd_rs_complete_io() */ 5001 } else { 5002 fail: 5003 drbd_rs_complete_io(device, sector); 5004 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); 5005 } 5006 5007 atomic_add(size >> 9, &device->rs_sect_in); 5008 5009 return err; 5010 } 5011 5012 struct data_cmd { 5013 int expect_payload; 5014 unsigned int pkt_size; 5015 int (*fn)(struct drbd_connection *, struct packet_info *); 5016 }; 5017 5018 static struct data_cmd drbd_cmd_handler[] = { 5019 [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, 5020 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, 5021 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , 5022 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , 5023 [P_BITMAP] = { 1, 0, receive_bitmap } , 5024 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } , 5025 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote }, 5026 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5027 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5028 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam }, 5029 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam }, 5030 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, 5031 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, 5032 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, 5033 [P_STATE] = { 0, sizeof(struct p_state), receive_state }, 5034 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, 5035 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, 5036 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5037 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 5038 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 5039 [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 5040 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, 5041 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, 5042 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, 5043 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, 5044 [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data }, 5045 [P_ZEROES] = { 0, sizeof(struct p_trim), receive_Data }, 5046 [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated }, 5047 }; 5048 5049 static void drbdd(struct drbd_connection *connection) 5050 { 5051 struct packet_info pi; 5052 size_t shs; /* sub header size */ 5053 int err; 5054 5055 while (get_t_state(&connection->receiver) == RUNNING) { 5056 struct data_cmd const *cmd; 5057 5058 drbd_thread_current_set_cpu(&connection->receiver); 5059 update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug); 5060 if (drbd_recv_header_maybe_unplug(connection, &pi)) 5061 goto err_out; 5062 5063 cmd = &drbd_cmd_handler[pi.cmd]; 5064 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) { 5065 drbd_err(connection, "Unexpected data packet %s (0x%04x)", 5066 cmdname(pi.cmd), pi.cmd); 5067 goto err_out; 5068 } 5069 5070 shs = cmd->pkt_size; 5071 if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME) 5072 shs += sizeof(struct o_qlim); 5073 if (pi.size > shs && !cmd->expect_payload) { 5074 drbd_err(connection, "No payload expected %s l:%d\n", 5075 cmdname(pi.cmd), pi.size); 5076 goto err_out; 5077 } 5078 if (pi.size < shs) { 5079 
drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n", 5080 cmdname(pi.cmd), (int)shs, pi.size); 5081 goto err_out; 5082 } 5083 5084 if (shs) { 5085 update_receiver_timing_details(connection, drbd_recv_all_warn); 5086 err = drbd_recv_all_warn(connection, pi.data, shs); 5087 if (err) 5088 goto err_out; 5089 pi.size -= shs; 5090 } 5091 5092 update_receiver_timing_details(connection, cmd->fn); 5093 err = cmd->fn(connection, &pi); 5094 if (err) { 5095 drbd_err(connection, "error receiving %s, e: %d l: %d!\n", 5096 cmdname(pi.cmd), err, pi.size); 5097 goto err_out; 5098 } 5099 } 5100 return; 5101 5102 err_out: 5103 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 5104 } 5105 5106 static void conn_disconnect(struct drbd_connection *connection) 5107 { 5108 struct drbd_peer_device *peer_device; 5109 enum drbd_conns oc; 5110 int vnr; 5111 5112 if (connection->cstate == C_STANDALONE) 5113 return; 5114 5115 /* We are about to start the cleanup after connection loss. 5116 * Make sure drbd_make_request knows about that. 5117 * Usually we should be in some network failure state already, 5118 * but just in case we are not, we fix it up here. 5119 */ 5120 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); 5121 5122 /* ack_receiver does not clean up anything. it must not interfere, either */ 5123 drbd_thread_stop(&connection->ack_receiver); 5124 if (connection->ack_sender) { 5125 destroy_workqueue(connection->ack_sender); 5126 connection->ack_sender = NULL; 5127 } 5128 drbd_free_sock(connection); 5129 5130 rcu_read_lock(); 5131 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 5132 struct drbd_device *device = peer_device->device; 5133 kref_get(&device->kref); 5134 rcu_read_unlock(); 5135 drbd_disconnected(peer_device); 5136 kref_put(&device->kref, drbd_destroy_device); 5137 rcu_read_lock(); 5138 } 5139 rcu_read_unlock(); 5140 5141 if (!list_empty(&connection->current_epoch->list)) 5142 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n"); 5143 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 5144 atomic_set(&connection->current_epoch->epoch_size, 0); 5145 connection->send.seen_any_write_yet = false; 5146 5147 drbd_info(connection, "Connection closed\n"); 5148 5149 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN) 5150 conn_try_outdate_peer_async(connection); 5151 5152 spin_lock_irq(&connection->resource->req_lock); 5153 oc = connection->cstate; 5154 if (oc >= C_UNCONNECTED) 5155 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); 5156 5157 spin_unlock_irq(&connection->resource->req_lock); 5158 5159 if (oc == C_DISCONNECTING) 5160 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); 5161 } 5162 5163 static int drbd_disconnected(struct drbd_peer_device *peer_device) 5164 { 5165 struct drbd_device *device = peer_device->device; 5166 unsigned int i; 5167 5168 /* wait for current activity to cease. */ 5169 spin_lock_irq(&device->resource->req_lock); 5170 _drbd_wait_ee_list_empty(device, &device->active_ee); 5171 _drbd_wait_ee_list_empty(device, &device->sync_ee); 5172 _drbd_wait_ee_list_empty(device, &device->read_ee); 5173 spin_unlock_irq(&device->resource->req_lock); 5174 5175 /* We do not have data structures that would allow us to 5176 * get the rs_pending_cnt down to 0 again. 
5177 * * On C_SYNC_TARGET we do not have any data structures describing 5178 * the pending RSDataRequest's we have sent. 5179 * * On C_SYNC_SOURCE there is no data structure that tracks 5180 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. 5181 * And no, it is not the sum of the reference counts in the 5182 * resync_LRU. The resync_LRU tracks the whole operation including 5183 * the disk-IO, while the rs_pending_cnt only tracks the blocks 5184 * on the fly. */ 5185 drbd_rs_cancel_all(device); 5186 device->rs_total = 0; 5187 device->rs_failed = 0; 5188 atomic_set(&device->rs_pending_cnt, 0); 5189 wake_up(&device->misc_wait); 5190 5191 del_timer_sync(&device->resync_timer); 5192 resync_timer_fn(&device->resync_timer); 5193 5194 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 5195 * w_make_resync_request etc. which may still be on the worker queue 5196 * to be "canceled" */ 5197 drbd_flush_workqueue(&peer_device->connection->sender_work); 5198 5199 drbd_finish_peer_reqs(device); 5200 5201 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs() 5202 might have issued work again. The one before drbd_finish_peer_reqs() is 5203 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */ 5204 drbd_flush_workqueue(&peer_device->connection->sender_work); 5205 5206 /* need to do it again, drbd_finish_peer_reqs() may have populated it 5207 * again via drbd_try_clear_on_disk_bm(). */ 5208 drbd_rs_cancel_all(device); 5209 5210 kfree(device->p_uuid); 5211 device->p_uuid = NULL; 5212 5213 if (!drbd_suspended(device)) 5214 tl_clear(peer_device->connection); 5215 5216 drbd_md_sync(device); 5217 5218 if (get_ldev(device)) { 5219 drbd_bitmap_io(device, &drbd_bm_write_copy_pages, 5220 "write from disconnected", BM_LOCKED_CHANGE_ALLOWED, NULL); 5221 put_ldev(device); 5222 } 5223 5224 /* tcp_close and release of sendpage pages can be deferred. I don't 5225 * want to use SO_LINGER, because apparently it can be deferred for 5226 * more than 20 seconds (longest time I checked). 5227 * 5228 * Actually we don't care for exactly when the network stack does its 5229 * put_page(), but release our reference on these pages right here. 5230 */ 5231 i = drbd_free_peer_reqs(device, &device->net_ee); 5232 if (i) 5233 drbd_info(device, "net_ee not empty, killed %u entries\n", i); 5234 i = atomic_read(&device->pp_in_use_by_net); 5235 if (i) 5236 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); 5237 i = atomic_read(&device->pp_in_use); 5238 if (i) 5239 drbd_info(device, "pp_in_use = %d, expected 0\n", i); 5240 5241 D_ASSERT(device, list_empty(&device->read_ee)); 5242 D_ASSERT(device, list_empty(&device->active_ee)); 5243 D_ASSERT(device, list_empty(&device->sync_ee)); 5244 D_ASSERT(device, list_empty(&device->done_ee)); 5245 5246 return 0; 5247 } 5248 5249 /* 5250 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version 5251 * we can agree on is stored in agreed_pro_version. 5252 * 5253 * feature flags and the reserved array should be enough room for future 5254 * enhancements of the handshake protocol, and possible plugins... 5255 * 5256 * for now, they are expected to be zero, but ignored.
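 *
 * Agreement is simply the intersection of what both sides offer:
 * agreed_pro_version becomes min(our PRO_VERSION_MAX, the peer's protocol_max)
 * and agreed_features is PRO_FEATURES masked with the peer's feature_flags,
 * so e.g. a peer that only advertises DRBD_FF_TRIM leaves TRIM as the single
 * agreed feature.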
5257 */ 5258 static int drbd_send_features(struct drbd_connection *connection) 5259 { 5260 struct drbd_socket *sock; 5261 struct p_connection_features *p; 5262 5263 sock = &connection->data; 5264 p = conn_prepare_command(connection, sock); 5265 if (!p) 5266 return -EIO; 5267 memset(p, 0, sizeof(*p)); 5268 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 5269 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 5270 p->feature_flags = cpu_to_be32(PRO_FEATURES); 5271 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); 5272 } 5273 5274 /* 5275 * return values: 5276 * 1 yes, we have a valid connection 5277 * 0 oops, did not work out, please try again 5278 * -1 peer talks different language, 5279 * no point in trying again, please go standalone. 5280 */ 5281 static int drbd_do_features(struct drbd_connection *connection) 5282 { 5283 /* ASSERT current == connection->receiver ... */ 5284 struct p_connection_features *p; 5285 const int expect = sizeof(struct p_connection_features); 5286 struct packet_info pi; 5287 int err; 5288 5289 err = drbd_send_features(connection); 5290 if (err) 5291 return 0; 5292 5293 err = drbd_recv_header(connection, &pi); 5294 if (err) 5295 return 0; 5296 5297 if (pi.cmd != P_CONNECTION_FEATURES) { 5298 drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n", 5299 cmdname(pi.cmd), pi.cmd); 5300 return -1; 5301 } 5302 5303 if (pi.size != expect) { 5304 drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n", 5305 expect, pi.size); 5306 return -1; 5307 } 5308 5309 p = pi.data; 5310 err = drbd_recv_all_warn(connection, p, expect); 5311 if (err) 5312 return 0; 5313 5314 p->protocol_min = be32_to_cpu(p->protocol_min); 5315 p->protocol_max = be32_to_cpu(p->protocol_max); 5316 if (p->protocol_max == 0) 5317 p->protocol_max = p->protocol_min; 5318 5319 if (PRO_VERSION_MAX < p->protocol_min || 5320 PRO_VERSION_MIN > p->protocol_max) 5321 goto incompat; 5322 5323 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); 5324 connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags); 5325 5326 drbd_info(connection, "Handshake successful: " 5327 "Agreed network protocol version %d\n", connection->agreed_pro_version); 5328 5329 drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s%s.\n", 5330 connection->agreed_features, 5331 connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "", 5332 connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "", 5333 connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : "", 5334 connection->agreed_features & DRBD_FF_WZEROES ? " WRITE_ZEROES" : 5335 connection->agreed_features ? "" : " none"); 5336 5337 return 1; 5338 5339 incompat: 5340 drbd_err(connection, "incompatible DRBD dialects: " 5341 "I support %d-%d, peer supports %d-%d\n", 5342 PRO_VERSION_MIN, PRO_VERSION_MAX, 5343 p->protocol_min, p->protocol_max); 5344 return -1; 5345 } 5346 5347 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE) 5348 static int drbd_do_auth(struct drbd_connection *connection) 5349 { 5350 drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); 5351 drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); 5352 return -1; 5353 } 5354 #else 5355 #define CHALLENGE_LEN 64 5356 5357 /* Return value: 5358 1 - auth succeeded, 5359 0 - failed, try again (network error), 5360 -1 - auth failed, don't try again. 
5361 */ 5362 5363 static int drbd_do_auth(struct drbd_connection *connection) 5364 { 5365 struct drbd_socket *sock; 5366 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ 5367 char *response = NULL; 5368 char *right_response = NULL; 5369 char *peers_ch = NULL; 5370 unsigned int key_len; 5371 char secret[SHARED_SECRET_MAX]; /* 64 byte */ 5372 unsigned int resp_size; 5373 struct shash_desc *desc; 5374 struct packet_info pi; 5375 struct net_conf *nc; 5376 int err, rv; 5377 5378 /* FIXME: Put the challenge/response into the preallocated socket buffer. */ 5379 5380 rcu_read_lock(); 5381 nc = rcu_dereference(connection->net_conf); 5382 key_len = strlen(nc->shared_secret); 5383 memcpy(secret, nc->shared_secret, key_len); 5384 rcu_read_unlock(); 5385 5386 desc = kmalloc(sizeof(struct shash_desc) + 5387 crypto_shash_descsize(connection->cram_hmac_tfm), 5388 GFP_KERNEL); 5389 if (!desc) { 5390 rv = -1; 5391 goto fail; 5392 } 5393 desc->tfm = connection->cram_hmac_tfm; 5394 5395 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); 5396 if (rv) { 5397 drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv); 5398 rv = -1; 5399 goto fail; 5400 } 5401 5402 get_random_bytes(my_challenge, CHALLENGE_LEN); 5403 5404 sock = &connection->data; 5405 if (!conn_prepare_command(connection, sock)) { 5406 rv = 0; 5407 goto fail; 5408 } 5409 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0, 5410 my_challenge, CHALLENGE_LEN); 5411 if (!rv) 5412 goto fail; 5413 5414 err = drbd_recv_header(connection, &pi); 5415 if (err) { 5416 rv = 0; 5417 goto fail; 5418 } 5419 5420 if (pi.cmd != P_AUTH_CHALLENGE) { 5421 drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n", 5422 cmdname(pi.cmd), pi.cmd); 5423 rv = -1; 5424 goto fail; 5425 } 5426 5427 if (pi.size > CHALLENGE_LEN * 2) { 5428 drbd_err(connection, "expected AuthChallenge payload too big.\n"); 5429 rv = -1; 5430 goto fail; 5431 } 5432 5433 if (pi.size < CHALLENGE_LEN) { 5434 drbd_err(connection, "AuthChallenge payload too small.\n"); 5435 rv = -1; 5436 goto fail; 5437 } 5438 5439 peers_ch = kmalloc(pi.size, GFP_NOIO); 5440 if (!peers_ch) { 5441 rv = -1; 5442 goto fail; 5443 } 5444 5445 err = drbd_recv_all_warn(connection, peers_ch, pi.size); 5446 if (err) { 5447 rv = 0; 5448 goto fail; 5449 } 5450 5451 if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) { 5452 drbd_err(connection, "Peer presented the same challenge!\n"); 5453 rv = -1; 5454 goto fail; 5455 } 5456 5457 resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm); 5458 response = kmalloc(resp_size, GFP_NOIO); 5459 if (!response) { 5460 rv = -1; 5461 goto fail; 5462 } 5463 5464 rv = crypto_shash_digest(desc, peers_ch, pi.size, response); 5465 if (rv) { 5466 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv); 5467 rv = -1; 5468 goto fail; 5469 } 5470 5471 if (!conn_prepare_command(connection, sock)) { 5472 rv = 0; 5473 goto fail; 5474 } 5475 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0, 5476 response, resp_size); 5477 if (!rv) 5478 goto fail; 5479 5480 err = drbd_recv_header(connection, &pi); 5481 if (err) { 5482 rv = 0; 5483 goto fail; 5484 } 5485 5486 if (pi.cmd != P_AUTH_RESPONSE) { 5487 drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n", 5488 cmdname(pi.cmd), pi.cmd); 5489 rv = 0; 5490 goto fail; 5491 } 5492 5493 if (pi.size != resp_size) { 5494 drbd_err(connection, "expected AuthResponse payload of wrong size\n"); 5495 rv = 0; 5496 goto fail; 5497 } 5498 5499 err = 
drbd_recv_all_warn(connection, response , resp_size); 5500 if (err) { 5501 rv = 0; 5502 goto fail; 5503 } 5504 5505 right_response = kmalloc(resp_size, GFP_NOIO); 5506 if (!right_response) { 5507 rv = -1; 5508 goto fail; 5509 } 5510 5511 rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN, 5512 right_response); 5513 if (rv) { 5514 drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv); 5515 rv = -1; 5516 goto fail; 5517 } 5518 5519 rv = !memcmp(response, right_response, resp_size); 5520 5521 if (rv) 5522 drbd_info(connection, "Peer authenticated using %d bytes HMAC\n", 5523 resp_size); 5524 else 5525 rv = -1; 5526 5527 fail: 5528 kfree(peers_ch); 5529 kfree(response); 5530 kfree(right_response); 5531 if (desc) { 5532 shash_desc_zero(desc); 5533 kfree(desc); 5534 } 5535 5536 return rv; 5537 } 5538 #endif 5539 5540 int drbd_receiver(struct drbd_thread *thi) 5541 { 5542 struct drbd_connection *connection = thi->connection; 5543 int h; 5544 5545 drbd_info(connection, "receiver (re)started\n"); 5546 5547 do { 5548 h = conn_connect(connection); 5549 if (h == 0) { 5550 conn_disconnect(connection); 5551 schedule_timeout_interruptible(HZ); 5552 } 5553 if (h == -1) { 5554 drbd_warn(connection, "Discarding network configuration.\n"); 5555 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 5556 } 5557 } while (h == 0); 5558 5559 if (h > 0) { 5560 blk_start_plug(&connection->receiver_plug); 5561 drbdd(connection); 5562 blk_finish_plug(&connection->receiver_plug); 5563 } 5564 5565 conn_disconnect(connection); 5566 5567 drbd_info(connection, "receiver terminated\n"); 5568 return 0; 5569 } 5570 5571 /* ********* acknowledge sender ******** */ 5572 5573 static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi) 5574 { 5575 struct p_req_state_reply *p = pi->data; 5576 int retcode = be32_to_cpu(p->retcode); 5577 5578 if (retcode >= SS_SUCCESS) { 5579 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags); 5580 } else { 5581 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags); 5582 drbd_err(connection, "Requested state change failed by peer: %s (%d)\n", 5583 drbd_set_st_err_str(retcode), retcode); 5584 } 5585 wake_up(&connection->ping_wait); 5586 5587 return 0; 5588 } 5589 5590 static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi) 5591 { 5592 struct drbd_peer_device *peer_device; 5593 struct drbd_device *device; 5594 struct p_req_state_reply *p = pi->data; 5595 int retcode = be32_to_cpu(p->retcode); 5596 5597 peer_device = conn_peer_device(connection, pi->vnr); 5598 if (!peer_device) 5599 return -EIO; 5600 device = peer_device->device; 5601 5602 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) { 5603 D_ASSERT(device, connection->agreed_pro_version < 100); 5604 return got_conn_RqSReply(connection, pi); 5605 } 5606 5607 if (retcode >= SS_SUCCESS) { 5608 set_bit(CL_ST_CHG_SUCCESS, &device->flags); 5609 } else { 5610 set_bit(CL_ST_CHG_FAIL, &device->flags); 5611 drbd_err(device, "Requested state change failed by peer: %s (%d)\n", 5612 drbd_set_st_err_str(retcode), retcode); 5613 } 5614 wake_up(&device->state_wait); 5615 5616 return 0; 5617 } 5618 5619 static int got_Ping(struct drbd_connection *connection, struct packet_info *pi) 5620 { 5621 return drbd_send_ping_ack(connection); 5622 5623 } 5624 5625 static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi) 5626 { 5627 /* restore idle timeout */ 5628 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ; 5629 if 
(!test_and_set_bit(GOT_PING_ACK, &connection->flags)) 5630 wake_up(&connection->ping_wait); 5631 5632 return 0; 5633 } 5634 5635 static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi) 5636 { 5637 struct drbd_peer_device *peer_device; 5638 struct drbd_device *device; 5639 struct p_block_ack *p = pi->data; 5640 sector_t sector = be64_to_cpu(p->sector); 5641 int blksize = be32_to_cpu(p->blksize); 5642 5643 peer_device = conn_peer_device(connection, pi->vnr); 5644 if (!peer_device) 5645 return -EIO; 5646 device = peer_device->device; 5647 5648 D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89); 5649 5650 update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); 5651 5652 if (get_ldev(device)) { 5653 drbd_rs_complete_io(device, sector); 5654 drbd_set_in_sync(peer_device, sector, blksize); 5655 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ 5656 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); 5657 put_ldev(device); 5658 } 5659 dec_rs_pending(peer_device); 5660 atomic_add(blksize >> 9, &device->rs_sect_in); 5661 5662 return 0; 5663 } 5664 5665 static int 5666 validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector, 5667 struct rb_root *root, const char *func, 5668 enum drbd_req_event what, bool missing_ok) 5669 { 5670 struct drbd_device *device = peer_device->device; 5671 struct drbd_request *req; 5672 struct bio_and_error m; 5673 5674 spin_lock_irq(&device->resource->req_lock); 5675 req = find_request(device, root, id, sector, missing_ok, func); 5676 if (unlikely(!req)) { 5677 spin_unlock_irq(&device->resource->req_lock); 5678 return -EIO; 5679 } 5680 __req_mod(req, what, peer_device, &m); 5681 spin_unlock_irq(&device->resource->req_lock); 5682 5683 if (m.bio) 5684 complete_master_bio(device, &m); 5685 return 0; 5686 } 5687 5688 static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi) 5689 { 5690 struct drbd_peer_device *peer_device; 5691 struct drbd_device *device; 5692 struct p_block_ack *p = pi->data; 5693 sector_t sector = be64_to_cpu(p->sector); 5694 int blksize = be32_to_cpu(p->blksize); 5695 enum drbd_req_event what; 5696 5697 peer_device = conn_peer_device(connection, pi->vnr); 5698 if (!peer_device) 5699 return -EIO; 5700 device = peer_device->device; 5701 5702 update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); 5703 5704 if (p->block_id == ID_SYNCER) { 5705 drbd_set_in_sync(peer_device, sector, blksize); 5706 dec_rs_pending(peer_device); 5707 return 0; 5708 } 5709 switch (pi->cmd) { 5710 case P_RS_WRITE_ACK: 5711 what = WRITE_ACKED_BY_PEER_AND_SIS; 5712 break; 5713 case P_WRITE_ACK: 5714 what = WRITE_ACKED_BY_PEER; 5715 break; 5716 case P_RECV_ACK: 5717 what = RECV_ACKED_BY_PEER; 5718 break; 5719 case P_SUPERSEDED: 5720 what = CONFLICT_RESOLVED; 5721 break; 5722 case P_RETRY_WRITE: 5723 what = POSTPONE_WRITE; 5724 break; 5725 default: 5726 BUG(); 5727 } 5728 5729 return validate_req_change_req_state(peer_device, p->block_id, sector, 5730 &device->write_requests, __func__, 5731 what, false); 5732 } 5733 5734 static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi) 5735 { 5736 struct drbd_peer_device *peer_device; 5737 struct drbd_device *device; 5738 struct p_block_ack *p = pi->data; 5739 sector_t sector = be64_to_cpu(p->sector); 5740 int size = be32_to_cpu(p->blksize); 5741 int err; 5742 5743 peer_device = conn_peer_device(connection, pi->vnr); 5744 if (!peer_device) 5745 return -EIO; 5746 device = peer_device->device; 5747 5748 
update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); 5749 5750 if (p->block_id == ID_SYNCER) { 5751 dec_rs_pending(peer_device); 5752 drbd_rs_failed_io(peer_device, sector, size); 5753 return 0; 5754 } 5755 5756 err = validate_req_change_req_state(peer_device, p->block_id, sector, 5757 &device->write_requests, __func__, 5758 NEG_ACKED, true); 5759 if (err) { 5760 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. 5761 The master bio might already be completed, therefore the 5762 request is no longer in the collision hash. */ 5763 /* In Protocol B we might already have got a P_RECV_ACK 5764 but then get a P_NEG_ACK afterwards. */ 5765 drbd_set_out_of_sync(peer_device, sector, size); 5766 } 5767 return 0; 5768 } 5769 5770 static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi) 5771 { 5772 struct drbd_peer_device *peer_device; 5773 struct drbd_device *device; 5774 struct p_block_ack *p = pi->data; 5775 sector_t sector = be64_to_cpu(p->sector); 5776 5777 peer_device = conn_peer_device(connection, pi->vnr); 5778 if (!peer_device) 5779 return -EIO; 5780 device = peer_device->device; 5781 5782 update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); 5783 5784 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n", 5785 (unsigned long long)sector, be32_to_cpu(p->blksize)); 5786 5787 return validate_req_change_req_state(peer_device, p->block_id, sector, 5788 &device->read_requests, __func__, 5789 NEG_ACKED, false); 5790 } 5791 5792 static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi) 5793 { 5794 struct drbd_peer_device *peer_device; 5795 struct drbd_device *device; 5796 sector_t sector; 5797 int size; 5798 struct p_block_ack *p = pi->data; 5799 5800 peer_device = conn_peer_device(connection, pi->vnr); 5801 if (!peer_device) 5802 return -EIO; 5803 device = peer_device->device; 5804 5805 sector = be64_to_cpu(p->sector); 5806 size = be32_to_cpu(p->blksize); 5807 5808 update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); 5809 5810 dec_rs_pending(peer_device); 5811 5812 if (get_ldev_if_state(device, D_FAILED)) { 5813 drbd_rs_complete_io(device, sector); 5814 switch (pi->cmd) { 5815 case P_NEG_RS_DREPLY: 5816 drbd_rs_failed_io(peer_device, sector, size); 5817 break; 5818 case P_RS_CANCEL: 5819 break; 5820 default: 5821 BUG(); 5822 } 5823 put_ldev(device); 5824 } 5825 5826 return 0; 5827 } 5828 5829 static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi) 5830 { 5831 struct p_barrier_ack *p = pi->data; 5832 struct drbd_peer_device *peer_device; 5833 int vnr; 5834 5835 tl_release(connection, p->barrier, be32_to_cpu(p->set_size)); 5836 5837 rcu_read_lock(); 5838 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { 5839 struct drbd_device *device = peer_device->device; 5840 5841 if (device->state.conn == C_AHEAD && 5842 atomic_read(&device->ap_in_flight) == 0 && 5843 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) { 5844 device->start_resync_timer.expires = jiffies + HZ; 5845 add_timer(&device->start_resync_timer); 5846 } 5847 } 5848 rcu_read_unlock(); 5849 5850 return 0; 5851 } 5852 5853 static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi) 5854 { 5855 struct drbd_peer_device *peer_device; 5856 struct drbd_device *device; 5857 struct p_block_ack *p = pi->data; 5858 struct drbd_device_work *dw; 5859 sector_t sector; 5860 int size; 5861 5862 peer_device = conn_peer_device(connection, pi->vnr); 5863 if (!peer_device) 5864 return -EIO; 5865 device = 
peer_device->device; 5866 5867 sector = be64_to_cpu(p->sector); 5868 size = be32_to_cpu(p->blksize); 5869 5870 update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); 5871 5872 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) 5873 drbd_ov_out_of_sync_found(peer_device, sector, size); 5874 else 5875 ov_out_of_sync_print(peer_device); 5876 5877 if (!get_ldev(device)) 5878 return 0; 5879 5880 drbd_rs_complete_io(device, sector); 5881 dec_rs_pending(peer_device); 5882 5883 --device->ov_left; 5884 5885 /* let's advance progress step marks only for every other megabyte */ 5886 if ((device->ov_left & 0x200) == 0x200) 5887 drbd_advance_rs_marks(peer_device, device->ov_left); 5888 5889 if (device->ov_left == 0) { 5890 dw = kmalloc(sizeof(*dw), GFP_NOIO); 5891 if (dw) { 5892 dw->w.cb = w_ov_finished; 5893 dw->device = device; 5894 drbd_queue_work(&peer_device->connection->sender_work, &dw->w); 5895 } else { 5896 drbd_err(device, "kmalloc(dw) failed."); 5897 ov_out_of_sync_print(peer_device); 5898 drbd_resync_finished(peer_device); 5899 } 5900 } 5901 put_ldev(device); 5902 return 0; 5903 } 5904 5905 static int got_skip(struct drbd_connection *connection, struct packet_info *pi) 5906 { 5907 return 0; 5908 } 5909 5910 struct meta_sock_cmd { 5911 size_t pkt_size; 5912 int (*fn)(struct drbd_connection *connection, struct packet_info *); 5913 }; 5914 5915 static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout) 5916 { 5917 long t; 5918 struct net_conf *nc; 5919 5920 rcu_read_lock(); 5921 nc = rcu_dereference(connection->net_conf); 5922 t = ping_timeout ? nc->ping_timeo : nc->ping_int; 5923 rcu_read_unlock(); 5924 5925 t *= HZ; 5926 if (ping_timeout) 5927 t /= 10; 5928 5929 connection->meta.socket->sk->sk_rcvtimeo = t; 5930 } 5931 5932 static void set_ping_timeout(struct drbd_connection *connection) 5933 { 5934 set_rcvtimeo(connection, 1); 5935 } 5936 5937 static void set_idle_timeout(struct drbd_connection *connection) 5938 { 5939 set_rcvtimeo(connection, 0); 5940 } 5941 5942 static struct meta_sock_cmd ack_receiver_tbl[] = { 5943 [P_PING] = { 0, got_Ping }, 5944 [P_PING_ACK] = { 0, got_PingAck }, 5945 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 5946 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 5947 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 5948 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck }, 5949 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck }, 5950 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply }, 5951 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply }, 5952 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult }, 5953 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, 5954 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, 5955 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, 5956 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, 5957 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply }, 5958 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply }, 5959 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck }, 5960 }; 5961 5962 int drbd_ack_receiver(struct drbd_thread *thi) 5963 { 5964 struct drbd_connection *connection = thi->connection; 5965 struct meta_sock_cmd *cmd = NULL; 5966 struct packet_info pi; 5967 unsigned long pre_recv_jif; 5968 int rv; 5969 void *buf = connection->meta.rbuf; 5970 int received = 0; 5971 unsigned int header_size = 
drbd_header_size(connection); 5972 int expect = header_size; 5973 bool ping_timeout_active = false; 5974 5975 sched_set_fifo_low(current); 5976 5977 while (get_t_state(thi) == RUNNING) { 5978 drbd_thread_current_set_cpu(thi); 5979 5980 conn_reclaim_net_peer_reqs(connection); 5981 5982 if (test_and_clear_bit(SEND_PING, &connection->flags)) { 5983 if (drbd_send_ping(connection)) { 5984 drbd_err(connection, "drbd_send_ping has failed\n"); 5985 goto reconnect; 5986 } 5987 set_ping_timeout(connection); 5988 ping_timeout_active = true; 5989 } 5990 5991 pre_recv_jif = jiffies; 5992 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0); 5993 5994 /* Note: 5995 * -EINTR (on meta) we got a signal 5996 * -EAGAIN (on meta) rcvtimeo expired 5997 * -ECONNRESET other side closed the connection 5998 * -ERESTARTSYS (on data) we got a signal 5999 * rv < 0 other than above: unexpected error! 6000 * rv == expected: full header or command 6001 * rv < expected: "woken" by signal during receive 6002 * rv == 0 : "connection shut down by peer" 6003 */ 6004 if (likely(rv > 0)) { 6005 received += rv; 6006 buf += rv; 6007 } else if (rv == 0) { 6008 if (test_bit(DISCONNECT_SENT, &connection->flags)) { 6009 long t; 6010 rcu_read_lock(); 6011 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10; 6012 rcu_read_unlock(); 6013 6014 t = wait_event_timeout(connection->ping_wait, 6015 connection->cstate < C_WF_REPORT_PARAMS, 6016 t); 6017 if (t) 6018 break; 6019 } 6020 drbd_err(connection, "meta connection shut down by peer.\n"); 6021 goto reconnect; 6022 } else if (rv == -EAGAIN) { 6023 /* If the data socket received something meanwhile, 6024 * that is good enough: peer is still alive. */ 6025 if (time_after(connection->last_received, pre_recv_jif)) 6026 continue; 6027 if (ping_timeout_active) { 6028 drbd_err(connection, "PingAck did not arrive in time.\n"); 6029 goto reconnect; 6030 } 6031 set_bit(SEND_PING, &connection->flags); 6032 continue; 6033 } else if (rv == -EINTR) { 6034 /* maybe drbd_thread_stop(): the while condition will notice. 
6035 * maybe woken for send_ping: we'll send a ping above, 6036 * and change the rcvtimeo */ 6037 flush_signals(current); 6038 continue; 6039 } else { 6040 drbd_err(connection, "sock_recvmsg returned %d\n", rv); 6041 goto reconnect; 6042 } 6043 6044 if (received == expect && cmd == NULL) { 6045 if (decode_header(connection, connection->meta.rbuf, &pi)) 6046 goto reconnect; 6047 cmd = &ack_receiver_tbl[pi.cmd]; 6048 if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) { 6049 drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n", 6050 cmdname(pi.cmd), pi.cmd); 6051 goto disconnect; 6052 } 6053 expect = header_size + cmd->pkt_size; 6054 if (pi.size != expect - header_size) { 6055 drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n", 6056 pi.cmd, pi.size); 6057 goto reconnect; 6058 } 6059 } 6060 if (received == expect) { 6061 bool err; 6062 6063 err = cmd->fn(connection, &pi); 6064 if (err) { 6065 drbd_err(connection, "%ps failed\n", cmd->fn); 6066 goto reconnect; 6067 } 6068 6069 connection->last_received = jiffies; 6070 6071 if (cmd == &ack_receiver_tbl[P_PING_ACK]) { 6072 set_idle_timeout(connection); 6073 ping_timeout_active = false; 6074 } 6075 6076 buf = connection->meta.rbuf; 6077 received = 0; 6078 expect = header_size; 6079 cmd = NULL; 6080 } 6081 } 6082 6083 if (0) { 6084 reconnect: 6085 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); 6086 conn_md_sync(connection); 6087 } 6088 if (0) { 6089 disconnect: 6090 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); 6091 } 6092 6093 drbd_info(connection, "ack_receiver terminated\n"); 6094 6095 return 0; 6096 } 6097 6098 void drbd_send_acks_wf(struct work_struct *ws) 6099 { 6100 struct drbd_peer_device *peer_device = 6101 container_of(ws, struct drbd_peer_device, send_acks_work); 6102 struct drbd_connection *connection = peer_device->connection; 6103 struct drbd_device *device = peer_device->device; 6104 struct net_conf *nc; 6105 int tcp_cork, err; 6106 6107 rcu_read_lock(); 6108 nc = rcu_dereference(connection->net_conf); 6109 tcp_cork = nc->tcp_cork; 6110 rcu_read_unlock(); 6111 6112 if (tcp_cork) 6113 tcp_sock_set_cork(connection->meta.socket->sk, true); 6114 6115 err = drbd_finish_peer_reqs(device); 6116 kref_put(&device->kref, drbd_destroy_device); 6117 /* get is in drbd_endio_write_sec_final(). That is necessary to keep the 6118 struct work_struct send_acks_work alive, which is in the peer_device object */ 6119 6120 if (err) { 6121 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); 6122 return; 6123 } 6124 6125 if (tcp_cork) 6126 tcp_sock_set_cork(connection->meta.socket->sk, false); 6127 6128 return; 6129 } 6130