#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                          | |
 *       |     /   ---------------           | |
 *       |    /                   \          v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       | |    /                 --------------  connect initiated
 *       | |    | con_sock_state_connected()
 *       | |    v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
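 *
 * Transitions are performed with atomic_xchg() in the con_sock_state_*()
 * helpers below; an unexpected previous value triggers a WARN_ON().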
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX		0  /* we can close channel or drop
					    * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING	1  /* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING		2  /* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED		3  /* socket state changed to closed */
#define CON_FLAG_BACKOFF		4  /* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*ceph_msg_cache;
static struct kmem_cache	*ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
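 * Each call to ceph_pr_addr() formats into the next slot of this small
 * ring of static buffers, so several recent results stay usable at once.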
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
	if (!ceph_msg_cache)
		return -ENOMEM;

	BUG_ON(ceph_msg_data_cache);
	ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
	if (ceph_msg_data_cache)
		return 0;

	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;

	return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_data_cache);
	kmem_cache_destroy(ceph_msg_data_cache);
	ceph_msg_data_cache = NULL;

	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	put_page(zero_page);
	zero_page = NULL;

	ceph_msgr_slab_exit();
}

int ceph_msgr_init(void)
{
	if (ceph_msgr_slab_init())
		return -ENOMEM;

	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	get_page(zero_page);

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
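	 * WQ_MEM_RECLAIM gives the queue a rescuer thread, so it can keep
	 * making progress even under memory pressure.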
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.
	 * Clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer.  See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		return ret;
	}

	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
		int optval = 1;

		ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					(char *)&optval, sizeof(optval));
		if (ret)
			pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d\n",
			       ret);
	}

	con->sock = sock;
	return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
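	/* Receive into a single page via a bio_vec-backed iterator; as with
	 * ceph_tcp_recvmsg(), -EAGAIN is reported as 0 ("no progress").
	 */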
	struct bio_vec bvec = {
		.bv_page = page,
		.bv_offset = page_offset,
		.bv_len = length
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	BUG_ON(page_offset + length > PAGE_SIZE);
	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			       int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	struct bio_vec bvec;
	int ret;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	bvec.bv_page = page;
	bvec.bv_offset = offset;
	bvec.bv_len = size;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
	ret = sock_sendmsg(sock, &msg);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
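 * Caller holds con->mutex.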
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		BUG_ON(con->out_msg->con != con);
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;

	con->out_skip = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_con(con);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	BUG_ON(con->out_skip);

	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index = con->out_kvec_left;

	BUG_ON(con->out_skip);
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

/*
 * Chop off a kvec from the end.  Return residual number of bytes for
 * that kvec, i.e. how many bytes would have been written if the kvec
 * hadn't been nuked.
 */
static int con_out_kvec_skip(struct ceph_connection *con)
{
	int off = con->out_kvec_cur - con->out_kvec;
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
		BUG_ON(con->out_kvec_bytes < skip);
		BUG_ON(!con->out_kvec_left);
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}

	return skip;
}

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					  size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = data->bio;
	BUG_ON(!bio);

	cursor->resid = min(length, data->bio_length);
	cursor->bio = bio;
	cursor->bvec_iter = bio->bi_iter;
	cursor->last_piece =
		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
					   size_t *page_offset,
					   size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	*page_offset = (size_t) bio_vec.bv_offset;
	BUG_ON(*page_offset >= PAGE_SIZE);
	if (cursor->last_piece) /* pagelist offset is always 0 */
		*length = cursor->resid;
	else
		*length = (size_t) bio_vec.bv_len;
	BUG_ON(*length > cursor->resid);
	BUG_ON(*page_offset + *length > PAGE_SIZE);

	return bio_vec.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
				      size_t bytes)
{
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	/* Advance the cursor offset */

	BUG_ON(cursor->resid < bytes);
	cursor->resid -= bytes;

	bio_advance_iter(bio, &cursor->bvec_iter, bytes);

	if (bytes < bio_vec.bv_len)
		return false;	/* more bytes to process in this segment */

	/* Move on to the next segment, and possibly the next bio */

	if (!cursor->bvec_iter.bi_size) {
		bio = bio->bi_next;
		cursor->bio = bio;
		if (bio)
			cursor->bvec_iter = bio->bi_iter;
		else
			memset(&cursor->bvec_iter, 0,
			       sizeof(cursor->bvec_iter));
	}

	if (!cursor->last_piece) {
		BUG_ON(!cursor->resid);
		BUG_ON(!bio);
		/* A short read is OK, so use <= rather than == */
		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
			cursor->last_piece = true;
	}

	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
			 size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;	/* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
				   size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
			    size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
					   size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;	/* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_next_entry(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
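 *
 * Typical use: ceph_msg_data_cursor_init() once per message payload,
 * then ceph_msg_data_next() / ceph_msg_data_advance() in a loop until
 * total_resid reaches zero.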
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	struct ceph_msg_data *data;

	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(list_empty(&msg->data));

	cursor->data_head = &msg->data;
	cursor->total_resid = length;
	data = list_first_entry(&msg->data, struct ceph_msg_data, links);
	cursor->data = data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
					size_t *page_offset, size_t *length,
					bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}
	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				  size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
		cursor->data = list_next_entry(cursor->data, links);
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;

	return new_piece;
}

static size_t sizeof_footer(struct ceph_connection *con)
{
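	/* Peers without CEPH_FEATURE_MSG_AUTH expect the old, shorter
	 * footer that lacks the signature field.
	 */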
	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
	    sizeof(struct ceph_msg_footer) :
	    sizeof(struct ceph_msg_footer_old);
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	BUG_ON(!msg);
	BUG_ON(!data_len);

	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid; we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con_out_kvec_add(con, sizeof_footer(con), &m->footer);
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(m);
		else
			m->footer.sig = 0;
	} else {
		m->old_footer.flags = m->footer.flags;
	}
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
				 &con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
				 m->middle->vec.iov_base);

	/* fill in hdr crc and finalize hdr */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));

	/* fill in front and middle crc, footer */
	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
			     m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));
	con->out_msg->footer.flags = 0;

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to write keepalive byte.
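 * (KEEPALIVE2-capable peers get the tag followed by the current time;
 * older peers get just the tag byte.)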
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
		struct timespec now = CURRENT_TIME;

		con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
		ceph_encode_timespec(&con->out_temp_keepalive2, &now);
		con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
				 &con->out_temp_keepalive2);
	} else {
		con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
	}
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	if (IS_ERR(auth))
		return auth;

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features =
	    cpu_to_le64(from_msgr(con->msgr)->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
			 &con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
				 auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);

	return 0;
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;		/* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static u32 ceph_crc32c_page(u32 crc, struct page *page,
			    unsigned int page_offset,
			    unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}

/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (list_empty(&msg->data))
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		bool last_piece;
		bool need_crc;
		int ret;

		page = ceph_msg_data_next(cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, !last_piece);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		need_crc = ceph_msg_data_advance(cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	dout("%s %p %d left\n", __func__, con, con->out_skip);
	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

static void prepare_read_keepalive_ack(struct ceph_connection *con)
{
	dout("prepare_read_keepalive_ack %p\n", con);
	con->in_base_pos = 0;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;

}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
	struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;

	switch (ss->ss_family) {
	case AF_INET:
		return addr->s_addr == htonl(INADDR_ANY);
	case AF_INET6:
		return ipv6_addr_any(addr6);
	default:
		return true;
	}
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		     char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

"failed" : ceph_pr_addr(ss)); 1877 1878 return ret; 1879 } 1880 #else 1881 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1882 struct sockaddr_storage *ss, char delim, const char **ipend) 1883 { 1884 return -EINVAL; 1885 } 1886 #endif 1887 1888 /* 1889 * Parse a server name (IP or hostname). If a valid IP address is not found 1890 * then try to extract a hostname to resolve using userspace DNS upcall. 1891 */ 1892 static int ceph_parse_server_name(const char *name, size_t namelen, 1893 struct sockaddr_storage *ss, char delim, const char **ipend) 1894 { 1895 int ret; 1896 1897 ret = ceph_pton(name, namelen, ss, delim, ipend); 1898 if (ret) 1899 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1900 1901 return ret; 1902 } 1903 1904 /* 1905 * Parse an ip[:port] list into an addr array. Use the default 1906 * monitor port if a port isn't specified. 1907 */ 1908 int ceph_parse_ips(const char *c, const char *end, 1909 struct ceph_entity_addr *addr, 1910 int max_count, int *count) 1911 { 1912 int i, ret = -EINVAL; 1913 const char *p = c; 1914 1915 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1916 for (i = 0; i < max_count; i++) { 1917 const char *ipend; 1918 struct sockaddr_storage *ss = &addr[i].in_addr; 1919 int port; 1920 char delim = ','; 1921 1922 if (*p == '[') { 1923 delim = ']'; 1924 p++; 1925 } 1926 1927 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1928 if (ret) 1929 goto bad; 1930 ret = -EINVAL; 1931 1932 p = ipend; 1933 1934 if (delim == ']') { 1935 if (*p != ']') { 1936 dout("missing matching ']'\n"); 1937 goto bad; 1938 } 1939 p++; 1940 } 1941 1942 /* port? */ 1943 if (p < end && *p == ':') { 1944 port = 0; 1945 p++; 1946 while (p < end && *p >= '0' && *p <= '9') { 1947 port = (port * 10) + (*p - '0'); 1948 p++; 1949 } 1950 if (port == 0) 1951 port = CEPH_MON_PORT; 1952 else if (port > 65535) 1953 goto bad; 1954 } else { 1955 port = CEPH_MON_PORT; 1956 } 1957 1958 addr_set_port(ss, port); 1959 1960 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1961 1962 if (p == end) 1963 break; 1964 if (*p != ',') 1965 goto bad; 1966 p++; 1967 } 1968 1969 if (p != end) 1970 goto bad; 1971 1972 if (count) 1973 *count = i + 1; 1974 return 0; 1975 1976 bad: 1977 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1978 return ret; 1979 } 1980 EXPORT_SYMBOL(ceph_parse_ips); 1981 1982 static int process_banner(struct ceph_connection *con) 1983 { 1984 dout("process_banner on %p\n", con); 1985 1986 if (verify_hello(con) < 0) 1987 return -1; 1988 1989 ceph_decode_addr(&con->actual_peer_addr); 1990 ceph_decode_addr(&con->peer_addr_for_me); 1991 1992 /* 1993 * Make sure the other end is who we wanted. note that the other 1994 * end may not yet know their ip address, so if it's 0.0.0.0, give 1995 * them the benefit of the doubt. 1996 */ 1997 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1998 sizeof(con->peer_addr)) != 0 && 1999 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 2000 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 2001 pr_warn("wrong peer, want %s/%d, got %s/%d\n", 2002 ceph_pr_addr(&con->peer_addr.in_addr), 2003 (int)le32_to_cpu(con->peer_addr.nonce), 2004 ceph_pr_addr(&con->actual_peer_addr.in_addr), 2005 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 2006 con->error_msg = "wrong peer at address"; 2007 return -1; 2008 } 2009 2010 /* 2011 * did we learn our address? 
2012 */ 2013 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) { 2014 int port = addr_port(&con->msgr->inst.addr.in_addr); 2015 2016 memcpy(&con->msgr->inst.addr.in_addr, 2017 &con->peer_addr_for_me.in_addr, 2018 sizeof(con->peer_addr_for_me.in_addr)); 2019 addr_set_port(&con->msgr->inst.addr.in_addr, port); 2020 encode_my_addr(con->msgr); 2021 dout("process_banner learned my addr is %s\n", 2022 ceph_pr_addr(&con->msgr->inst.addr.in_addr)); 2023 } 2024 2025 return 0; 2026 } 2027 2028 static int process_connect(struct ceph_connection *con) 2029 { 2030 u64 sup_feat = from_msgr(con->msgr)->supported_features; 2031 u64 req_feat = from_msgr(con->msgr)->required_features; 2032 u64 server_feat = ceph_sanitize_features( 2033 le64_to_cpu(con->in_reply.features)); 2034 int ret; 2035 2036 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 2037 2038 if (con->auth_reply_buf) { 2039 /* 2040 * Any connection that defines ->get_authorizer() 2041 * should also define ->verify_authorizer_reply(). 2042 * See get_connect_authorizer(). 2043 */ 2044 ret = con->ops->verify_authorizer_reply(con); 2045 if (ret < 0) { 2046 con->error_msg = "bad authorize reply"; 2047 return ret; 2048 } 2049 } 2050 2051 switch (con->in_reply.tag) { 2052 case CEPH_MSGR_TAG_FEATURES: 2053 pr_err("%s%lld %s feature set mismatch," 2054 " my %llx < server's %llx, missing %llx\n", 2055 ENTITY_NAME(con->peer_name), 2056 ceph_pr_addr(&con->peer_addr.in_addr), 2057 sup_feat, server_feat, server_feat & ~sup_feat); 2058 con->error_msg = "missing required protocol features"; 2059 reset_connection(con); 2060 return -1; 2061 2062 case CEPH_MSGR_TAG_BADPROTOVER: 2063 pr_err("%s%lld %s protocol version mismatch," 2064 " my %d != server's %d\n", 2065 ENTITY_NAME(con->peer_name), 2066 ceph_pr_addr(&con->peer_addr.in_addr), 2067 le32_to_cpu(con->out_connect.protocol_version), 2068 le32_to_cpu(con->in_reply.protocol_version)); 2069 con->error_msg = "protocol version mismatch"; 2070 reset_connection(con); 2071 return -1; 2072 2073 case CEPH_MSGR_TAG_BADAUTHORIZER: 2074 con->auth_retry++; 2075 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con, 2076 con->auth_retry); 2077 if (con->auth_retry == 2) { 2078 con->error_msg = "connect authorization failure"; 2079 return -1; 2080 } 2081 con_out_kvec_reset(con); 2082 ret = prepare_write_connect(con); 2083 if (ret < 0) 2084 return ret; 2085 prepare_read_connect(con); 2086 break; 2087 2088 case CEPH_MSGR_TAG_RESETSESSION: 2089 /* 2090 * If we connected with a large connect_seq but the peer 2091 * has no record of a session with us (no connection, or 2092 * connect_seq == 0), they will send RESETSESION to indicate 2093 * that they must have reset their session, and may have 2094 * dropped messages. 2095 */ 2096 dout("process_connect got RESET peer seq %u\n", 2097 le32_to_cpu(con->in_reply.connect_seq)); 2098 pr_err("%s%lld %s connection reset\n", 2099 ENTITY_NAME(con->peer_name), 2100 ceph_pr_addr(&con->peer_addr.in_addr)); 2101 reset_connection(con); 2102 con_out_kvec_reset(con); 2103 ret = prepare_write_connect(con); 2104 if (ret < 0) 2105 return ret; 2106 prepare_read_connect(con); 2107 2108 /* Tell ceph about it. 
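 * con->mutex is dropped across the ->peer_reset() callback so that
 * the upper layer (the mon or osd client, for example) can clean up
 * and requeue work without deadlocking against this connection's
 * mutex.  Because the connection can change while unlocked, re-check
 * con->state afterwards and bail out with -EAGAIN if we are no
 * longer NEGOTIATING.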
*/ 2109 mutex_unlock(&con->mutex); 2110 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name)); 2111 if (con->ops->peer_reset) 2112 con->ops->peer_reset(con); 2113 mutex_lock(&con->mutex); 2114 if (con->state != CON_STATE_NEGOTIATING) 2115 return -EAGAIN; 2116 break; 2117 2118 case CEPH_MSGR_TAG_RETRY_SESSION: 2119 /* 2120 * If we sent a smaller connect_seq than the peer has, try 2121 * again with a larger value. 2122 */ 2123 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 2124 le32_to_cpu(con->out_connect.connect_seq), 2125 le32_to_cpu(con->in_reply.connect_seq)); 2126 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 2127 con_out_kvec_reset(con); 2128 ret = prepare_write_connect(con); 2129 if (ret < 0) 2130 return ret; 2131 prepare_read_connect(con); 2132 break; 2133 2134 case CEPH_MSGR_TAG_RETRY_GLOBAL: 2135 /* 2136 * If we sent a smaller global_seq than the peer has, try 2137 * again with a larger value. 2138 */ 2139 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 2140 con->peer_global_seq, 2141 le32_to_cpu(con->in_reply.global_seq)); 2142 get_global_seq(con->msgr, 2143 le32_to_cpu(con->in_reply.global_seq)); 2144 con_out_kvec_reset(con); 2145 ret = prepare_write_connect(con); 2146 if (ret < 0) 2147 return ret; 2148 prepare_read_connect(con); 2149 break; 2150 2151 case CEPH_MSGR_TAG_SEQ: 2152 case CEPH_MSGR_TAG_READY: 2153 if (req_feat & ~server_feat) { 2154 pr_err("%s%lld %s protocol feature mismatch," 2155 " my required %llx > server's %llx, need %llx\n", 2156 ENTITY_NAME(con->peer_name), 2157 ceph_pr_addr(&con->peer_addr.in_addr), 2158 req_feat, server_feat, req_feat & ~server_feat); 2159 con->error_msg = "missing required protocol features"; 2160 reset_connection(con); 2161 return -1; 2162 } 2163 2164 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2165 con->state = CON_STATE_OPEN; 2166 con->auth_retry = 0; /* we authenticated; clear flag */ 2167 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2168 con->connect_seq++; 2169 con->peer_features = server_feat; 2170 dout("process_connect got READY gseq %d cseq %d (%d)\n", 2171 con->peer_global_seq, 2172 le32_to_cpu(con->in_reply.connect_seq), 2173 con->connect_seq); 2174 WARN_ON(con->connect_seq != 2175 le32_to_cpu(con->in_reply.connect_seq)); 2176 2177 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 2178 con_flag_set(con, CON_FLAG_LOSSYTX); 2179 2180 con->delay = 0; /* reset backoff memory */ 2181 2182 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 2183 prepare_write_seq(con); 2184 prepare_read_seq(con); 2185 } else { 2186 prepare_read_tag(con); 2187 } 2188 break; 2189 2190 case CEPH_MSGR_TAG_WAIT: 2191 /* 2192 * If there is a connection race (we are opening 2193 * connections to each other), one of us may just have 2194 * to WAIT. This shouldn't happen if we are the 2195 * client. 2196 */ 2197 con->error_msg = "protocol error, got WAIT as client"; 2198 return -1; 2199 2200 default: 2201 con->error_msg = "protocol error, garbage tag during connect"; 2202 return -1; 2203 } 2204 return 0; 2205 } 2206 2207 2208 /* 2209 * read (part of) an ack 2210 */ 2211 static int read_partial_ack(struct ceph_connection *con) 2212 { 2213 int size = sizeof (con->in_temp_ack); 2214 int end = size; 2215 2216 return read_partial(con, end, size, &con->in_temp_ack); 2217 } 2218 2219 /* 2220 * We can finally discard anything that's been acked. 
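 * The ack carries the highest sequence number the peer has received
 * (con->in_temp_ack).  Everything still on out_sent with hdr.seq at
 * or below that value is known to have arrived and can be freed.
 * Illustration (made-up numbers): with seqs 4, 5 and 6 on out_sent
 * and an ack of 5, messages 4 and 5 are dropped and 6 stays queued
 * until a later ack covers it.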
2221 */ 2222 static void process_ack(struct ceph_connection *con) 2223 { 2224 struct ceph_msg *m; 2225 u64 ack = le64_to_cpu(con->in_temp_ack); 2226 u64 seq; 2227 2228 while (!list_empty(&con->out_sent)) { 2229 m = list_first_entry(&con->out_sent, struct ceph_msg, 2230 list_head); 2231 seq = le64_to_cpu(m->hdr.seq); 2232 if (seq > ack) 2233 break; 2234 dout("got ack for seq %llu type %d at %p\n", seq, 2235 le16_to_cpu(m->hdr.type), m); 2236 m->ack_stamp = jiffies; 2237 ceph_msg_remove(m); 2238 } 2239 prepare_read_tag(con); 2240 } 2241 2242 2243 static int read_partial_message_section(struct ceph_connection *con, 2244 struct kvec *section, 2245 unsigned int sec_len, u32 *crc) 2246 { 2247 int ret, left; 2248 2249 BUG_ON(!section); 2250 2251 while (section->iov_len < sec_len) { 2252 BUG_ON(section->iov_base == NULL); 2253 left = sec_len - section->iov_len; 2254 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 2255 section->iov_len, left); 2256 if (ret <= 0) 2257 return ret; 2258 section->iov_len += ret; 2259 } 2260 if (section->iov_len == sec_len) 2261 *crc = crc32c(0, section->iov_base, section->iov_len); 2262 2263 return 1; 2264 } 2265 2266 static int read_partial_msg_data(struct ceph_connection *con) 2267 { 2268 struct ceph_msg *msg = con->in_msg; 2269 struct ceph_msg_data_cursor *cursor = &msg->cursor; 2270 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); 2271 struct page *page; 2272 size_t page_offset; 2273 size_t length; 2274 u32 crc = 0; 2275 int ret; 2276 2277 BUG_ON(!msg); 2278 if (list_empty(&msg->data)) 2279 return -EIO; 2280 2281 if (do_datacrc) 2282 crc = con->in_data_crc; 2283 while (cursor->resid) { 2284 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL); 2285 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 2286 if (ret <= 0) { 2287 if (do_datacrc) 2288 con->in_data_crc = crc; 2289 2290 return ret; 2291 } 2292 2293 if (do_datacrc) 2294 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2295 (void) ceph_msg_data_advance(cursor, (size_t)ret); 2296 } 2297 if (do_datacrc) 2298 con->in_data_crc = crc; 2299 2300 return 1; /* must return > 0 to indicate success */ 2301 } 2302 2303 /* 2304 * read (part of) a message. 
2305 */ 2306 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2307 2308 static int read_partial_message(struct ceph_connection *con) 2309 { 2310 struct ceph_msg *m = con->in_msg; 2311 int size; 2312 int end; 2313 int ret; 2314 unsigned int front_len, middle_len, data_len; 2315 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); 2316 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH); 2317 u64 seq; 2318 u32 crc; 2319 2320 dout("read_partial_message con %p msg %p\n", con, m); 2321 2322 /* header */ 2323 size = sizeof (con->in_hdr); 2324 end = size; 2325 ret = read_partial(con, end, size, &con->in_hdr); 2326 if (ret <= 0) 2327 return ret; 2328 2329 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 2330 if (cpu_to_le32(crc) != con->in_hdr.crc) { 2331 pr_err("read_partial_message bad hdr crc %u != expected %u\n", 2332 crc, con->in_hdr.crc); 2333 return -EBADMSG; 2334 } 2335 2336 front_len = le32_to_cpu(con->in_hdr.front_len); 2337 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2338 return -EIO; 2339 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2340 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2341 return -EIO; 2342 data_len = le32_to_cpu(con->in_hdr.data_len); 2343 if (data_len > CEPH_MSG_MAX_DATA_LEN) 2344 return -EIO; 2345 2346 /* verify seq# */ 2347 seq = le64_to_cpu(con->in_hdr.seq); 2348 if ((s64)seq - (s64)con->in_seq < 1) { 2349 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 2350 ENTITY_NAME(con->peer_name), 2351 ceph_pr_addr(&con->peer_addr.in_addr), 2352 seq, con->in_seq + 1); 2353 con->in_base_pos = -front_len - middle_len - data_len - 2354 sizeof_footer(con); 2355 con->in_tag = CEPH_MSGR_TAG_READY; 2356 return 1; 2357 } else if ((s64)seq - (s64)con->in_seq > 1) { 2358 pr_err("read_partial_message bad seq %lld expected %lld\n", 2359 seq, con->in_seq + 1); 2360 con->error_msg = "bad message sequence # for incoming message"; 2361 return -EBADE; 2362 } 2363 2364 /* allocate message? 
*/ 2365 if (!con->in_msg) { 2366 int skip = 0; 2367 2368 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2369 front_len, data_len); 2370 ret = ceph_con_in_msg_alloc(con, &skip); 2371 if (ret < 0) 2372 return ret; 2373 2374 BUG_ON(!con->in_msg ^ skip); 2375 if (skip) { 2376 /* skip this message */ 2377 dout("alloc_msg said skip message\n"); 2378 con->in_base_pos = -front_len - middle_len - data_len - 2379 sizeof_footer(con); 2380 con->in_tag = CEPH_MSGR_TAG_READY; 2381 con->in_seq++; 2382 return 1; 2383 } 2384 2385 BUG_ON(!con->in_msg); 2386 BUG_ON(con->in_msg->con != con); 2387 m = con->in_msg; 2388 m->front.iov_len = 0; /* haven't read it yet */ 2389 if (m->middle) 2390 m->middle->vec.iov_len = 0; 2391 2392 /* prepare for data payload, if any */ 2393 2394 if (data_len) 2395 prepare_message_data(con->in_msg, data_len); 2396 } 2397 2398 /* front */ 2399 ret = read_partial_message_section(con, &m->front, front_len, 2400 &con->in_front_crc); 2401 if (ret <= 0) 2402 return ret; 2403 2404 /* middle */ 2405 if (m->middle) { 2406 ret = read_partial_message_section(con, &m->middle->vec, 2407 middle_len, 2408 &con->in_middle_crc); 2409 if (ret <= 0) 2410 return ret; 2411 } 2412 2413 /* (page) data */ 2414 if (data_len) { 2415 ret = read_partial_msg_data(con); 2416 if (ret <= 0) 2417 return ret; 2418 } 2419 2420 /* footer */ 2421 size = sizeof_footer(con); 2422 end += size; 2423 ret = read_partial(con, end, size, &m->footer); 2424 if (ret <= 0) 2425 return ret; 2426 2427 if (!need_sign) { 2428 m->footer.flags = m->old_footer.flags; 2429 m->footer.sig = 0; 2430 } 2431 2432 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 2433 m, front_len, m->footer.front_crc, middle_len, 2434 m->footer.middle_crc, data_len, m->footer.data_crc); 2435 2436 /* crc ok? */ 2437 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 2438 pr_err("read_partial_message %p front crc %u != exp. %u\n", 2439 m, con->in_front_crc, m->footer.front_crc); 2440 return -EBADMSG; 2441 } 2442 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 2443 pr_err("read_partial_message %p middle crc %u != exp %u\n", 2444 m, con->in_middle_crc, m->footer.middle_crc); 2445 return -EBADMSG; 2446 } 2447 if (do_datacrc && 2448 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 2449 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 2450 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 2451 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 2452 return -EBADMSG; 2453 } 2454 2455 if (need_sign && con->ops->check_message_signature && 2456 con->ops->check_message_signature(m)) { 2457 pr_err("read_partial_message %p signature check failed\n", m); 2458 return -EBADMSG; 2459 } 2460 2461 return 1; /* done! */ 2462 } 2463 2464 /* 2465 * Process message. This happens in the worker thread. The callback should 2466 * be careful not to do anything that waits on other incoming messages or it 2467 * may deadlock. 
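 * Although con->mutex is dropped around ->dispatch(), the callback
 * still runs in the single worker that services this connection: if
 * it blocks waiting for a later message on the same connection
 * (rather than queueing work and returning), that message will never
 * be read.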
2468 */ 2469 static void process_message(struct ceph_connection *con) 2470 { 2471 struct ceph_msg *msg = con->in_msg; 2472 2473 BUG_ON(con->in_msg->con != con); 2474 con->in_msg = NULL; 2475 2476 /* if first message, set peer_name */ 2477 if (con->peer_name.type == 0) 2478 con->peer_name = msg->hdr.src; 2479 2480 con->in_seq++; 2481 mutex_unlock(&con->mutex); 2482 2483 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 2484 msg, le64_to_cpu(msg->hdr.seq), 2485 ENTITY_NAME(msg->hdr.src), 2486 le16_to_cpu(msg->hdr.type), 2487 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2488 le32_to_cpu(msg->hdr.front_len), 2489 le32_to_cpu(msg->hdr.data_len), 2490 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2491 con->ops->dispatch(con, msg); 2492 2493 mutex_lock(&con->mutex); 2494 } 2495 2496 static int read_keepalive_ack(struct ceph_connection *con) 2497 { 2498 struct ceph_timespec ceph_ts; 2499 size_t size = sizeof(ceph_ts); 2500 int ret = read_partial(con, size, size, &ceph_ts); 2501 if (ret <= 0) 2502 return ret; 2503 ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts); 2504 prepare_read_tag(con); 2505 return 1; 2506 } 2507 2508 /* 2509 * Write something to the socket. Called in a worker thread when the 2510 * socket appears to be writeable and we have something ready to send. 2511 */ 2512 static int try_write(struct ceph_connection *con) 2513 { 2514 int ret = 1; 2515 2516 dout("try_write start %p state %lu\n", con, con->state); 2517 2518 more: 2519 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2520 2521 /* open the socket first? */ 2522 if (con->state == CON_STATE_PREOPEN) { 2523 BUG_ON(con->sock); 2524 con->state = CON_STATE_CONNECTING; 2525 2526 con_out_kvec_reset(con); 2527 prepare_write_banner(con); 2528 prepare_read_banner(con); 2529 2530 BUG_ON(con->in_msg); 2531 con->in_tag = CEPH_MSGR_TAG_READY; 2532 dout("try_write initiating connect on %p new state %lu\n", 2533 con, con->state); 2534 ret = ceph_tcp_connect(con); 2535 if (ret < 0) { 2536 con->error_msg = "connect error"; 2537 goto out; 2538 } 2539 } 2540 2541 more_kvec: 2542 /* kvec data queued? */ 2543 if (con->out_kvec_left) { 2544 ret = write_partial_kvec(con); 2545 if (ret <= 0) 2546 goto out; 2547 } 2548 if (con->out_skip) { 2549 ret = write_partial_skip(con); 2550 if (ret <= 0) 2551 goto out; 2552 } 2553 2554 /* msg pages? */ 2555 if (con->out_msg) { 2556 if (con->out_msg_done) { 2557 ceph_msg_put(con->out_msg); 2558 con->out_msg = NULL; /* we're done with this one */ 2559 goto do_next; 2560 } 2561 2562 ret = write_partial_message_data(con); 2563 if (ret == 1) 2564 goto more_kvec; /* we need to send the footer, too! */ 2565 if (ret == 0) 2566 goto out; 2567 if (ret < 0) { 2568 dout("try_write write_partial_message_data err %d\n", 2569 ret); 2570 goto out; 2571 } 2572 } 2573 2574 do_next: 2575 if (con->state == CON_STATE_OPEN) { 2576 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { 2577 prepare_write_keepalive(con); 2578 goto more; 2579 } 2580 /* is anything else pending? */ 2581 if (!list_empty(&con->out_queue)) { 2582 prepare_write_message(con); 2583 goto more; 2584 } 2585 if (con->in_seq > con->in_seq_acked) { 2586 prepare_write_ack(con); 2587 goto more; 2588 } 2589 } 2590 2591 /* Nothing to do! */ 2592 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2593 dout("try_write nothing else to write.\n"); 2594 ret = 0; 2595 out: 2596 dout("try_write done on %p ret %d\n", con, ret); 2597 return ret; 2598 } 2599 2600 2601 2602 /* 2603 * Read what we can from the socket. 
2604 */ 2605 static int try_read(struct ceph_connection *con) 2606 { 2607 int ret = -1; 2608 2609 more: 2610 dout("try_read start on %p state %lu\n", con, con->state); 2611 if (con->state != CON_STATE_CONNECTING && 2612 con->state != CON_STATE_NEGOTIATING && 2613 con->state != CON_STATE_OPEN) 2614 return 0; 2615 2616 BUG_ON(!con->sock); 2617 2618 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2619 con->in_base_pos); 2620 2621 if (con->state == CON_STATE_CONNECTING) { 2622 dout("try_read connecting\n"); 2623 ret = read_partial_banner(con); 2624 if (ret <= 0) 2625 goto out; 2626 ret = process_banner(con); 2627 if (ret < 0) 2628 goto out; 2629 2630 con->state = CON_STATE_NEGOTIATING; 2631 2632 /* 2633 * Received banner is good, exchange connection info. 2634 * Do not reset out_kvec, as sending our banner raced 2635 * with receiving peer banner after connect completed. 2636 */ 2637 ret = prepare_write_connect(con); 2638 if (ret < 0) 2639 goto out; 2640 prepare_read_connect(con); 2641 2642 /* Send connection info before awaiting response */ 2643 goto out; 2644 } 2645 2646 if (con->state == CON_STATE_NEGOTIATING) { 2647 dout("try_read negotiating\n"); 2648 ret = read_partial_connect(con); 2649 if (ret <= 0) 2650 goto out; 2651 ret = process_connect(con); 2652 if (ret < 0) 2653 goto out; 2654 goto more; 2655 } 2656 2657 WARN_ON(con->state != CON_STATE_OPEN); 2658 2659 if (con->in_base_pos < 0) { 2660 /* 2661 * skipping + discarding content. 2662 * 2663 * FIXME: there must be a better way to do this! 2664 */ 2665 static char buf[SKIP_BUF_SIZE]; 2666 int skip = min((int) sizeof (buf), -con->in_base_pos); 2667 2668 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 2669 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 2670 if (ret <= 0) 2671 goto out; 2672 con->in_base_pos += ret; 2673 if (con->in_base_pos) 2674 goto more; 2675 } 2676 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2677 /* 2678 * what's next? 
2679 */ 2680 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2681 if (ret <= 0) 2682 goto out; 2683 dout("try_read got tag %d\n", (int)con->in_tag); 2684 switch (con->in_tag) { 2685 case CEPH_MSGR_TAG_MSG: 2686 prepare_read_message(con); 2687 break; 2688 case CEPH_MSGR_TAG_ACK: 2689 prepare_read_ack(con); 2690 break; 2691 case CEPH_MSGR_TAG_KEEPALIVE2_ACK: 2692 prepare_read_keepalive_ack(con); 2693 break; 2694 case CEPH_MSGR_TAG_CLOSE: 2695 con_close_socket(con); 2696 con->state = CON_STATE_CLOSED; 2697 goto out; 2698 default: 2699 goto bad_tag; 2700 } 2701 } 2702 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2703 ret = read_partial_message(con); 2704 if (ret <= 0) { 2705 switch (ret) { 2706 case -EBADMSG: 2707 con->error_msg = "bad crc/signature"; 2708 /* fall through */ 2709 case -EBADE: 2710 ret = -EIO; 2711 break; 2712 case -EIO: 2713 con->error_msg = "io error"; 2714 break; 2715 } 2716 goto out; 2717 } 2718 if (con->in_tag == CEPH_MSGR_TAG_READY) 2719 goto more; 2720 process_message(con); 2721 if (con->state == CON_STATE_OPEN) 2722 prepare_read_tag(con); 2723 goto more; 2724 } 2725 if (con->in_tag == CEPH_MSGR_TAG_ACK || 2726 con->in_tag == CEPH_MSGR_TAG_SEQ) { 2727 /* 2728 * the final handshake seq exchange is semantically 2729 * equivalent to an ACK 2730 */ 2731 ret = read_partial_ack(con); 2732 if (ret <= 0) 2733 goto out; 2734 process_ack(con); 2735 goto more; 2736 } 2737 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) { 2738 ret = read_keepalive_ack(con); 2739 if (ret <= 0) 2740 goto out; 2741 goto more; 2742 } 2743 2744 out: 2745 dout("try_read done on %p ret %d\n", con, ret); 2746 return ret; 2747 2748 bad_tag: 2749 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2750 con->error_msg = "protocol error, garbage tag"; 2751 ret = -1; 2752 goto out; 2753 } 2754 2755 2756 /* 2757 * Atomically queue work on a connection after the specified delay. 2758 * Bump @con reference to avoid races with connection teardown. 2759 * Returns 0 if work was queued, or an error code otherwise. 
2760 */ 2761 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) 2762 { 2763 if (!con->ops->get(con)) { 2764 dout("%s %p ref count 0\n", __func__, con); 2765 return -ENOENT; 2766 } 2767 2768 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { 2769 dout("%s %p - already queued\n", __func__, con); 2770 con->ops->put(con); 2771 return -EBUSY; 2772 } 2773 2774 dout("%s %p %lu\n", __func__, con, delay); 2775 return 0; 2776 } 2777 2778 static void queue_con(struct ceph_connection *con) 2779 { 2780 (void) queue_con_delay(con, 0); 2781 } 2782 2783 static void cancel_con(struct ceph_connection *con) 2784 { 2785 if (cancel_delayed_work(&con->work)) { 2786 dout("%s %p\n", __func__, con); 2787 con->ops->put(con); 2788 } 2789 } 2790 2791 static bool con_sock_closed(struct ceph_connection *con) 2792 { 2793 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) 2794 return false; 2795 2796 #define CASE(x) \ 2797 case CON_STATE_ ## x: \ 2798 con->error_msg = "socket closed (con state " #x ")"; \ 2799 break; 2800 2801 switch (con->state) { 2802 CASE(CLOSED); 2803 CASE(PREOPEN); 2804 CASE(CONNECTING); 2805 CASE(NEGOTIATING); 2806 CASE(OPEN); 2807 CASE(STANDBY); 2808 default: 2809 pr_warn("%s con %p unrecognized state %lu\n", 2810 __func__, con, con->state); 2811 con->error_msg = "unrecognized con state"; 2812 BUG(); 2813 break; 2814 } 2815 #undef CASE 2816 2817 return true; 2818 } 2819 2820 static bool con_backoff(struct ceph_connection *con) 2821 { 2822 int ret; 2823 2824 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) 2825 return false; 2826 2827 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); 2828 if (ret) { 2829 dout("%s: con %p FAILED to back off %lu\n", __func__, 2830 con, con->delay); 2831 BUG_ON(ret == -ENOENT); 2832 con_flag_set(con, CON_FLAG_BACKOFF); 2833 } 2834 2835 return true; 2836 } 2837 2838 /* Finish fault handling; con->mutex must *not* be held here */ 2839 2840 static void con_fault_finish(struct ceph_connection *con) 2841 { 2842 dout("%s %p\n", __func__, con); 2843 2844 /* 2845 * in case we faulted due to authentication, invalidate our 2846 * current tickets so that we can get new ones. 2847 */ 2848 if (con->auth_retry) { 2849 dout("auth_retry %d, invalidating\n", con->auth_retry); 2850 if (con->ops->invalidate_authorizer) 2851 con->ops->invalidate_authorizer(con); 2852 con->auth_retry = 0; 2853 } 2854 2855 if (con->ops->fault) 2856 con->ops->fault(con); 2857 } 2858 2859 /* 2860 * Do some work on a connection. Drop a connection ref when we're done. 
2861 */ 2862 static void ceph_con_workfn(struct work_struct *work) 2863 { 2864 struct ceph_connection *con = container_of(work, struct ceph_connection, 2865 work.work); 2866 bool fault; 2867 2868 mutex_lock(&con->mutex); 2869 while (true) { 2870 int ret; 2871 2872 if ((fault = con_sock_closed(con))) { 2873 dout("%s: con %p SOCK_CLOSED\n", __func__, con); 2874 break; 2875 } 2876 if (con_backoff(con)) { 2877 dout("%s: con %p BACKOFF\n", __func__, con); 2878 break; 2879 } 2880 if (con->state == CON_STATE_STANDBY) { 2881 dout("%s: con %p STANDBY\n", __func__, con); 2882 break; 2883 } 2884 if (con->state == CON_STATE_CLOSED) { 2885 dout("%s: con %p CLOSED\n", __func__, con); 2886 BUG_ON(con->sock); 2887 break; 2888 } 2889 if (con->state == CON_STATE_PREOPEN) { 2890 dout("%s: con %p PREOPEN\n", __func__, con); 2891 BUG_ON(con->sock); 2892 } 2893 2894 ret = try_read(con); 2895 if (ret < 0) { 2896 if (ret == -EAGAIN) 2897 continue; 2898 if (!con->error_msg) 2899 con->error_msg = "socket error on read"; 2900 fault = true; 2901 break; 2902 } 2903 2904 ret = try_write(con); 2905 if (ret < 0) { 2906 if (ret == -EAGAIN) 2907 continue; 2908 if (!con->error_msg) 2909 con->error_msg = "socket error on write"; 2910 fault = true; 2911 } 2912 2913 break; /* If we make it to here, we're done */ 2914 } 2915 if (fault) 2916 con_fault(con); 2917 mutex_unlock(&con->mutex); 2918 2919 if (fault) 2920 con_fault_finish(con); 2921 2922 con->ops->put(con); 2923 } 2924 2925 /* 2926 * Generic error/fault handler. A retry mechanism is used with 2927 * exponential backoff 2928 */ 2929 static void con_fault(struct ceph_connection *con) 2930 { 2931 dout("fault %p state %lu to peer %s\n", 2932 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2933 2934 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2935 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2936 con->error_msg = NULL; 2937 2938 WARN_ON(con->state != CON_STATE_CONNECTING && 2939 con->state != CON_STATE_NEGOTIATING && 2940 con->state != CON_STATE_OPEN); 2941 2942 con_close_socket(con); 2943 2944 if (con_flag_test(con, CON_FLAG_LOSSYTX)) { 2945 dout("fault on LOSSYTX channel, marking CLOSED\n"); 2946 con->state = CON_STATE_CLOSED; 2947 return; 2948 } 2949 2950 if (con->in_msg) { 2951 BUG_ON(con->in_msg->con != con); 2952 ceph_msg_put(con->in_msg); 2953 con->in_msg = NULL; 2954 } 2955 2956 /* Requeue anything that hasn't been acked */ 2957 list_splice_init(&con->out_sent, &con->out_queue); 2958 2959 /* If there are no messages queued or keepalive pending, place 2960 * the connection in a STANDBY state */ 2961 if (list_empty(&con->out_queue) && 2962 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { 2963 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2964 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2965 con->state = CON_STATE_STANDBY; 2966 } else { 2967 /* retry after a delay. 
*/ 2968 con->state = CON_STATE_PREOPEN; 2969 if (con->delay == 0) 2970 con->delay = BASE_DELAY_INTERVAL; 2971 else if (con->delay < MAX_DELAY_INTERVAL) 2972 con->delay *= 2; 2973 con_flag_set(con, CON_FLAG_BACKOFF); 2974 queue_con(con); 2975 } 2976 } 2977 2978 2979 2980 /* 2981 * initialize a new messenger instance 2982 */ 2983 void ceph_messenger_init(struct ceph_messenger *msgr, 2984 struct ceph_entity_addr *myaddr) 2985 { 2986 spin_lock_init(&msgr->global_seq_lock); 2987 2988 if (myaddr) 2989 msgr->inst.addr = *myaddr; 2990 2991 /* select a random nonce */ 2992 msgr->inst.addr.type = 0; 2993 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2994 encode_my_addr(msgr); 2995 2996 atomic_set(&msgr->stopping, 0); 2997 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns)); 2998 2999 dout("%s %p\n", __func__, msgr); 3000 } 3001 EXPORT_SYMBOL(ceph_messenger_init); 3002 3003 void ceph_messenger_fini(struct ceph_messenger *msgr) 3004 { 3005 put_net(read_pnet(&msgr->net)); 3006 } 3007 EXPORT_SYMBOL(ceph_messenger_fini); 3008 3009 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con) 3010 { 3011 if (msg->con) 3012 msg->con->ops->put(msg->con); 3013 3014 msg->con = con ? con->ops->get(con) : NULL; 3015 BUG_ON(msg->con != con); 3016 } 3017 3018 static void clear_standby(struct ceph_connection *con) 3019 { 3020 /* come back from STANDBY? */ 3021 if (con->state == CON_STATE_STANDBY) { 3022 dout("clear_standby %p and ++connect_seq\n", con); 3023 con->state = CON_STATE_PREOPEN; 3024 con->connect_seq++; 3025 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); 3026 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); 3027 } 3028 } 3029 3030 /* 3031 * Queue up an outgoing message on the given connection. 3032 */ 3033 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 3034 { 3035 /* set src+dst */ 3036 msg->hdr.src = con->msgr->inst.name; 3037 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 3038 msg->needs_out_seq = true; 3039 3040 mutex_lock(&con->mutex); 3041 3042 if (con->state == CON_STATE_CLOSED) { 3043 dout("con_send %p closed, dropping %p\n", con, msg); 3044 ceph_msg_put(msg); 3045 mutex_unlock(&con->mutex); 3046 return; 3047 } 3048 3049 msg_con_set(msg, con); 3050 3051 BUG_ON(!list_empty(&msg->list_head)); 3052 list_add_tail(&msg->list_head, &con->out_queue); 3053 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 3054 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 3055 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 3056 le32_to_cpu(msg->hdr.front_len), 3057 le32_to_cpu(msg->hdr.middle_len), 3058 le32_to_cpu(msg->hdr.data_len)); 3059 3060 clear_standby(con); 3061 mutex_unlock(&con->mutex); 3062 3063 /* if there wasn't anything waiting to send before, queue 3064 * new work */ 3065 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3066 queue_con(con); 3067 } 3068 EXPORT_SYMBOL(ceph_con_send); 3069 3070 /* 3071 * Revoke a message that was previously queued for send 3072 */ 3073 void ceph_msg_revoke(struct ceph_msg *msg) 3074 { 3075 struct ceph_connection *con = msg->con; 3076 3077 if (!con) { 3078 dout("%s msg %p null con\n", __func__, msg); 3079 return; /* Message not in our possession */ 3080 } 3081 3082 mutex_lock(&con->mutex); 3083 if (!list_empty(&msg->list_head)) { 3084 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 3085 list_del_init(&msg->list_head); 3086 msg->hdr.seq = 0; 3087 3088 ceph_msg_put(msg); 3089 } 3090 if (con->out_msg == msg) { 3091 BUG_ON(con->out_skip); 
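	/*
	 * The message is in the middle of being written, so it cannot
	 * simply be unlinked from a list.  Instead, work out how many
	 * of its remaining bytes the peer still expects (the footer if
	 * it has not been written yet, any unsent data, then the
	 * queued middle and front kvecs) and account for them in
	 * con->out_skip; the write path (write_partial_skip()) later
	 * pads the stream with that many zero bytes so the on-wire
	 * framing stays intact.
	 */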
3092 /* footer */ 3093 if (con->out_msg_done) { 3094 con->out_skip += con_out_kvec_skip(con); 3095 } else { 3096 BUG_ON(!msg->data_length); 3097 con->out_skip += sizeof_footer(con); 3098 } 3099 /* data, middle, front */ 3100 if (msg->data_length) 3101 con->out_skip += msg->cursor.total_resid; 3102 if (msg->middle) 3103 con->out_skip += con_out_kvec_skip(con); 3104 con->out_skip += con_out_kvec_skip(con); 3105 3106 dout("%s %p msg %p - was sending, will write %d skip %d\n", 3107 __func__, con, msg, con->out_kvec_bytes, con->out_skip); 3108 msg->hdr.seq = 0; 3109 con->out_msg = NULL; 3110 ceph_msg_put(msg); 3111 } 3112 3113 mutex_unlock(&con->mutex); 3114 } 3115 3116 /* 3117 * Revoke a message that we may be reading data into 3118 */ 3119 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 3120 { 3121 struct ceph_connection *con = msg->con; 3122 3123 if (!con) { 3124 dout("%s msg %p null con\n", __func__, msg); 3125 return; /* Message not in our possession */ 3126 } 3127 3128 mutex_lock(&con->mutex); 3129 if (con->in_msg == msg) { 3130 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 3131 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 3132 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 3133 3134 /* skip rest of message */ 3135 dout("%s %p msg %p revoked\n", __func__, con, msg); 3136 con->in_base_pos = con->in_base_pos - 3137 sizeof(struct ceph_msg_header) - 3138 front_len - 3139 middle_len - 3140 data_len - 3141 sizeof(struct ceph_msg_footer); 3142 ceph_msg_put(con->in_msg); 3143 con->in_msg = NULL; 3144 con->in_tag = CEPH_MSGR_TAG_READY; 3145 con->in_seq++; 3146 } else { 3147 dout("%s %p in_msg %p msg %p no-op\n", 3148 __func__, con, con->in_msg, msg); 3149 } 3150 mutex_unlock(&con->mutex); 3151 } 3152 3153 /* 3154 * Queue a keepalive byte to ensure the tcp connection is alive. 
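 * This sets CON_FLAG_KEEPALIVE_PENDING (and WRITE_PENDING) and kicks
 * the worker, which emits the keepalive on its next pass through
 * try_write().  A rough usage sketch (names are illustrative, not
 * part of this file): a periodic tick would do
 *
 *	ceph_con_keepalive(con);
 *	if (ceph_con_keepalive_expired(con, timeout_jiffies))
 *		... treat the session as dead and reopen it ...
 *
 * where the expiry check is only meaningful when the peer supports
 * KEEPALIVE2 and so acks keepalives with a timestamp (see
 * ceph_con_keepalive_expired() below).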
3155 */ 3156 void ceph_con_keepalive(struct ceph_connection *con) 3157 { 3158 dout("con_keepalive %p\n", con); 3159 mutex_lock(&con->mutex); 3160 clear_standby(con); 3161 mutex_unlock(&con->mutex); 3162 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3163 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3164 queue_con(con); 3165 } 3166 EXPORT_SYMBOL(ceph_con_keepalive); 3167 3168 bool ceph_con_keepalive_expired(struct ceph_connection *con, 3169 unsigned long interval) 3170 { 3171 if (interval > 0 && 3172 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) { 3173 struct timespec now = CURRENT_TIME; 3174 struct timespec ts; 3175 jiffies_to_timespec(interval, &ts); 3176 ts = timespec_add(con->last_keepalive_ack, ts); 3177 return timespec_compare(&now, &ts) >= 0; 3178 } 3179 return false; 3180 } 3181 3182 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 3183 { 3184 struct ceph_msg_data *data; 3185 3186 if (WARN_ON(!ceph_msg_data_type_valid(type))) 3187 return NULL; 3188 3189 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); 3190 if (data) 3191 data->type = type; 3192 INIT_LIST_HEAD(&data->links); 3193 3194 return data; 3195 } 3196 3197 static void ceph_msg_data_destroy(struct ceph_msg_data *data) 3198 { 3199 if (!data) 3200 return; 3201 3202 WARN_ON(!list_empty(&data->links)); 3203 if (data->type == CEPH_MSG_DATA_PAGELIST) 3204 ceph_pagelist_release(data->pagelist); 3205 kmem_cache_free(ceph_msg_data_cache, data); 3206 } 3207 3208 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 3209 size_t length, size_t alignment) 3210 { 3211 struct ceph_msg_data *data; 3212 3213 BUG_ON(!pages); 3214 BUG_ON(!length); 3215 3216 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); 3217 BUG_ON(!data); 3218 data->pages = pages; 3219 data->length = length; 3220 data->alignment = alignment & ~PAGE_MASK; 3221 3222 list_add_tail(&data->links, &msg->data); 3223 msg->data_length += length; 3224 } 3225 EXPORT_SYMBOL(ceph_msg_data_add_pages); 3226 3227 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 3228 struct ceph_pagelist *pagelist) 3229 { 3230 struct ceph_msg_data *data; 3231 3232 BUG_ON(!pagelist); 3233 BUG_ON(!pagelist->length); 3234 3235 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); 3236 BUG_ON(!data); 3237 data->pagelist = pagelist; 3238 3239 list_add_tail(&data->links, &msg->data); 3240 msg->data_length += pagelist->length; 3241 } 3242 EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 3243 3244 #ifdef CONFIG_BLOCK 3245 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, 3246 size_t length) 3247 { 3248 struct ceph_msg_data *data; 3249 3250 BUG_ON(!bio); 3251 3252 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); 3253 BUG_ON(!data); 3254 data->bio = bio; 3255 data->bio_length = length; 3256 3257 list_add_tail(&data->links, &msg->data); 3258 msg->data_length += length; 3259 } 3260 EXPORT_SYMBOL(ceph_msg_data_add_bio); 3261 #endif /* CONFIG_BLOCK */ 3262 3263 /* 3264 * construct a new message with given type, size 3265 * the new msg has a ref count of 1. 
3266 */ 3267 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 3268 bool can_fail) 3269 { 3270 struct ceph_msg *m; 3271 3272 m = kmem_cache_zalloc(ceph_msg_cache, flags); 3273 if (m == NULL) 3274 goto out; 3275 3276 m->hdr.type = cpu_to_le16(type); 3277 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3278 m->hdr.front_len = cpu_to_le32(front_len); 3279 3280 INIT_LIST_HEAD(&m->list_head); 3281 kref_init(&m->kref); 3282 INIT_LIST_HEAD(&m->data); 3283 3284 /* front */ 3285 if (front_len) { 3286 m->front.iov_base = ceph_kvmalloc(front_len, flags); 3287 if (m->front.iov_base == NULL) { 3288 dout("ceph_msg_new can't allocate %d bytes\n", 3289 front_len); 3290 goto out2; 3291 } 3292 } else { 3293 m->front.iov_base = NULL; 3294 } 3295 m->front_alloc_len = m->front.iov_len = front_len; 3296 3297 dout("ceph_msg_new %p front %d\n", m, front_len); 3298 return m; 3299 3300 out2: 3301 ceph_msg_put(m); 3302 out: 3303 if (!can_fail) { 3304 pr_err("msg_new can't create type %d front %d\n", type, 3305 front_len); 3306 WARN_ON(1); 3307 } else { 3308 dout("msg_new can't create type %d front %d\n", type, 3309 front_len); 3310 } 3311 return NULL; 3312 } 3313 EXPORT_SYMBOL(ceph_msg_new); 3314 3315 /* 3316 * Allocate "middle" portion of a message, if it is needed and wasn't 3317 * allocated by alloc_msg. This allows us to read a small fixed-size 3318 * per-type header in the front and then gracefully fail (i.e., 3319 * propagate the error to the caller based on info in the front) when 3320 * the middle is too large. 3321 */ 3322 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 3323 { 3324 int type = le16_to_cpu(msg->hdr.type); 3325 int middle_len = le32_to_cpu(msg->hdr.middle_len); 3326 3327 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 3328 ceph_msg_type_name(type), middle_len); 3329 BUG_ON(!middle_len); 3330 BUG_ON(msg->middle); 3331 3332 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 3333 if (!msg->middle) 3334 return -ENOMEM; 3335 return 0; 3336 } 3337 3338 /* 3339 * Allocate a message for receiving an incoming message on a 3340 * connection, and save the result in con->in_msg. Uses the 3341 * connection's private alloc_msg op if available. 3342 * 3343 * Returns 0 on success, or a negative error code. 3344 * 3345 * On success, if we set *skip = 1: 3346 * - the next message should be skipped and ignored. 3347 * - con->in_msg == NULL 3348 * or if we set *skip = 0: 3349 * - con->in_msg is non-null. 3350 * On error (ENOMEM, EAGAIN, ...), 3351 * - con->in_msg == NULL 3352 */ 3353 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3354 { 3355 struct ceph_msg_header *hdr = &con->in_hdr; 3356 int middle_len = le32_to_cpu(hdr->middle_len); 3357 struct ceph_msg *msg; 3358 int ret = 0; 3359 3360 BUG_ON(con->in_msg != NULL); 3361 BUG_ON(!con->ops->alloc_msg); 3362 3363 mutex_unlock(&con->mutex); 3364 msg = con->ops->alloc_msg(con, hdr, skip); 3365 mutex_lock(&con->mutex); 3366 if (con->state != CON_STATE_OPEN) { 3367 if (msg) 3368 ceph_msg_put(msg); 3369 return -EAGAIN; 3370 } 3371 if (msg) { 3372 BUG_ON(*skip); 3373 msg_con_set(msg, con); 3374 con->in_msg = msg; 3375 } else { 3376 /* 3377 * Null message pointer means either we should skip 3378 * this message or we couldn't allocate memory. The 3379 * former is not an error. 
3380 */ 3381 if (*skip) 3382 return 0; 3383 3384 con->error_msg = "error allocating memory for incoming message"; 3385 return -ENOMEM; 3386 } 3387 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3388 3389 if (middle_len && !con->in_msg->middle) { 3390 ret = ceph_alloc_middle(con, con->in_msg); 3391 if (ret < 0) { 3392 ceph_msg_put(con->in_msg); 3393 con->in_msg = NULL; 3394 } 3395 } 3396 3397 return ret; 3398 } 3399 3400 3401 /* 3402 * Free a generically kmalloc'd message. 3403 */ 3404 static void ceph_msg_free(struct ceph_msg *m) 3405 { 3406 dout("%s %p\n", __func__, m); 3407 kvfree(m->front.iov_base); 3408 kmem_cache_free(ceph_msg_cache, m); 3409 } 3410 3411 static void ceph_msg_release(struct kref *kref) 3412 { 3413 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 3414 struct ceph_msg_data *data, *next; 3415 3416 dout("%s %p\n", __func__, m); 3417 WARN_ON(!list_empty(&m->list_head)); 3418 3419 msg_con_set(m, NULL); 3420 3421 /* drop middle, data, if any */ 3422 if (m->middle) { 3423 ceph_buffer_put(m->middle); 3424 m->middle = NULL; 3425 } 3426 3427 list_for_each_entry_safe(data, next, &m->data, links) { 3428 list_del_init(&data->links); 3429 ceph_msg_data_destroy(data); 3430 } 3431 m->data_length = 0; 3432 3433 if (m->pool) 3434 ceph_msgpool_put(m->pool, m); 3435 else 3436 ceph_msg_free(m); 3437 } 3438 3439 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) 3440 { 3441 dout("%s %p (was %d)\n", __func__, msg, 3442 kref_read(&msg->kref)); 3443 kref_get(&msg->kref); 3444 return msg; 3445 } 3446 EXPORT_SYMBOL(ceph_msg_get); 3447 3448 void ceph_msg_put(struct ceph_msg *msg) 3449 { 3450 dout("%s %p (was %d)\n", __func__, msg, 3451 kref_read(&msg->kref)); 3452 kref_put(&msg->kref, ceph_msg_release); 3453 } 3454 EXPORT_SYMBOL(ceph_msg_put); 3455 3456 void ceph_msg_dump(struct ceph_msg *msg) 3457 { 3458 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, 3459 msg->front_alloc_len, msg->data_length); 3460 print_hex_dump(KERN_DEBUG, "header: ", 3461 DUMP_PREFIX_OFFSET, 16, 1, 3462 &msg->hdr, sizeof(msg->hdr), true); 3463 print_hex_dump(KERN_DEBUG, " front: ", 3464 DUMP_PREFIX_OFFSET, 16, 1, 3465 msg->front.iov_base, msg->front.iov_len, true); 3466 if (msg->middle) 3467 print_hex_dump(KERN_DEBUG, "middle: ", 3468 DUMP_PREFIX_OFFSET, 16, 1, 3469 msg->middle->vec.iov_base, 3470 msg->middle->vec.iov_len, true); 3471 print_hex_dump(KERN_DEBUG, "footer: ", 3472 DUMP_PREFIX_OFFSET, 16, 1, 3473 &msg->footer, sizeof(msg->footer), true); 3474 } 3475 EXPORT_SYMBOL(ceph_msg_dump); 3476