#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       | |   | con_sock_state_connected()
 *       | |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4	/* need to retry queuing delayed work */

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
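/*
 * Note on ceph_pr_addr() (annotation only, not part of the protocol):
 * the rotating addr_str array above only *approximates* reentrancy.
 * Up to ADDR_STR_COUNT (32) concurrent or recent callers each get a
 * distinct slot, so the common debug pattern
 *
 *	dout("peer %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 *
 * is safe, but a returned pointer is recycled after ADDR_STR_COUNT
 * further calls; callers must not cache it.
 */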
/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	kunmap(zero_page);
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}
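/*
 * Example (annotation only): a normal connection lifecycle walks the
 * transition helpers above in this order, each atomic_xchg() check
 * catching any transition the state diagram near the top of this file
 * does not allow:
 *
 *	con_sock_state_init(con);        NEW        -> CLOSED
 *	con_sock_state_connecting(con);  CLOSED     -> CONNECTING
 *	con_sock_state_connected(con);   CONNECTING -> CONNECTED
 *	con_sock_state_closing(con);     CONNECTED  -> CLOSING
 *	con_sock_state_closed(con);      CLOSING    -> CLOSED
 */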
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (atomic_read(&con->msgr->stopping))
		return;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer.  See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
		if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}
	con->sock = sock;
	return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}


/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);

	con_sock_state_closed(con);
	return rc;
}
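/*
 * Note on the three ceph_tcp_* helpers above (annotation only): all
 * of them map -EAGAIN to a zero return, so callers can treat "moved
 * no bytes" and "socket full/empty" identically and simply retry from
 * the worker when the socket callbacks fire again:
 *
 *	ret = ceph_tcp_sendmsg(con->sock, iov, kvlen, len, more);
 *	if (ret <= 0)
 *		goto out;	0: retry when write_space fires
 *	... consume ret bytes ...
 */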
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* discard existing out_queue and msg_seq; reset connect_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
	clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	clear_bit(CON_FLAG_BACKOFF, &con->flags);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	BUG_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
	const struct ceph_connection_operations *ops,
	struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
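/*
 * Usage sketch (hypothetical caller; the embedding struct and ops
 * names below are assumptions, not code from this file): a client
 * such as the mon or osd client embeds a ceph_connection and drives
 * it with the three exported calls above:
 *
 *	ceph_con_init(&s->con, s, &my_con_ops, &client->msgr);
 *	ceph_con_open(&s->con, CEPH_ENTITY_TYPE_OSD, osd_id, &addr);
 *	...
 *	ceph_con_close(&s->con);
 *
 * my_con_ops must supply get/put (refcounting) and dispatch, and
 * typically alloc_msg and peer_reset handlers as well.
 */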
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
				size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
	if (!bio) {
		*iter = NULL;
		*seg = 0;
		return;
	}
	*iter = bio;
	*seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
	if (*bio_iter == NULL)
		return;

	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

	(*seg)++;
	if (*seg == (*bio_iter)->bi_vcnt)
		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif

static void prepare_write_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;

	BUG_ON(!msg);
	BUG_ON(!msg->hdr.data_len);

	/* initialize page iterator */
	con->out_msg_pos.page = 0;
	if (msg->pages)
		con->out_msg_pos.page_pos = msg->page_alignment;
	else
		con->out_msg_pos.page_pos = 0;
#ifdef CONFIG_BLOCK
	if (msg->bio)
		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif
	con->out_msg_pos.data_pos = 0;
	con->out_msg_pos.did_page_crc = false;
	con->out_more = 1;	/* data + footer will follow */
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
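/*
 * Sketch of the out_kvec pattern (illustrative; this fragment mirrors
 * prepare_write_ack() below rather than adding anything new): callers
 * reset the array, append buffers whose storage outlives the send
 * (hence the long-lived &con->... and &m->... addresses), and then
 * mark the connection write-pending so write_partial_kvec() will
 * drain the queued bytes:
 *
 *	con_out_kvec_reset(con);
 *	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
 *	con_out_kvec_add(con, sizeof (con->out_temp_ack),
 *			 &con->out_temp_ack);
 *	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
 */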
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
#ifdef CONFIG_BLOCK
	else
		m->bio_iter = NULL;
#endif

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->hdr.data_len)
		prepare_write_message_data(con);
	else
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);

	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
		&con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
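/*
 * Wire picture (annotation only): the prepare_write_* helpers above
 * queue one of three frames, which try_write() later pushes out:
 *
 *	message:   [TAG_MSG][ceph_msg_header][front][middle][data][footer]
 *	ack:       [TAG_ACK][__le64 seq]
 *	keepalive: [TAG_KEEPALIVE]
 *
 * The header, front and middle crcs are filled in before the kvecs
 * are queued; the data crc is accumulated page by page as the payload
 * is sent and travels in the trailing footer.
 */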
/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */
	mutex_unlock(&con->mutex);
	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (con->state != CON_STATE_NEGOTIATING)
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
					&con->msgr->my_enc_addr);

	con->out_more = 0;
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (con->out_connect),
					&con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
					auth->authorizer_buf);

	con->out_more = 0;
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);

	return 0;
}
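/*
 * Handshake order, client side (annotation only; see try_read() and
 * process_connect() further down for the read half):
 *
 *	write: banner + encoded my addr        prepare_write_banner()
 *	read:  peer banner + peer addr +
 *	       "addr for me"                   CONNECTING
 *	write: out_connect (+ authorizer)      NEGOTIATING
 *	read:  in_reply (+ authorizer reply)
 *
 * Only on CEPH_MSGR_TAG_READY does the connection move to OPEN and
 * start exchanging tagged frames.
 */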
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
			size_t len, size_t sent, bool in_trail)
{
	struct ceph_msg *msg = con->out_msg;

	BUG_ON(!msg);
	BUG_ON(!sent);

	con->out_msg_pos.data_pos += sent;
	con->out_msg_pos.page_pos += sent;
	if (sent < len)
		return;

	BUG_ON(sent != len);
	con->out_msg_pos.page_pos = 0;
	con->out_msg_pos.page++;
	con->out_msg_pos.did_page_crc = false;
	if (in_trail)
		list_move_tail(&page->lru,
			       &msg->trail->head);
	else if (msg->pagelist)
		list_move_tail(&page->lru,
			       &msg->pagelist->head);
#ifdef CONFIG_BLOCK
	else if (msg->bio)
		iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
}
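/*
 * Worked example for write_partial_kvec() above (annotation only,
 * made-up sizes): with three kvecs of 10 + 20 + 30 bytes queued and
 * sendmsg() returning 35, the first two entries (30 bytes) are fully
 * consumed and the third is advanced in place:
 *
 *	out_kvec_cur->iov_base += 5;
 *	out_kvec_cur->iov_len  -= 5;
 *
 * leaving out_kvec_bytes == 25 for the next pass.
 */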
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	bool do_datacrc = !con->msgr->nocrc;
	int ret;
	int total_max_write;
	bool in_trail = false;
	const size_t trail_len = (msg->trail ? msg->trail->length : 0);
	const size_t trail_off = data_len - trail_len;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, msg, con->out_msg_pos.page, msg->nr_pages,
	     con->out_msg_pos.page_pos);

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	while (data_len > con->out_msg_pos.data_pos) {
		struct page *page = NULL;
		int max_write = PAGE_SIZE;
		int bio_offset = 0;

		in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
		if (!in_trail)
			total_max_write = trail_off - con->out_msg_pos.data_pos;

		if (in_trail) {
			total_max_write = data_len - con->out_msg_pos.data_pos;

			page = list_first_entry(&msg->trail->head,
						struct page, lru);
		} else if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
#ifdef CONFIG_BLOCK
		} else if (msg->bio) {
			struct bio_vec *bv;

			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
			page = bv->bv_page;
			bio_offset = bv->bv_offset;
			max_write = bv->bv_len;
#endif
		} else {
			page = zero_page;
		}
		len = min_t(int, max_write - con->out_msg_pos.page_pos,
			    total_max_write);

		if (do_datacrc && !con->out_msg_pos.did_page_crc) {
			void *base;
			u32 crc = le32_to_cpu(msg->footer.data_crc);
			char *kaddr;

			kaddr = kmap(page);
			BUG_ON(kaddr == NULL);
			base = kaddr + con->out_msg_pos.page_pos + bio_offset;
			crc = crc32c(crc, base, len);
			msg->footer.data_crc = cpu_to_le32(crc);
			con->out_msg_pos.did_page_crc = true;
		}
		ret = ceph_tcp_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos + bio_offset,
				      len, 1);

		if (do_datacrc)
			kunmap(page);

		if (ret <= 0)
			goto out;

		out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!do_datacrc)
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);
	ret = 1;
out:
	return ret;
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
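/*
 * Worked example for read_partial() above (annotation only):
 * in_base_pos counts bytes consumed across *all* sections of the
 * current chunk, so callers grow "end" cumulatively while "size"
 * stays per-object, as in read_partial_banner():
 *
 *	size = strlen(CEPH_BANNER);            end = size;
 *	size = sizeof (con->actual_peer_addr); end += size;
 *	size = sizeof (con->peer_addr_for_me); end += size;
 *
 * A short read leaves in_base_pos mid-object; the next call resumes
 * at object + have, where have = size - (end - in_base_pos).
 */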
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}
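/*
 * Note the inverted convention (annotation only): in4_pton() and
 * in6_pton() return 1 on success, while ceph_pton() above returns 0
 * on success and -EINVAL on failure, so the natural call site reads:
 *
 *	if (ceph_pton(str, len, ss, ',', &ipend))
 *		... fall back to DNS, or fail ...
 */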
"failed" : ceph_pr_addr(ss)); 1364 1365 return ret; 1366 } 1367 #else 1368 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1369 struct sockaddr_storage *ss, char delim, const char **ipend) 1370 { 1371 return -EINVAL; 1372 } 1373 #endif 1374 1375 /* 1376 * Parse a server name (IP or hostname). If a valid IP address is not found 1377 * then try to extract a hostname to resolve using userspace DNS upcall. 1378 */ 1379 static int ceph_parse_server_name(const char *name, size_t namelen, 1380 struct sockaddr_storage *ss, char delim, const char **ipend) 1381 { 1382 int ret; 1383 1384 ret = ceph_pton(name, namelen, ss, delim, ipend); 1385 if (ret) 1386 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1387 1388 return ret; 1389 } 1390 1391 /* 1392 * Parse an ip[:port] list into an addr array. Use the default 1393 * monitor port if a port isn't specified. 1394 */ 1395 int ceph_parse_ips(const char *c, const char *end, 1396 struct ceph_entity_addr *addr, 1397 int max_count, int *count) 1398 { 1399 int i, ret = -EINVAL; 1400 const char *p = c; 1401 1402 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1403 for (i = 0; i < max_count; i++) { 1404 const char *ipend; 1405 struct sockaddr_storage *ss = &addr[i].in_addr; 1406 int port; 1407 char delim = ','; 1408 1409 if (*p == '[') { 1410 delim = ']'; 1411 p++; 1412 } 1413 1414 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1415 if (ret) 1416 goto bad; 1417 ret = -EINVAL; 1418 1419 p = ipend; 1420 1421 if (delim == ']') { 1422 if (*p != ']') { 1423 dout("missing matching ']'\n"); 1424 goto bad; 1425 } 1426 p++; 1427 } 1428 1429 /* port? */ 1430 if (p < end && *p == ':') { 1431 port = 0; 1432 p++; 1433 while (p < end && *p >= '0' && *p <= '9') { 1434 port = (port * 10) + (*p - '0'); 1435 p++; 1436 } 1437 if (port > 65535 || port == 0) 1438 goto bad; 1439 } else { 1440 port = CEPH_MON_PORT; 1441 } 1442 1443 addr_set_port(ss, port); 1444 1445 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1446 1447 if (p == end) 1448 break; 1449 if (*p != ',') 1450 goto bad; 1451 p++; 1452 } 1453 1454 if (p != end) 1455 goto bad; 1456 1457 if (count) 1458 *count = i + 1; 1459 return 0; 1460 1461 bad: 1462 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1463 return ret; 1464 } 1465 EXPORT_SYMBOL(ceph_parse_ips); 1466 1467 static int process_banner(struct ceph_connection *con) 1468 { 1469 dout("process_banner on %p\n", con); 1470 1471 if (verify_hello(con) < 0) 1472 return -1; 1473 1474 ceph_decode_addr(&con->actual_peer_addr); 1475 ceph_decode_addr(&con->peer_addr_for_me); 1476 1477 /* 1478 * Make sure the other end is who we wanted. note that the other 1479 * end may not yet know their ip address, so if it's 0.0.0.0, give 1480 * them the benefit of the doubt. 1481 */ 1482 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1483 sizeof(con->peer_addr)) != 0 && 1484 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1485 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1486 pr_warning("wrong peer, want %s/%d, got %s/%d\n", 1487 ceph_pr_addr(&con->peer_addr.in_addr), 1488 (int)le32_to_cpu(con->peer_addr.nonce), 1489 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1490 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1491 con->error_msg = "wrong peer at address"; 1492 return -1; 1493 } 1494 1495 /* 1496 * did we learn our address? 
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   ceph_pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	BUG_ON(con->state != CON_STATE_NEGOTIATING);
	con->state = CON_STATE_CLOSED;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con->auth_retry = 1;
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_reply.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_reply.global_seq));
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}

		BUG_ON(con->state != CON_STATE_NEGOTIATING);
		con->state = CON_STATE_OPEN;

		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(CON_FLAG_LOSSYTX, &con->flags);

		con->delay = 0;      /* reset backoff memory */

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect got WAIT as client\n");
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}


/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}
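/*
 * Sequence/ack flow (annotation only): every outgoing message gets
 * seq = ++con->out_seq in prepare_write_message() and is moved to
 * out_sent; an incoming TAG_ACK carrying N lets process_ack() below
 * drop every sent message with seq <= N.  Anything still on out_sent
 * after a fault is requeued and resent with its original seq, which
 * is how ordered, reliable delivery survives a reconnect.
 */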
/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}


static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}

static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);

static int read_partial_message_pages(struct ceph_connection *con,
				      struct page **pages,
				      unsigned int data_len, bool do_datacrc)
{
	void *p;
	int ret;
	int left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
	/* (page) data */
	BUG_ON(pages == NULL);
	p = kmap(pages[con->in_msg_pos.page]);
	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(pages[con->in_msg_pos.page]);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
		con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.page++;
	}

	return ret;
}

#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
				    struct bio **bio_iter, int *bio_seg,
				    unsigned int data_len, bool do_datacrc)
{
	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
	void *p;
	int ret, left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(bv->bv_len - con->in_msg_pos.page_pos));

	p = kmap(bv->bv_page) + bv->bv_offset;

	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(bv->bv_page);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == bv->bv_len) {
		con->in_msg_pos.page_pos = 0;
		iter_bio_next(bio_iter, bio_seg);
	}

	return ret;
}
#endif
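/*
 * CRC note (illustrative): crc32c() chains, so feeding the payload in
 * arbitrary slices gives the same result as one pass over the whole
 * buffer.  That is what lets the page/bio readers above fold each
 * recvmsg() fragment into con->in_data_crc as it arrives:
 *
 *	crc = crc32c(0, buf, 100);
 *	crc = crc32c(crc, buf + 100, 28);    == crc32c(0, buf, 128)
 */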
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned int front_len, middle_len, data_len;
	bool do_datacrc = !con->msgr->nocrc;
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr crc %u != expected %u\n",
		       crc, con->in_hdr.crc);
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		int skip = 0;

		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		ret = ceph_con_in_msg_alloc(con, &skip);
		if (ret < 0)
			return ret;
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}

		BUG_ON(!con->in_msg);
		BUG_ON(con->in_msg->con != con);
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		if (m->pages)
			con->in_msg_pos.page_pos = m->page_alignment;
		else
			con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.data_pos = 0;

#ifdef CONFIG_BLOCK
		if (m->bio)
			init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		if (m->pages) {
			ret = read_partial_message_pages(con, m->pages,
							 data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#ifdef CONFIG_BLOCK
		} else if (m->bio) {
			BUG_ON(!m->bio_iter);
			ret = read_partial_message_bio(con,
						       &m->bio_iter, &m->bio_seg,
						       data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#endif
		} else {
			BUG_ON(1);
		}
	}

	/* footer */
	size = sizeof (m->footer);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}

/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	BUG_ON(con->in_msg->con != con);
	con->in_msg->con = NULL;
	msg = con->in_msg;
	con->in_msg = NULL;
	con->ops->put(con);

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
}


/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->state == CON_STATE_PREOPEN) {
		BUG_ON(con->sock);
		con->state = CON_STATE_CONNECTING;

		con_out_kvec_reset(con);
		prepare_write_banner(con);
		prepare_read_banner(con);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (con->state == CON_STATE_OPEN) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
				       &con->flags)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}



/*
 * Read what we can from the socket.
2109 */ 2110 static int try_read(struct ceph_connection *con) 2111 { 2112 int ret = -1; 2113 2114 more: 2115 dout("try_read start on %p state %lu\n", con, con->state); 2116 if (con->state != CON_STATE_CONNECTING && 2117 con->state != CON_STATE_NEGOTIATING && 2118 con->state != CON_STATE_OPEN) 2119 return 0; 2120 2121 BUG_ON(!con->sock); 2122 2123 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2124 con->in_base_pos); 2125 2126 if (con->state == CON_STATE_CONNECTING) { 2127 dout("try_read connecting\n"); 2128 ret = read_partial_banner(con); 2129 if (ret <= 0) 2130 goto out; 2131 ret = process_banner(con); 2132 if (ret < 0) 2133 goto out; 2134 2135 BUG_ON(con->state != CON_STATE_CONNECTING); 2136 con->state = CON_STATE_NEGOTIATING; 2137 2138 /* Banner is good, exchange connection info */ 2139 ret = prepare_write_connect(con); 2140 if (ret < 0) 2141 goto out; 2142 prepare_read_connect(con); 2143 2144 /* Send connection info before awaiting response */ 2145 goto out; 2146 } 2147 2148 if (con->state == CON_STATE_NEGOTIATING) { 2149 dout("try_read negotiating\n"); 2150 ret = read_partial_connect(con); 2151 if (ret <= 0) 2152 goto out; 2153 ret = process_connect(con); 2154 if (ret < 0) 2155 goto out; 2156 goto more; 2157 } 2158 2159 BUG_ON(con->state != CON_STATE_OPEN); 2160 2161 if (con->in_base_pos < 0) { 2162 /* 2163 * skipping + discarding content. 2164 * 2165 * FIXME: there must be a better way to do this! 2166 */ 2167 static char buf[SKIP_BUF_SIZE]; 2168 int skip = min((int) sizeof (buf), -con->in_base_pos); 2169 2170 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 2171 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 2172 if (ret <= 0) 2173 goto out; 2174 con->in_base_pos += ret; 2175 if (con->in_base_pos) 2176 goto more; 2177 } 2178 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2179 /* 2180 * what's next? 2181 */ 2182 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2183 if (ret <= 0) 2184 goto out; 2185 dout("try_read got tag %d\n", (int)con->in_tag); 2186 switch (con->in_tag) { 2187 case CEPH_MSGR_TAG_MSG: 2188 prepare_read_message(con); 2189 break; 2190 case CEPH_MSGR_TAG_ACK: 2191 prepare_read_ack(con); 2192 break; 2193 case CEPH_MSGR_TAG_CLOSE: 2194 con_close_socket(con); 2195 con->state = CON_STATE_CLOSED; 2196 goto out; 2197 default: 2198 goto bad_tag; 2199 } 2200 } 2201 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2202 ret = read_partial_message(con); 2203 if (ret <= 0) { 2204 switch (ret) { 2205 case -EBADMSG: 2206 con->error_msg = "bad crc"; 2207 ret = -EIO; 2208 break; 2209 case -EIO: 2210 con->error_msg = "io error"; 2211 break; 2212 } 2213 goto out; 2214 } 2215 if (con->in_tag == CEPH_MSGR_TAG_READY) 2216 goto more; 2217 process_message(con); 2218 if (con->state == CON_STATE_OPEN) 2219 prepare_read_tag(con); 2220 goto more; 2221 } 2222 if (con->in_tag == CEPH_MSGR_TAG_ACK) { 2223 ret = read_partial_ack(con); 2224 if (ret <= 0) 2225 goto out; 2226 process_ack(con); 2227 goto more; 2228 } 2229 2230 out: 2231 dout("try_read done on %p ret %d\n", con, ret); 2232 return ret; 2233 2234 bad_tag: 2235 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2236 con->error_msg = "protocol error, garbage tag"; 2237 ret = -1; 2238 goto out; 2239 } 2240 2241 2242 /* 2243 * Atomically queue work on a connection. Bump @con reference to 2244 * avoid races with connection teardown. 
2245 */ 2246 static void queue_con(struct ceph_connection *con) 2247 { 2248 if (!con->ops->get(con)) { 2249 dout("queue_con %p ref count 0\n", con); 2250 return; 2251 } 2252 2253 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) { 2254 dout("queue_con %p - already queued\n", con); 2255 con->ops->put(con); 2256 } else { 2257 dout("queue_con %p\n", con); 2258 } 2259 } 2260 2261 /* 2262 * Do some work on a connection. Drop a connection ref when we're done. 2263 */ 2264 static void con_work(struct work_struct *work) 2265 { 2266 struct ceph_connection *con = container_of(work, struct ceph_connection, 2267 work.work); 2268 int ret; 2269 2270 mutex_lock(&con->mutex); 2271 restart: 2272 if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) { 2273 switch (con->state) { 2274 case CON_STATE_CONNECTING: 2275 con->error_msg = "connection failed"; 2276 break; 2277 case CON_STATE_NEGOTIATING: 2278 con->error_msg = "negotiation failed"; 2279 break; 2280 case CON_STATE_OPEN: 2281 con->error_msg = "socket closed"; 2282 break; 2283 default: 2284 dout("unrecognized con state %d\n", (int)con->state); 2285 con->error_msg = "unrecognized con state"; 2286 BUG(); 2287 } 2288 goto fault; 2289 } 2290 2291 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) { 2292 dout("con_work %p backing off\n", con); 2293 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2294 round_jiffies_relative(con->delay))) { 2295 dout("con_work %p backoff %lu\n", con, con->delay); 2296 mutex_unlock(&con->mutex); 2297 return; 2298 } else { 2299 con->ops->put(con); 2300 dout("con_work %p FAILED to back off %lu\n", con, 2301 con->delay); 2302 } 2303 } 2304 2305 if (con->state == CON_STATE_STANDBY) { 2306 dout("con_work %p STANDBY\n", con); 2307 goto done; 2308 } 2309 if (con->state == CON_STATE_CLOSED) { 2310 dout("con_work %p CLOSED\n", con); 2311 BUG_ON(con->sock); 2312 goto done; 2313 } 2314 if (con->state == CON_STATE_PREOPEN) { 2315 dout("con_work OPENING\n"); 2316 BUG_ON(con->sock); 2317 } 2318 2319 ret = try_read(con); 2320 if (ret == -EAGAIN) 2321 goto restart; 2322 if (ret < 0) { 2323 con->error_msg = "socket error on read"; 2324 goto fault; 2325 } 2326 2327 ret = try_write(con); 2328 if (ret == -EAGAIN) 2329 goto restart; 2330 if (ret < 0) { 2331 con->error_msg = "socket error on write"; 2332 goto fault; 2333 } 2334 2335 done: 2336 mutex_unlock(&con->mutex); 2337 done_unlocked: 2338 con->ops->put(con); 2339 return; 2340 2341 fault: 2342 ceph_fault(con); /* error/fault path */ 2343 goto done_unlocked; 2344 } 2345 2346 2347 /* 2348 * Generic error/fault handler. 
A retry mechanism is used with
 * exponential backoff.  Called with con->mutex held; the mutex is
 * dropped before this function returns.
 */
static void ceph_fault(struct ceph_connection *con)
	__releases(con->mutex)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	BUG_ON(con->state != CON_STATE_CONNECTING &&
	       con->state != CON_STATE_NEGOTIATING &&
	       con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		goto out_unlock;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		/* doubling can overshoot; clamp to the maximum */
		if (con->delay > MAX_DELAY_INTERVAL)
			con->delay = MAX_DELAY_INTERVAL;
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			con->ops->put(con);
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * delay then.
			 */
			set_bit(CON_FLAG_BACKOFF, &con->flags);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
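	 * Note that this (and the fault callback below) runs after
	 * con->mutex has been dropped.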
2421 */ 2422 if (con->auth_retry && con->ops->invalidate_authorizer) { 2423 dout("calling invalidate_authorizer()\n"); 2424 con->ops->invalidate_authorizer(con); 2425 } 2426 2427 if (con->ops->fault) 2428 con->ops->fault(con); 2429 } 2430 2431 2432 2433 /* 2434 * initialize a new messenger instance 2435 */ 2436 void ceph_messenger_init(struct ceph_messenger *msgr, 2437 struct ceph_entity_addr *myaddr, 2438 u32 supported_features, 2439 u32 required_features, 2440 bool nocrc) 2441 { 2442 msgr->supported_features = supported_features; 2443 msgr->required_features = required_features; 2444 2445 spin_lock_init(&msgr->global_seq_lock); 2446 2447 if (myaddr) 2448 msgr->inst.addr = *myaddr; 2449 2450 /* select a random nonce */ 2451 msgr->inst.addr.type = 0; 2452 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2453 encode_my_addr(msgr); 2454 msgr->nocrc = nocrc; 2455 2456 atomic_set(&msgr->stopping, 0); 2457 2458 dout("%s %p\n", __func__, msgr); 2459 } 2460 EXPORT_SYMBOL(ceph_messenger_init); 2461 2462 static void clear_standby(struct ceph_connection *con) 2463 { 2464 /* come back from STANDBY? */ 2465 if (con->state == CON_STATE_STANDBY) { 2466 dout("clear_standby %p and ++connect_seq\n", con); 2467 con->state = CON_STATE_PREOPEN; 2468 con->connect_seq++; 2469 WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags)); 2470 WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)); 2471 } 2472 } 2473 2474 /* 2475 * Queue up an outgoing message on the given connection. 2476 */ 2477 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2478 { 2479 /* set src+dst */ 2480 msg->hdr.src = con->msgr->inst.name; 2481 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2482 msg->needs_out_seq = true; 2483 2484 mutex_lock(&con->mutex); 2485 2486 if (con->state == CON_STATE_CLOSED) { 2487 dout("con_send %p closed, dropping %p\n", con, msg); 2488 ceph_msg_put(msg); 2489 mutex_unlock(&con->mutex); 2490 return; 2491 } 2492 2493 BUG_ON(msg->con != NULL); 2494 msg->con = con->ops->get(con); 2495 BUG_ON(msg->con == NULL); 2496 2497 BUG_ON(!list_empty(&msg->list_head)); 2498 list_add_tail(&msg->list_head, &con->out_queue); 2499 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2500 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 2501 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2502 le32_to_cpu(msg->hdr.front_len), 2503 le32_to_cpu(msg->hdr.middle_len), 2504 le32_to_cpu(msg->hdr.data_len)); 2505 2506 clear_standby(con); 2507 mutex_unlock(&con->mutex); 2508 2509 /* if there wasn't anything waiting to send before, queue 2510 * new work */ 2511 if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) 2512 queue_con(con); 2513 } 2514 EXPORT_SYMBOL(ceph_con_send); 2515 2516 /* 2517 * Revoke a message that was previously queued for send 2518 */ 2519 void ceph_msg_revoke(struct ceph_msg *msg) 2520 { 2521 struct ceph_connection *con = msg->con; 2522 2523 if (!con) 2524 return; /* Message not in our possession */ 2525 2526 mutex_lock(&con->mutex); 2527 if (!list_empty(&msg->list_head)) { 2528 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 2529 list_del_init(&msg->list_head); 2530 BUG_ON(msg->con == NULL); 2531 msg->con->ops->put(msg->con); 2532 msg->con = NULL; 2533 msg->hdr.seq = 0; 2534 2535 ceph_msg_put(msg); 2536 } 2537 if (con->out_msg == msg) { 2538 dout("%s %p msg %p - was sending\n", __func__, con, msg); 2539 con->out_msg = NULL; 2540 if (con->out_kvec_is_msg) { 2541 con->out_skip = con->out_kvec_bytes; 2542 
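			/*
			 * The writer will zero-fill this message's
			 * remaining queued bytes on the wire (see
			 * write_partial_skip()), since part of the
			 * message may already have been sent.
			 */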
con->out_kvec_is_msg = false; 2543 } 2544 msg->hdr.seq = 0; 2545 2546 ceph_msg_put(msg); 2547 } 2548 mutex_unlock(&con->mutex); 2549 } 2550 2551 /* 2552 * Revoke a message that we may be reading data into 2553 */ 2554 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 2555 { 2556 struct ceph_connection *con; 2557 2558 BUG_ON(msg == NULL); 2559 if (!msg->con) { 2560 dout("%s msg %p null con\n", __func__, msg); 2561 2562 return; /* Message not in our possession */ 2563 } 2564 2565 con = msg->con; 2566 mutex_lock(&con->mutex); 2567 if (con->in_msg == msg) { 2568 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 2569 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 2570 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 2571 2572 /* skip rest of message */ 2573 dout("%s %p msg %p revoked\n", __func__, con, msg); 2574 con->in_base_pos = con->in_base_pos - 2575 sizeof(struct ceph_msg_header) - 2576 front_len - 2577 middle_len - 2578 data_len - 2579 sizeof(struct ceph_msg_footer); 2580 ceph_msg_put(con->in_msg); 2581 con->in_msg = NULL; 2582 con->in_tag = CEPH_MSGR_TAG_READY; 2583 con->in_seq++; 2584 } else { 2585 dout("%s %p in_msg %p msg %p no-op\n", 2586 __func__, con, con->in_msg, msg); 2587 } 2588 mutex_unlock(&con->mutex); 2589 } 2590 2591 /* 2592 * Queue a keepalive byte to ensure the tcp connection is alive. 2593 */ 2594 void ceph_con_keepalive(struct ceph_connection *con) 2595 { 2596 dout("con_keepalive %p\n", con); 2597 mutex_lock(&con->mutex); 2598 clear_standby(con); 2599 mutex_unlock(&con->mutex); 2600 if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 && 2601 test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) 2602 queue_con(con); 2603 } 2604 EXPORT_SYMBOL(ceph_con_keepalive); 2605 2606 2607 /* 2608 * construct a new message with given type, size 2609 * the new msg has a ref count of 1. 
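 *
 * A sketch of typical use (assuming an established ceph_connection
 * *con; CEPH_MSG_PING is just an example type, and ceph_con_send()
 * consumes the caller's reference):
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
 *	if (msg)
 *		ceph_con_send(con, msg);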
2610 */ 2611 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 2612 bool can_fail) 2613 { 2614 struct ceph_msg *m; 2615 2616 m = kmalloc(sizeof(*m), flags); 2617 if (m == NULL) 2618 goto out; 2619 kref_init(&m->kref); 2620 2621 m->con = NULL; 2622 INIT_LIST_HEAD(&m->list_head); 2623 2624 m->hdr.tid = 0; 2625 m->hdr.type = cpu_to_le16(type); 2626 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 2627 m->hdr.version = 0; 2628 m->hdr.front_len = cpu_to_le32(front_len); 2629 m->hdr.middle_len = 0; 2630 m->hdr.data_len = 0; 2631 m->hdr.data_off = 0; 2632 m->hdr.reserved = 0; 2633 m->footer.front_crc = 0; 2634 m->footer.middle_crc = 0; 2635 m->footer.data_crc = 0; 2636 m->footer.flags = 0; 2637 m->front_max = front_len; 2638 m->front_is_vmalloc = false; 2639 m->more_to_follow = false; 2640 m->ack_stamp = 0; 2641 m->pool = NULL; 2642 2643 /* middle */ 2644 m->middle = NULL; 2645 2646 /* data */ 2647 m->nr_pages = 0; 2648 m->page_alignment = 0; 2649 m->pages = NULL; 2650 m->pagelist = NULL; 2651 m->bio = NULL; 2652 m->bio_iter = NULL; 2653 m->bio_seg = 0; 2654 m->trail = NULL; 2655 2656 /* front */ 2657 if (front_len) { 2658 if (front_len > PAGE_CACHE_SIZE) { 2659 m->front.iov_base = __vmalloc(front_len, flags, 2660 PAGE_KERNEL); 2661 m->front_is_vmalloc = true; 2662 } else { 2663 m->front.iov_base = kmalloc(front_len, flags); 2664 } 2665 if (m->front.iov_base == NULL) { 2666 dout("ceph_msg_new can't allocate %d bytes\n", 2667 front_len); 2668 goto out2; 2669 } 2670 } else { 2671 m->front.iov_base = NULL; 2672 } 2673 m->front.iov_len = front_len; 2674 2675 dout("ceph_msg_new %p front %d\n", m, front_len); 2676 return m; 2677 2678 out2: 2679 ceph_msg_put(m); 2680 out: 2681 if (!can_fail) { 2682 pr_err("msg_new can't create type %d front %d\n", type, 2683 front_len); 2684 WARN_ON(1); 2685 } else { 2686 dout("msg_new can't create type %d front %d\n", type, 2687 front_len); 2688 } 2689 return NULL; 2690 } 2691 EXPORT_SYMBOL(ceph_msg_new); 2692 2693 /* 2694 * Allocate "middle" portion of a message, if it is needed and wasn't 2695 * allocated by alloc_msg. This allows us to read a small fixed-size 2696 * per-type header in the front and then gracefully fail (i.e., 2697 * propagate the error to the caller based on info in the front) when 2698 * the middle is too large. 2699 */ 2700 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 2701 { 2702 int type = le16_to_cpu(msg->hdr.type); 2703 int middle_len = le32_to_cpu(msg->hdr.middle_len); 2704 2705 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 2706 ceph_msg_type_name(type), middle_len); 2707 BUG_ON(!middle_len); 2708 BUG_ON(msg->middle); 2709 2710 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 2711 if (!msg->middle) 2712 return -ENOMEM; 2713 return 0; 2714 } 2715 2716 /* 2717 * Allocate a message for receiving an incoming message on a 2718 * connection, and save the result in con->in_msg. Uses the 2719 * connection's private alloc_msg op if available. 2720 * 2721 * Returns 0 on success, or a negative error code. 2722 * 2723 * On success, if we set *skip = 1: 2724 * - the next message should be skipped and ignored. 2725 * - con->in_msg == NULL 2726 * or if we set *skip = 0: 2727 * - con->in_msg is non-null. 
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	int ret = 0;

	BUG_ON(con->in_msg != NULL);

	if (con->ops->alloc_msg) {
		struct ceph_msg *msg;

		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_OPEN) {
			/*
			 * The connection was torn down while the mutex
			 * was dropped.  alloc_msg may have returned
			 * NULL (e.g. to request a skip), so only put a
			 * message we actually got.
			 */
			if (msg)
				ceph_msg_put(msg);
			return -EAGAIN;
		}
		con->in_msg = msg;
		if (con->in_msg) {
			con->in_msg->con = con->ops->get(con);
			BUG_ON(con->in_msg->con == NULL);
		}
		if (*skip) {
			con->in_msg = NULL;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
	}
	if (!con->in_msg) {
		con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!con->in_msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return -ENOMEM;
		}
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
		con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}


/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);